xref: /linux/arch/x86/kernel/cpu/common.c (revision 9f2bb6c7b364f186aa37c524f6df33bd488d4efa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* cpu_feature_enabled() cannot be used this early */
3 #define USE_EARLY_PGTABLE_L5
4 
5 #include <linux/memblock.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/kernel.h>
9 #include <linux/export.h>
10 #include <linux/kvm_types.h>
11 #include <linux/percpu.h>
12 #include <linux/string.h>
13 #include <linux/ctype.h>
14 #include <linux/delay.h>
15 #include <linux/sched/mm.h>
16 #include <linux/sched/clock.h>
17 #include <linux/sched/task.h>
18 #include <linux/sched/smt.h>
19 #include <linux/init.h>
20 #include <linux/kprobes.h>
21 #include <linux/kgdb.h>
22 #include <linux/mem_encrypt.h>
23 #include <linux/smp.h>
24 #include <linux/cpu.h>
25 #include <linux/io.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/pgtable.h>
28 #include <linux/stackprotector.h>
29 #include <linux/utsname.h>
30 #include <linux/efi.h>
31 
32 #include <asm/alternative.h>
33 #include <asm/cmdline.h>
34 #include <asm/cpuid/api.h>
35 #include <asm/perf_event.h>
36 #include <asm/mmu_context.h>
37 #include <asm/doublefault.h>
38 #include <asm/archrandom.h>
39 #include <asm/hypervisor.h>
40 #include <asm/processor.h>
41 #include <asm/tlbflush.h>
42 #include <asm/debugreg.h>
43 #include <asm/sections.h>
44 #include <asm/vsyscall.h>
45 #include <linux/topology.h>
46 #include <linux/cpumask.h>
47 #include <linux/atomic.h>
48 #include <asm/proto.h>
49 #include <asm/setup.h>
50 #include <asm/apic.h>
51 #include <asm/desc.h>
52 #include <asm/fpu/api.h>
53 #include <asm/mtrr.h>
54 #include <asm/hwcap2.h>
55 #include <linux/numa.h>
56 #include <asm/numa.h>
57 #include <asm/asm.h>
58 #include <asm/bugs.h>
59 #include <asm/cpu.h>
60 #include <asm/mce.h>
61 #include <asm/msr.h>
62 #include <asm/cacheinfo.h>
63 #include <asm/memtype.h>
64 #include <asm/microcode.h>
65 #include <asm/intel-family.h>
66 #include <asm/cpu_device_id.h>
67 #include <asm/fred.h>
68 #include <asm/uv/uv.h>
69 #include <asm/ia32.h>
70 #include <asm/set_memory.h>
71 #include <asm/traps.h>
72 #include <asm/sev.h>
73 #include <asm/tdx.h>
74 #include <asm/posted_intr.h>
75 #include <asm/runtime-const.h>
76 
77 #include "cpu.h"
78 
79 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
80 EXPORT_PER_CPU_SYMBOL(cpu_info);
81 
82 /* Used for modules: built-in code uses runtime constants */
83 unsigned long USER_PTR_MAX;
84 EXPORT_SYMBOL(USER_PTR_MAX);
85 
86 u32 elf_hwcap2 __read_mostly;
87 
88 /* Number of siblings per CPU package */
89 unsigned int __max_threads_per_core __ro_after_init = 1;
90 EXPORT_SYMBOL(__max_threads_per_core);
91 
92 unsigned int __max_dies_per_package __ro_after_init = 1;
93 EXPORT_SYMBOL(__max_dies_per_package);
94 
95 unsigned int __max_logical_packages __ro_after_init = 1;
96 EXPORT_SYMBOL(__max_logical_packages);
97 
98 unsigned int __num_nodes_per_package __ro_after_init = 1;
99 EXPORT_SYMBOL(__num_nodes_per_package);
100 
101 unsigned int __num_cores_per_package __ro_after_init = 1;
102 EXPORT_SYMBOL(__num_cores_per_package);
103 
104 unsigned int __num_threads_per_package __ro_after_init = 1;
105 EXPORT_SYMBOL(__num_threads_per_package);
106 
107 static struct ppin_info {
108 	int	feature;
109 	int	msr_ppin_ctl;
110 	int	msr_ppin;
111 } ppin_info[] = {
112 	[X86_VENDOR_INTEL] = {
113 		.feature = X86_FEATURE_INTEL_PPIN,
114 		.msr_ppin_ctl = MSR_PPIN_CTL,
115 		.msr_ppin = MSR_PPIN
116 	},
117 	[X86_VENDOR_AMD] = {
118 		.feature = X86_FEATURE_AMD_PPIN,
119 		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
120 		.msr_ppin = MSR_AMD_PPIN
121 	},
122 };
123 
124 static const struct x86_cpu_id ppin_cpuids[] = {
125 	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
126 	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
127 
128 	/* Legacy models without CPUID enumeration */
129 	X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
130 	X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
131 	X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
132 	X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
133 	X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
134 	X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
135 	X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
136 	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
137 	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
138 	X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
139 	X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
140 
141 	{}
142 };
143 
144 static void ppin_init(struct cpuinfo_x86 *c)
145 {
146 	const struct x86_cpu_id *id;
147 	unsigned long long val;
148 	struct ppin_info *info;
149 
150 	id = x86_match_cpu(ppin_cpuids);
151 	if (!id)
152 		return;
153 
154 	/*
155 	 * Testing the presence of the MSR is not enough. Need to check
156 	 * that the PPIN_CTL allows reading of the PPIN.
157 	 */
158 	info = (struct ppin_info *)id->driver_data;
159 
160 	if (rdmsrq_safe(info->msr_ppin_ctl, &val))
161 		goto clear_ppin;
162 
163 	if ((val & 3UL) == 1UL) {
164 		/* PPIN locked in disabled mode */
165 		goto clear_ppin;
166 	}
167 
168 	/* If PPIN is disabled, try to enable */
169 	if (!(val & 2UL)) {
170 		wrmsrq_safe(info->msr_ppin_ctl,  val | 2UL);
171 		rdmsrq_safe(info->msr_ppin_ctl, &val);
172 	}
173 
174 	/* Is the enable bit set? */
175 	if (val & 2UL) {
176 		c->ppin = native_rdmsrq(info->msr_ppin);
177 		set_cpu_cap(c, info->feature);
178 		return;
179 	}
180 
181 clear_ppin:
182 	setup_clear_cpu_cap(info->feature);
183 }
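
/*
 * A minimal userspace sketch of the PPIN_CTL decision above as a pure
 * predicate over a sampled MSR value: bit 0 is the lock bit, bit 1 the
 * enable bit. ppin_init() additionally attempts to set bit 1 first
 * whenever the MSR is not locked in the disabled state.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ppin_ctl_allows_read(uint64_t ctl)
{
	if ((ctl & 3) == 1)	/* locked with enable clear: off for good */
		return false;

	return ctl & 2;		/* readable only while the enable bit is set */
}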
184 
185 static void default_init(struct cpuinfo_x86 *c)
186 {
187 #ifdef CONFIG_X86_64
188 	cpu_detect_cache_sizes(c);
189 #else
190 	/* Not much we can do here... */
191 	/* Check if at least it has cpuid */
192 	if (c->cpuid_level == -1) {
193 		/* No cpuid. It must be an ancient CPU */
194 		if (c->x86 == 4)
195 			strcpy(c->x86_model_id, "486");
196 		else if (c->x86 == 3)
197 			strcpy(c->x86_model_id, "386");
198 	}
199 #endif
200 }
201 
202 static const struct cpu_dev default_cpu = {
203 	.c_init		= default_init,
204 	.c_vendor	= "Unknown",
205 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
206 };
207 
208 static const struct cpu_dev *this_cpu = &default_cpu;
209 
210 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
211 #ifdef CONFIG_X86_64
212 	/*
213 	 * We need valid kernel segments for data and code in long mode too;
214 	 * IRET will check the segment types (kkeil 2000/10/28).
215 	 * Also, sysret mandates a special GDT layout.
216 	 *
217 	 * TLS descriptors are currently at a different place compared to i386.
218 	 * Hopefully nobody expects them at a fixed place (Wine?)
219 	 */
220 	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
221 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
222 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
223 	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
224 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
225 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
226 #else
227 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
228 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
229 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
230 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
231 	/*
232 	 * Segments used for calling PnP BIOS have byte granularity.
233 	 * The code segments and data segments have fixed 64k limits,
234 	 * the transfer segment sizes are set at run time.
235 	 */
236 	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
237 	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
238 	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
239 	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
240 	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
241 	/*
242 	 * The APM segments have byte granularity and their bases
243 	 * are set at run time.  All have 64k limits.
244 	 */
245 	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
246 	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
247 	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),
248 
249 	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
250 	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
251 #endif
252 } };
253 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
254 SYM_PIC_ALIAS(gdt_page);
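
/*
 * A minimal userspace sketch of the descriptor encoding behind
 * GDT_ENTRY_INIT(): limit bits 15:0 occupy the low word, base bits 23:0
 * follow, the low byte and top nibble of the flags land at bits 40 and
 * 52, and the top nibbles of limit and base fill bits 48:51 and 56:63.
 * Assumes the classic 8-byte legacy descriptor layout used above.
 */
#include <stdint.h>

static uint64_t gdt_entry(uint32_t flags, uint32_t base, uint32_t limit)
{
	return (((uint64_t)base  & 0xff000000) << 32) |	/* base  31:24 */
	       (((uint64_t)flags & 0x0000f0ff) << 40) |	/* type, S, DPL, P, AVL, L, D/B, G */
	       (((uint64_t)limit & 0x000f0000) << 32) |	/* limit 19:16 */
	       (((uint64_t)base  & 0x00ffffff) << 16) |	/* base  23:0  */
	       ( (uint64_t)limit & 0x0000ffff);		/* limit 15:0  */
}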
255 
256 #ifdef CONFIG_X86_64
257 static int __init x86_nopcid_setup(char *s)
258 {
259 	/* nopcid doesn't accept parameters */
260 	if (s)
261 		return -EINVAL;
262 
263 	/* do not emit a message if the feature is not present */
264 	if (!boot_cpu_has(X86_FEATURE_PCID))
265 		return 0;
266 
267 	setup_clear_cpu_cap(X86_FEATURE_PCID);
268 	pr_info("nopcid: PCID feature disabled\n");
269 	return 0;
270 }
271 early_param("nopcid", x86_nopcid_setup);
272 #endif
273 
274 static int __init x86_noinvpcid_setup(char *s)
275 {
276 	/* noinvpcid doesn't accept parameters */
277 	if (s)
278 		return -EINVAL;
279 
280 	/* do not emit a message if the feature is not present */
281 	if (!boot_cpu_has(X86_FEATURE_INVPCID))
282 		return 0;
283 
284 	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
285 	pr_info("noinvpcid: INVPCID feature disabled\n");
286 	return 0;
287 }
288 early_param("noinvpcid", x86_noinvpcid_setup);
289 
290 /* Standard macro to see if a specific flag is changeable */
291 static inline bool flag_is_changeable_p(unsigned long flag)
292 {
293 	unsigned long f1, f2;
294 
295 	if (!IS_ENABLED(CONFIG_X86_32))
296 		return true;
297 
298 	/*
299 	 * Cyrix and IDT cpus allow disabling of CPUID
300 	 * so the code below may return different results
301 	 * when it is executed before and after CPUID is
302 	 * enabled. Add "volatile" to prevent gcc from
303 	 * optimizing the subsequent calls to this function.
304 	 */
305 	asm volatile ("pushfl		\n\t"
306 		      "pushfl		\n\t"
307 		      "popl %0		\n\t"
308 		      "movl %0, %1	\n\t"
309 		      "xorl %2, %0	\n\t"
310 		      "pushl %0		\n\t"
311 		      "popfl		\n\t"
312 		      "pushfl		\n\t"
313 		      "popl %0		\n\t"
314 		      "popfl		\n\t"
315 
316 		      : "=&r" (f1), "=&r" (f2)
317 		      : "ir" (flag));
318 
319 	return (f1 ^ f2) & flag;
320 }
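
/*
 * A minimal userspace sketch of the same EFLAGS-toggle probe, written
 * for 64-bit (pushfq/popfq). On x86-64 the ID flag is always
 * changeable, so this should print 1; the interesting cases are the
 * old 32-bit parts handled above.
 */
#include <stdbool.h>
#include <stdio.h>

#define EFLAGS_ID (1UL << 21)

static bool eflags_bit_changeable(unsigned long flag)
{
	unsigned long f1, f2;

	asm volatile("pushfq		\n\t"	/* save original RFLAGS */
		     "pushfq		\n\t"
		     "popq %0		\n\t"	/* f1 = RFLAGS          */
		     "movq %0, %1	\n\t"	/* f2 = f1              */
		     "xorq %2, %0	\n\t"	/* flip the probed bit  */
		     "pushq %0		\n\t"
		     "popfq		\n\t"	/* try to write it back */
		     "pushfq		\n\t"
		     "popq %0		\n\t"	/* re-read RFLAGS       */
		     "popfq		\n\t"	/* restore the original */
		     : "=&r" (f1), "=&r" (f2)
		     : "ir" (flag));

	return (f1 ^ f2) & flag;
}

int main(void)
{
	printf("%d\n", eflags_bit_changeable(EFLAGS_ID));
	return 0;
}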
321 
322 #ifdef CONFIG_X86_32
323 static int cachesize_override = -1;
324 static int disable_x86_serial_nr = 1;
325 
326 static int __init cachesize_setup(char *str)
327 {
328 	get_option(&str, &cachesize_override);
329 	return 1;
330 }
331 __setup("cachesize=", cachesize_setup);
332 
333 /* Probe for the CPUID instruction */
334 bool cpuid_feature(void)
335 {
336 	return flag_is_changeable_p(X86_EFLAGS_ID);
337 }
338 
339 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
340 {
341 	unsigned long lo, hi;
342 
343 	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
344 		return;
345 
346 	/* Disable processor serial number: */
347 
348 	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
349 	lo |= 0x200000;
350 	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
351 
352 	pr_notice("CPU serial number disabled.\n");
353 	clear_cpu_cap(c, X86_FEATURE_PN);
354 
355 	/* Disabling the serial number may affect the cpuid level */
356 	c->cpuid_level = cpuid_eax(0);
357 }
358 
359 static int __init x86_serial_nr_setup(char *s)
360 {
361 	disable_x86_serial_nr = 0;
362 	return 1;
363 }
364 __setup("serialnumber", x86_serial_nr_setup);
365 #else
366 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
367 {
368 }
369 #endif
370 
371 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
372 {
373 	if (cpu_has(c, X86_FEATURE_SMEP))
374 		cr4_set_bits(X86_CR4_SMEP);
375 }
376 
377 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
378 {
379 	unsigned long eflags = native_save_fl();
380 
381 	/* This should have been cleared long ago */
382 	BUG_ON(eflags & X86_EFLAGS_AC);
383 
384 	if (cpu_has(c, X86_FEATURE_SMAP))
385 		cr4_set_bits(X86_CR4_SMAP);
386 }
387 
388 static __always_inline void setup_umip(struct cpuinfo_x86 *c)
389 {
390 	/* Check the boot processor, plus build option for UMIP. */
391 	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
392 		goto out;
393 
394 	/* Check the current processor's cpuid bits. */
395 	if (!cpu_has(c, X86_FEATURE_UMIP))
396 		goto out;
397 
398 	cr4_set_bits(X86_CR4_UMIP);
399 
400 	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
401 
402 	return;
403 
404 out:
405 	/*
406 	 * Make sure UMIP is disabled in case it was enabled in a
407 	 * previous boot (e.g., via kexec).
408 	 */
409 	cr4_clear_bits(X86_CR4_UMIP);
410 }
411 
412 static int enable_lass(unsigned int cpu)
413 {
414 	cr4_set_bits(X86_CR4_LASS);
415 
416 	return 0;
417 }
418 
419 /*
420  * Finalize features that need to be enabled just before entering
421  * userspace. Note that this only runs on a single CPU. Use appropriate
422  * callbacks if all the CPUs need to reflect the same change.
423  */
424 static int cpu_finalize_pre_userspace(void)
425 {
426 	if (!cpu_feature_enabled(X86_FEATURE_LASS))
427 		return 0;
428 
429 	/* Runs on all online CPUs and future CPUs that come online. */
430 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/lass:enable", enable_lass, NULL);
431 
432 	return 0;
433 }
434 late_initcall(cpu_finalize_pre_userspace);
435 
436 /* These bits should not change their value after CPU init is finished. */
437 static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
438 					     X86_CR4_FSGSBASE | X86_CR4_CET;
439 
440 /*
441  * The CR pinning protects against ROP on the 'mov %reg, %CRn' instruction(s).
442  * Since you can ROP directly to these instructions (barring shadow stack),
443  * any protection must follow immediately and unconditionally after that.
444  *
445  * Specifically, the CR[04] write functions below will have the value
446  * validation controlled by the @cr_pinning static_branch which is
447  * __ro_after_init, just like the cr4_pinned_bits value.
448  *
449  * Once set, an attacker will have to defeat page-tables to get around these
450  * restrictions. Which is a much bigger ask than 'simple' ROP.
451  */
452 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
453 static unsigned long cr4_pinned_bits __ro_after_init;
454 
455 void native_write_cr0(unsigned long val)
456 {
457 	unsigned long bits_missing = 0;
458 
459 set_register:
460 	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
461 
462 	if (static_branch_likely(&cr_pinning)) {
463 		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
464 			bits_missing = X86_CR0_WP;
465 			val |= bits_missing;
466 			goto set_register;
467 		}
468 		/* Warn after we've set the missing bits. */
469 		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
470 	}
471 }
472 EXPORT_SYMBOL(native_write_cr0);
473 
474 void __no_profile native_write_cr4(unsigned long val)
475 {
476 	unsigned long bits_changed = 0;
477 
478 set_register:
479 	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
480 
481 	if (static_branch_likely(&cr_pinning)) {
482 		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
483 			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
484 			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
485 			goto set_register;
486 		}
487 		/* Warn after we've corrected the changed bits. */
488 		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
489 			  bits_changed);
490 	}
491 }
492 #if IS_MODULE(CONFIG_LKDTM)
493 EXPORT_SYMBOL_GPL(native_write_cr4);
494 #endif
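
/*
 * A minimal sketch of the pin-repair arithmetic in native_write_cr4(),
 * pulled out as a pure function so it can be unit-tested in userspace
 * (the mask/bit values below are made up for the test).
 */
#include <assert.h>

static unsigned long cr4_repair(unsigned long val, unsigned long mask,
				unsigned long pinned, unsigned long *changed)
{
	*changed = (val & mask) ^ pinned;	/* which pinned bits were flipped */
	return (val & ~mask) | pinned;		/* force the pinned value back in */
}

int main(void)
{
	unsigned long changed;

	/* Pin bits 20 and 21 on; pretend an attacker cleared bit 21. */
	assert(cr4_repair(0x100000UL, 0x300000UL, 0x300000UL, &changed) == 0x300000UL);
	assert(changed == 0x200000UL);
	return 0;
}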
495 
496 void cr4_update_irqsoff(unsigned long set, unsigned long clear)
497 {
498 	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
499 
500 	lockdep_assert_irqs_disabled();
501 
502 	newval = (cr4 & ~clear) | set;
503 	if (newval != cr4) {
504 		this_cpu_write(cpu_tlbstate.cr4, newval);
505 		__write_cr4(newval);
506 	}
507 }
508 EXPORT_SYMBOL_FOR_KVM(cr4_update_irqsoff);
509 
510 /* Read the CR4 shadow. */
511 unsigned long cr4_read_shadow(void)
512 {
513 	return this_cpu_read(cpu_tlbstate.cr4);
514 }
515 EXPORT_SYMBOL_FOR_KVM(cr4_read_shadow);
516 
517 void cr4_init(void)
518 {
519 	unsigned long cr4 = __read_cr4();
520 
521 	if (boot_cpu_has(X86_FEATURE_PCID))
522 		cr4 |= X86_CR4_PCIDE;
523 	if (static_branch_likely(&cr_pinning))
524 		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
525 
526 	__write_cr4(cr4);
527 
528 	/* Initialize cr4 shadow for this CPU. */
529 	this_cpu_write(cpu_tlbstate.cr4, cr4);
530 }
531 
532 /*
533  * Once CPU feature detection is finished (and boot params have been
534  * parsed), record any of the sensitive CR bits that are set, and
535  * enable CR pinning.
536  */
537 static void __init setup_cr_pinning(void)
538 {
539 	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
540 	static_key_enable(&cr_pinning.key);
541 }
542 
543 static __init int x86_nofsgsbase_setup(char *arg)
544 {
545 	/* Require an exact match without trailing characters. */
546 	if (strlen(arg))
547 		return 0;
548 
549 	/* Do not emit a message if the feature is not present. */
550 	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
551 		return 1;
552 
553 	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
554 	pr_info("FSGSBASE disabled via kernel command line\n");
555 	return 1;
556 }
557 __setup("nofsgsbase", x86_nofsgsbase_setup);
558 
559 /*
560  * Protection Keys are not available in 32-bit mode.
561  */
562 static bool pku_disabled;
563 
564 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
565 {
566 	if (c == &boot_cpu_data) {
567 		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
568 			return;
569 		/*
570 		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
571 		 * bit to be set.  Enforce it.
572 		 */
573 		setup_force_cpu_cap(X86_FEATURE_OSPKE);
574 
575 	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
576 		return;
577 	}
578 
579 	cr4_set_bits(X86_CR4_PKE);
580 	/* Load the default PKRU value */
581 	pkru_write_default();
582 }
583 
584 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
585 static __init int setup_disable_pku(char *arg)
586 {
587 	/*
588 	 * Do not clear the X86_FEATURE_PKU bit.  All of the
589 	 * runtime checks are against OSPKE so clearing the
590 	 * bit does nothing.
591 	 *
592 	 * This way, we will see "pku" in cpuinfo, but not
593 	 * "ospke", which is exactly what we want.  It shows
594 	 * that the CPU has PKU, but the OS has not enabled it.
595 	 * This happens to be exactly how a system would look
596 	 * if we disabled the config option.
597 	 */
598 	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
599 	pku_disabled = true;
600 	return 1;
601 }
602 __setup("nopku", setup_disable_pku);
603 #endif
604 
605 #ifdef CONFIG_X86_KERNEL_IBT
606 
607 __noendbr u64 ibt_save(bool disable)
608 {
609 	u64 msr = 0;
610 
611 	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
612 		rdmsrq(MSR_IA32_S_CET, msr);
613 		if (disable)
614 			wrmsrq(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
615 	}
616 
617 	return msr;
618 }
619 
620 __noendbr void ibt_restore(u64 save)
621 {
622 	u64 msr;
623 
624 	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
625 		rdmsrq(MSR_IA32_S_CET, msr);
626 		msr &= ~CET_ENDBR_EN;
627 		msr |= (save & CET_ENDBR_EN);
628 		wrmsrq(MSR_IA32_S_CET, msr);
629 	}
630 }
631 
632 #endif
633 
634 static __always_inline void setup_cet(struct cpuinfo_x86 *c)
635 {
636 	bool user_shstk, kernel_ibt;
637 
638 	if (!IS_ENABLED(CONFIG_X86_CET))
639 		return;
640 
641 	kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT);
642 	user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) &&
643 		     IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK);
644 
645 	if (!kernel_ibt && !user_shstk)
646 		return;
647 
648 	if (user_shstk)
649 		set_cpu_cap(c, X86_FEATURE_USER_SHSTK);
650 
651 	if (kernel_ibt)
652 		wrmsrq(MSR_IA32_S_CET, CET_ENDBR_EN);
653 	else
654 		wrmsrq(MSR_IA32_S_CET, 0);
655 
656 	cr4_set_bits(X86_CR4_CET);
657 
658 	if (kernel_ibt && ibt_selftest()) {
659 		pr_err("IBT selftest: Failed!\n");
660 		wrmsrq(MSR_IA32_S_CET, 0);
661 		setup_clear_cpu_cap(X86_FEATURE_IBT);
662 	}
663 }
664 
665 __noendbr void cet_disable(void)
666 {
667 	if (!(cpu_feature_enabled(X86_FEATURE_IBT) ||
668 	      cpu_feature_enabled(X86_FEATURE_SHSTK)))
669 		return;
670 
671 	wrmsrq(MSR_IA32_S_CET, 0);
672 	wrmsrq(MSR_IA32_U_CET, 0);
673 }
674 
675 /*
676  * Some CPU features depend on higher CPUID levels, which may not always
677  * be available due to CPUID level capping or broken virtualization
678  * software.  Add those features to this table to auto-disable them.
679  */
680 struct cpuid_dependent_feature {
681 	u32 feature;
682 	u32 level;
683 };
684 
685 static const struct cpuid_dependent_feature
686 cpuid_dependent_features[] = {
687 	{ X86_FEATURE_MWAIT,		CPUID_LEAF_MWAIT },
688 	{ X86_FEATURE_DCA,		CPUID_LEAF_DCA },
689 	{ X86_FEATURE_XSAVE,		CPUID_LEAF_XSTATE },
690 	{ 0, 0 }
691 };
692 
693 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
694 {
695 	const struct cpuid_dependent_feature *df;
696 
697 	for (df = cpuid_dependent_features; df->feature; df++) {
698 
699 		if (!cpu_has(c, df->feature))
700 			continue;
701 		/*
702 		 * Note: cpuid_level is set to -1 if unavailable, but
703 		 * extended_cpuid_level is set to 0 if unavailable
704 		 * and the legitimate extended levels are all negative
705 		 * when signed; hence the weird messing around with
706 		 * signs here...
707 		 */
708 		if (!((s32)df->level < 0 ?
709 		     (u32)df->level > (u32)c->extended_cpuid_level :
710 		     (s32)df->level > (s32)c->cpuid_level))
711 			continue;
712 
713 		clear_cpu_cap(c, df->feature);
714 		if (!warn)
715 			continue;
716 
717 		pr_warn("CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
718 			x86_cap_flags[df->feature], df->level);
719 	}
720 }
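
/*
 * A minimal sketch of the signed/unsigned trick above: extended levels
 * (0x8000xxxx) are negative as s32, so they select an unsigned compare
 * against extended_cpuid_level, while basic levels compare signed so
 * that a cpuid_level of -1 ("no CPUID at all") fails every check.
 */
#include <stdbool.h>
#include <stdint.h>

static bool cpuid_level_present(uint32_t level, int32_t cpuid_level,
				uint32_t extended_cpuid_level)
{
	if ((int32_t)level < 0)				/* 0x8000xxxx leaf */
		return level <= extended_cpuid_level;	/* unsigned compare */

	return (int32_t)level <= cpuid_level;		/* signed compare */
}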
721 
722 /*
723  * Naming convention should be: <Name> [(<Codename>)]
724  * This table is only used if init_<vendor>() below doesn't set the
725  * model name; in particular, if CPUID levels 0x80000002..4 are
726  * supported, it isn't used.
727  */
728 
729 /* Look up CPU names by table lookup. */
730 static const char *table_lookup_model(struct cpuinfo_x86 *c)
731 {
732 #ifdef CONFIG_X86_32
733 	const struct legacy_cpu_model_info *info;
734 
735 	if (c->x86_model >= 16)
736 		return NULL;	/* Range check */
737 
738 	if (!this_cpu)
739 		return NULL;
740 
741 	info = this_cpu->legacy_models;
742 
743 	while (info->family) {
744 		if (info->family == c->x86)
745 			return info->model_names[c->x86_model];
746 		info++;
747 	}
748 #endif
749 	return NULL;		/* Not found */
750 }
751 
752 /* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
753 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
754 __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
755 
756 #ifdef CONFIG_X86_32
757 /* The 32-bit entry code needs to find cpu_entry_area. */
758 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
759 #endif
760 
761 /* Load the original GDT from the per-cpu structure */
762 void load_direct_gdt(int cpu)
763 {
764 	struct desc_ptr gdt_descr;
765 
766 	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
767 	gdt_descr.size = GDT_SIZE - 1;
768 	load_gdt(&gdt_descr);
769 }
770 EXPORT_SYMBOL_FOR_KVM(load_direct_gdt);
771 
772 /* Load a fixmap remapping of the per-cpu GDT */
773 void load_fixmap_gdt(int cpu)
774 {
775 	struct desc_ptr gdt_descr;
776 
777 	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
778 	gdt_descr.size = GDT_SIZE - 1;
779 	load_gdt(&gdt_descr);
780 }
781 EXPORT_SYMBOL_GPL(load_fixmap_gdt);
782 
783 /**
784  * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
785  * @cpu:	The CPU number for which this is invoked
786  *
787  * Invoked during early boot to switch from early GDT and early per CPU to
788  * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
789  * switch is implicit by loading the direct GDT. On 64bit this requires
790  * switch is implicit in loading the direct GDT. On 64-bit this requires
791  * updating GSBASE.
792 void __init switch_gdt_and_percpu_base(int cpu)
793 {
794 	load_direct_gdt(cpu);
795 
796 #ifdef CONFIG_X86_64
797 	/*
798 	 * No need to load %gs. It is already correct.
799 	 *
800 	 * Writing %gs on 64bit would zero GSBASE which would make any per
801 	 * CPU operation up to the point of the wrmsrq() fault.
802 	 *
803 	 * Set GSBASE to the new offset. Until the wrmsrq() happens the
804 	 * early mapping is still valid. That means the GSBASE update will
805 	 * lose any prior per CPU data which was not copied over in
806 	 * setup_per_cpu_areas().
807 	 *
808 	 * This works even with stackprotector enabled because the
809 	 * per CPU stack canary is 0 in both per CPU areas.
810 	 */
811 	wrmsrq(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
812 #else
813 	/*
814 	 * %fs is already set to __KERNEL_PERCPU, but after switching GDT
815 	 * it is required to load FS again so that the 'hidden' part is
816 	 * updated from the new GDT. Up to this point the early per CPU
817 	 * translation is active. Any content of the early per CPU data
818 	 * which was not copied over in setup_per_cpu_areas() is lost.
819 	 */
820 	loadsegment(fs, __KERNEL_PERCPU);
821 #endif
822 }
823 
824 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
825 
826 static void get_model_name(struct cpuinfo_x86 *c)
827 {
828 	unsigned int *v;
829 	char *p, *q, *s;
830 
831 	if (c->extended_cpuid_level < 0x80000004)
832 		return;
833 
834 	v = (unsigned int *)c->x86_model_id;
835 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
836 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
837 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
838 	c->x86_model_id[48] = 0;
839 
840 	/* Trim whitespace */
841 	p = q = s = &c->x86_model_id[0];
842 
843 	while (*p == ' ')
844 		p++;
845 
846 	while (*p) {
847 		/* Note the last non-whitespace index */
848 		if (!isspace(*p))
849 			s = q;
850 
851 		*q++ = *p++;
852 	}
853 
854 	*(s + 1) = '\0';
855 }
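
/*
 * A minimal userspace sketch of the same brand-string fetch, assuming
 * GCC's <cpuid.h>: leaves 0x80000002..4 return 48 bytes of ASCII in
 * EAX..EDX.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int v[12];

	if (__get_cpuid_max(0x80000000, NULL) < 0x80000004)
		return 1;	/* brand string not implemented */

	__get_cpuid(0x80000002, &v[0], &v[1], &v[2],  &v[3]);
	__get_cpuid(0x80000003, &v[4], &v[5], &v[6],  &v[7]);
	__get_cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);

	printf("%.48s\n", (const char *)v);
	return 0;
}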
856 
857 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
858 {
859 	unsigned int n, dummy, ebx, ecx, edx, l2size;
860 
861 	n = c->extended_cpuid_level;
862 
863 	if (n >= 0x80000005) {
864 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
865 		c->x86_cache_size = (ecx>>24) + (edx>>24);
866 #ifdef CONFIG_X86_64
867 		/* On K8 L1 TLB is inclusive, so don't count it */
868 		c->x86_tlbsize = 0;
869 #endif
870 	}
871 
872 	if (n < 0x80000006)	/* Some chips just have a large L1. */
873 		return;
874 
875 	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
876 	l2size = ecx >> 16;
877 
878 #ifdef CONFIG_X86_64
879 	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
880 #else
881 	/* do processor-specific cache resizing */
882 	if (this_cpu->legacy_cache_size)
883 		l2size = this_cpu->legacy_cache_size(c, l2size);
884 
885 	/* Allow user to override all this if necessary. */
886 	if (cachesize_override != -1)
887 		l2size = cachesize_override;
888 
889 	if (l2size == 0)
890 		return;		/* Again, no L2 cache is possible */
891 #endif
892 
893 	c->x86_cache_size = l2size;
894 }
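
/*
 * A minimal userspace sketch of the same decode, assuming GCC's
 * <cpuid.h> and the AMD-style extended leaves: 0x80000005 reports the
 * L1 data/instruction cache sizes in KB in ECX/EDX bits 31:24, and
 * 0x80000006 reports the L2 size in KB in ECX bits 31:16.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx))
		printf("L1: %u KB data + %u KB insn\n", ecx >> 24, edx >> 24);

	if (__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
		printf("L2: %u KB\n", ecx >> 16);

	return 0;
}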
895 
896 u16 __read_mostly tlb_lli_4k;
897 u16 __read_mostly tlb_lli_2m;
898 u16 __read_mostly tlb_lli_4m;
899 u16 __read_mostly tlb_lld_4k;
900 u16 __read_mostly tlb_lld_2m;
901 u16 __read_mostly tlb_lld_4m;
902 u16 __read_mostly tlb_lld_1g;
903 
904 static void cpu_detect_tlb(struct cpuinfo_x86 *c)
905 {
906 	if (this_cpu->c_detect_tlb)
907 		this_cpu->c_detect_tlb(c);
908 
909 	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
910 		tlb_lli_4k, tlb_lli_2m, tlb_lli_4m);
911 
912 	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
913 		tlb_lld_4k, tlb_lld_2m, tlb_lld_4m, tlb_lld_1g);
914 }
915 
916 void get_cpu_vendor(struct cpuinfo_x86 *c)
917 {
918 	char *v = c->x86_vendor_id;
919 	int i;
920 
921 	for (i = 0; i < X86_VENDOR_NUM; i++) {
922 		if (!cpu_devs[i])
923 			break;
924 
925 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
926 		    (cpu_devs[i]->c_ident[1] &&
927 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
928 
929 			this_cpu = cpu_devs[i];
930 			c->x86_vendor = this_cpu->c_x86_vendor;
931 			return;
932 		}
933 	}
934 
935 	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
936 		    "CPU: Your system may be unstable.\n", v);
937 
938 	c->x86_vendor = X86_VENDOR_UNKNOWN;
939 	this_cpu = &default_cpu;
940 }
941 
942 void cpu_detect(struct cpuinfo_x86 *c)
943 {
944 	/* Get vendor name */
945 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
946 	      (unsigned int *)&c->x86_vendor_id[0],
947 	      (unsigned int *)&c->x86_vendor_id[8],
948 	      (unsigned int *)&c->x86_vendor_id[4]);
949 
950 	c->x86 = 4;
951 	/* Intel-defined flags: level 0x00000001 */
952 	if (c->cpuid_level >= 0x00000001) {
953 		u32 junk, tfms, cap0, misc;
954 
955 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
956 		c->x86		= x86_family(tfms);
957 		c->x86_model	= x86_model(tfms);
958 		c->x86_stepping	= x86_stepping(tfms);
959 
960 		if (cap0 & (1<<19)) {
961 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
962 			c->x86_cache_alignment = c->x86_clflush_size;
963 		}
964 	}
965 }
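
/*
 * A minimal userspace sketch of the family/model/stepping decode done
 * by x86_family()/x86_model()/x86_stepping() on leaf 1 EAX, assuming
 * GCC's <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int tfms, misc, ecx, edx, family, model;

	if (!__get_cpuid(1, &tfms, &misc, &ecx, &edx))
		return 1;

	family = (tfms >> 8) & 0xf;
	if (family == 0xf)			/* extended family kicks in at 15 */
		family += (tfms >> 20) & 0xff;

	model = (tfms >> 4) & 0xf;
	if (family >= 6)			/* extended model for family 6+ */
		model += ((tfms >> 16) & 0xf) << 4;

	printf("family 0x%x, model 0x%x, stepping 0x%x\n",
	       family, model, tfms & 0xf);
	return 0;
}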
966 
967 static void apply_forced_caps(struct cpuinfo_x86 *c)
968 {
969 	int i;
970 
971 	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
972 		c->x86_capability[i] &= ~cpu_caps_cleared[i];
973 		c->x86_capability[i] |= cpu_caps_set[i];
974 	}
975 }
976 
977 static void init_speculation_control(struct cpuinfo_x86 *c)
978 {
979 	/*
980 	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
981 	 * and they also have a different bit for STIBP support. Also,
982 	 * a hypervisor might have set the individual AMD bits even on
983 	 * Intel CPUs, for finer-grained selection of what's available.
984 	 */
985 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
986 		set_cpu_cap(c, X86_FEATURE_IBRS);
987 		set_cpu_cap(c, X86_FEATURE_IBPB);
988 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
989 	}
990 
991 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
992 		set_cpu_cap(c, X86_FEATURE_STIBP);
993 
994 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
995 	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
996 		set_cpu_cap(c, X86_FEATURE_SSBD);
997 
998 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
999 		set_cpu_cap(c, X86_FEATURE_IBRS);
1000 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1001 	}
1002 
1003 	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1004 		set_cpu_cap(c, X86_FEATURE_IBPB);
1005 
1006 	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1007 		set_cpu_cap(c, X86_FEATURE_STIBP);
1008 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1009 	}
1010 
1011 	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
1012 		set_cpu_cap(c, X86_FEATURE_SSBD);
1013 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1014 		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
1015 	}
1016 }
1017 
1018 void get_cpu_cap(struct cpuinfo_x86 *c)
1019 {
1020 	u32 eax, ebx, ecx, edx;
1021 
1022 	/* Intel-defined flags: level 0x00000001 */
1023 	if (c->cpuid_level >= 0x00000001) {
1024 		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1025 
1026 		c->x86_capability[CPUID_1_ECX] = ecx;
1027 		c->x86_capability[CPUID_1_EDX] = edx;
1028 	}
1029 
1030 	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
1031 	if (c->cpuid_level >= 0x00000006)
1032 		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1033 
1034 	/* Additional Intel-defined flags: level 0x00000007 */
1035 	if (c->cpuid_level >= 0x00000007) {
1036 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1037 		c->x86_capability[CPUID_7_0_EBX] = ebx;
1038 		c->x86_capability[CPUID_7_ECX] = ecx;
1039 		c->x86_capability[CPUID_7_EDX] = edx;
1040 
1041 		/* Check valid sub-leaf index before accessing it */
1042 		if (eax >= 1) {
1043 			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
1044 			c->x86_capability[CPUID_7_1_EAX] = eax;
1045 		}
1046 	}
1047 
1048 	/* Extended state features: level 0x0000000d */
1049 	if (c->cpuid_level >= 0x0000000d) {
1050 		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1051 
1052 		c->x86_capability[CPUID_D_1_EAX] = eax;
1053 	}
1054 
1055 	/*
1056 	 * Check if extended CPUID leaves are implemented: Max extended
1057 	 * CPUID leaf must be in the 0x80000001-0x8000ffff range.
1058 	 */
1059 	eax = cpuid_eax(0x80000000);
1060 	c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
1061 
1062 	if (c->extended_cpuid_level >= 0x80000001) {
1063 		cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1064 
1065 		c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1066 		c->x86_capability[CPUID_8000_0001_EDX] = edx;
1067 	}
1068 
1069 	if (c->extended_cpuid_level >= 0x80000007)
1070 		c->x86_power = cpuid_edx(0x80000007);
1071 
1072 	if (c->extended_cpuid_level >= 0x80000008) {
1073 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1074 		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1075 	}
1076 
1077 	if (c->extended_cpuid_level >= 0x8000000a)
1078 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1079 
1080 	if (c->extended_cpuid_level >= 0x8000001f)
1081 		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
1082 
1083 	if (c->extended_cpuid_level >= 0x80000021)
1084 		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
1085 
1086 	init_scattered_cpuid_features(c);
1087 	init_speculation_control(c);
1088 
1089 	if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP))
1090 		set_cpu_cap(c, X86_FEATURE_SYSFAST32);
1091 
1092 	/*
1093 	 * Clear/Set all flags overridden by options, after probe.
1094 	 * This needs to happen each time we re-probe, which may happen
1095 	 * several times during CPU initialization.
1096 	 */
1097 	apply_forced_caps(c);
1098 }
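
/*
 * A minimal userspace sketch of the sub-leaf validity check above:
 * leaf 7 sub-leaf 0 reports the maximum sub-leaf in EAX, and sub-leaf 1
 * may only be queried when that maximum allows it. Assumes GCC's
 * <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("leaf 7 max sub-leaf: %u\n", eax);

	if (eax >= 1) {
		__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx);
		printf("leaf 7.1 EAX: 0x%08x\n", eax);
	}
	return 0;
}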
1099 
1100 void get_cpu_address_sizes(struct cpuinfo_x86 *c)
1101 {
1102 	u32 eax, ebx, ecx, edx;
1103 
1104 	if (!cpu_has(c, X86_FEATURE_CPUID) ||
1105 	    (c->extended_cpuid_level < 0x80000008)) {
1106 		if (IS_ENABLED(CONFIG_X86_64)) {
1107 			c->x86_clflush_size = 64;
1108 			c->x86_phys_bits = 36;
1109 			c->x86_virt_bits = 48;
1110 		} else {
1111 			c->x86_clflush_size = 32;
1112 			c->x86_virt_bits = 32;
1113 			c->x86_phys_bits = 32;
1114 
1115 			if (cpu_has(c, X86_FEATURE_PAE) ||
1116 			    cpu_has(c, X86_FEATURE_PSE36))
1117 				c->x86_phys_bits = 36;
1118 		}
1119 	} else {
1120 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1121 
1122 		c->x86_virt_bits = (eax >> 8) & 0xff;
1123 		c->x86_phys_bits = eax & 0xff;
1124 
1125 		/* Provide a sane default if not enumerated: */
1126 		if (!c->x86_clflush_size)
1127 			c->x86_clflush_size = 32;
1128 	}
1129 
1130 	c->x86_cache_bits = c->x86_phys_bits;
1131 	c->x86_cache_alignment = c->x86_clflush_size;
1132 }
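
/*
 * A minimal userspace sketch of the address-size decode above: leaf
 * 0x80000008 EAX carries the physical address width in bits 7:0 and
 * the virtual width in bits 15:8. Assumes GCC's <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;	/* fall back to defaults, as above */

	printf("phys bits: %u, virt bits: %u\n", eax & 0xff, (eax >> 8) & 0xff);
	return 0;
}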
1133 
1134 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1135 {
1136 	int i;
1137 
1138 	/*
1139 	 * First of all, decide if this is a 486 or higher
1140 	 * It's a 486 if we can modify the AC flag
1141 	 */
1142 	if (flag_is_changeable_p(X86_EFLAGS_AC))
1143 		c->x86 = 4;
1144 	else
1145 		c->x86 = 3;
1146 
1147 	for (i = 0; i < X86_VENDOR_NUM; i++)
1148 		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1149 			c->x86_vendor_id[0] = 0;
1150 			cpu_devs[i]->c_identify(c);
1151 			if (c->x86_vendor_id[0]) {
1152 				get_cpu_vendor(c);
1153 				break;
1154 			}
1155 		}
1156 }
1157 
1158 #define NO_SPECULATION		BIT(0)
1159 #define NO_MELTDOWN		BIT(1)
1160 #define NO_SSB			BIT(2)
1161 #define NO_L1TF			BIT(3)
1162 #define NO_MDS			BIT(4)
1163 #define MSBDS_ONLY		BIT(5)
1164 #define NO_SWAPGS		BIT(6)
1165 #define NO_ITLB_MULTIHIT	BIT(7)
1166 #define NO_SPECTRE_V2		BIT(8)
1167 #define NO_MMIO			BIT(9)
1168 #define NO_EIBRS_PBRSB		BIT(10)
1169 #define NO_BHI			BIT(11)
1170 
1171 #define VULNWL(vendor, family, model, whitelist)	\
1172 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
1173 
1174 #define VULNWL_INTEL(vfm, whitelist)		\
1175 	X86_MATCH_VFM(vfm, whitelist)
1176 
1177 #define VULNWL_AMD(family, whitelist)		\
1178 	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
1179 
1180 #define VULNWL_HYGON(family, whitelist)		\
1181 	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
1182 
1183 static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1184 	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
1185 	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
1186 	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
1187 	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
1188 	VULNWL(VORTEX,	5, X86_MODEL_ANY,	NO_SPECULATION),
1189 	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),
1190 
1191 	/* Intel Family 6 */
1192 	VULNWL_INTEL(INTEL_TIGERLAKE,		NO_MMIO),
1193 	VULNWL_INTEL(INTEL_TIGERLAKE_L,		NO_MMIO),
1194 	VULNWL_INTEL(INTEL_ALDERLAKE,		NO_MMIO),
1195 	VULNWL_INTEL(INTEL_ALDERLAKE_L,		NO_MMIO),
1196 
1197 	VULNWL_INTEL(INTEL_ATOM_SALTWELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1198 	VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
1199 	VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1200 	VULNWL_INTEL(INTEL_ATOM_BONNELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1201 	VULNWL_INTEL(INTEL_ATOM_BONNELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1202 
1203 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1204 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1205 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1206 	VULNWL_INTEL(INTEL_ATOM_AIRMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1207 	VULNWL_INTEL(INTEL_XEON_PHI_KNL,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1208 	VULNWL_INTEL(INTEL_XEON_PHI_KNM,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1209 
1210 	VULNWL_INTEL(INTEL_CORE_YONAH,		NO_SSB),
1211 
1212 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID2,NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
1213 	VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1214 
1215 	VULNWL_INTEL(INTEL_ATOM_GOLDMONT,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1216 	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1217 	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1218 
1219 	/*
1220 	 * Technically, swapgs isn't serializing on AMD (despite it previously
1221 	 * being documented as such in the APM).  But according to AMD, %gs is
1222 	 * updated non-speculatively, and the issuing of %gs-relative memory
1223 	 * operands will be blocked until the %gs update completes, which is
1224 	 * good enough for our purposes.
1225 	 */
1226 
1227 	VULNWL_INTEL(INTEL_ATOM_TREMONT,	NO_EIBRS_PBRSB),
1228 	VULNWL_INTEL(INTEL_ATOM_TREMONT_L,	NO_EIBRS_PBRSB),
1229 	VULNWL_INTEL(INTEL_ATOM_TREMONT_D,	NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
1230 
1231 	/* AMD Family 0xf - 0x12 */
1232 	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1233 	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1234 	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1235 	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1236 
1237 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1238 	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
1239 	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
1240 
1241 	/* Zhaoxin Family 7 */
1242 	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
1243 	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
1244 	{}
1245 };
1246 
1247 #define VULNBL(vendor, family, model, blacklist)	\
1248 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
1249 
1250 #define VULNBL_INTEL_STEPS(vfm, max_stepping, issues)		   \
1251 	X86_MATCH_VFM_STEPS(vfm, X86_STEP_MIN, max_stepping, issues)
1252 
1253 #define VULNBL_INTEL_TYPE(vfm, cpu_type, issues)	\
1254 	X86_MATCH_VFM_CPU_TYPE(vfm, INTEL_CPU_TYPE_##cpu_type, issues)
1255 
1256 #define VULNBL_AMD(family, blacklist)		\
1257 	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
1258 
1259 #define VULNBL_HYGON(family, blacklist)		\
1260 	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
1261 
1262 #define SRBDS		BIT(0)
1263 /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
1264 #define MMIO		BIT(1)
1265 /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
1266 #define MMIO_SBDS	BIT(2)
1267 /* CPU is affected by RETbleed, speculating where you would not expect it */
1268 #define RETBLEED	BIT(3)
1269 /* CPU is affected by SMT (cross-thread) return predictions */
1270 #define SMT_RSB		BIT(4)
1271 /* CPU is affected by SRSO */
1272 #define SRSO		BIT(5)
1273 /* CPU is affected by GDS */
1274 #define GDS		BIT(6)
1275 /* CPU is affected by Register File Data Sampling */
1276 #define RFDS		BIT(7)
1277 /* CPU is affected by Indirect Target Selection */
1278 #define ITS		BIT(8)
1279 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
1280 #define ITS_NATIVE_ONLY	BIT(9)
1281 /* CPU is affected by Transient Scheduler Attacks */
1282 #define TSA		BIT(10)
1283 /* CPU is affected by VMSCAPE */
1284 #define VMSCAPE		BIT(11)
1285 
1286 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1287 	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE_X,	     X86_STEP_MAX,	VMSCAPE),
1288 	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE,	     X86_STEP_MAX,	VMSCAPE),
1289 	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE_X,	     X86_STEP_MAX,	VMSCAPE),
1290 	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1291 	VULNBL_INTEL_STEPS(INTEL_HASWELL,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1292 	VULNBL_INTEL_STEPS(INTEL_HASWELL_L,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1293 	VULNBL_INTEL_STEPS(INTEL_HASWELL_G,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1294 	VULNBL_INTEL_STEPS(INTEL_HASWELL_X,	     X86_STEP_MAX,	MMIO | VMSCAPE),
1295 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_D,	     X86_STEP_MAX,	MMIO | VMSCAPE),
1296 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	     X86_STEP_MAX,	MMIO | VMSCAPE),
1297 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1298 	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1299 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,		      0x5,	MMIO | RETBLEED | GDS | VMSCAPE),
1300 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | VMSCAPE),
1301 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1302 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1303 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,		      0xb,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1304 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
1305 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,		      0xc,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1306 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
1307 	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	     X86_STEP_MAX,	RETBLEED | VMSCAPE),
1308 	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
1309 	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	     X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
1310 	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	     X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
1311 	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
1312 	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,		      0x0,	MMIO | RETBLEED | ITS | VMSCAPE),
1313 	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
1314 	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	     X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
1315 	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	     X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
1316 	VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED),
1317 	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
1318 	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,		     ATOM,	RFDS | VMSCAPE),
1319 	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE,	     X86_STEP_MAX,	VMSCAPE),
1320 	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	     X86_STEP_MAX,	RFDS | VMSCAPE),
1321 	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,		     ATOM,	RFDS | VMSCAPE),
1322 	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE,	     X86_STEP_MAX,	VMSCAPE),
1323 	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,	     X86_STEP_MAX,	RFDS | VMSCAPE),
1324 	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,	     X86_STEP_MAX,	RFDS | VMSCAPE),
1325 	VULNBL_INTEL_STEPS(INTEL_METEORLAKE_L,	     X86_STEP_MAX,	VMSCAPE),
1326 	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_H,	     X86_STEP_MAX,	VMSCAPE),
1327 	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE,	     X86_STEP_MAX,	VMSCAPE),
1328 	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_U,	     X86_STEP_MAX,	VMSCAPE),
1329 	VULNBL_INTEL_STEPS(INTEL_LUNARLAKE_M,	     X86_STEP_MAX,	VMSCAPE),
1330 	VULNBL_INTEL_STEPS(INTEL_SAPPHIRERAPIDS_X,   X86_STEP_MAX,	VMSCAPE),
1331 	VULNBL_INTEL_STEPS(INTEL_GRANITERAPIDS_X,    X86_STEP_MAX,	VMSCAPE),
1332 	VULNBL_INTEL_STEPS(INTEL_EMERALDRAPIDS_X,    X86_STEP_MAX,	VMSCAPE),
1333 	VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT,     X86_STEP_MAX,	RFDS | VMSCAPE),
1334 	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
1335 	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D,     X86_STEP_MAX,	MMIO | RFDS),
1336 	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L,     X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
1337 	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT,      X86_STEP_MAX,	RFDS),
1338 	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D,    X86_STEP_MAX,	RFDS),
1339 	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX,	RFDS),
1340 	VULNBL_INTEL_STEPS(INTEL_ATOM_CRESTMONT_X,   X86_STEP_MAX,	VMSCAPE),
1341 
1342 	VULNBL_AMD(0x15, RETBLEED),
1343 	VULNBL_AMD(0x16, RETBLEED),
1344 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
1345 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
1346 	VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
1347 	VULNBL_AMD(0x1a, SRSO | VMSCAPE),
1348 	{}
1349 };
1350 
1351 static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1352 {
1353 	const struct x86_cpu_id *m = x86_match_cpu(table);
1354 
1355 	return m && !!(m->driver_data & which);
1356 }
1357 
1358 u64 x86_read_arch_cap_msr(void)
1359 {
1360 	u64 x86_arch_cap_msr = 0;
1361 
1362 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1363 		rdmsrq(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
1364 
1365 	return x86_arch_cap_msr;
1366 }
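
/*
 * A minimal userspace sketch of reading the same MSR via the msr
 * driver: IA32_ARCH_CAPABILITIES is MSR 0x10a, and /dev/cpu/N/msr
 * exposes MSRs at their address as the file offset (needs
 * CONFIG_X86_MSR and root; the read fails on CPUs that don't
 * enumerate ARCH_CAPABILITIES).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val = 0;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;

	if (pread(fd, &val, sizeof(val), 0x10a) != sizeof(val)) {
		close(fd);
		return 1;
	}
	close(fd);

	printf("ARCH_CAPABILITIES: 0x%016llx\n", (unsigned long long)val);
	return 0;
}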
1367 
1368 static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
1369 {
1370 	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
1371 		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
1372 		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
1373 }
1374 
1375 static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
1376 {
1377 	/* The "immunity" bit trumps everything else: */
1378 	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
1379 		return false;
1380 
1381 	/*
1382 	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
1383 	 * indicate that mitigation is needed because the guest is running on
1384 	 * vulnerable hardware or may migrate to such hardware:
1385 	 */
1386 	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
1387 		return true;
1388 
1389 	/* Only consult the blacklist when there is no enumeration: */
1390 	return cpu_matches(cpu_vuln_blacklist, RFDS);
1391 }
1392 
1393 static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
1394 {
1395 	/* The "immunity" bit trumps everything else: */
1396 	if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
1397 		return false;
1398 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
1399 		return false;
1400 
1401 	/* None of the affected CPUs have BHI_CTRL */
1402 	if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
1403 		return false;
1404 
1405 	/*
1406 	 * If a VMM did not expose ITS_NO, assume that a guest could
1407 	 * be running on vulnerable hardware or may migrate to such
1408 	 * hardware.
1409 	 */
1410 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1411 		return true;
1412 
1413 	if (cpu_matches(cpu_vuln_blacklist, ITS))
1414 		return true;
1415 
1416 	return false;
1417 }
1418 
1419 static struct x86_cpu_id cpu_latest_microcode[] = {
1420 #include "microcode/intel-ucode-defs.h"
1421 	{}
1422 };
1423 
1424 static bool __init cpu_has_old_microcode(void)
1425 {
1426 	const struct x86_cpu_id *m = x86_match_cpu(cpu_latest_microcode);
1427 
1428 	/* Give unknown CPUs a pass: */
1429 	if (!m) {
1430 		/* Intel CPUs should be in the list. Warn if not: */
1431 		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1432 			pr_info("x86/CPU: Model not found in latest microcode list\n");
1433 		return false;
1434 	}
1435 
1436 	/*
1437 	 * Hosts usually lie to guests with a super high microcode
1438 	 * version. Just ignore what hosts tell guests:
1439 	 */
1440 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1441 		return false;
1442 
1443 	/* Consider all debug microcode to be old: */
1444 	if (boot_cpu_data.microcode & BIT(31))
1445 		return true;
1446 
1447 	/* Give new microcode a pass: */
1448 	if (boot_cpu_data.microcode >= m->driver_data)
1449 		return false;
1450 
1451 	/* Uh oh, too old: */
1452 	return true;
1453 }
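
/*
 * A minimal sketch of the revision ordering used above: debug
 * microcode (bit 31 set) is always considered old, otherwise a plain
 * numeric compare against the latest known revision decides (the
 * revisions below are made up for the test).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool microcode_is_old(uint32_t rev, uint32_t latest)
{
	if (rev & (1u << 31))	/* debug microcode */
		return true;

	return rev < latest;
}

int main(void)
{
	assert( microcode_is_old(0x80000001, 0x00000001));
	assert( microcode_is_old(0x000000f0, 0x000000f4));
	assert(!microcode_is_old(0x000000f4, 0x000000f4));
	return 0;
}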
1454 
1455 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1456 {
1457 	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
1458 
1459 	if (cpu_has_old_microcode()) {
1460 		pr_warn("x86/CPU: Running old microcode\n");
1461 		setup_force_cpu_bug(X86_BUG_OLD_MICROCODE);
1462 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1463 	}
1464 
1465 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1466 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1467 	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
1468 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1469 
1470 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1471 		return;
1472 
1473 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1474 
1475 	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) {
1476 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1477 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
1478 	}
1479 
1480 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1481 	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
1482 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1483 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1484 
1485 	/*
1486 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
1487 	 * flag and protect from vendor-specific bugs via the whitelist.
1488 	 *
1489 	 * Don't use AutoIBRS when SNP is enabled because it degrades host
1490 	 * userspace indirect branch performance.
1491 	 */
1492 	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
1493 	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
1494 	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
1495 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1496 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
1497 		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
1498 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
1499 	}
1500 
1501 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1502 	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
1503 		setup_force_cpu_bug(X86_BUG_MDS);
1504 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1505 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1506 	}
1507 
1508 	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1509 		setup_force_cpu_bug(X86_BUG_SWAPGS);
1510 
1511 	/*
1512 	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
1513 	 *	- TSX is supported or
1514 	 *	- TSX_CTRL is present
1515 	 *
1516 	 * TSX_CTRL check is needed for cases when TSX could be disabled before
1517 	 * the kernel boot e.g. kexec.
1518 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
1519 	 * update is not present or when running as a guest that doesn't get TSX_CTRL.
1520 	 */
1521 	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
1522 	    (cpu_has(c, X86_FEATURE_RTM) ||
1523 	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
1524 		setup_force_cpu_bug(X86_BUG_TAA);
1525 
1526 	/*
1527 	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1528 	 * in the vulnerability blacklist.
1529 	 *
1530 	 * Some of the implications and mitigation of Shared Buffers Data
1531 	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
1532 	 * SRBDS.
1533 	 */
1534 	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1535 	     cpu_has(c, X86_FEATURE_RDSEED)) &&
1536 	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
1537 		    setup_force_cpu_bug(X86_BUG_SRBDS);
1538 
1539 	/*
1540 	 * Processor MMIO Stale Data bug enumeration
1541 	 *
1542 	 * The affected CPU list is generally enough to enumerate the vulnerability,
1543 	 * but in the virtualization case also check the ARCH_CAP MSR bits; the VMM
1544 	 * may not want the guest to enumerate the bug.
1545 	 */
1546 	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
1547 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
1548 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
1549 	}
1550 
1551 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
1552 		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
1553 			setup_force_cpu_bug(X86_BUG_RETBLEED);
1554 	}
1555 
1556 	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
1557 		setup_force_cpu_bug(X86_BUG_SMT_RSB);
1558 
1559 	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
1560 		if (cpu_matches(cpu_vuln_blacklist, SRSO))
1561 			setup_force_cpu_bug(X86_BUG_SRSO);
1562 	}
1563 
1564 	/*
1565 	 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
1566 	 * an affected processor, the VMM may have disabled the use of GATHER by
1567 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
1568 	 * which means that AVX will be disabled.
1569 	 */
1570 	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
1571 	    boot_cpu_has(X86_FEATURE_AVX))
1572 		setup_force_cpu_bug(X86_BUG_GDS);
1573 
1574 	if (vulnerable_to_rfds(x86_arch_cap_msr))
1575 		setup_force_cpu_bug(X86_BUG_RFDS);
1576 
1577 	/*
1578 	 * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
1579 	 * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
1580 	 * attacks.  When virtualized, eIBRS could be hidden, assume vulnerable.
1581 	 */
1582 	if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
1583 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
1584 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
1585 		setup_force_cpu_bug(X86_BUG_BHI);
1586 
1587 	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
1588 		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
1589 
1590 	if (vulnerable_to_its(x86_arch_cap_msr)) {
1591 		setup_force_cpu_bug(X86_BUG_ITS);
1592 		if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
1593 			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
1594 	}
1595 
1596 	if (c->x86_vendor == X86_VENDOR_AMD) {
1597 		if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
1598 		    !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
1599 			if (cpu_matches(cpu_vuln_blacklist, TSA) ||
1600 			    /* Enable bug on Zen guests to allow for live migration. */
1601 			    (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
1602 				setup_force_cpu_bug(X86_BUG_TSA);
1603 		}
1604 	}
1605 
1606 	/*
1607 	 * Set the bug only on bare-metal. A nested hypervisor should already be
1608 	 * deploying IBPB to isolate itself from nested guests.
1609 	 */
1610 	if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
1611 	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
1612 		setup_force_cpu_bug(X86_BUG_VMSCAPE);
1613 
1614 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1615 		return;
1616 
1617 	/* Rogue Data Cache Load? No! */
1618 	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
1619 		return;
1620 
1621 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1622 
1623 	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1624 		return;
1625 
1626 	setup_force_cpu_bug(X86_BUG_L1TF);
1627 }
1628 
1629 /*
1630  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1631  * unfortunately, that's not true in practice because of early VIA
1632  * chips and (more importantly) broken virtualizers that are not easy
1633  * to detect. In the latter case it doesn't even *fail* reliably, so
1634  * probing for it doesn't even work. Disable it completely on 32-bit
1635  * unless we can find a reliable way to detect all the broken cases.
1636  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1637  */
1638 static void detect_nopl(void)
1639 {
1640 #ifdef CONFIG_X86_32
1641 	setup_clear_cpu_cap(X86_FEATURE_NOPL);
1642 #else
1643 	setup_force_cpu_cap(X86_FEATURE_NOPL);
1644 #endif
1645 }
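
/*
 * For reference: NOPL is the multi-byte NOP encoded as 0F 1F /0, e.g. the
 * three bytes "0f 1f 00" disassemble as "nopl (%eax)".
 */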
1646 
1647 static inline bool parse_set_clear_cpuid(char *arg, bool set)
1648 {
1649 	char *opt;
1650 	int taint = 0;
1651 
1652 	while (arg) {
1653 		bool found __maybe_unused = false;
1654 		unsigned int bit;
1655 
1656 		opt = strsep(&arg, ",");
1657 
1658 		/*
1659 		 * Handle naked numbers first for feature flags which don't
1660 		 * have names. It doesn't make sense for a bug not to have a
1661 		 * name so don't handle bug flags here.
1662 		 */
1663 		if (!kstrtouint(opt, 10, &bit)) {
1664 			if (bit < NCAPINTS * 32) {
1665 
1666 				if (set) {
1667 					pr_warn("setcpuid: force-enabling CPU feature flag:");
1668 					setup_force_cpu_cap(bit);
1669 				} else {
1670 					pr_warn("clearcpuid: force-disabling CPU feature flag:");
1671 					setup_clear_cpu_cap(bit);
1672 				}
1673 				/* empty-string, i.e., ""-defined feature flags */
1674 				if (!x86_cap_flags[bit])
1675 					pr_cont(" %d:%d\n", bit >> 5, bit & 31);
1676 				else
1677 					pr_cont(" %s\n", x86_cap_flags[bit]);
1678 
1679 				taint++;
1680 			}
1681 			/*
1682 			 * The assumption is that there are no feature names with only
1683 			 * numbers in the name, so go to the next argument.
1684 			 */
1685 			continue;
1686 		}
1687 
1688 		for (bit = 0; bit < 32 * (NCAPINTS + NBUGINTS); bit++) {
1689 			const char *flag;
1690 			const char *kind;
1691 
1692 			if (bit < 32 * NCAPINTS) {
1693 				flag = x86_cap_flags[bit];
1694 				kind = "feature";
1695 			} else {
1696 				kind = "bug";
1697 				flag = x86_bug_flags[bit - (32 * NCAPINTS)];
1698 			}
1699 
1700 			if (!flag)
1701 				continue;
1702 
1703 			if (strcmp(flag, opt))
1704 				continue;
1705 
1706 			if (set) {
1707 				pr_warn("setcpuid: force-enabling CPU %s flag: %s\n",
1708 					kind, flag);
1709 				setup_force_cpu_cap(bit);
1710 			} else {
1711 				pr_warn("clearcpuid: force-disabling CPU %s flag: %s\n",
1712 					kind, flag);
1713 				setup_clear_cpu_cap(bit);
1714 			}
1715 			taint++;
1716 			found = true;
1717 			break;
1718 		}
1719 
1720 		if (!found)
1721 			pr_warn("%s: unknown CPU flag: %s", set ? "setcpuid" : "clearcpuid", opt);
1722 	}
1723 
1724 	return taint;
1725 }
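
/*
 * Illustrative usage of the parser above (flag names are looked up in
 * x86_cap_flags[]/x86_bug_flags[]; raw numbers decode as word bit>>5,
 * bit bit&31, e.g. 321 -> 10:1):
 *
 *	clearcpuid=smap,321
 *	setcpuid=fsgsbase
 *
 * Any successful set/clear bumps the taint count returned to the caller.
 */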
1726 
1727 
1728 /*
1729  * We parse cpu parameters early because fpu__init_system() is executed
1730  * before parse_early_param().
1731  */
1732 static void __init cpu_parse_early_param(void)
1733 {
1734 	bool cpuid_taint = false;
1735 	char arg[128];
1736 	int arglen;
1737 
1738 #ifdef CONFIG_X86_32
1739 	if (cmdline_find_option_bool(boot_command_line, "no387"))
1740 #ifdef CONFIG_MATH_EMULATION
1741 		setup_clear_cpu_cap(X86_FEATURE_FPU);
1742 #else
1743 		pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n");
1744 #endif
1745 
1746 	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
1747 		setup_clear_cpu_cap(X86_FEATURE_FXSR);
1748 #endif
1749 
1750 	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
1751 		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
1752 
1753 	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
1754 		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
1755 
1756 	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
1757 		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
1758 
1759 	if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
1760 		setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);
1761 
1762 	/* Clear FRED as early as possible to minimize the window where it looks available but will be disabled. */
1763 	arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg));
1764 	if (arglen != 2 || strncmp(arg, "on", 2))
1765 		setup_clear_cpu_cap(X86_FEATURE_FRED);
1766 
1767 	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
1768 	if (arglen > 0)
1769 		cpuid_taint |= parse_set_clear_cpuid(arg, false);
1770 
1771 	arglen = cmdline_find_option(boot_command_line, "setcpuid", arg, sizeof(arg));
1772 	if (arglen > 0)
1773 		cpuid_taint |= parse_set_clear_cpuid(arg, true);
1774 
1775 	if (cpuid_taint) {
1776 		pr_warn("!!! setcpuid=/clearcpuid= in use, this is for TESTING ONLY, may break things horribly. Tainting kernel.\n");
1777 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1778 	}
1779 }
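
/*
 * Illustrative boot command line exercising the options parsed above:
 *
 *	noxsave nousershstk fred=on clearcpuid=smap
 *
 * Note that only the exact string "fred=on" keeps X86_FEATURE_FRED; any
 * other value, or its absence, clears the feature.
 */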
1780 
1781 /*
1782  * Do minimum CPU detection early.
1783  * Fields really needed: vendor, cpuid_level, family, model, stepping,
1784  * cache alignment.
1785  * The others are not touched to avoid unwanted side effects.
1786  *
1787  * WARNING: this function is only called on the boot CPU.  Don't add code
1788  * here that is supposed to run on all CPUs.
1789  */
1790 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1791 {
1792 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1793 	c->extended_cpuid_level = 0;
1794 
1795 	if (!cpuid_feature())
1796 		identify_cpu_without_cpuid(c);
1797 
1798 	/* Cyrix could have CPUID enabled via c_identify() */
1799 	if (cpuid_feature()) {
1800 		cpu_detect(c);
1801 		get_cpu_vendor(c);
1802 		intel_unlock_cpuid_leafs(c);
1803 		get_cpu_cap(c);
1804 		setup_force_cpu_cap(X86_FEATURE_CPUID);
1805 		get_cpu_address_sizes(c);
1806 		cpu_parse_early_param();
1807 
1808 		cpu_init_topology(c);
1809 
1810 		if (this_cpu->c_early_init)
1811 			this_cpu->c_early_init(c);
1812 
1813 		c->cpu_index = 0;
1814 		filter_cpuid_features(c, false);
1815 		check_cpufeature_deps(c);
1816 
1817 		if (this_cpu->c_bsp_init)
1818 			this_cpu->c_bsp_init(c);
1819 	} else {
1820 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
1821 		get_cpu_address_sizes(c);
1822 		cpu_init_topology(c);
1823 	}
1824 
1825 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1826 
1827 	cpu_set_bug_bits(c);
1828 
1829 	sld_setup(c);
1830 
1831 #ifdef CONFIG_X86_32
1832 	/*
1833 	 * Regardless of whether PCID is enumerated, the SDM says
1834 	 * that it can't be enabled in 32-bit mode.
1835 	 */
1836 	setup_clear_cpu_cap(X86_FEATURE_PCID);
1837 
1838 	/*
1839 	 * Never use SYSCALL on a 32-bit kernel
1840 	 */
1841 	setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
1842 #endif
1843 
1844 	/*
1845 	 * Later in the boot process pgtable_l5_enabled() relies on
1846 	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1847 	 * enabled by this point we need to clear the feature bit to avoid
1848 	 * false-positives at the later stage.
1849 	 *
1850 	 * pgtable_l5_enabled() can be false here for several reasons:
1851 	 *  - 5-level paging is disabled at compile time;
1852 	 *  - it's a 32-bit kernel;
1853 	 *  - the machine doesn't support 5-level paging;
1854 	 *  - the user specified 'no5lvl' on the kernel command line.
1855 	 */
1856 	if (!pgtable_l5_enabled())
1857 		setup_clear_cpu_cap(X86_FEATURE_LA57);
1858 
1859 	detect_nopl();
1860 	mca_bsp_init(c);
1861 }
1862 
1863 void __init init_cpu_devs(void)
1864 {
1865 	const struct cpu_dev *const *cdev;
1866 	int count = 0;
1867 
1868 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1869 		const struct cpu_dev *cpudev = *cdev;
1870 
1871 		if (count >= X86_VENDOR_NUM)
1872 			break;
1873 		cpu_devs[count] = cpudev;
1874 		count++;
1875 	}
1876 }
1877 
1878 void __init early_cpu_init(void)
1879 {
1880 #ifdef CONFIG_PROCESSOR_SELECT
1881 	unsigned int i, j;
1882 
1883 	pr_info("KERNEL supported cpus:\n");
1884 #endif
1885 
1886 	init_cpu_devs();
1887 
1888 #ifdef CONFIG_PROCESSOR_SELECT
1889 	for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
1890 		for (j = 0; j < 2; j++) {
1891 			if (!cpu_devs[i]->c_ident[j])
1892 				continue;
1893 			pr_info("  %s %s\n", cpu_devs[i]->c_vendor,
1894 				cpu_devs[i]->c_ident[j]);
1895 		}
1896 	}
1897 #endif
1898 
1899 	early_identify_cpu(&boot_cpu_data);
1900 }
1901 
1902 static bool detect_null_seg_behavior(void)
1903 {
1904 	/*
1905 	 * Empirically, writing zero to a segment selector on AMD does
1906 	 * not clear the base, whereas writing zero to a segment
1907 	 * selector on Intel does clear the base.  Intel's behavior
1908 	 * allows slightly faster context switches in the common case
1909 	 * where GS is unused by the prev and next threads.
1910 	 *
1911 	 * Since neither vendor documents this anywhere that I can see,
1912 	 * detect it directly instead of hard-coding the choice by
1913 	 * vendor.
1914 	 *
1915 	 * I've designated AMD's behavior as the "bug" because it's
1916 	 * counterintuitive and less friendly.
1917 	 */
1918 
1919 	unsigned long old_base, tmp;
1920 	rdmsrq(MSR_FS_BASE, old_base);
1921 	wrmsrq(MSR_FS_BASE, 1);
1922 	loadsegment(fs, 0);
1923 	rdmsrq(MSR_FS_BASE, tmp);
1924 	wrmsrq(MSR_FS_BASE, old_base);
1925 	return tmp == 0;
1926 }
1927 
1928 void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1929 {
1930 	/* BUG_NULL_SEG is only relevant with 64-bit userspace */
1931 	if (!IS_ENABLED(CONFIG_X86_64))
1932 		return;
1933 
1934 	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
1935 		return;
1936 
1937 	/*
1938 	 * The CPUID bit above wasn't set. If this kernel is running
1939 	 * as a HV guest, then the HV has decided not to advertise
1940 	 * that CPUID bit for whatever reason.  For example, one
1941 	 * member of the migration pool might be vulnerable.  That
1942 	 * means the bug is present: set the BUG flag and return.
1943 	 */
1944 	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1945 		set_cpu_bug(c, X86_BUG_NULL_SEG);
1946 		return;
1947 	}
1948 
1949 	/*
1950 	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
1951 	 * 0x18 is the respective family for Hygon.
1952 	 */
1953 	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1954 	    detect_null_seg_behavior())
1955 		return;
1956 
1957 	/* All the remaining ones are affected */
1958 	set_cpu_bug(c, X86_BUG_NULL_SEG);
1959 }
1960 
1961 static void generic_identify(struct cpuinfo_x86 *c)
1962 {
1963 	c->extended_cpuid_level = 0;
1964 
1965 	if (!cpuid_feature())
1966 		identify_cpu_without_cpuid(c);
1967 
1968 	/* Cyrix could have CPUID enabled via c_identify() */
1969 	if (!cpuid_feature())
1970 		return;
1971 
1972 	cpu_detect(c);
1973 
1974 	get_cpu_vendor(c);
1975 	intel_unlock_cpuid_leafs(c);
1976 	get_cpu_cap(c);
1977 
1978 	get_cpu_address_sizes(c);
1979 
1980 	get_model_name(c); /* Default name */
1981 
1982 	/*
1983 	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1984 	 * systems that run Linux at CPL > 0 may or may not have the
1985 	 * issue, but, even if they have the issue, there's absolutely
1986 	 * nothing we can do about it because we can't use the real IRET
1987 	 * instruction.
1988 	 *
1989 	 * NB: For the time being, only 32-bit kernels support
1990 	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1991 	 * whether to apply espfix using paravirt hooks.  If any
1992 	 * non-paravirt system ever shows up that does *not* have the
1993 	 * ESPFIX issue, we can change this.
1994 	 */
1995 #ifdef CONFIG_X86_32
1996 	set_cpu_bug(c, X86_BUG_ESPFIX);
1997 #endif
1998 }
1999 
2000 /*
2001  * This does the hard work of actually picking apart the CPU stuff...
2002  */
2003 static void identify_cpu(struct cpuinfo_x86 *c)
2004 {
2005 	int i;
2006 
2007 	c->loops_per_jiffy = loops_per_jiffy;
2008 	c->x86_cache_size = 0;
2009 	c->x86_vendor = X86_VENDOR_UNKNOWN;
2010 	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
2011 	c->x86_vendor_id[0] = '\0'; /* Unset */
2012 	c->x86_model_id[0] = '\0';  /* Unset */
2013 #ifdef CONFIG_X86_64
2014 	c->x86_clflush_size = 64;
2015 	c->x86_phys_bits = 36;
2016 	c->x86_virt_bits = 48;
2017 #else
2018 	c->cpuid_level = -1;	/* CPUID not detected */
2019 	c->x86_clflush_size = 32;
2020 	c->x86_phys_bits = 32;
2021 	c->x86_virt_bits = 32;
2022 #endif
2023 	c->x86_cache_alignment = c->x86_clflush_size;
2024 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
2025 #ifdef CONFIG_X86_VMX_FEATURE_NAMES
2026 	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
2027 #endif
2028 
2029 	generic_identify(c);
2030 
2031 	cpu_parse_topology(c);
2032 
2033 	if (this_cpu->c_identify)
2034 		this_cpu->c_identify(c);
2035 
2036 	/* Clear/Set all flags overridden by options, after probe */
2037 	apply_forced_caps(c);
2038 
2039 	/*
2040 	 * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
2041 	 * Hygon will clear it in ->c_init() below.
2042 	 */
2043 	set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
2044 
2045 	/*
2046 	 * Vendor-specific initialization.  In this section we
2047 	 * canonicalize the feature flags, meaning that if a certain
2048 	 * CPU supports features which CPUID doesn't report, CPUID
2049 	 * claims incorrect flags, or there are other bugs, we handle
2050 	 * them here.
2051 	 *
2052 	 * At the end of this section, c->x86_capability better
2053 	 * indicate the features this CPU genuinely supports!
2054 	 */
2055 	if (this_cpu->c_init)
2056 		this_cpu->c_init(c);
2057 
2058 	bus_lock_init();
2059 
2060 	/* Disable the PN if appropriate */
2061 	squash_the_stupid_serial_number(c);
2062 
2063 	setup_smep(c);
2064 	setup_smap(c);
2065 	setup_umip(c);
2066 
2067 	/*
2068 	 * The vendor-specific functions might have changed features.
2069 	 * Now we do "generic changes."
2070 	 */
2071 
2072 	/* Filter out anything that depends on CPUID levels we don't have */
2073 	filter_cpuid_features(c, true);
2074 
2075 	/* Check for unmet dependencies based on the CPUID dependency table */
2076 	check_cpufeature_deps(c);
2077 
2078 	/* If the model name is still unset, do table lookup. */
2079 	if (!c->x86_model_id[0]) {
2080 		const char *p;
2081 		p = table_lookup_model(c);
2082 		if (p)
2083 			strcpy(c->x86_model_id, p);
2084 		else
2085 			/* Last resort... */
2086 			sprintf(c->x86_model_id, "%02x/%02x",
2087 				c->x86, c->x86_model);
2088 	}
2089 
2090 	x86_init_rdrand(c);
2091 	setup_pku(c);
2092 	setup_cet(c);
2093 
2094 	/*
2095 	 * Clear/Set all flags overridden by options; this needs to be
2096 	 * done before the SMP capability AND across all CPUs below.
2097 	 */
2098 	apply_forced_caps(c);
2099 
2100 	/*
2101 	 * On SMP, boot_cpu_data holds the common feature set between
2102 	 * all CPUs; so make sure that we indicate which features are
2103 	 * common between the CPUs.  The first time this routine gets
2104 	 * executed, c == &boot_cpu_data.
2105 	 */
2106 	if (c != &boot_cpu_data) {
2107 		/* AND the already accumulated flags with these */
2108 		for (i = 0; i < NCAPINTS; i++)
2109 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
2110 
2111 		/* OR, i.e. replicate the bug flags */
2112 		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
2113 			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
2114 	}
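
	/*
	 * Concrete example of the two loops above: if the boot CPU has
	 * AVX2 but this CPU does not, the AND clears AVX2 from
	 * boot_cpu_data so only the common feature set is advertised;
	 * conversely, any bug bit in boot_cpu_data is OR-ed into this
	 * CPU so bug state is uniformly visible.
	 */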
2115 
2116 	ppin_init(c);
2117 
2118 	/* Init Machine Check Exception if available. */
2119 	mcheck_cpu_init(c);
2120 
2121 	numa_add_cpu(smp_processor_id());
2122 }
2123 
2124 /*
2125  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
2126  * on 32-bit kernels:
2127  */
2128 #ifdef CONFIG_X86_32
2129 void enable_sep_cpu(void)
2130 {
2131 	struct tss_struct *tss;
2132 	int cpu;
2133 
2134 	if (!boot_cpu_has(X86_FEATURE_SEP))
2135 		return;
2136 
2137 	cpu = get_cpu();
2138 	tss = &per_cpu(cpu_tss_rw, cpu);
2139 
2140 	/*
2141 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
2142 	 * see the big comment in struct x86_hw_tss's definition.
2143 	 */
2144 
2145 	tss->x86_tss.ss1 = __KERNEL_CS;
2146 	wrmsrq(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1);
2147 	wrmsrq(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
2148 	wrmsrq(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32);
2149 
2150 	put_cpu();
2151 }
2152 #endif
2153 
2154 static __init void identify_boot_cpu(void)
2155 {
2156 	identify_cpu(&boot_cpu_data);
2157 	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
2158 		pr_info("CET detected: Indirect Branch Tracking enabled\n");
2159 #ifdef CONFIG_X86_32
2160 	enable_sep_cpu();
2161 #endif
2162 	cpu_detect_tlb(&boot_cpu_data);
2163 	setup_cr_pinning();
2164 
2165 	tsx_init();
2166 	tdx_init();
2167 	lkgs_init();
2168 }
2169 
2170 void identify_secondary_cpu(unsigned int cpu)
2171 {
2172 	struct cpuinfo_x86 *c = &cpu_data(cpu);
2173 
2174 	/* Copy boot_cpu_data only on the first bringup */
2175 	if (!c->initialized)
2176 		*c = boot_cpu_data;
2177 	c->cpu_index = cpu;
2178 
2179 	identify_cpu(c);
2180 #ifdef CONFIG_X86_32
2181 	enable_sep_cpu();
2182 #endif
2183 	x86_spec_ctrl_setup_ap();
2184 	update_srbds_msr();
2185 	if (boot_cpu_has_bug(X86_BUG_GDS))
2186 		update_gds_msr();
2187 
2188 	tsx_ap_init();
2189 	c->initialized = true;
2190 }
2191 
2192 void print_cpu_info(struct cpuinfo_x86 *c)
2193 {
2194 	const char *vendor = NULL;
2195 
2196 	if (c->x86_vendor < X86_VENDOR_NUM) {
2197 		vendor = this_cpu->c_vendor;
2198 	} else {
2199 		if (c->cpuid_level >= 0)
2200 			vendor = c->x86_vendor_id;
2201 	}
2202 
2203 	if (vendor && !strstr(c->x86_model_id, vendor))
2204 		pr_cont("%s ", vendor);
2205 
2206 	if (c->x86_model_id[0])
2207 		pr_cont("%s", c->x86_model_id);
2208 	else
2209 		pr_cont("%d86", c->x86);
2210 
2211 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2212 
2213 	if (c->x86_stepping || c->cpuid_level >= 0)
2214 		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2215 	else
2216 		pr_cont(")\n");
2217 }
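
/*
 * Illustrative output (exact strings vary by CPU; the vendor prefix is
 * omitted when it already appears in the model string):
 *
 *	Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz (family: 0x6, model: 0x4f, stepping: 0x1)
 */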
2218 
2219 /*
2220  * clearcpuid= and setcpuid= were already parsed in cpu_parse_early_param().
2221  * These dummy functions prevent them from becoming an environment variable for
2222  * init.
2223  */
2224 
2225 static __init int setup_clearcpuid(char *arg)
2226 {
2227 	return 1;
2228 }
2229 __setup("clearcpuid=", setup_clearcpuid);
2230 
2231 static __init int setup_setcpuid(char *arg)
2232 {
2233 	return 1;
2234 }
2235 __setup("setcpuid=", setup_setcpuid);
2236 
2237 DEFINE_PER_CPU_CACHE_HOT(struct task_struct *, current_task) = &init_task;
2238 EXPORT_PER_CPU_SYMBOL(current_task);
2239 EXPORT_PER_CPU_SYMBOL(const_current_task);
2240 
2241 DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
2242 EXPORT_PER_CPU_SYMBOL(__preempt_count);
2243 
2244 DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
2245 
2246 #ifdef CONFIG_X86_64
2247 /*
2248  * Note: Do not make this dependent on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
2249  * so that this space is reserved in the hot cache section even when the
2250  * mitigation is disabled.
2251  */
2252 DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
2253 EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
2254 
2255 static void wrmsrq_cstar(unsigned long val)
2256 {
2257 	/*
2258 	 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
2259 	 * is so far ignored by the CPU, but raises a #VE trap in a TDX
2260 	 * guest. Avoid the pointless write on all Intel CPUs.
2261 	 */
2262 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2263 		wrmsrq(MSR_CSTAR, val);
2264 }
2265 
2266 static inline void idt_syscall_init(void)
2267 {
2268 	wrmsrq(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2269 
2270 	if (ia32_enabled()) {
2271 		wrmsrq_cstar((unsigned long)entry_SYSCALL_compat);
2272 		/*
2273 		 * This only works on Intel CPUs.
2274 		 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
2275 		 * This does not cause SYSENTER to jump to the wrong location, because
2276 		 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2277 		 */
2278 		wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
2279 		wrmsrq_safe(MSR_IA32_SYSENTER_ESP,
2280 			    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
2281 		wrmsrq_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
2282 	} else {
2283 		wrmsrq_cstar((unsigned long)entry_SYSCALL32_ignore);
2284 		wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2285 		wrmsrq_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2286 		wrmsrq_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
2287 	}
2288 
2289 	/*
2290 	 * Flags to clear on syscall; clear as much as possible
2291 	 * to minimize user space-kernel interference.
2292 	 */
2293 	wrmsrq(MSR_SYSCALL_MASK,
2294 	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
2295 	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
2296 	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
2297 	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
2298 	       X86_EFLAGS_AC|X86_EFLAGS_ID);
2299 }
2300 
2301 /* May not be marked __init: used by software suspend */
2302 void syscall_init(void)
2303 {
2304 	/* The default user and kernel segments */
2305 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
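	/*
	 * MSR_STAR layout: bits 47:32 hold the SYSCALL CS/SS selector base
	 * (__KERNEL_CS here), bits 63:48 the SYSRET CS/SS selector base
	 * (__USER32_CS here); the low 32 bits are unused in long mode.
	 */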
2306 
2307 	/*
2308 	 * Except for the IA32_STAR MSR, there is no need to set up the SYSCALL
2309 	 * and SYSENTER MSRs for FRED, because FRED uses the ring 3 FRED
2310 	 * entrypoint for SYSCALL and SYSENTER, and ERETU is the only legitimate
2311 	 * instruction for returning to ring 3 (both SYSEXIT and SYSRET cause
2312 	 * #UD when FRED is enabled).
2313 	 */
2314 	if (!cpu_feature_enabled(X86_FEATURE_FRED))
2315 		idt_syscall_init();
2316 }
2317 #endif /* CONFIG_X86_64 */
2318 
2319 #ifdef CONFIG_STACKPROTECTOR
2320 DEFINE_PER_CPU_CACHE_HOT(unsigned long, __stack_chk_guard);
2321 #ifndef CONFIG_SMP
2322 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
2323 #endif
2324 #endif
2325 
2326 static void initialize_debug_regs(void)
2327 {
2328 	/* Control register first -- to make sure everything is disabled. */
2329 	set_debugreg(DR7_FIXED_1, 7);
2330 	set_debugreg(DR6_RESERVED, 6);
2331 	/* dr5 and dr4 don't exist */
2332 	set_debugreg(0, 3);
2333 	set_debugreg(0, 2);
2334 	set_debugreg(0, 1);
2335 	set_debugreg(0, 0);
2336 }
2337 
2338 #ifdef CONFIG_KGDB
2339 /*
2340  * Restore debug regs if using kgdbwait and you have a kernel debugger
2341  * connection established.
2342  */
2343 static void dbg_restore_debug_regs(void)
2344 {
2345 	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
2346 		arch_kgdb_ops.correct_hw_break();
2347 }
2348 #else /* ! CONFIG_KGDB */
2349 #define dbg_restore_debug_regs()
2350 #endif /* ! CONFIG_KGDB */
2351 
2352 static inline void setup_getcpu(int cpu)
2353 {
2354 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2355 	struct desc_struct d = { };
2356 
2357 	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2358 		wrmsrq(MSR_TSC_AUX, cpudata);
2359 
2360 	/* Store CPU and node number in limit. */
2361 	d.limit0 = cpudata;
2362 	d.limit1 = cpudata >> 16;
2363 
2364 	d.type = 5;		/* RO data, expand down, accessed */
2365 	d.dpl = 3;		/* Visible to user code */
2366 	d.s = 1;		/* Not a system segment */
2367 	d.p = 1;		/* Present */
2368 	d.d = 1;		/* 32-bit */
2369 
2370 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
2371 }
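
/*
 * Sketch of the consumer side (userspace/vDSO, not kernel code), relying on
 * the vdso_encode_cpunode() packing of the CPU number in the low bits and
 * the node above it:
 *
 *	unsigned long p;
 *	unsigned int cpu, node;
 *
 *	asm("lsl %1, %0" : "=r" (p) : "r" (__CPUNODE_SEG));
 *	cpu  = p & VDSO_CPUNODE_MASK;
 *	node = p >> VDSO_CPUNODE_BITS;
 *
 * CPUs with RDPID can read MSR_TSC_AUX directly instead.
 */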
2372 
2373 #ifdef CONFIG_X86_64
2374 static inline void tss_setup_ist(struct tss_struct *tss)
2375 {
2376 	/* Set up the per-CPU TSS IST stacks */
2377 	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2378 	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2379 	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2380 	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
2381 	/* Only mapped when SEV-ES is active */
2382 	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2383 }
2384 #else /* CONFIG_X86_64 */
2385 static inline void tss_setup_ist(struct tss_struct *tss) { }
2386 #endif /* !CONFIG_X86_64 */
2387 
2388 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2389 {
2390 	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
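	/*
	 * An io_bitmap_base pointing past the TSS segment limit makes the
	 * CPU fault on any userspace I/O port access, the desired default
	 * until ioperm()/iopl() installs a real bitmap.
	 */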
2391 
2392 #ifdef CONFIG_X86_IOPL_IOPERM
2393 	tss->io_bitmap.prev_max = 0;
2394 	tss->io_bitmap.prev_sequence = 0;
2395 	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2396 	/*
2397 	 * Invalidate the extra array entry past the end of the all-permission
2398 	 * bitmap, as required by the hardware.
2399 	 */
2400 	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2401 #endif
2402 }
2403 
2404 /*
2405  * Setup everything needed to handle exceptions from the IDT, including the IST
2406  * exceptions which use paranoid_entry().
2407  */
2408 void cpu_init_exception_handling(bool boot_cpu)
2409 {
2410 	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2411 	int cpu = raw_smp_processor_id();
2412 
2413 	/* paranoid_entry() gets the CPU number from the GDT */
2414 	setup_getcpu(cpu);
2415 
2416 	/* For IDT mode, IST vectors need to be set in TSS. */
2417 	if (!cpu_feature_enabled(X86_FEATURE_FRED))
2418 		tss_setup_ist(tss);
2419 	tss_setup_io_bitmap(tss);
2420 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2421 
2422 	load_TR_desc();
2423 
2424 	/* GHCB needs to be setup to handle #VC. */
2425 	setup_ghcb();
2426 
2427 	/*
2428 	 * On CPUs with FSGSBASE support, paranoid_entry() uses
2429 	 * ALTERNATIVE-patched RDGSBASE/WRGSBASE instructions. Secondary CPUs
2430 	 * boot after alternatives are patched globally, so early exceptions
2431 	 * execute patched code that depends on FSGSBASE. Enable the feature
2432 	 * before any exceptions occur.
2433 	 */
2434 	if (cpu_feature_enabled(X86_FEATURE_FSGSBASE)) {
2435 		cr4_set_bits(X86_CR4_FSGSBASE);
2436 		elf_hwcap2 |= HWCAP2_FSGSBASE;
2437 	}
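
	/*
	 * Userspace side (illustrative, not kernel code): a program would
	 * check getauxval(AT_HWCAP2) & HWCAP2_FSGSBASE before executing
	 * RDFSBASE/WRFSBASE directly.
	 */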
2438 
2439 	if (cpu_feature_enabled(X86_FEATURE_FRED)) {
2440 		/* The boot CPU has enabled FRED during early boot */
2441 		if (!boot_cpu)
2442 			cpu_init_fred_exceptions();
2443 
2444 		cpu_init_fred_rsps();
2445 	} else {
2446 		load_current_idt();
2447 	}
2448 }
2449 
2450 void __init cpu_init_replace_early_idt(void)
2451 {
2452 	if (cpu_feature_enabled(X86_FEATURE_FRED))
2453 		cpu_init_fred_exceptions();
2454 	else
2455 		idt_setup_early_pf();
2456 }
2457 
2458 /*
2459  * cpu_init() initializes state that is per-CPU. Some data is already
2460  * initialized (naturally) in the bootstrap process, such as the GDT.  We
2461  * reload it nevertheless: this function acts as a 'CPU state barrier';
2462  * nothing should get across.
2463  */
2464 void cpu_init(void)
2465 {
2466 	struct task_struct *cur = current;
2467 	int cpu = raw_smp_processor_id();
2468 
2469 #ifdef CONFIG_NUMA
2470 	if (this_cpu_read(numa_node) == 0 &&
2471 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
2472 		set_numa_node(early_cpu_to_node(cpu));
2473 #endif
2474 	pr_debug("Initializing CPU#%d\n", cpu);
2475 
2476 	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2477 	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2478 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
2479 
2480 	if (IS_ENABLED(CONFIG_X86_64)) {
2481 		loadsegment(fs, 0);
2482 		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
2483 		syscall_init();
2484 
2485 		wrmsrq(MSR_FS_BASE, 0);
2486 		wrmsrq(MSR_KERNEL_GS_BASE, 0);
2487 		barrier();
2488 
2489 		x2apic_setup();
2490 
2491 		intel_posted_msi_init();
2492 	}
2493 
2494 	mmgrab(&init_mm);
2495 	cur->active_mm = &init_mm;
2496 	BUG_ON(cur->mm);
2497 	initialize_tlbstate_and_flush();
2498 	enter_lazy_tlb(&init_mm, cur);
2499 
2500 	/*
2501 	 * sp0 points to the entry trampoline stack regardless of what task
2502 	 * is running.
2503 	 */
2504 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
2505 
2506 	load_mm_ldt(&init_mm);
2507 
2508 	initialize_debug_regs();
2509 	dbg_restore_debug_regs();
2510 
2511 	doublefault_init_cpu_tss();
2512 
2513 	if (is_uv_system())
2514 		uv_cpu_init();
2515 
2516 	load_fixmap_gdt(cpu);
2517 }
2518 
2519 #ifdef CONFIG_MICROCODE_LATE_LOADING
2520 /**
2521  * store_cpu_caps() - Store a snapshot of CPU capabilities
2522  * @curr_info: Pointer to the cpuinfo_x86 to store the snapshot in
2523  *
2524  * Returns: None
2525  */
2526 void store_cpu_caps(struct cpuinfo_x86 *curr_info)
2527 {
2528 	/* Reload CPUID max function as it might've changed. */
2529 	curr_info->cpuid_level = cpuid_eax(0);
2530 
2531 	/* Copy all capability leaves and pick up the synthetic ones. */
2532 	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
2533 	       sizeof(curr_info->x86_capability));
2534 
2535 	/* Get the hardware CPUID leafs */
2536 	get_cpu_cap(curr_info);
2537 }
2538 
2539 /**
2540  * microcode_check() - Check if any CPU capabilities changed after an update.
2541  * @prev_info:	CPU capabilities stored before an update.
2542  *
2543  * The microcode loader calls this upon late microcode load to recheck features,
2544  * only when microcode has been updated. The caller holds the CPU hotplug lock.
2545  *
2546  * Return: None
2547  */
2548 void microcode_check(struct cpuinfo_x86 *prev_info)
2549 {
2550 	struct cpuinfo_x86 curr_info;
2551 
2552 	perf_check_microcode();
2553 
2554 	amd_check_microcode();
2555 
2556 	store_cpu_caps(&curr_info);
2557 
2558 	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
2559 		    sizeof(prev_info->x86_capability)))
2560 		return;
2561 
2562 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
2563 	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
2564 }
2565 #endif
2566 
2567 /*
2568  * Invoked from core CPU hotplug code after hotplug operations
2569  */
2570 void arch_smt_update(void)
2571 {
2572 	/* Handle the speculative execution misfeatures */
2573 	cpu_bugs_smt_update();
2574 	/* Check whether IPI broadcasting can be enabled */
2575 	apic_smt_update();
2576 }
2577 
2578 void __init arch_cpu_finalize_init(void)
2579 {
2580 	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
2581 
2582 	identify_boot_cpu();
2583 
2584 	select_idle_routine();
2585 
2586 	/*
2587 	 * identify_boot_cpu() initialized SMT support information, let the
2588 	 * core code know.
2589 	 */
2590 	cpu_smt_set_num_threads(__max_threads_per_core, __max_threads_per_core);
2591 
2592 	if (!IS_ENABLED(CONFIG_SMP)) {
2593 		pr_info("CPU: ");
2594 		print_cpu_info(&boot_cpu_data);
2595 	}
2596 
2597 	cpu_select_mitigations();
2598 
2599 	arch_smt_update();
2600 
2601 	if (IS_ENABLED(CONFIG_X86_32)) {
2602 		/*
2603 		 * Check whether this is a real i386, which is no longer
2604 		 * supported, and fix up the utsname.
2605 		 */
2606 		if (boot_cpu_data.x86 < 4)
2607 			panic("Kernel requires i486+ for 'invlpg' and other features");
2608 
2609 		init_utsname()->machine[1] =
2610 			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
2611 	}
2612 
2613 	/*
2614 	 * Must be before alternatives because it might set or clear
2615 	 * feature bits.
2616 	 */
2617 	fpu__init_system();
2618 	fpu__init_cpu();
2619 
2620 	/*
2621 	 * This needs to follow the FPU initialization, since EFI depends on it.
2622 	 */
2623 	if (efi_enabled(EFI_RUNTIME_SERVICES))
2624 		efi_enter_virtual_mode();
2625 
2626 	/*
2627 	 * Ensure that access to the per CPU representation has the initial
2628 	 * boot CPU configuration.
2629 	 */
2630 	*c = boot_cpu_data;
2631 	c->initialized = true;
2632 
2633 	alternative_instructions();
2634 
2635 	if (IS_ENABLED(CONFIG_X86_64)) {
2636 		USER_PTR_MAX = TASK_SIZE_MAX;
2637 
2638 		/*
2639 		 * Enable this when LAM is gated on LASS support
2640 		if (cpu_feature_enabled(X86_FEATURE_LAM))
2641 			USER_PTR_MAX = (1ul << 63) - PAGE_SIZE;
2642 		 */
2643 		runtime_const_init(ptr, USER_PTR_MAX);
2644 
2645 		/*
2646 		 * Make sure the first 2MB area is not mapped by huge pages.
2647 		 * There are typically fixed-size MTRRs in there and overlapping
2648 		 * MTRRs into large pages causes slowdowns.
2649 		 *
2650 		 * Right now we don't do that with gbpages because there seems
2651 		 * to be very little benefit for that case.
2652 		 */
2653 		if (!direct_gbpages)
2654 			set_memory_4k((unsigned long)__va(0), 1);
2655 	} else {
2656 		fpu__init_check_bugs();
2657 	}
2658 
2659 	/*
2660 	 * This needs to be called before any devices perform DMA
2661 	 * operations that might use the SWIOTLB bounce buffers. It will
2662 	 * mark the bounce buffers as decrypted so that their usage will
2663 	 * not cause "plain-text" data to be decrypted when accessed. It
2664 	 * must be called after late_time_init() so that Hyper-V x86/x64
2665 	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
2666 	 */
2667 	mem_encrypt_init();
2668 }
2669