xref: /linux/arch/x86/kernel/cpu/intel.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include <asm/bugs.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/cpu.h>
#include <asm/cpuid/api.h>
#include <asm/hwcap2.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/msr.h>
#include <asm/numa.h>
#include <asm/resctrl.h>
#include <asm/thermal.h>
#include <asm/uaccess.h>

#include "cpu.h"

/*
 * Processors with self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there
 * exist CPU models in which conflicting memory types still lead to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_vfm) {
	case INTEL_CORE_YONAH:
	case INTEL_CORE2_MEROM:
	case INTEL_CORE2_MEROM_L:
	case INTEL_CORE2_PENRYN:
	case INTEL_CORE2_DUNNINGTON:
	case INTEL_NEHALEM:
	case INTEL_NEHALEM_G:
	case INTEL_NEHALEM_EP:
	case INTEL_NEHALEM_EX:
	case INTEL_WESTMERE:
	case INTEL_WESTMERE_EP:
	case INTEL_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);
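/*
 * Example: booting with "ring3mwait=disable" on the kernel command line
 * keeps the feature masked off even on parts that support it.
 */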

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * The Ring 3 MONITOR/MWAIT feature is not enumerated via CPUID;
	 * it can only be detected by cpu family and model comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_vfm) {
	case INTEL_XEON_PHI_KNL:
	case INTEL_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from the 20180108 microcode release
 */
struct sku_microcode {
	u32 vfm;	/* vendor/family/model, as in cpuinfo_x86::x86_vfm */
	u8  stepping;
	u32 microcode;	/* revisions up to and including this one are flagged */
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_KABYLAKE,	0x0B,	0x80 },
	{ INTEL_KABYLAKE,	0x0A,	0x80 },
	{ INTEL_KABYLAKE,	0x09,	0x80 },
	{ INTEL_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_SKYLAKE_X,	0x03,	0x0100013e },
	{ INTEL_SKYLAKE_X,	0x04,	0x0200003c },
	{ INTEL_BROADWELL,	0x04,	0x28 },
	{ INTEL_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_HASWELL_L,	0x01,	0x21 },
	{ INTEL_HASWELL_G,	0x01,	0x18 },
	{ INTEL_HASWELL,	0x03,	0x23 },
	{ INTEL_HASWELL_X,	0x02,	0x3b },
	{ INTEL_HASWELL_X,	0x04,	0x10 },
	{ INTEL_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_SANDYBRIDGE_X,	0x07,	0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that the host is running the correct
	 * version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		((x) & 0x1)
#define TME_ACTIVATE_ENABLED(x)		((x) & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)	(((x) >> 32) & 0xf)	/* Bits 35:32 */
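/*
 * Layout used by the helpers above: bit 0 locks the MSR, bit 1 is the
 * hardware encryption enable, and bits 35:32 give the number of KeyID
 * bits that BIOS reserved for MKTME.
 */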

static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate;
	int keyid_bits;

	rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		clear_cpu_cap(c, X86_FEATURE_TME);
		return;
	}
	pr_info_once("x86/tme: enabled by BIOS\n");
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	if (!keyid_bits)
		return;

	/*
	 * KeyID bits are set by BIOS and can be present regardless
	 * of whether the kernel is using them. They effectively lower
	 * the number of physical address bits.
	 *
	 * Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
	pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
		     keyid_bits);
}

void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	if (c->x86_vfm < INTEL_PENTIUM_M_DOTHAN)
		return;

	/*
	 * The BIOS can have limited CPUID to leaf 2, which breaks feature
	 * enumeration. Unlock it and update the maximum leaf info.
	 */
	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
		c->cpuid_level = cpuid_eax(0);
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/*
	 * If any of the speculation control features are enumerated, check
	 * the microcode blacklist and clear the lot.
	 */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86_vfm == INTEL_P4_PRESCOTT &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 *
	 * Use a model-specific check for some older CPUs that have invariant
	 * TSC but may not report it architecturally via 8000_0007.
	 */
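	/*
	 * Note the distinction: CONSTANT_TSC only means the TSC rate does
	 * not change with P-/T-states; NONSTOP_TSC additionally means the
	 * TSC keeps counting in deep C-states.
	 */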
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
		   (c->x86_vfm >= INTEL_CORE_YONAH  && c->x86_vfm <= INTEL_IVYBRIDGE)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	}

	/* Penwell and Cloverview have a TSC that doesn't sleep on S3 */
	switch (c->x86_vfm) {
	case INTEL_ATOM_SALTWELL_MID:
	case INTEL_ATOM_SALTWELL_TABLET:
	case INTEL_ATOM_SILVERMONT_MID:
	case INTEL_ATOM_AIRMONT_NP:
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
		break;
	}

	/*
	 * PAT is broken on early family 6 CPUs, the last of which
	 * is "Yonah" where the erratum is named "AN7":
	 *
	 *	Page with PAT (Page Attribute Table) Set to USWC
	 *	(Uncacheable Speculative Write Combine) While
	 *	Associated MTRR (Memory Type Range Register) Is UC
	 *	(Uncacheable) May Consolidate to UC
	 *
	 * Disable PAT and fall back to MTRR on these CPUs.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO &&
	    c->x86_vfm <= INTEL_CORE_YONAH)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * Modern CPUs are generally expected to have a sane fast string
	 * implementation. However, BIOSes typically have a knob to tweak
	 * the architectural MISC_ENABLE.FAST_STRING enable bit.
	 *
	 * Adhere to the preference and program the Linux-defined fast
	 * string flag and enhanced fast string capabilities accordingly.
	 */
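	/*
	 * REP_GOOD marks REP MOVSB/STOSB as the preferred bulk copy/set
	 * primitive; ERMS is the CPUID-enumerated enhanced fast string
	 * extension. Neither is useful if the BIOS disabled fast strings.
	 */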
	if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
		rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable);
		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			/* X86_FEATURE_ERMS is set based on CPUID */
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
		} else {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Nothing to check on the boot CPU; called via identify_secondary_cpu() */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_PENTIUM_MMX &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);
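/* Example: booting with "forcepae" overrides the missing PAE CPUID bit. */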

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_QUARK_X1000) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3, stepping 3
	 */
	if ((c->x86_vfm == INTEL_PENTIUM_II_KLAMATH && c->x86_stepping < 3) ||
	    c->x86_vfm < INTEL_PENTIUM_II_KLAMATH)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if (c->x86_vfm == INTEL_P4_WILLAMETTE && c->x86_stepping == 1) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && c->x86_vfm == INTEL_PENTIUM_75 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * MOVSL bulk memory moves can be slow when source and dest are not
	 * both 8-byte aligned. PII/PIII only like MOVSL with 8-byte alignment.
	 *
	 * Set the preferred alignment for Pentium Pro and newer processors, as
	 * it has only been tested on these.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO)
		movsl_mask.mask = 7;
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

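/*
 * CPUID faulting makes the CPUID instruction trap when executed in user
 * mode; it is the mechanism behind arch_prctl(ARCH_SET_CPUID) filtering
 * of a task's CPUID view.
 */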
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

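	/*
	 * msr_misc_features_shadow is a per-CPU software copy of the MSR,
	 * so later updates (e.g. re-toggling CPUID faulting on context
	 * switch) can avoid a read-modify-write of the MSR itself.
	 */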
	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrq(MSR_MISC_FEATURES_ENABLES, msr);
}

/*
 * This is a list of Intel CPUs that are known to suffer from downclocking when
 * ZMM registers (512-bit vectors) are used.  On these CPUs, when the kernel
 * executes SIMD-optimized code such as cryptography functions or CRCs, it
 * should prefer 256-bit (YMM) code to 512-bit (ZMM) code.
 */
static const struct x86_cpu_id zmm_exclusion_list[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		0),
	X86_MATCH_VFM(INTEL_ICELAKE,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	0),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	0),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		0),
	/* Allow Rocket Lake and later, and Sapphire Rapids and later. */
	{},
};
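/*
 * A match here is translated into X86_FEATURE_PREFER_YMM by init_intel()
 * below, which SIMD-optimized code can consult when choosing between
 * 256-bit and 512-bit implementations.
 */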

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	init_intel_cacheinfo(c);

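	/*
	 * CPUID leaf 0xa enumerates architectural perfmon: EAX[7:0] holds
	 * the version and EAX[15:8] the number of general-purpose counters,
	 * matching the two masks checked below.
	 */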
	if (c->cpuid_level > 9) {
		unsigned int eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
	     c->x86_vfm == INTEL_NEHALEM_EX ||
	     c->x86_vfm == INTEL_WESTMERE_EX))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
	     c->x86_vfm == INTEL_LUNARLAKE_M))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}
#endif

	if (x86_match_cpu(zmm_exclusion_list))
		set_cpu_cap(c, X86_FEATURE_PREFER_YMM);

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256KB of cache, the other 512KB. We have no way
	 * to determine which, so we use a boot-time override
	 * for the 512KB model, and assume 256KB otherwise.
	 */
	if (c->x86_vfm == INTEL_PENTIUM_III_TUALATIN && size == 0)
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000)
		size = 16;
	return size;
}
#endif

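/*
 * Fold a single CPUID leaf 0x2 descriptor into the global TLB size
 * variables; max() ensures the largest reported entry count wins when
 * multiple descriptors cover the same page size.
 */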
static void intel_tlb_lookup(const struct leaf_0x2_table *desc)
{
	short entries = desc->entries;

	switch (desc->t_type) {
	case STLB_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case STLB_4K_2M:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_INST_ALL:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		break;
	case TLB_INST_4M:
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_2M_4M:
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_4K_4M:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_1G_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, TLB_0x63_2M_4M_ENTRIES);
		tlb_lld_4m = max(tlb_lld_4m, TLB_0x63_2M_4M_ENTRIES);
		fallthrough;
	case TLB_DATA_1G:
		tlb_lld_1g = max(tlb_lld_1g, entries);
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	const struct leaf_0x2_table *desc;
	union leaf_0x2_regs regs;
	u8 *ptr;

	if (c->cpuid_level < 2)
		return;

	cpuid_leaf_0x2(&regs);
	for_each_cpuid_0x2_desc(regs, ptr, desc)
		intel_tlb_lookup(desc);
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);