// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include <asm/bugs.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/cpu.h>
#include <asm/cpuid/api.h>
#include <asm/hwcap2.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/msr.h>
#include <asm/numa.h>
#include <asm/resctrl.h>
#include <asm/thermal.h>
#include <asm/uaccess.h>

#include "cpu.h"

/*
 * Processors with self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own caches. However, there
 * are CPU models on which conflicting memory types still lead to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_vfm) {
	case INTEL_CORE_YONAH:
	case INTEL_CORE2_MEROM:
	case INTEL_CORE2_MEROM_L:
	case INTEL_CORE2_PENRYN:
	case INTEL_CORE2_DUNNINGTON:
	case INTEL_NEHALEM:
	case INTEL_NEHALEM_G:
	case INTEL_NEHALEM_EP:
	case INTEL_NEHALEM_EX:
	case INTEL_WESTMERE:
	case INTEL_WESTMERE_EP:
	case INTEL_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

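/* Booting with "ring3mwait=disable" on the kernel command line keeps the feature off. */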
static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * The Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * comparing the CPU family and model.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_vfm) {
	case INTEL_XEON_PHI_KNL:
	case INTEL_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
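	/*
	 * Only the per-CPU shadow value is updated here; the MSR itself is
	 * not written. init_intel_misc_features() later propagates the
	 * shadow to MSR_MISC_FEATURES_ENABLES.
	 */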
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from the 20180108 microcode release
 */
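/*
 * Each entry below records the newest microcode revision known to be bad
 * for a given model/stepping; bad_spectre_microcode() treats any revision
 * at or below it as affected.
 */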
struct sku_microcode {
	u32 vfm;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_KABYLAKE, 0x0B, 0x80 },
	{ INTEL_KABYLAKE, 0x0A, 0x80 },
	{ INTEL_KABYLAKE, 0x09, 0x80 },
	{ INTEL_KABYLAKE_L, 0x0A, 0x80 },
	{ INTEL_KABYLAKE_L, 0x09, 0x80 },
	{ INTEL_SKYLAKE_X, 0x03, 0x0100013e },
	{ INTEL_SKYLAKE_X, 0x04, 0x0200003c },
	{ INTEL_BROADWELL, 0x04, 0x28 },
	{ INTEL_BROADWELL_G, 0x01, 0x1b },
	{ INTEL_BROADWELL_D, 0x02, 0x14 },
	{ INTEL_BROADWELL_D, 0x03, 0x07000011 },
	{ INTEL_BROADWELL_X, 0x01, 0x0b000025 },
	{ INTEL_HASWELL_L, 0x01, 0x21 },
	{ INTEL_HASWELL_G, 0x01, 0x18 },
	{ INTEL_HASWELL, 0x03, 0x23 },
	{ INTEL_HASWELL_X, 0x02, 0x3b },
	{ INTEL_HASWELL_X, 0x04, 0x10 },
	{ INTEL_IVYBRIDGE_X, 0x04, 0x42a },
	/* Observed in the wild */
	{ INTEL_SANDYBRIDGE_X, 0x06, 0x61b },
	{ INTEL_SANDYBRIDGE_X, 0x07, 0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that the host is running the correct
	 * version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
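
/*
 * Worked example (illustrative value, not from any particular part):
 * tme_activate = 0x0000000600000003 has LOCKED and ENABLED set in bits
 * 1:0 and KEYID_BITS = 6 in bits 35:32, so six physical address bits
 * are claimed for KeyIDs and x86_phys_bits shrinks by 6, e.g. 46 -> 40.
 */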

static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate;
	int keyid_bits;

	rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		clear_cpu_cap(c, X86_FEATURE_TME);
		return;
	}
	pr_info_once("x86/tme: enabled by BIOS\n");
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	if (!keyid_bits)
		return;

	/*
	 * KeyID bits are set by BIOS and can be present regardless
	 * of whether the kernel is using them. They effectively lower
	 * the number of physical address bits.
	 *
	 * Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
	pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
		     keyid_bits);
}

void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	if (c->x86_vfm < INTEL_PENTIUM_M_DOTHAN)
		return;

	/*
	 * The BIOS can have limited CPUID to leaf 2, which breaks feature
	 * enumeration. Unlock it and update the maximum leaf info.
	 */
	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
		c->cpuid_level = cpuid_eax(0);
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifndef CONFIG_X86_64
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86_vfm == INTEL_P4_PRESCOTT &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets (but not across
	 * cabinets; we turn it off in that case explicitly).
	 *
	 * Use a model-specific check for some older CPUs that have invariant
	 * TSC but may not report it architecturally via 8000_0007.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
		   (c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	}

	/* Penwell and Cloverview have a TSC that doesn't sleep on S3 */
	switch (c->x86_vfm) {
	case INTEL_ATOM_SALTWELL_MID:
	case INTEL_ATOM_SALTWELL_TABLET:
	case INTEL_ATOM_SILVERMONT_MID:
	case INTEL_ATOM_AIRMONT_NP:
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
		break;
	}

	/*
	 * PAT is broken on early family 6 CPUs, the last of which
	 * is "Yonah" where the erratum is named "AN7":
	 *
	 *	Page with PAT (Page Attribute Table) Set to USWC
	 *	(Uncacheable Speculative Write Combine) While
	 *	Associated MTRR (Memory Type Range Register) Is UC
	 *	(Uncacheable) May Consolidate to UC
	 *
	 * Disable PAT and fall back to MTRR on these CPUs.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO &&
	    c->x86_vfm <= INTEL_CORE_YONAH)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * Modern CPUs are generally expected to have a sane fast string
	 * implementation. However, BIOSes typically have a knob to tweak
	 * the architectural MISC_ENABLE.FAST_STRING enable bit.
	 *
	 * Adhere to the preference and program the Linux-defined fast
	 * string flag and enhanced fast string capabilities accordingly.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
		rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable);
		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			/* X86_FEATURE_ERMS is set based on CPUID */
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
		} else {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Called from identify_secondary_cpu()? Bail out on the boot CPU. */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_PENTIUM_MMX &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_QUARK_X1000) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86_vfm == INTEL_PENTIUM_II_KLAMATH && c->x86_stepping < 3) ||
	    c->x86_vfm < INTEL_PENTIUM_II_KLAMATH)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if (c->x86_vfm == INTEL_P4_WILLAMETTE && c->x86_stepping == 1) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && c->x86_vfm == INTEL_PENTIUM_75 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * MOVSL bulk memory moves can be slow when source and dest are not
	 * both 8-byte aligned. PII/PIII only like MOVSL with 8-byte alignment.
	 *
	 * Set the preferred alignment for Pentium Pro and newer processors, as
	 * it has only been tested on these.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO)
		movsl_mask.mask = 7;
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

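/*
 * CPUID faulting lets the kernel make the CPUID instruction trap when
 * executed in user mode (consumed elsewhere via the ARCH_SET_CPUID
 * arch_prctl()); this helper only detects whether the platform supports it.
 */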
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrq(MSR_MISC_FEATURES_ENABLES, msr);
}

/*
 * This is a list of Intel CPUs that are known to suffer from downclocking when
 * ZMM registers (512-bit vectors) are used. On these CPUs, when the kernel
 * executes SIMD-optimized code such as cryptography functions or CRCs, it
 * should prefer 256-bit (YMM) code to 512-bit (ZMM) code.
 */
static const struct x86_cpu_id zmm_exclusion_list[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_X, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
	X86_MATCH_VFM(INTEL_ICELAKE, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_L, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI, 0),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L, 0),
	X86_MATCH_VFM(INTEL_TIGERLAKE, 0),
	/* Allow Rocket Lake and later, and Sapphire Rapids and later. */
	{},
};

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
	     c->x86_vfm == INTEL_NEHALEM_EX ||
	     c->x86_vfm == INTEL_WESTMERE_EX))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
	     c->x86_vfm == INTEL_LUNARLAKE_M))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}
#endif

	if (x86_match_cpu(zmm_exclusion_list))
		set_cpu_cap(c, X86_FEATURE_PREFER_YMM);

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if (c->x86_vfm == INTEL_PENTIUM_III_TUALATIN && size == 0)
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000)
		size = 16;
	return size;
}
#endif

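/*
 * Fold one CPUID leaf 0x2 descriptor into the global TLB entry counts.
 * tlb_lli_* are last-level instruction TLB sizes and tlb_lld_* last-level
 * data TLB sizes, one per page size; max() keeps the largest count seen
 * across all descriptors.
 */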
static void intel_tlb_lookup(const struct leaf_0x2_table *desc)
{
	short entries = desc->entries;

	switch (desc->t_type) {
	case STLB_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case STLB_4K_2M:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_INST_ALL:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		break;
	case TLB_INST_4M:
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_2M_4M:
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_4K_4M:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_1G_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, TLB_0x63_2M_4M_ENTRIES);
		tlb_lld_4m = max(tlb_lld_4m, TLB_0x63_2M_4M_ENTRIES);
		fallthrough;
	case TLB_DATA_1G:
		tlb_lld_1g = max(tlb_lld_1g, entries);
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	const struct leaf_0x2_table *desc;
	union leaf_0x2_regs regs;
	u8 *ptr;

	if (c->cpuid_level < 2)
		return;

	cpuid_leaf_0x2(&regs);
	for_each_cpuid_0x2_desc(regs, ptr, desc)
		intel_tlb_lookup(desc);
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);