Lines Matching +full:ecx +full:- +full:2000
1 // SPDX-License-Identifier: GPL-2.0-only
63 #include <asm/intel-family.h>
73 #include <asm/runtime-const.h>
149 info = (struct ppin_info *)id->driver_data; in ppin_init()
151 if (rdmsrl_safe(info->msr_ppin_ctl, &val)) in ppin_init()
161 wrmsrl_safe(info->msr_ppin_ctl, val | 2UL); in ppin_init()
162 rdmsrl_safe(info->msr_ppin_ctl, &val); in ppin_init()
167 c->ppin = __rdmsr(info->msr_ppin); in ppin_init()
168 set_cpu_cap(c, info->feature); in ppin_init()
173 setup_clear_cpu_cap(info->feature); in ppin_init()
183 if (c->cpuid_level == -1) { in default_init()
185 if (c->x86 == 4) in default_init()
186 strcpy(c->x86_model_id, "486"); in default_init()
187 else if (c->x86 == 3) in default_init()
188 strcpy(c->x86_model_id, "386"); in default_init()
205 * IRET will check the segment types kkeil 2000/10/28
251 return -EINVAL; in x86_nopcid_setup()
268 return -EINVAL; in x86_noinvpcid_setup()
313 static int cachesize_override = -1;
346 c->cpuid_level = cpuid_eax(0); in squash_the_stupid_serial_number()
513 * Protection Keys are not available in 32-bit mode.
631 * software. Add those features to this table to auto-disable them.
650 for (df = cpuid_dependent_features; df->feature; df++) { in filter_cpuid_features()
652 if (!cpu_has(c, df->feature)) in filter_cpuid_features()
655 * Note: cpuid_level is set to -1 if unavailable, but in filter_cpuid_features()
661 if (!((s32)df->level < 0 ? in filter_cpuid_features()
662 (u32)df->level > (u32)c->extended_cpuid_level : in filter_cpuid_features()
663 (s32)df->level > (s32)c->cpuid_level)) in filter_cpuid_features()
666 clear_cpu_cap(c, df->feature); in filter_cpuid_features()
671 x86_cap_flags[df->feature], df->level); in filter_cpuid_features()
688 if (c->x86_model >= 16) in table_lookup_model()
694 info = this_cpu->legacy_models; in table_lookup_model()
696 while (info->family) { in table_lookup_model()
697 if (info->family == c->x86) in table_lookup_model()
698 return info->model_names[c->x86_model]; in table_lookup_model()
710 /* The 32-bit entry code needs to find cpu_entry_area. */
714 /* Load the original GDT from the per-cpu structure */
720 gdt_descr.size = GDT_SIZE - 1; in load_direct_gdt()
725 /* Load a fixmap remapping of the per-cpu GDT */
731 gdt_descr.size = GDT_SIZE - 1; in load_fixmap_gdt()
737 * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
741 * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
784 if (c->extended_cpuid_level < 0x80000004) in get_model_name()
787 v = (unsigned int *)c->x86_model_id; in get_model_name()
791 c->x86_model_id[48] = 0; in get_model_name()
794 p = q = s = &c->x86_model_id[0]; in get_model_name()
800 /* Note the last non-whitespace index */ in get_model_name()
812 unsigned int n, dummy, ebx, ecx, edx, l2size; in cpu_detect_cache_sizes() local
814 n = c->extended_cpuid_level; in cpu_detect_cache_sizes()
817 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
818 c->x86_cache_size = (ecx>>24) + (edx>>24); in cpu_detect_cache_sizes()
821 c->x86_tlbsize = 0; in cpu_detect_cache_sizes()
828 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
829 l2size = ecx >> 16; in cpu_detect_cache_sizes()
832 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); in cpu_detect_cache_sizes()
834 /* do processor-specific cache resizing */ in cpu_detect_cache_sizes()
835 if (this_cpu->legacy_cache_size) in cpu_detect_cache_sizes()
836 l2size = this_cpu->legacy_cache_size(c, l2size); in cpu_detect_cache_sizes()
839 if (cachesize_override != -1) in cpu_detect_cache_sizes()
846 c->x86_cache_size = l2size; in cpu_detect_cache_sizes()
859 if (this_cpu->c_detect_tlb) in cpu_detect_tlb()
860 this_cpu->c_detect_tlb(c); in cpu_detect_tlb()
871 char *v = c->x86_vendor_id; in get_cpu_vendor()
878 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || in get_cpu_vendor()
879 (cpu_devs[i]->c_ident[1] && in get_cpu_vendor()
880 !strcmp(v, cpu_devs[i]->c_ident[1]))) { in get_cpu_vendor()
883 c->x86_vendor = this_cpu->c_x86_vendor; in get_cpu_vendor()
891 c->x86_vendor = X86_VENDOR_UNKNOWN; in get_cpu_vendor()
898 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, in cpu_detect()
899 (unsigned int *)&c->x86_vendor_id[0], in cpu_detect()
900 (unsigned int *)&c->x86_vendor_id[8], in cpu_detect()
901 (unsigned int *)&c->x86_vendor_id[4]); in cpu_detect()
903 c->x86 = 4; in cpu_detect()
904 /* Intel-defined flags: level 0x00000001 */ in cpu_detect()
905 if (c->cpuid_level >= 0x00000001) { in cpu_detect()
909 c->x86 = x86_family(tfms); in cpu_detect()
910 c->x86_model = x86_model(tfms); in cpu_detect()
911 c->x86_stepping = x86_stepping(tfms); in cpu_detect()
914 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; in cpu_detect()
915 c->x86_cache_alignment = c->x86_clflush_size; in cpu_detect()
925 c->x86_capability[i] &= ~cpu_caps_cleared[i]; in apply_forced_caps()
926 c->x86_capability[i] |= cpu_caps_set[i]; in apply_forced_caps()
936 * Intel CPUs, for finer-grained selection of what's available. in init_speculation_control()
973 u32 eax, ebx, ecx, edx; in get_cpu_cap() local
975 /* Intel-defined flags: level 0x00000001 */ in get_cpu_cap()
976 if (c->cpuid_level >= 0x00000001) { in get_cpu_cap()
977 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
979 c->x86_capability[CPUID_1_ECX] = ecx; in get_cpu_cap()
980 c->x86_capability[CPUID_1_EDX] = edx; in get_cpu_cap()
984 if (c->cpuid_level >= 0x00000006) in get_cpu_cap()
985 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); in get_cpu_cap()
987 /* Additional Intel-defined flags: level 0x00000007 */ in get_cpu_cap()
988 if (c->cpuid_level >= 0x00000007) { in get_cpu_cap()
989 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
990 c->x86_capability[CPUID_7_0_EBX] = ebx; in get_cpu_cap()
991 c->x86_capability[CPUID_7_ECX] = ecx; in get_cpu_cap()
992 c->x86_capability[CPUID_7_EDX] = edx; in get_cpu_cap()
994 /* Check valid sub-leaf index before accessing it */ in get_cpu_cap()
996 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
997 c->x86_capability[CPUID_7_1_EAX] = eax; in get_cpu_cap()
1002 if (c->cpuid_level >= 0x0000000d) { in get_cpu_cap()
1003 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1005 c->x86_capability[CPUID_D_1_EAX] = eax; in get_cpu_cap()
1008 /* AMD-defined flags: level 0x80000001 */ in get_cpu_cap()
1010 c->extended_cpuid_level = eax; in get_cpu_cap()
1014 cpuid(0x80000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1016 c->x86_capability[CPUID_8000_0001_ECX] = ecx; in get_cpu_cap()
1017 c->x86_capability[CPUID_8000_0001_EDX] = edx; in get_cpu_cap()
1021 if (c->extended_cpuid_level >= 0x80000007) { in get_cpu_cap()
1022 cpuid(0x80000007, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1024 c->x86_capability[CPUID_8000_0007_EBX] = ebx; in get_cpu_cap()
1025 c->x86_power = edx; in get_cpu_cap()
1028 if (c->extended_cpuid_level >= 0x80000008) { in get_cpu_cap()
1029 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1030 c->x86_capability[CPUID_8000_0008_EBX] = ebx; in get_cpu_cap()
1033 if (c->extended_cpuid_level >= 0x8000000a) in get_cpu_cap()
1034 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); in get_cpu_cap()
1036 if (c->extended_cpuid_level >= 0x8000001f) in get_cpu_cap()
1037 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); in get_cpu_cap()
1039 if (c->extended_cpuid_level >= 0x80000021) in get_cpu_cap()
1040 c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); in get_cpu_cap()
1047 * This needs to happen each time we re-probe, which may happen in get_cpu_cap()
1055 u32 eax, ebx, ecx, edx; in get_cpu_address_sizes() local
1058 (c->extended_cpuid_level < 0x80000008)) { in get_cpu_address_sizes()
1060 c->x86_clflush_size = 64; in get_cpu_address_sizes()
1061 c->x86_phys_bits = 36; in get_cpu_address_sizes()
1062 c->x86_virt_bits = 48; in get_cpu_address_sizes()
1064 c->x86_clflush_size = 32; in get_cpu_address_sizes()
1065 c->x86_virt_bits = 32; in get_cpu_address_sizes()
1066 c->x86_phys_bits = 32; in get_cpu_address_sizes()
1070 c->x86_phys_bits = 36; in get_cpu_address_sizes()
1073 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_address_sizes()
1075 c->x86_virt_bits = (eax >> 8) & 0xff; in get_cpu_address_sizes()
1076 c->x86_phys_bits = eax & 0xff; in get_cpu_address_sizes()
1079 if (!c->x86_clflush_size) in get_cpu_address_sizes()
1080 c->x86_clflush_size = 32; in get_cpu_address_sizes()
1083 c->x86_cache_bits = c->x86_phys_bits; in get_cpu_address_sizes()
1084 c->x86_cache_alignment = c->x86_clflush_size; in get_cpu_address_sizes()
1096 c->x86 = 4; in identify_cpu_without_cpuid()
1098 c->x86 = 3; in identify_cpu_without_cpuid()
1101 if (cpu_devs[i] && cpu_devs[i]->c_identify) { in identify_cpu_without_cpuid()
1102 c->x86_vendor_id[0] = 0; in identify_cpu_without_cpuid()
1103 cpu_devs[i]->c_identify(c); in identify_cpu_without_cpuid()
1104 if (c->x86_vendor_id[0]) { in identify_cpu_without_cpuid()
1175 * updated non-speculatively, and the issuing of %gs-relative memory
1184 /* AMD Family 0xf - 0x12 */
1190 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1222 /* CPU is affected by SMT (cross-thread) return predictions */
1232 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
1290 return m && !!(m->driver_data & which); in cpu_matches()
1379 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature in cpu_set_bug_bits()
1380 * flag and protect from vendor-specific bugs via the whitelist. in cpu_set_bug_bits()
1406 * - TSX is supported or in cpu_set_bug_bits()
1407 * - TSX_CTRL is present in cpu_set_bug_bits()
1477 * BHI_NO still need to use the BHI mitigation to prevent Intra-mode in cpu_set_bug_bits()
1514 * probing for it doesn't even work. Disable it completely on 32-bit
1516 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1547 pr_warn("setcpuid: force-enabling CPU feature flag:"); in parse_set_clear_cpuid()
1550 pr_warn("clearcpuid: force-disabling CPU feature flag:"); in parse_set_clear_cpuid()
1553 /* empty-string, i.e., ""-defined feature flags */ in parse_set_clear_cpuid()
1577 flag = x86_bug_flags[bit - (32 * NCAPINTS)]; in parse_set_clear_cpuid()
1587 pr_warn("setcpuid: force-enabling CPU %s flag: %s\n", in parse_set_clear_cpuid()
1591 pr_warn("clearcpuid: force-disabling CPU %s flag: %s\n", in parse_set_clear_cpuid()
1672 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in early_identify_cpu()
1673 c->extended_cpuid_level = 0; in early_identify_cpu()
1690 if (this_cpu->c_early_init) in early_identify_cpu()
1691 this_cpu->c_early_init(c); in early_identify_cpu()
1693 c->cpu_index = 0; in early_identify_cpu()
1697 if (this_cpu->c_bsp_init) in early_identify_cpu()
1698 this_cpu->c_bsp_init(c); in early_identify_cpu()
1714 * that it can't be enabled in 32-bit mode. in early_identify_cpu()
1721 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not in early_identify_cpu()
1723 * false-positives at the later stage. in early_identify_cpu()
1726 * - 5-level paging is disabled compile-time; in early_identify_cpu()
1727 * - it's 32-bit kernel; in early_identify_cpu()
1728 * - machine doesn't support 5-level paging; in early_identify_cpu()
1729 * - user specified 'no5lvl' in kernel command line. in early_identify_cpu()
1765 if (!cpu_devs[i]->c_ident[j]) in early_cpu_init()
1767 pr_info(" %s %s\n", cpu_devs[i]->c_vendor, in early_cpu_init()
1768 cpu_devs[i]->c_ident[j]); in early_cpu_init()
1786 * detect it directly instead of hard-coding the choice by in detect_null_seg_behavior()
1827 if ((c->x86 == 0x17 || c->x86 == 0x18) && in check_null_seg_clears_base()
1837 c->extended_cpuid_level = 0; in generic_identify()
1863 * NB: For the time being, only 32-bit kernels support in generic_identify()
1864 * X86_BUG_ESPFIX as such. 64-bit kernels directly choose in generic_identify()
1866 * non-paravirt system ever shows up that does *not* have the in generic_identify()
1881 c->loops_per_jiffy = loops_per_jiffy; in identify_cpu()
1882 c->x86_cache_size = 0; in identify_cpu()
1883 c->x86_vendor = X86_VENDOR_UNKNOWN; in identify_cpu()
1884 c->x86_model = c->x86_stepping = 0; /* So far unknown... */ in identify_cpu()
1885 c->x86_vendor_id[0] = '\0'; /* Unset */ in identify_cpu()
1886 c->x86_model_id[0] = '\0'; /* Unset */ in identify_cpu()
1888 c->x86_clflush_size = 64; in identify_cpu()
1889 c->x86_phys_bits = 36; in identify_cpu()
1890 c->x86_virt_bits = 48; in identify_cpu()
1892 c->cpuid_level = -1; /* CPUID not detected */ in identify_cpu()
1893 c->x86_clflush_size = 32; in identify_cpu()
1894 c->x86_phys_bits = 32; in identify_cpu()
1895 c->x86_virt_bits = 32; in identify_cpu()
1897 c->x86_cache_alignment = c->x86_clflush_size; in identify_cpu()
1898 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in identify_cpu()
1900 memset(&c->vmx_capability, 0, sizeof(c->vmx_capability)); in identify_cpu()
1907 if (this_cpu->c_identify) in identify_cpu()
1908 this_cpu->c_identify(c); in identify_cpu()
1915 * Hygon will clear it in ->c_init() below. in identify_cpu()
1920 * Vendor-specific initialization. In this section we in identify_cpu()
1926 * At the end of this section, c->x86_capability better in identify_cpu()
1929 if (this_cpu->c_init) in identify_cpu()
1930 this_cpu->c_init(c); in identify_cpu()
1949 * The vendor-specific functions might have changed features. in identify_cpu()
1960 if (!c->x86_model_id[0]) { in identify_cpu()
1964 strcpy(c->x86_model_id, p); in identify_cpu()
1967 sprintf(c->x86_model_id, "%02x/%02x", in identify_cpu()
1968 c->x86, c->x86_model); in identify_cpu()
1990 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; in identify_cpu()
1994 c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; in identify_cpu()
2007 * on 32-bit kernels:
2022 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- in enable_sep_cpu()
2026 tss->x86_tss.ss1 = __KERNEL_CS; in enable_sep_cpu()
2027 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); in enable_sep_cpu()
2056 if (!c->initialized) in identify_secondary_cpu()
2058 c->cpu_index = cpu; in identify_secondary_cpu()
2070 c->initialized = true; in identify_secondary_cpu()
2077 if (c->x86_vendor < X86_VENDOR_NUM) { in print_cpu_info()
2078 vendor = this_cpu->c_vendor; in print_cpu_info()
2080 if (c->cpuid_level >= 0) in print_cpu_info()
2081 vendor = c->x86_vendor_id; in print_cpu_info()
2084 if (vendor && !strstr(c->x86_model_id, vendor)) in print_cpu_info()
2087 if (c->x86_model_id[0]) in print_cpu_info()
2088 pr_cont("%s", c->x86_model_id); in print_cpu_info()
2090 pr_cont("%d86", c->x86); in print_cpu_info()
2092 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); in print_cpu_info()
2094 if (c->x86_stepping || c->cpuid_level >= 0) in print_cpu_info()
2095 pr_cont(", stepping: 0x%x)\n", c->x86_stepping); in print_cpu_info()
2139 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR in wrmsrl_cstar()
2155 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. in idt_syscall_init()
2157 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). in idt_syscall_init()
2172 * to minimize user space-kernel interference. in idt_syscall_init()
2253 d.d = 1; /* 32-bit */ in setup_getcpu()
2261 /* Set up the per-CPU TSS IST stacks */ in tss_setup_ist()
2262 tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); in tss_setup_ist()
2263 tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); in tss_setup_ist()
2264 tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); in tss_setup_ist()
2265 tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); in tss_setup_ist()
2266 /* Only mapped when SEV-ES is active */ in tss_setup_ist()
2267 tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); in tss_setup_ist()
2275 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID; in tss_setup_io_bitmap()
2278 tss->io_bitmap.prev_max = 0; in tss_setup_io_bitmap()
2279 tss->io_bitmap.prev_sequence = 0; in tss_setup_io_bitmap()
2280 memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap)); in tss_setup_io_bitmap()
2285 tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL; in tss_setup_io_bitmap()
2305 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); in cpu_init_exception_handling()
2332 * cpu_init() initializes state that is per-CPU. Some data is already
2355 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); in cpu_init()
2368 cur->active_mm = &init_mm; in cpu_init()
2369 BUG_ON(cur->mm); in cpu_init()
2394 * store_cpu_caps() - Store a snapshot of CPU capabilities
2402 curr_info->cpuid_level = cpuid_eax(0); in store_cpu_caps()
2405 memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability, in store_cpu_caps()
2406 sizeof(curr_info->x86_capability)); in store_cpu_caps()
2413 * microcode_check() - Check if any CPU capabilities changed after an update.
2431 if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, in microcode_check()
2432 sizeof(prev_info->x86_capability))) in microcode_check()
2436 pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); in microcode_check()
2482 init_utsname()->machine[1] = in arch_cpu_finalize_init()
2498 c->initialized = true; in arch_cpu_finalize_init()
2508 USER_PTR_MAX = (1ul << 63) - PAGE_SIZE; in arch_cpu_finalize_init()
2530 * not cause "plain-text" data to be decrypted when accessed. It in arch_cpu_finalize_init()
2531 * must be called after late_time_init() so that Hyper-V x86/x64 in arch_cpu_finalize_init()