Lines Matching 0x8000000a

211	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
212	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
213	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
214	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
215	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
216	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
218	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
219	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
220	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
221	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
227	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
228	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
229	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
230	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
231	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
236	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
237	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
238	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),
240	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
241	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
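The GDT_ENTRY_INIT() rows above pack type/flag bits, a base, and a limit into a single 8-byte segment descriptor, with base and limit scattered across the descriptor fields for historical reasons. A stand-alone sketch of that packing, following the classic x86 descriptor layout (the 0xc09b flags value for a flat ring-0 32-bit code segment is an illustrative assumption, not taken from these lines):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the descriptor packing behind GDT_ENTRY_INIT(): flags carry
     * type, DPL, present and granularity bits; base/limit are split up. */
    static uint64_t gdt_entry(uint32_t flags, uint32_t base, uint32_t limit)
    {
            return ((uint64_t)(base  & 0xff000000u) << 32) |
                   ((uint64_t)(flags & 0x0000f0ffu) << 40) |
                   ((uint64_t)(limit & 0x000f0000u) << 32) |
                   ((uint64_t)(base  & 0x00ffffffu) << 16) |
                    (uint64_t)(limit & 0x0000ffffu);
    }

    int main(void)
    {
            /* 0xc09b: present, DPL 0, 32-bit code, read/execute, 4 KiB
             * granularity -- a flat 4 GiB kernel code segment */
            printf("%#018llx\n",
                   (unsigned long long)gdt_entry(0xc09b, 0, 0xfffff));
            return 0;
    }

Compiled and run, this prints 0x00cf9b000000ffff, the familiar flat-code descriptor.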
255 return 0; in x86_nopcid_setup()
259 return 0; in x86_nopcid_setup()
272 return 0; in x86_noinvpcid_setup()
276 return 0; in x86_noinvpcid_setup()
297 "popl %0 \n\t" in flag_is_changeable_p()
298 "movl %0, %1 \n\t" in flag_is_changeable_p()
299 "xorl %2, %0 \n\t" in flag_is_changeable_p()
300 "pushl %0 \n\t" in flag_is_changeable_p()
303 "popl %0 \n\t" in flag_is_changeable_p()
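These fragments come from the classic EFLAGS probe: on 32-bit CPUs, CPUID support is detected by checking whether the ID flag (bit 21, 0x200000) can be toggled. A minimal sketch of the same pushfl/popfl sequence, assuming a 32-bit (i386) build:

    #include <stdio.h>

    #ifdef __i386__
    static int eflags_bit_changeable(unsigned long flag)
    {
            unsigned long f1, f2;

            asm volatile("pushfl        \n\t"
                         "pushfl        \n\t"
                         "popl %0       \n\t"
                         "movl %0, %1   \n\t"
                         "xorl %2, %0   \n\t"
                         "pushl %0      \n\t"
                         "popfl         \n\t"
                         "pushfl        \n\t"
                         "popl %0       \n\t"
                         "popfl         \n\t"
                         : "=&r" (f1), "=&r" (f2)
                         : "ir" (flag));

            return ((f1 ^ f2) & flag) != 0;
    }
    #endif

    int main(void)
    {
    #ifdef __i386__
            /* X86_EFLAGS_ID: toggleable iff the CPU implements CPUID */
            printf("CPUID available: %d\n", eflags_bit_changeable(0x200000));
    #else
            puts("64-bit build: CPUID is architecturally guaranteed");
    #endif
            return 0;
    }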
339 lo |= 0x200000; in squash_the_stupid_serial_number()
346 c->cpuid_level = cpuid_eax(0); in squash_the_stupid_serial_number()
351 disable_x86_serial_nr = 0; in x86_serial_nr_setup()
410 unsigned long bits_missing = 0; in native_write_cr0()
413 asm volatile("mov %0,%%cr0": "+r" (val) : : "memory"); in native_write_cr0()
429 unsigned long bits_changed = 0; in native_write_cr4()
432 asm volatile("mov %0,%%cr4": "+r" (val) : : "memory"); in native_write_cr4()
441 WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n", in native_write_cr4()
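native_write_cr0()/native_write_cr4() implement "pinning": security-sensitive bits (e.g. CR0.WP) are forced back on if a write attempts to clear them, and the dropped bits trigger the WARN_ONCE() shown above. A simplified model of the idea, not the kernel's exact code (the pinned-bit bookkeeping is omitted):

    #include <stdio.h>

    /* Force pinned bits back on and report any the caller tried to clear. */
    static unsigned long pin_cr_bits(unsigned long val, unsigned long pinned,
                                     unsigned long *bits_missing)
    {
            *bits_missing = ~val & pinned;  /* pinned bits dropped by the write */
            return val | pinned;            /* re-assert them before mov to CRn */
    }

    int main(void)
    {
            unsigned long missing;
            unsigned long val = pin_cr_bits(0x80000033UL /* WP (bit 16) clear */,
                                            1UL << 16, &missing);

            if (missing)
                    printf("pinned CR0 bits changed: %#lx!?\n", missing);
            printf("value actually written: %#lx\n", val);
            return 0;
    }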
500 return 0; in x86_nofsgsbase_setup()
562 u64 msr = 0; in ibt_save()
607 wrmsrl(MSR_IA32_S_CET, 0); in setup_cet()
613 wrmsrl(MSR_IA32_S_CET, 0); in setup_cet()
624 wrmsrl(MSR_IA32_S_CET, 0); in cet_disable()
625 wrmsrl(MSR_IA32_U_CET, 0); in cet_disable()
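The setup_cet()/cet_disable() lines clear MSR_IA32_S_CET (and MSR_IA32_U_CET) only on CPUs that enumerate CET. A hedged userspace probe of the enumeration bits involved, per the documented leaf-7 layout (ECX bit 7 = shadow stack, EDX bit 20 = indirect branch tracking), assuming GCC/Clang's <cpuid.h>:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID leaf 7, subleaf 0 carries the CET feature bits */
            if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                    return 1;

            printf("CET_SS:  %s\n", (ecx & (1u << 7))  ? "yes" : "no");
            printf("CET_IBT: %s\n", (edx & (1u << 20)) ? "yes" : "no");
            return 0;
    }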
643 { 0, 0 }
656 * extended_cpuid_level is set to 0 if unavailable in filter_cpuid_features()
661 if (!((s32)df->level < 0 ? in filter_cpuid_features()
670 pr_warn("CPU: CPU feature %s disabled, no CPUID level 0x%x\n", in filter_cpuid_features()
678 * in particular, if CPUID levels 0x80000002..4 are supported, this
762 * per CPU stack canary is 0 in both per CPU areas. in switch_gdt_and_percpu_base()
784 if (c->extended_cpuid_level < 0x80000004) in get_model_name()
788 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); in get_model_name()
789 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); in get_model_name()
790 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); in get_model_name()
791 c->x86_model_id[48] = 0; in get_model_name()
794 p = q = s = &c->x86_model_id[0]; in get_model_name()
807 *(s + 1) = '\0'; in get_model_name()
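get_model_name() reads the 48-character brand string from extended leaves 0x80000002..4 and then trims stray whitespace (the pointer juggling at lines 794 and 807). A userspace equivalent, assuming GCC/Clang's <cpuid.h>:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    /* Leaves 0x80000002..0x80000004 each return 16 bytes of the
     * 48-byte brand string in EAX..EDX. */
    int main(void)
    {
            unsigned int v[12];
            char brand[49], *p;

            if (__get_cpuid_max(0x80000000, NULL) < 0x80000004)
                    return 1;       /* brand string not reported */

            __get_cpuid(0x80000002, &v[0], &v[1], &v[2],  &v[3]);
            __get_cpuid(0x80000003, &v[4], &v[5], &v[6],  &v[7]);
            __get_cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
            memcpy(brand, v, 48);
            brand[48] = '\0';

            /* like the kernel, skip the leading spaces some CPUs embed */
            for (p = brand; *p == ' '; p++)
                    ;
            puts(p);
            return 0;
    }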
816 if (n >= 0x80000005) { in cpu_detect_cache_sizes()
817 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
821 c->x86_tlbsize = 0; in cpu_detect_cache_sizes()
825 if (n < 0x80000006) /* Some chips just have a large L1. */ in cpu_detect_cache_sizes()
828 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
832 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); in cpu_detect_cache_sizes()
842 if (l2size == 0) in cpu_detect_cache_sizes()
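cpu_detect_cache_sizes() pulls the L2 size out of leaf 0x80000006 (the `ecx >> 16` field) and, on 64-bit, sums TLB entry counts from EBX. The same L2 decode from userspace, a minimal sketch assuming <cpuid.h>:

    #include <cpuid.h>
    #include <stdio.h>

    /* Leaf 0x80000006, ECX: bits 31:16 = L2 size in KB,
     * bits 7:0 = cache line size in bytes. */
    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (__get_cpuid_max(0x80000000, NULL) < 0x80000006)
                    return 1;       /* some chips just have a large L1 */

            __get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
            printf("L2: %u KB, line size: %u bytes\n", ecx >> 16, ecx & 0xff);
            return 0;
    }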
874 for (i = 0; i < X86_VENDOR_NUM; i++) { in get_cpu_vendor()
878 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || in get_cpu_vendor()
898 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, in cpu_detect()
899 (unsigned int *)&c->x86_vendor_id[0], in cpu_detect()
904 /* Intel-defined flags: level 0x00000001 */ in cpu_detect()
905 if (c->cpuid_level >= 0x00000001) { in cpu_detect()
908 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); in cpu_detect()
914 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; in cpu_detect()
924 for (i = 0; i < NCAPINTS + NBUGINTS; i++) { in apply_forced_caps()
975 /* Intel-defined flags: level 0x00000001 */ in get_cpu_cap()
976 if (c->cpuid_level >= 0x00000001) { in get_cpu_cap()
977 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
983 /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ in get_cpu_cap()
984 if (c->cpuid_level >= 0x00000006) in get_cpu_cap()
985 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); in get_cpu_cap()
987 /* Additional Intel-defined flags: level 0x00000007 */ in get_cpu_cap()
988 if (c->cpuid_level >= 0x00000007) { in get_cpu_cap()
989 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
996 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1001 /* Extended state features: level 0x0000000d */ in get_cpu_cap()
1002 if (c->cpuid_level >= 0x0000000d) { in get_cpu_cap()
1003 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1008 /* AMD-defined flags: level 0x80000001 */ in get_cpu_cap()
1009 eax = cpuid_eax(0x80000000); in get_cpu_cap()
1012 if ((eax & 0xffff0000) == 0x80000000) { in get_cpu_cap()
1013 if (eax >= 0x80000001) { in get_cpu_cap()
1014 cpuid(0x80000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1021 if (c->extended_cpuid_level >= 0x80000007) { in get_cpu_cap()
1022 cpuid(0x80000007, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1028 if (c->extended_cpuid_level >= 0x80000008) { in get_cpu_cap()
1029 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1033 if (c->extended_cpuid_level >= 0x8000000a) in get_cpu_cap()
1034 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); in get_cpu_cap()
1036 if (c->extended_cpuid_level >= 0x8000001f) in get_cpu_cap()
1037 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); in get_cpu_cap()
1039 if (c->extended_cpuid_level >= 0x80000021) in get_cpu_cap()
1040 c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); in get_cpu_cap()
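Leaf 0x8000000a, the term this listing matches, reports AMD SVM capabilities: the SVM revision in EAX bits 7:0 and feature flags (nested paging, LBR virtualization, and so on) in EDX, which get_cpu_cap() stores as CPUID_8000_000A_EDX. A quick userspace probe, again assuming <cpuid.h>:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (__get_cpuid_max(0x80000000, NULL) < 0x8000000a)
                    return 1;       /* no SVM leaf (e.g. Intel parts) */

            __get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx);
            printf("SVM rev %u, feature flags (EDX): %#x\n", eax & 0xff, edx);
            return 0;
    }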
1058 (c->extended_cpuid_level < 0x80000008)) { in get_cpu_address_sizes()
1073 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_address_sizes()
1075 c->x86_virt_bits = (eax >> 8) & 0xff; in get_cpu_address_sizes()
1076 c->x86_phys_bits = eax & 0xff; in get_cpu_address_sizes()
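get_cpu_address_sizes() decodes EAX of leaf 0x80000008 exactly as the two lines above show; the same decode from userspace:

    #include <cpuid.h>
    #include <stdio.h>

    /* Leaf 0x80000008, EAX: bits 7:0 = physical address bits,
     * bits 15:8 = linear (virtual) address bits. */
    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (__get_cpuid_max(0x80000000, NULL) < 0x80000008)
                    return 1;

            __get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
            printf("phys: %u bits, virt: %u bits\n",
                   eax & 0xff, (eax >> 8) & 0xff);
            return 0;
    }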
1100 for (i = 0; i < X86_VENDOR_NUM; i++) in identify_cpu_without_cpuid()
1102 c->x86_vendor_id[0] = 0; in identify_cpu_without_cpuid()
1104 if (c->x86_vendor_id[0]) { in identify_cpu_without_cpuid()
1111 #define NO_SPECULATION BIT(0)
1184 /* AMD Family 0xf - 0x12 */
1185 …VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1186 …VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1187 …VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1188 …VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO …
1190 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1215 #define SRBDS BIT(0)
1245 VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, 0x5, MMIO | RETBLEED | GDS),
1249 VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, 0xb, MMIO | RETBLEED | GDS | SRBDS),
1251 VULNBL_INTEL_STEPS(INTEL_KABYLAKE, 0xc, MMIO | RETBLEED | GDS | SRBDS),
1258 VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED | ITS),
1277 VULNBL_AMD(0x15, RETBLEED),
1278 VULNBL_AMD(0x16, RETBLEED),
1279 VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
1280 VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
1281 VULNBL_AMD(0x19, SRSO),
1282 VULNBL_AMD(0x1a, SRSO),
1295 u64 x86_arch_cap_msr = 0; in x86_read_arch_cap_msr()
1405 * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug when: in cpu_set_bug_bits()
1530 int taint = 0; in parse_set_clear_cpuid()
1568 for (bit = 0; bit < 32 * (NCAPINTS + NBUGINTS); bit++) { in parse_set_clear_cpuid()
1648 if (arglen > 0) in cpu_parse_early_param()
1652 if (arglen > 0) in cpu_parse_early_param()
1672 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in early_identify_cpu()
1673 c->extended_cpuid_level = 0; in early_identify_cpu()
1693 c->cpu_index = 0; in early_identify_cpu()
1740 int count = 0; in init_cpu_devs()
1763 for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) { in early_cpu_init()
1764 for (j = 0; j < 2; j++) { in early_cpu_init()
1796 loadsegment(fs, 0); in detect_null_seg_behavior()
1799 return tmp == 0; in detect_null_seg_behavior()
1825 * 0x18 is the respective family for Hygon. in check_null_seg_clears_base()
1827 if ((c->x86 == 0x17 || c->x86 == 0x18) && in check_null_seg_clears_base()
1837 c->extended_cpuid_level = 0; in generic_identify()
1858 * systems that run Linux at CPL > 0 may or may not have the in generic_identify()
1882 c->x86_cache_size = 0; in identify_cpu()
1884 c->x86_model = c->x86_stepping = 0; /* So far unknown... */ in identify_cpu()
1885 c->x86_vendor_id[0] = '\0'; /* Unset */ in identify_cpu()
1886 c->x86_model_id[0] = '\0'; /* Unset */ in identify_cpu()
1898 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in identify_cpu()
1900 memset(&c->vmx_capability, 0, sizeof(c->vmx_capability)); in identify_cpu()
1960 if (!c->x86_model_id[0]) { in identify_cpu()
1989 for (i = 0; i < NCAPINTS; i++) in identify_cpu()
2027 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); in enable_sep_cpu()
2028 wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); in enable_sep_cpu()
2029 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); in enable_sep_cpu()
2080 if (c->cpuid_level >= 0) in print_cpu_info()
2087 if (c->x86_model_id[0]) in print_cpu_info()
2092 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); in print_cpu_info()
2094 if (c->x86_stepping || c->cpuid_level >= 0) in print_cpu_info()
2095 pr_cont(", stepping: 0x%x)\n", c->x86_stepping); in print_cpu_info()
2166 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); in idt_syscall_init()
2167 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); in idt_syscall_init()
2186 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); in syscall_init()
2214 for (i = 0; i < 8; i++) { in clear_all_debug_regs()
2219 set_debugreg(0, i); in clear_all_debug_regs()
2243 wrmsr(MSR_TSC_AUX, cpudata, 0); in setup_getcpu()
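setup_getcpu() programs MSR_TSC_AUX with an encoded CPU/node pair so RDTSCP (and RDPID) can hand it back to userspace; in the vDSO encoding the CPU number occupies the low 12 bits. A sketch reading it back with the __rdtscp() intrinsic (assumes a CPU with RDTSCP and the 12-bit split described):

    #include <stdio.h>
    #include <x86intrin.h>

    int main(void)
    {
            unsigned int aux;
            unsigned long long tsc = __rdtscp(&aux); /* aux <- MSR_TSC_AUX */

            printf("tsc=%llu cpu=%u node=%u\n", tsc, aux & 0xfff, aux >> 12);
            return 0;
    }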
2278 tss->io_bitmap.prev_max = 0; in tss_setup_io_bitmap()
2279 tss->io_bitmap.prev_sequence = 0; in tss_setup_io_bitmap()
2280 memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap)); in tss_setup_io_bitmap()
2285 tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL; in tss_setup_io_bitmap()
2343 if (this_cpu_read(numa_node) == 0 && in cpu_init()
2354 loadsegment(fs, 0); in cpu_init()
2355 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); in cpu_init()
2358 wrmsrl(MSR_FS_BASE, 0); in cpu_init()
2359 wrmsrl(MSR_KERNEL_GS_BASE, 0); in cpu_init()
2402 curr_info->cpuid_level = cpuid_eax(0); in store_cpu_caps()
2483 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); in arch_cpu_finalize_init()
2521 set_memory_4k((unsigned long)__va(0), 1); in arch_cpu_finalize_init()