Lines Matching +full:0 +full:x8000000a
36 int feature_bit = 0; in xstate_required_size()
41 if (xstate_bv & 0x1) { in xstate_required_size()
43 cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx); in xstate_required_size()
63 for (i = 0; i < nent; i++) { in cpuid_entry2_find()
82 best = cpuid_entry2_find(entries, nent, 0x80000008, 0); in kvm_check_cpuid()
84 int vaddr_bits = (best->eax & 0xff00) >> 8; in kvm_check_cpuid()
86 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) in kvm_check_cpuid()
90 return 0; in kvm_check_cpuid()
97 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0); in kvm_update_pv_runtime()
111 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_update_cpuid_runtime()
122 best = kvm_find_cpuid_entry(vcpu, 7, 0); in kvm_update_cpuid_runtime()
123 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) in kvm_update_cpuid_runtime()
127 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_update_cpuid_runtime()
131 best = kvm_find_cpuid_entry(vcpu, 0xD, 1); in kvm_update_cpuid_runtime()
136 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0); in kvm_update_cpuid_runtime()
142 best = kvm_find_cpuid_entry(vcpu, 0x1, 0); in kvm_update_cpuid_runtime()
155 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_vcpu_after_set_cpuid()
165 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_vcpu_after_set_cpuid()
167 vcpu->arch.guest_supported_xcr0 = 0; in kvm_vcpu_after_set_cpuid()
198 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { in cpuid_fix_nx_cap()
200 if (e->function == 0x80000001) { in cpuid_fix_nx_cap()
215 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); in cpuid_query_maxphyaddr()
216 if (!best || best->eax < 0x80000008) in cpuid_query_maxphyaddr()
218 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); in cpuid_query_maxphyaddr()
220 return best->eax & 0xff; in cpuid_query_maxphyaddr()
248 for (i = 0; i < cpuid->nent; i++) { in kvm_vcpu_ioctl_set_cpuid()
254 e2[i].index = 0; in kvm_vcpu_ioctl_set_cpuid()
255 e2[i].flags = 0; in kvm_vcpu_ioctl_set_cpuid()
256 e2[i].padding[0] = 0; in kvm_vcpu_ioctl_set_cpuid()
257 e2[i].padding[1] = 0; in kvm_vcpu_ioctl_set_cpuid()
258 e2[i].padding[2] = 0; in kvm_vcpu_ioctl_set_cpuid()
310 return 0; in kvm_vcpu_ioctl_set_cpuid2()
326 return 0; in kvm_vcpu_ioctl_get_cpuid2()
349 unsigned int f_nx = is_efer_nx() ? F(NX) : 0; in kvm_set_cpu_caps()
354 unsigned int f_gbpages = 0; in kvm_set_cpu_caps()
355 unsigned int f_lm = 0; in kvm_set_cpu_caps()
369 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | in kvm_set_cpu_caps()
370 0 /* DS-CPL, VMX, SMX, EST */ | in kvm_set_cpu_caps()
371 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | in kvm_set_cpu_caps()
372 F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) | in kvm_set_cpu_caps()
373 F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) | in kvm_set_cpu_caps()
375 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | in kvm_set_cpu_caps()
384 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | in kvm_set_cpu_caps()
386 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) | in kvm_set_cpu_caps()
387 0 /* Reserved, DS, ACPI */ | F(MMX) | in kvm_set_cpu_caps()
389 0 /* HTT, TM, Reserved, PBE */ in kvm_set_cpu_caps()
394 F(BMI2) | F(ERMS) | 0 /*INVPCID*/ | F(RTM) | 0 /*MPX*/ | F(RDSEED) | in kvm_set_cpu_caps()
397 F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/ in kvm_set_cpu_caps()
401 F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | in kvm_set_cpu_caps()
404 F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ in kvm_set_cpu_caps()
444 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | in kvm_set_cpu_caps()
446 F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | in kvm_set_cpu_caps()
447 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) | in kvm_set_cpu_caps()
454 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | in kvm_set_cpu_caps()
456 F(PAT) | F(PSE36) | 0 /* Reserved */ | in kvm_set_cpu_caps()
457 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | in kvm_set_cpu_caps()
459 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW) in kvm_set_cpu_caps()
498 kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0); in kvm_set_cpu_caps()
526 entry->flags = 0; in do_host_cpuid()
534 case 0xb: in do_host_cpuid()
535 case 0xd: in do_host_cpuid()
536 case 0xf: in do_host_cpuid()
537 case 0x10: in do_host_cpuid()
538 case 0x12: in do_host_cpuid()
539 case 0x14: in do_host_cpuid()
540 case 0x17: in do_host_cpuid()
541 case 0x18: in do_host_cpuid()
542 case 0x1f: in do_host_cpuid()
543 case 0x8000001d: in do_host_cpuid()
560 entry->index = 0; in __do_cpuid_func_emulated()
561 entry->flags = 0; in __do_cpuid_func_emulated()
564 case 0: in __do_cpuid_func_emulated()
574 entry->eax = 0; in __do_cpuid_func_emulated()
581 return 0; in __do_cpuid_func_emulated()
594 entry = do_host_cpuid(array, function, 0); in __do_cpuid_func()
599 case 0: in __do_cpuid_func()
601 entry->eax = min(entry->eax, 0x1fU); in __do_cpuid_func()
610 * CPUID(function=2, index=0) may return different results each in __do_cpuid_func()
612 * number of times software should do CPUID(2, 0). in __do_cpuid_func()
615 * idiotic. Intel's SDM states that EAX & 0xff "will always in __do_cpuid_func()
621 * a stateful CPUID.0x2 is encountered. in __do_cpuid_func()
623 WARN_ON_ONCE((entry->eax & 0xff) > 1); in __do_cpuid_func()
625 /* functions 4 and 0x8000001d have additional index. */ in __do_cpuid_func()
627 case 0x8000001d: in __do_cpuid_func()
632 for (i = 1; entry->eax & 0x1f; ++i) { in __do_cpuid_func()
639 entry->eax = 0x4; /* allow ARAT */ in __do_cpuid_func()
640 entry->ebx = 0; in __do_cpuid_func()
641 entry->ecx = 0; in __do_cpuid_func()
642 entry->edx = 0; in __do_cpuid_func()
651 /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */ in __do_cpuid_func()
658 entry->ebx = 0; in __do_cpuid_func()
659 entry->ecx = 0; in __do_cpuid_func()
660 entry->edx = 0; in __do_cpuid_func()
665 case 0xa: { /* Architectural Performance Monitoring */ in __do_cpuid_func()
677 memset(&cap, 0, sizeof(cap)); in __do_cpuid_func()
687 edx.split.reserved1 = 0; in __do_cpuid_func()
688 edx.split.reserved2 = 0; in __do_cpuid_func()
692 entry->ecx = 0; in __do_cpuid_func()
697 * Per Intel's SDM, the 0x1f is a superset of 0xb, in __do_cpuid_func()
700 case 0x1f: in __do_cpuid_func()
701 case 0xb: in __do_cpuid_func()
704 * previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is in __do_cpuid_func()
707 for (i = 1; entry->ecx & 0xff00; ++i) { in __do_cpuid_func()
713 case 0xd: in __do_cpuid_func()
730 WARN_ON_ONCE(supported_xss != 0); in __do_cpuid_func()
731 entry->ebx = 0; in __do_cpuid_func()
757 if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { in __do_cpuid_func()
761 entry->edx = 0; in __do_cpuid_func()
765 case 0x14: in __do_cpuid_func()
767 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
777 static const char signature[12] = "KVMKVMKVM\0\0"; in __do_cpuid_func()
780 entry->ebx = sigptr[0]; in __do_cpuid_func()
803 entry->ebx = 0; in __do_cpuid_func()
804 entry->ecx = 0; in __do_cpuid_func()
805 entry->edx = 0; in __do_cpuid_func()
807 case 0x80000000: in __do_cpuid_func()
808 entry->eax = min(entry->eax, 0x8000001f); in __do_cpuid_func()
810 case 0x80000001: in __do_cpuid_func()
814 case 0x80000006: in __do_cpuid_func()
817 case 0x80000007: /* Advanced power management */ in __do_cpuid_func()
822 entry->eax = entry->ebx = entry->ecx = 0; in __do_cpuid_func()
824 case 0x80000008: { in __do_cpuid_func()
825 unsigned g_phys_as = (entry->eax >> 16) & 0xff; in __do_cpuid_func()
826 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); in __do_cpuid_func()
827 unsigned phys_as = entry->eax & 0xff; in __do_cpuid_func()
832 entry->edx = 0; in __do_cpuid_func()
836 case 0x8000000A: in __do_cpuid_func()
838 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
844 entry->ecx = 0; /* Reserved */ in __do_cpuid_func()
847 case 0x80000019: in __do_cpuid_func()
848 entry->ecx = entry->edx = 0; in __do_cpuid_func()
850 case 0x8000001a: in __do_cpuid_func()
851 case 0x8000001e: in __do_cpuid_func()
854 case 0x8000001F: in __do_cpuid_func()
856 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
859 case 0xC0000000: in __do_cpuid_func()
860 /*Just support up to 0xC0000004 now*/ in __do_cpuid_func()
861 entry->eax = min(entry->eax, 0xC0000004); in __do_cpuid_func()
863 case 0xC0000001: in __do_cpuid_func()
868 case 0xC0000002: in __do_cpuid_func()
869 case 0xC0000003: in __do_cpuid_func()
870 case 0xC0000004: in __do_cpuid_func()
872 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; in __do_cpuid_func()
876 r = 0; in __do_cpuid_func()
893 #define CENTAUR_CPUID_SIGNATURE 0xC0000000
903 return 0; in get_cpuid_func()
936 for (i = 0; i < num_entries; i++) { in sanity_check_entries()
940 if (pad[0] || pad[1] || pad[2]) in sanity_check_entries()
951 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE, in kvm_dev_ioctl_get_cpuid()
955 .nent = 0, in kvm_dev_ioctl_get_cpuid()
974 for (i = 0; i < ARRAY_SIZE(funcs); i++) { in kvm_dev_ioctl_get_cpuid()
1000 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
1013 * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
1014 * - Hypervisor: 0x40000000 - 0x4fffffff
1015 * - Extended: 0x80000000 - 0xbfffffff
1016 * - Centaur: 0xc0000000 - 0xcfffffff
1019 * their own independent class associated with a 0x100 byte range. E.g. if Qemu in get_out_of_range_cpuid_entry()
1023 * - HyperV: 0x40000000 - 0x400000ff
1024 * - KVM: 0x40000100 - 0x400001ff
1032 basic = kvm_find_cpuid_entry(vcpu, 0, 0); in get_out_of_range_cpuid_entry()
1040 if (function >= 0x40000000 && function <= 0x4fffffff) in get_out_of_range_cpuid_entry()
1041 class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0); in get_out_of_range_cpuid_entry()
1042 else if (function >= 0xc0000000) in get_out_of_range_cpuid_entry()
1043 class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0); in get_out_of_range_cpuid_entry()
1045 class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0); in get_out_of_range_cpuid_entry()
1052 * max basic entry, e.g. if the max basic leaf is 0xb but there is no in get_out_of_range_cpuid_entry()
1053 * entry for CPUID.0xb.index (see below), then the output value for EDX in get_out_of_range_cpuid_entry()
1054 * needs to be pulled from CPUID.0xb.1. in get_out_of_range_cpuid_entry()
1086 if (function == 7 && index == 0) { in kvm_cpuid()
1093 *eax = *ebx = *ecx = *edx = 0; in kvm_cpuid()
1095 * When leaf 0BH or 1FH is defined, CL is pass-through in kvm_cpuid()
1101 if (function == 0xb || function == 0x1f) { in kvm_cpuid()
1104 *ecx = index & 0xff; in kvm_cpuid()
1119 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0)) in kvm_emulate_cpuid()