Lines Matching +full:ecx +full:- +full:1000

1 /* SPDX-License-Identifier: GPL-2.0-only */
13 #include <asm/msr-index.h>
105 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
134 #define X86_FEATURE_MWAIT KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
135 #define X86_FEATURE_VMX KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
136 #define X86_FEATURE_SMX KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
137 #define X86_FEATURE_PDCM KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
138 #define X86_FEATURE_PCID KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
139 #define X86_FEATURE_X2APIC KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
140 #define X86_FEATURE_MOVBE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
141 #define X86_FEATURE_TSC_DEADLINE_TIMER KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
142 #define X86_FEATURE_XSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
143 #define X86_FEATURE_OSXSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
144 #define X86_FEATURE_RDRAND KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
145 #define X86_FEATURE_HYPERVISOR KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
164 #define X86_FEATURE_UMIP KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
165 #define X86_FEATURE_PKU KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
166 #define X86_FEATURE_OSPKE KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
167 #define X86_FEATURE_LA57 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
168 #define X86_FEATURE_RDPID KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
169 #define X86_FEATURE_SGX_LC KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
170 #define X86_FEATURE_SHSTK KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
175 #define X86_FEATURE_PKS KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
180 #define X86_FEATURE_XTILEDATA_XFD KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)
185 #define X86_FEATURE_SVM KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
186 #define X86_FEATURE_PERFCTR_CORE KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
187 #define X86_FEATURE_PERFCTR_NB KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 24)
188 #define X86_FEATURE_PERFCTR_LLC KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 28)
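
The packing described at line 105 above can be illustrated with a self-contained sketch: the leaf, sub-leaf, output register, and bit number all fit in one 8-byte struct that is cheap to pass by value. The names below (cpu_feature, CPU_FEATURE, cpu_has) are invented for this example and are not the selftests' API; the real helpers also consult KVM's supported-CPUID tables rather than raw host CPUID.

#include <stdbool.h>
#include <stdint.h>

/* Index of each CPUID output register in the regs[] array below. */
enum { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

/* 8 bytes total, so it can be passed by value with no overhead. */
struct cpu_feature {
	uint32_t function;	/* CPUID leaf, e.g. 0x1 or 0x80000001 */
	uint16_t index;		/* sub-leaf, i.e. the ECX input */
	uint8_t reg;		/* which output register holds the bit */
	uint8_t bit;		/* bit position within that register */
};

#define CPU_FEATURE(fn, idx, gpr, __bit)				\
	((struct cpu_feature){ .function = (fn), .index = (idx),	\
			       .reg = REG_##gpr, .bit = (__bit) })

static bool cpu_has(struct cpu_feature f)
{
	uint32_t regs[4];

	regs[REG_EAX] = f.function;
	regs[REG_ECX] = f.index;

	asm volatile("cpuid"
		     : "=a"(regs[REG_EAX]), "=b"(regs[REG_EBX]),
		       "=c"(regs[REG_ECX]), "=d"(regs[REG_EDX])
		     : "0"(regs[REG_EAX]), "2"(regs[REG_ECX]));

	return (regs[f.reg] >> f.bit) & 1;
}

static bool cpu_has_x2apic(void)
{
	/* Same encoding as X86_FEATURE_X2APIC above: CPUID.0x1.ECX[21]. */
	return cpu_has(CPU_FEATURE(0x1, 0, ECX, 21));
}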
232 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
233 * value/property as opposed to a single-bit feature. Again, pack the info
234 * into a 64-bit value to pass by value with no overhead.
268 #define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
274 #define X86_PROPERTY_XSTATE_MAX_SIZE KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
284 #define X86_PROPERTY_AMX_MAX_ROWS KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)
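
The property variant only needs to swap the single bit for an inclusive lo..hi bit range and return the extracted field. A sketch, reusing the types and register selectors from the feature example above (names are hypothetical):

/* Same leaf/register selectors as struct cpu_feature, plus a bit range. */
struct cpu_property {
	uint32_t function;	/* CPUID leaf */
	uint8_t index;		/* sub-leaf, narrowed so the struct stays 8 bytes */
	uint8_t reg;		/* output register, as in struct cpu_feature */
	uint8_t lo_bit;		/* lowest bit of the field */
	uint8_t hi_bit;		/* highest bit of the field, inclusive */
};

static uint32_t cpu_property_extract(uint32_t reg_val, struct cpu_property p)
{
	/* Build the mask so a full 0..31 field doesn't shift by 32 (UB). */
	uint32_t mask = (p.hi_bit == 31) ? ~0u : ((1u << (p.hi_bit + 1)) - 1);

	return (reg_val & mask) >> p.lo_bit;
}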
335 #define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0)
336 #define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1)
337 #define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 2)
338 #define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED KVM_X86_PMU_FEATURE(ECX, 3)
371 #define PAGE_MASK (~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)
373 #define HUGEPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
375 #define HUGEPAGE_MASK(x) (~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)
380 /* General Registers in 64-Bit Mode */
431 return ((uint64_t)desc->base3 << 32) | in get_desc64_base()
432 (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); in get_desc64_base()
607 /* Note, ECX and EDX are architecturally required to be '0'. */ in wrpkru()
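
The constraint noted above is typically satisfied by hard-wiring ECX and EDX to zero in the inline asm. A minimal sketch (hypothetical function name; 0f 01 ef is the raw WRPKRU opcode, so it assembles even without compiler PKU support):

static inline void example_wrpkru(uint32_t pkru)
{
	/* WRPKRU (0f 01 ef): EAX = new PKRU value, ECX and EDX must be 0. */
	asm volatile(".byte 0x0f, 0x01, 0xef"
		     : : "a"(pkru), "c"(0), "d"(0) : "memory");
}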
635 uint32_t *ecx, uint32_t *edx) in __cpuid() argument
638 *ecx = index; in __cpuid()
643 "=c" (*ecx), in __cpuid()
645 : "0" (*eax), "2" (*ecx) in __cpuid()
651 uint32_t *ecx, uint32_t *edx) in cpuid() argument
653 return __cpuid(function, 0, eax, ebx, ecx, edx); in cpuid()
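
Only the lines matching the search are shown above; a reconstruction of the whole wrapper, offered as a sketch rather than a verbatim copy, looks roughly like this:

static inline void __cpuid(uint32_t function, uint32_t index,
			   uint32_t *eax, uint32_t *ebx,
			   uint32_t *ecx, uint32_t *edx)
{
	*eax = function;
	*ecx = index;

	/* "0" and "2" tie the inputs to the EAX and ECX outputs above. */
	asm volatile("cpuid"
		     : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
		     : "0"(*eax), "2"(*ecx)
		     : "memory");
}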
658 uint32_t eax, ebx, ecx, edx; in this_cpu_fms() local
660 cpuid(1, &eax, &ebx, &ecx, &edx); in this_cpu_fms()
677 uint32_t eax, ebx, ecx, edx; in this_cpu_vendor_string_is() local
679 cpuid(0, &eax, &ebx, &ecx, &edx); in this_cpu_vendor_string_is()
680 return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]); in this_cpu_vendor_string_is()
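
CPUID leaf 0 returns the 12-byte vendor string ("GenuineIntel", "AuthenticAMD", ...) split across EBX, EDX and ECX, in that order, which is why the comparison above checks ebx, edx and ecx against chunk[0], chunk[1] and chunk[2]. A standalone sketch built on the cpuid() wrapper above (function name is hypothetical; needs <string.h> for memcmp):

static bool example_vendor_is(const char *vendor)
{
	uint32_t eax, vendor_regs[3];

	/* EBX, EDX, ECX hold bytes 0-3, 4-7 and 8-11 of the vendor string. */
	cpuid(0, &eax, &vendor_regs[0], &vendor_regs[2], &vendor_regs[1]);

	return !memcmp(vendor_regs, vendor, 12);
}

Usage would be e.g. example_vendor_is("GenuineIntel").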
842 cycles = guest_tsc_khz / 1000 * usec; in udelay()
846 * as accurate as possible, e.g. doesn't trigger PAUSE-Loop VM-Exits. in udelay()
851 } while (now - start < cycles); in udelay()
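
Putting the matched lines together: the delay converts microseconds into TSC cycles (guest_tsc_khz / 1000 is cycles per microsecond) and spins on RDTSC without PAUSE. A sketch assuming the selftests' rdtsc() helper and guest_tsc_khz global behave as their names suggest:

static inline void example_udelay(unsigned long usec)
{
	uint64_t start, now, cycles;

	/* guest_tsc_khz / 1000 == TSC cycles per microsecond. */
	cycles = guest_tsc_khz / 1000 * usec;

	/* No PAUSE in the loop, so the delay isn't perturbed by, e.g.,
	 * PAUSE-Loop VM-Exits (see the comment at line 846 above). */
	start = rdtsc();
	do {
		now = rdtsc();
	} while (now - start < cycles);
}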
878 TEST_ASSERT(r == msrs->nmsrs, in vcpu_msrs_get()
880 r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index); in vcpu_msrs_get()
886 TEST_ASSERT(r == msrs->nmsrs, in vcpu_msrs_set()
888 r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index); in vcpu_msrs_set()
931 return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax; in kvm_cpu_fms()
1010 * Allocate a "struct kvm_cpuid2" instance, with the 0-length array of
1019 TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2"); in allocate_kvm_cpuid2()
1021 cpuid->nent = nr_entries; in allocate_kvm_cpuid2()
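
struct kvm_cpuid2 ends in a flexible array of entries, so the allocation sizes the header plus nr_entries array slots. A reconstruction of the helper, offered as a sketch:

static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid;

	/* Header plus the flexible array of 'nr_entries' entries. */
	cpuid = malloc(sizeof(*cpuid) + nr_entries * sizeof(cpuid->entries[0]));
	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

	cpuid->nent = nr_entries;

	return cpuid;
}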
1030 vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid); in vcpu_get_cpuid()
1037 TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)"); in __vcpu_get_cpuid_entry()
1041 return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid, in __vcpu_get_cpuid_entry()
1055 TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first"); in __vcpu_set_cpuid()
1056 r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid); in __vcpu_set_cpuid()
1067 TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first"); in vcpu_set_cpuid()
1068 vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid); in vcpu_set_cpuid()
1087 return *((&entry->eax) + feature.reg) & BIT(feature.bit); in vcpu_cpuid_has()
1187 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
1188 * per-CPU data. Using only registers avoids having to map memory into the
1192 * instruction. But, selftests are 64-bit only, making register pressure a
1193 * minor concern. Use r9-r11 as they are volatile, i.e. don't need to be saved
1195 * instructions. Ideally, fixup would use r8-r10 and thus avoid implicit
1196 * parameters entirely, but Hyper-V's hypercall ABI uses r8 and testing Hyper-V
1197 * is higher priority than testing non-faulting SYSCALL/SYSRET.
1200 * is guaranteed to be non-zero on fault.
1208 * r9 = exception vector (non-zero)
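
A usage sketch of that contract: the helper returns the vector reported in r9, with zero meaning no fault, so guest code can assert on it directly. The MSR index below is made up for illustration, and kvm_asm_safe(), GP_VECTOR and GUEST_ASSERT() are used here on the assumption they behave as described above:

static void example_guest_code(void)
{
	uint8_t vector;

	/* Writing a (presumably) unimplemented MSR should fault with #GP. */
	vector = kvm_asm_safe("wrmsr", "a"(0), "d"(0), "c"(0xdeadc0de));
	GUEST_ASSERT(vector == GP_VECTOR);
}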
1292 * use ECX as an input index, and EDX:EAX as a 64-bit output. in BUILD_READ_U64_SAFE_HELPERS()
1304 return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr)); in wrmsr_safe()
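
The read-side helpers assemble their 64-bit result from the EDX:EAX pair, mirroring how the wrmsr above splits the value across EAX and EDX. A standalone, non-faulting sketch (hypothetical name):

static inline uint64_t example_rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	/* ECX selects the MSR; the value comes back in EDX:EAX. */
	asm volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));

	return (uint64_t)lo | ((uint64_t)hi << 32);
}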
1404 #define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
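
Each 4-level paging level covers 9 more address bits on top of the 12-bit page offset, which is all the formula encodes. A few worked values, assuming the macro above:

_Static_assert(PG_LEVEL_SHIFT(1) == 12, "4KiB pages");		/* PTE   */
_Static_assert(PG_LEVEL_SHIFT(2) == 21, "2MiB pages");		/* PDE   */
_Static_assert(PG_LEVEL_SHIFT(3) == 30, "1GiB pages");		/* PDPTE */
_Static_assert(PG_LEVEL_SHIFT(4) == 39, "512GiB per PML4 entry");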
1426 #define X86_CR0_NW (1UL<<29) /* Not Write-through */