/*
 * x86/vmx.c : Framework for testing nested virtualization
 *	This is a framework to test nested VMX for KVM, which
 *	started as a project of GSoC 2013. All test cases should
 *	be located in x86/vmx_tests.c; framework-related
 *	functions should be in this file.
 *
 * How to write test cases?
 *	Add the test suite's callbacks to the "vmx_tests" variable. You can
 *	provide:
 *	1. an init function used for initializing the test suite,
 *	2. a main function for code running in the L2 guest,
 *	3. an exit_handler to handle vmexits from L2 to L1,
 *	4. a syscall handler to handle L2 syscall vmexits,
 *	5. a vmenter fail handler to handle direct failures of vmenter,
 *	6. guest_regs, which is loaded on vmenter and saved on
 *	   vmexit; you can read and set it in exit_handler.
 *	If no special function is needed for a test suite, use the
 *	corresponding basic_* functions as callbacks. More handlers
 *	can be added to "vmx_tests"; see the details of "struct vmx_test"
 *	and the function test_run().
 *
 * Currently, the VMX test framework only sets up one VCPU and one
 * concurrent guest test environment, with the same paging for L2 and
 * L1. When EPT is used, only a 1:1 mapping from VFN to PFN is set up.
 *
 * Author : Arthur Chunqi Li <yzt356@gmail.com>
 */
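
/*
 * A minimal sketch of a test-suite entry (illustrative only -- the exact
 * field order and the basic_* callback names must be checked against
 * "struct vmx_test" in vmx.h and the vmx_tests array in x86/vmx_tests.c):
 *
 *	static void demo_guest_main(void)
 *	{
 *		// Runs in L2; a vmcall() here exits back to L1.
 *		vmcall();
 *	}
 *
 *	// Entry in the vmx_tests array, using the basic_* defaults for
 *	// everything except the guest's main function:
 *	// { "demo", basic_init, demo_guest_main, basic_exit_handler,
 *	//   basic_syscall_handler, {0} },
 */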

#include "libcflat.h"
#include "processor.h"
#include "alloc_page.h"
#include "vm.h"
#include "vmalloc.h"
#include "desc.h"
#include "vmx.h"
#include "msr.h"
#include "smp.h"
#include "apic.h"

u64 *bsp_vmxon_region;
struct vmcs *vmcs_root;
u32 vpid_cnt;
u64 guest_stack_top, guest_syscall_stack_top;
u32 ctrl_pin, ctrl_enter, ctrl_exit, ctrl_cpu[2];
struct regs regs;

struct vmx_test *current;

#define MAX_TEST_TEARDOWN_STEPS 10

struct test_teardown_step {
	test_teardown_func func;
	void *data;
};

static int teardown_count;
static struct test_teardown_step teardown_steps[MAX_TEST_TEARDOWN_STEPS];

static test_guest_func v2_guest_main;

u64 hypercall_field;
bool launched;
static int matched;
static int guest_finished;
static int in_guest;

union vmx_basic_msr basic_msr;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_cpu_rev[2];
union vmx_ctrl_msr ctrl_exit_rev;
union vmx_ctrl_msr ctrl_enter_rev;
union vmx_ept_vpid ept_vpid;

extern struct descriptor_table_ptr gdt_descr;
extern struct descriptor_table_ptr idt_descr;
extern void *vmx_return;
extern void *entry_sysenter;
extern void *guest_entry;

static volatile u32 stage;

static jmp_buf abort_target;

struct vmcs_field {
	u64 mask;
	u64 encoding;
};

#define MASK(_bits) GENMASK_ULL((_bits) - 1, 0)
#define MASK_NATURAL MASK(sizeof(unsigned long) * 8)

static struct vmcs_field vmcs_fields[] = {
	{ MASK(16), VPID },
	{ MASK(16), PINV },
	{ MASK(16), EPTP_IDX },

	{ MASK(16), GUEST_SEL_ES },
	{ MASK(16), GUEST_SEL_CS },
	{ MASK(16), GUEST_SEL_SS },
	{ MASK(16), GUEST_SEL_DS },
	{ MASK(16), GUEST_SEL_FS },
	{ MASK(16), GUEST_SEL_GS },
	{ MASK(16), GUEST_SEL_LDTR },
	{ MASK(16), GUEST_SEL_TR },
	{ MASK(16), GUEST_INT_STATUS },

	{ MASK(16), HOST_SEL_ES },
	{ MASK(16), HOST_SEL_CS },
	{ MASK(16), HOST_SEL_SS },
	{ MASK(16), HOST_SEL_DS },
	{ MASK(16), HOST_SEL_FS },
	{ MASK(16), HOST_SEL_GS },
	{ MASK(16), HOST_SEL_TR },

	{ MASK(64), IO_BITMAP_A },
	{ MASK(64), IO_BITMAP_B },
	{ MASK(64), MSR_BITMAP },
	{ MASK(64), EXIT_MSR_ST_ADDR },
	{ MASK(64), EXIT_MSR_LD_ADDR },
	{ MASK(64), ENTER_MSR_LD_ADDR },
	{ MASK(64), VMCS_EXEC_PTR },
	{ MASK(64), TSC_OFFSET },
	{ MASK(64), APIC_VIRT_ADDR },
	{ MASK(64), APIC_ACCS_ADDR },
	{ MASK(64), EPTP },

	{ MASK(64), INFO_PHYS_ADDR },

	{ MASK(64), VMCS_LINK_PTR },
	{ MASK(64), GUEST_DEBUGCTL },
	{ MASK(64), GUEST_EFER },
	{ MASK(64), GUEST_PAT },
	{ MASK(64), GUEST_PERF_GLOBAL_CTRL },
	{ MASK(64), GUEST_PDPTE },

	{ MASK(64), HOST_PAT },
	{ MASK(64), HOST_EFER },
	{ MASK(64), HOST_PERF_GLOBAL_CTRL },

	{ MASK(32), PIN_CONTROLS },
	{ MASK(32), CPU_EXEC_CTRL0 },
	{ MASK(32), EXC_BITMAP },
	{ MASK(32), PF_ERROR_MASK },
	{ MASK(32), PF_ERROR_MATCH },
	{ MASK(32), CR3_TARGET_COUNT },
	{ MASK(32), EXI_CONTROLS },
	{ MASK(32), EXI_MSR_ST_CNT },
	{ MASK(32), EXI_MSR_LD_CNT },
	{ MASK(32), ENT_CONTROLS },
	{ MASK(32), ENT_MSR_LD_CNT },
	{ MASK(32), ENT_INTR_INFO },
	{ MASK(32), ENT_INTR_ERROR },
	{ MASK(32), ENT_INST_LEN },
	{ MASK(32), TPR_THRESHOLD },
	{ MASK(32), CPU_EXEC_CTRL1 },

	{ MASK(32), VMX_INST_ERROR },
	{ MASK(32), EXI_REASON },
	{ MASK(32), EXI_INTR_INFO },
	{ MASK(32), EXI_INTR_ERROR },
	{ MASK(32), IDT_VECT_INFO },
	{ MASK(32), IDT_VECT_ERROR },
	{ MASK(32), EXI_INST_LEN },
	{ MASK(32), EXI_INST_INFO },

	{ MASK(32), GUEST_LIMIT_ES },
	{ MASK(32), GUEST_LIMIT_CS },
	{ MASK(32), GUEST_LIMIT_SS },
	{ MASK(32), GUEST_LIMIT_DS },
	{ MASK(32), GUEST_LIMIT_FS },
	{ MASK(32), GUEST_LIMIT_GS },
	{ MASK(32), GUEST_LIMIT_LDTR },
	{ MASK(32), GUEST_LIMIT_TR },
	{ MASK(32), GUEST_LIMIT_GDTR },
	{ MASK(32), GUEST_LIMIT_IDTR },
	{ 0x1d0ff, GUEST_AR_ES },
	{ 0x1f0ff, GUEST_AR_CS },
	{ 0x1d0ff, GUEST_AR_SS },
	{ 0x1d0ff, GUEST_AR_DS },
	{ 0x1d0ff, GUEST_AR_FS },
	{ 0x1d0ff, GUEST_AR_GS },
	{ 0x1d0ff, GUEST_AR_LDTR },
	{ 0x1d0ff, GUEST_AR_TR },
	{ MASK(32), GUEST_INTR_STATE },
	{ MASK(32), GUEST_ACTV_STATE },
	{ MASK(32), GUEST_SMBASE },
	{ MASK(32), GUEST_SYSENTER_CS },
	{ MASK(32), PREEMPT_TIMER_VALUE },

	{ MASK(32), HOST_SYSENTER_CS },

	{ MASK_NATURAL, CR0_MASK },
	{ MASK_NATURAL, CR4_MASK },
	{ MASK_NATURAL, CR0_READ_SHADOW },
	{ MASK_NATURAL, CR4_READ_SHADOW },
	{ MASK_NATURAL, CR3_TARGET_0 },
	{ MASK_NATURAL, CR3_TARGET_1 },
	{ MASK_NATURAL, CR3_TARGET_2 },
	{ MASK_NATURAL, CR3_TARGET_3 },

	{ MASK_NATURAL, EXI_QUALIFICATION },
	{ MASK_NATURAL, IO_RCX },
	{ MASK_NATURAL, IO_RSI },
	{ MASK_NATURAL, IO_RDI },
	{ MASK_NATURAL, IO_RIP },
	{ MASK_NATURAL, GUEST_LINEAR_ADDRESS },

	{ MASK_NATURAL, GUEST_CR0 },
	{ MASK_NATURAL, GUEST_CR3 },
	{ MASK_NATURAL, GUEST_CR4 },
	{ MASK_NATURAL, GUEST_BASE_ES },
	{ MASK_NATURAL, GUEST_BASE_CS },
	{ MASK_NATURAL, GUEST_BASE_SS },
	{ MASK_NATURAL, GUEST_BASE_DS },
	{ MASK_NATURAL, GUEST_BASE_FS },
	{ MASK_NATURAL, GUEST_BASE_GS },
	{ MASK_NATURAL, GUEST_BASE_LDTR },
	{ MASK_NATURAL, GUEST_BASE_TR },
	{ MASK_NATURAL, GUEST_BASE_GDTR },
	{ MASK_NATURAL, GUEST_BASE_IDTR },
	{ MASK_NATURAL, GUEST_DR7 },
	{ MASK_NATURAL, GUEST_RSP },
	{ MASK_NATURAL, GUEST_RIP },
	{ MASK_NATURAL, GUEST_RFLAGS },
	{ MASK_NATURAL, GUEST_PENDING_DEBUG },
	{ MASK_NATURAL, GUEST_SYSENTER_ESP },
	{ MASK_NATURAL, GUEST_SYSENTER_EIP },

	{ MASK_NATURAL, HOST_CR0 },
	{ MASK_NATURAL, HOST_CR3 },
	{ MASK_NATURAL, HOST_CR4 },
	{ MASK_NATURAL, HOST_BASE_FS },
	{ MASK_NATURAL, HOST_BASE_GS },
	{ MASK_NATURAL, HOST_BASE_TR },
	{ MASK_NATURAL, HOST_BASE_GDTR },
	{ MASK_NATURAL, HOST_BASE_IDTR },
	{ MASK_NATURAL, HOST_SYSENTER_ESP },
	{ MASK_NATURAL, HOST_SYSENTER_EIP },
	{ MASK_NATURAL, HOST_RSP },
	{ MASK_NATURAL, HOST_RIP },
};

enum vmcs_field_type {
	VMCS_FIELD_TYPE_CONTROL = 0,
	VMCS_FIELD_TYPE_READ_ONLY_DATA = 1,
	VMCS_FIELD_TYPE_GUEST = 2,
	VMCS_FIELD_TYPE_HOST = 3,
	VMCS_FIELD_TYPES,
};

static inline int vmcs_field_type(struct vmcs_field *f)
{
	return (f->encoding >> VMCS_FIELD_TYPE_SHIFT) & 0x3;
}

static int vmcs_field_readonly(struct vmcs_field *f)
{
	u64 ia32_vmx_misc;

	ia32_vmx_misc = rdmsr(MSR_IA32_VMX_MISC);
	return !(ia32_vmx_misc & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS) &&
		(vmcs_field_type(f) == VMCS_FIELD_TYPE_READ_ONLY_DATA);
}

static inline u64 vmcs_field_value(struct vmcs_field *f, u8 cookie)
{
	u64 value;

	/* Incorporate the cookie and the field encoding into the value. */
	value = cookie;
	value |= (f->encoding << 8);
	value |= 0xdeadbeefull << 32;

	return value & f->mask;
}

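/*
 * Worked example (illustrative): with cookie 0x42 and a 16-bit field such
 * as GUEST_SEL_CS (encoding 0x802 per the SDM), the raw value is
 * 0xdeadbeef00000000 | (0x802 << 8) | 0x42 = 0xdeadbeef00080242; masking
 * with MASK(16) keeps only the low word, so 0x0242 is what VMWRITE stores
 * and what check_vmcs_field() below expects VMREAD to return.
 */
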
static void set_vmcs_field(struct vmcs_field *f, u8 cookie)
{
	vmcs_write(f->encoding, vmcs_field_value(f, cookie));
}

static bool check_vmcs_field(struct vmcs_field *f, u8 cookie)
{
	u64 expected;
	u64 actual;
	int ret;

	if (f->encoding == VMX_INST_ERROR) {
		printf("Skipping volatile field %lx\n", f->encoding);
		return true;
	}

	ret = vmcs_read_safe(f->encoding, &actual);
	assert(!(ret & X86_EFLAGS_CF));
	/* Skip VMCS fields that aren't recognized by the CPU */
	if (ret & X86_EFLAGS_ZF)
		return true;

	if (vmcs_field_readonly(f)) {
		printf("Skipping read-only field %lx\n", f->encoding);
		return true;
	}

	expected = vmcs_field_value(f, cookie);
	actual &= f->mask;

	if (expected == actual)
		return true;

	printf("FAIL: VMWRITE/VMREAD %lx (expected: %lx, actual: %lx)\n",
	       f->encoding, (unsigned long) expected, (unsigned long) actual);

	return false;
}

static void set_all_vmcs_fields(u8 cookie)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs_fields); i++)
		set_vmcs_field(&vmcs_fields[i], cookie);
}

static bool check_all_vmcs_fields(u8 cookie)
{
	bool pass = true;
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs_fields); i++) {
		if (!check_vmcs_field(&vmcs_fields[i], cookie))
			pass = false;
	}

	return pass;
}

static u32 find_vmcs_max_index(void)
{
	u32 width, type, enc;
	u64 actual;
	int ret;
	int idx;	/* signed, so the backwards scan can terminate */

	/* scan backwards and stop when found */
	for (idx = (1 << 9) - 1; idx >= 0; idx--) {

		/* try all combinations of width and type */
		for (type = 0; type < (1 << 2); type++) {
			for (width = 0; width < (1 << 2) ; width++) {
				enc = (idx << VMCS_FIELD_INDEX_SHIFT) |
				      (type << VMCS_FIELD_TYPE_SHIFT) |
				      (width << VMCS_FIELD_WIDTH_SHIFT);

				ret = vmcs_read_safe(enc, &actual);
				assert(!(ret & X86_EFLAGS_CF));
				if (!(ret & X86_EFLAGS_ZF))
					return idx;
			}
		}
	}
	/* some VMCS fields should exist */
	assert(0);
	return 0;
}

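/*
 * Example of the encoding layout probed above (per the SDM: bit 0 is the
 * access type, bits 9:1 the index, bits 11:10 the field type, bits 14:13
 * the width): encoding 0x4402 (EXI_REASON) decodes to index 1, type 1
 * (read-only data) and width 2 (32-bit).
 */
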
static void test_vmwrite_vmread(void)
{
	struct vmcs *vmcs = alloc_page();
	u32 vmcs_enum_max, max_index = 0;

	vmcs->hdr.revision_id = basic_msr.revision;
	assert(!vmcs_clear(vmcs));
	assert(!make_vmcs_current(vmcs));

	set_all_vmcs_fields(0x42);
	report(check_all_vmcs_fields(0x42), "VMWRITE/VMREAD");

	vmcs_enum_max = (rdmsr(MSR_IA32_VMX_VMCS_ENUM) & VMCS_FIELD_INDEX_MASK)
			>> VMCS_FIELD_INDEX_SHIFT;
	max_index = find_vmcs_max_index();
	report(vmcs_enum_max == max_index,
	       "VMX_VMCS_ENUM.MAX_INDEX expected: %x, actual: %x",
	       max_index, vmcs_enum_max);

	assert(!vmcs_clear(vmcs));
	free_page(vmcs);
}

static void __test_vmread_vmwrite_pf(bool vmread, u64 *val, u8 sentinel)
{
	unsigned long flags = sentinel;
	unsigned int vector;

	/*
	 * Execute VMREAD/VMWRITE with a not-PRESENT memory operand, and verify
	 * a #PF occurred and RFLAGS were not modified.
	 */
	if (vmread)
		asm volatile ("sahf\n\t"
			      ASM_TRY("1f")
			      "vmread %[enc], %[val]\n\t"
			      "1: lahf"
			      : [val] "=m" (*val),
				[flags] "+a" (flags)
			      : [enc] "r" ((u64)GUEST_SEL_SS)
			      : "cc");
	else
		asm volatile ("sahf\n\t"
			      ASM_TRY("1f")
			      "vmwrite %[val], %[enc]\n\t"
			      "1: lahf"
			      : [val] "=m" (*val),
				[flags] "+a" (flags)
			      : [enc] "r" ((u64)GUEST_SEL_SS)
			      : "cc");

	vector = exception_vector();
	report(vector == PF_VECTOR,
	       "Expected #PF on %s, got exception '0x%x'\n",
	       vmread ? "VMREAD" : "VMWRITE", vector);

	report((u8)flags == sentinel,
	       "Expected RFLAGS 0x%x, got 0x%x", sentinel, (u8)flags);
}

static void test_vmread_vmwrite_pf(bool vmread)
{
	struct vmcs *vmcs = alloc_page();
	void *vpage = alloc_vpage();

	memset(vmcs, 0, PAGE_SIZE);
	vmcs->hdr.revision_id = basic_msr.revision;
	assert(!vmcs_clear(vmcs));
	assert(!make_vmcs_current(vmcs));

	/*
	 * Test with two values to candy-stripe the 5 flags stored/loaded by
	 * SAHF/LAHF.
	 */
	__test_vmread_vmwrite_pf(vmread, vpage, 0x91);
	__test_vmread_vmwrite_pf(vmread, vpage, 0x45);
}

static void test_vmread_flags_touch(void)
{
	test_vmread_vmwrite_pf(true);
}

static void test_vmwrite_flags_touch(void)
{
	test_vmread_vmwrite_pf(false);
}

static void test_vmcs_high(void)
{
	struct vmcs *vmcs = alloc_page();

	vmcs->hdr.revision_id = basic_msr.revision;
	assert(!vmcs_clear(vmcs));
	assert(!make_vmcs_current(vmcs));

	vmcs_write(TSC_OFFSET, 0x0123456789ABCDEFull);
	report(vmcs_read(TSC_OFFSET) == 0x0123456789ABCDEFull,
	       "VMREAD TSC_OFFSET after VMWRITE TSC_OFFSET");
	report(vmcs_read(TSC_OFFSET_HI) == 0x01234567ull,
	       "VMREAD TSC_OFFSET_HI after VMWRITE TSC_OFFSET");
	vmcs_write(TSC_OFFSET_HI, 0x76543210ul);
	report(vmcs_read(TSC_OFFSET_HI) == 0x76543210ul,
	       "VMREAD TSC_OFFSET_HI after VMWRITE TSC_OFFSET_HI");
	report(vmcs_read(TSC_OFFSET) == 0x7654321089ABCDEFull,
	       "VMREAD TSC_OFFSET after VMWRITE TSC_OFFSET_HI");

	assert(!vmcs_clear(vmcs));
	free_page(vmcs);
}

static void test_vmcs_lifecycle(void)
{
	struct vmcs *vmcs[2] = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		vmcs[i] = alloc_page();
		vmcs[i]->hdr.revision_id = basic_msr.revision;
	}

#define VMPTRLD(_i) do { \
	assert(_i < ARRAY_SIZE(vmcs)); \
	assert(!make_vmcs_current(vmcs[_i])); \
	printf("VMPTRLD VMCS%d\n", (_i)); \
} while (0)

#define VMCLEAR(_i) do { \
	assert(_i < ARRAY_SIZE(vmcs)); \
	assert(!vmcs_clear(vmcs[_i])); \
	printf("VMCLEAR VMCS%d\n", (_i)); \
} while (0)

	VMCLEAR(0);
	VMPTRLD(0);
	set_all_vmcs_fields(0);
	report(check_all_vmcs_fields(0), "current:VMCS0 active:[VMCS0]");

	VMCLEAR(0);
	VMPTRLD(0);
	report(check_all_vmcs_fields(0), "current:VMCS0 active:[VMCS0]");

	VMCLEAR(1);
	report(check_all_vmcs_fields(0), "current:VMCS0 active:[VMCS0]");

	VMPTRLD(1);
	set_all_vmcs_fields(1);
	report(check_all_vmcs_fields(1), "current:VMCS1 active:[VMCS0,VMCS1]");

	VMPTRLD(0);
	report(check_all_vmcs_fields(0), "current:VMCS0 active:[VMCS0,VMCS1]");
	VMPTRLD(1);
	report(check_all_vmcs_fields(1), "current:VMCS1 active:[VMCS0,VMCS1]");
	VMPTRLD(1);
	report(check_all_vmcs_fields(1), "current:VMCS1 active:[VMCS0,VMCS1]");

	VMCLEAR(0);
	report(check_all_vmcs_fields(1), "current:VMCS1 active:[VMCS1]");

	/* VMPTRLD should not erase VMWRITEs to the current VMCS */
	set_all_vmcs_fields(2);
	VMPTRLD(1);
	report(check_all_vmcs_fields(2), "current:VMCS1 active:[VMCS1]");

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		VMCLEAR(i);
		free_page(vmcs[i]);
	}

#undef VMPTRLD
#undef VMCLEAR
}

void vmx_set_test_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}

u32 vmx_get_test_stage(void)
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}

void vmx_inc_test_stage(void)
{
	barrier();
	stage++;
	barrier();
}

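/*
 * Typical usage (a sketch; the handler names are illustrative): a test's
 * guest code advances the stage and the L1 exit handler checks it, e.g.
 *
 *	// in L2 guest_main:    vmx_set_test_stage(1); vmcall();
 *	// in L1 exit_handler:  report(vmx_get_test_stage() == 1, "stage 1");
 *
 * The barriers above keep the compiler from reordering these accesses
 * across the L1/L2 transition points.
 */
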
/* entry_sysenter */
asm(
	".align	4, 0x90\n\t"
	".globl	entry_sysenter\n\t"
	"entry_sysenter:\n\t"
	SAVE_GPR
	"	and	$0xf, %rax\n\t"
	"	mov	%rax, %rdi\n\t"
	"	call	syscall_handler\n\t"
	LOAD_GPR
	"	vmresume\n\t"
);

static void __attribute__((__used__)) syscall_handler(u64 syscall_no)
{
	if (current->syscall_handler)
		current->syscall_handler(syscall_no);
}

static const char * const exit_reason_descriptions[] = {
	[VMX_EXC_NMI]		= "VMX_EXC_NMI",
	[VMX_EXTINT]		= "VMX_EXTINT",
	[VMX_TRIPLE_FAULT]	= "VMX_TRIPLE_FAULT",
	[VMX_INIT]		= "VMX_INIT",
	[VMX_SIPI]		= "VMX_SIPI",
	[VMX_SMI_IO]		= "VMX_SMI_IO",
	[VMX_SMI_OTHER]		= "VMX_SMI_OTHER",
	[VMX_INTR_WINDOW]	= "VMX_INTR_WINDOW",
	[VMX_NMI_WINDOW]	= "VMX_NMI_WINDOW",
	[VMX_TASK_SWITCH]	= "VMX_TASK_SWITCH",
	[VMX_CPUID]		= "VMX_CPUID",
	[VMX_GETSEC]		= "VMX_GETSEC",
	[VMX_HLT]		= "VMX_HLT",
	[VMX_INVD]		= "VMX_INVD",
	[VMX_INVLPG]		= "VMX_INVLPG",
	[VMX_RDPMC]		= "VMX_RDPMC",
	[VMX_RDTSC]		= "VMX_RDTSC",
	[VMX_RSM]		= "VMX_RSM",
	[VMX_VMCALL]		= "VMX_VMCALL",
	[VMX_VMCLEAR]		= "VMX_VMCLEAR",
	[VMX_VMLAUNCH]		= "VMX_VMLAUNCH",
	[VMX_VMPTRLD]		= "VMX_VMPTRLD",
	[VMX_VMPTRST]		= "VMX_VMPTRST",
	[VMX_VMREAD]		= "VMX_VMREAD",
	[VMX_VMRESUME]		= "VMX_VMRESUME",
	[VMX_VMWRITE]		= "VMX_VMWRITE",
	[VMX_VMXOFF]		= "VMX_VMXOFF",
	[VMX_VMXON]		= "VMX_VMXON",
	[VMX_CR]		= "VMX_CR",
	[VMX_DR]		= "VMX_DR",
	[VMX_IO]		= "VMX_IO",
	[VMX_RDMSR]		= "VMX_RDMSR",
	[VMX_WRMSR]		= "VMX_WRMSR",
	[VMX_FAIL_STATE]	= "VMX_FAIL_STATE",
	[VMX_FAIL_MSR]		= "VMX_FAIL_MSR",
	[VMX_MWAIT]		= "VMX_MWAIT",
	[VMX_MTF]		= "VMX_MTF",
	[VMX_MONITOR]		= "VMX_MONITOR",
	[VMX_PAUSE]		= "VMX_PAUSE",
	[VMX_FAIL_MCHECK]	= "VMX_FAIL_MCHECK",
	[VMX_TPR_THRESHOLD]	= "VMX_TPR_THRESHOLD",
	[VMX_APIC_ACCESS]	= "VMX_APIC_ACCESS",
	[VMX_EOI_INDUCED]	= "VMX_EOI_INDUCED",
	[VMX_GDTR_IDTR]		= "VMX_GDTR_IDTR",
	[VMX_LDTR_TR]		= "VMX_LDTR_TR",
	[VMX_EPT_VIOLATION]	= "VMX_EPT_VIOLATION",
	[VMX_EPT_MISCONFIG]	= "VMX_EPT_MISCONFIG",
	[VMX_INVEPT]		= "VMX_INVEPT",
	[VMX_PREEMPT]		= "VMX_PREEMPT",
	[VMX_INVVPID]		= "VMX_INVVPID",
	[VMX_WBINVD]		= "VMX_WBINVD",
	[VMX_XSETBV]		= "VMX_XSETBV",
	[VMX_APIC_WRITE]	= "VMX_APIC_WRITE",
	[VMX_RDRAND]		= "VMX_RDRAND",
	[VMX_INVPCID]		= "VMX_INVPCID",
	[VMX_VMFUNC]		= "VMX_VMFUNC",
	[VMX_RDSEED]		= "VMX_RDSEED",
	[VMX_PML_FULL]		= "VMX_PML_FULL",
	[VMX_XSAVES]		= "VMX_XSAVES",
	[VMX_XRSTORS]		= "VMX_XRSTORS",
};

const char *exit_reason_description(u64 reason)
{
	if (reason >= ARRAY_SIZE(exit_reason_descriptions))
		return "(unknown)";
	return exit_reason_descriptions[reason] ? : "(unused)";
}

void print_vmexit_info(union exit_reason exit_reason)
{
	u64 guest_rip, guest_rsp;
	ulong exit_qual = vmcs_read(EXI_QUALIFICATION);

	guest_rip = vmcs_read(GUEST_RIP);
	guest_rsp = vmcs_read(GUEST_RSP);
	printf("VMEXIT info:\n");
	printf("\tvmexit reason = %u\n", exit_reason.basic);
	printf("\tfailed vmentry = %u\n", !!exit_reason.failed_vmentry);
	printf("\texit qualification = %#lx\n", exit_qual);
	printf("\tguest_rip = %#lx\n", guest_rip);
	printf("\tRAX=%#lx RBX=%#lx RCX=%#lx RDX=%#lx\n",
	       regs.rax, regs.rbx, regs.rcx, regs.rdx);
	printf("\tRSP=%#lx RBP=%#lx RSI=%#lx RDI=%#lx\n",
	       guest_rsp, regs.rbp, regs.rsi, regs.rdi);
	printf("\tR8 =%#lx R9 =%#lx R10=%#lx R11=%#lx\n",
	       regs.r8, regs.r9, regs.r10, regs.r11);
	printf("\tR12=%#lx R13=%#lx R14=%#lx R15=%#lx\n",
	       regs.r12, regs.r13, regs.r14, regs.r15);
}

void print_vmentry_failure_info(struct vmentry_result *result)
{
	if (result->entered)
		return;

	if (result->vm_fail) {
		printf("VM-Fail on %s: ", result->instr);
		switch (result->flags & VMX_ENTRY_FLAGS) {
		case X86_EFLAGS_CF:
			printf("current-VMCS pointer is not valid.\n");
			break;
		case X86_EFLAGS_ZF:
			printf("error number is %ld. See Intel 30.4.\n",
			       vmcs_read(VMX_INST_ERROR));
			break;
		default:
			printf("unexpected flags %lx!\n", result->flags);
		}
	} else {
		u64 qual = vmcs_read(EXI_QUALIFICATION);

		printf("VM-Exit failure on %s (reason=%#x, qual=%#lx): ",
		       result->instr, result->exit_reason.full, qual);

		switch (result->exit_reason.basic) {
		case VMX_FAIL_STATE:
			printf("invalid guest state\n");
			break;
		case VMX_FAIL_MSR:
			printf("MSR loading\n");
			break;
		case VMX_FAIL_MCHECK:
			printf("machine-check event\n");
			break;
		default:
			printf("unexpected basic exit reason %u\n",
			       result->exit_reason.basic);
		}

		if (!result->exit_reason.failed_vmentry)
			printf("\tVMX_ENTRY_FAILURE BIT NOT SET!\n");

		if (result->exit_reason.full & 0x7fff0000)
			printf("\tRESERVED BITS SET!\n");
	}
}

/*
 * VMCLEAR should ensure that all VMCS state is flushed to the VMCS
 * region in memory.
 */
static void test_vmclear_flushing(void)
{
	struct vmcs *vmcs[3] = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		vmcs[i] = alloc_page();
	}

	vmcs[0]->hdr.revision_id = basic_msr.revision;
	assert(!vmcs_clear(vmcs[0]));
	assert(!make_vmcs_current(vmcs[0]));
	set_all_vmcs_fields(0x86);

	assert(!vmcs_clear(vmcs[0]));
	memcpy(vmcs[1], vmcs[0], basic_msr.size);
	assert(!make_vmcs_current(vmcs[1]));
	report(check_all_vmcs_fields(0x86),
	       "test vmclear flush (current VMCS)");

	set_all_vmcs_fields(0x87);
	assert(!make_vmcs_current(vmcs[0]));
	assert(!vmcs_clear(vmcs[1]));
	memcpy(vmcs[2], vmcs[1], basic_msr.size);
	assert(!make_vmcs_current(vmcs[2]));
	report(check_all_vmcs_fields(0x87),
	       "test vmclear flush (!current VMCS)");

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		assert(!vmcs_clear(vmcs[i]));
		free_page(vmcs[i]);
	}
}

static void test_vmclear(void)
{
	struct vmcs *tmp_root;
	int width = cpuid_maxphyaddr();

	/*
	 * Note: the tests below do not necessarily have a
	 * valid VMCS, but that's OK since the invalid VMCS
	 * is only used for a specific test and is discarded
	 * without touching its contents.
	 */

	/* Unaligned page access */
	tmp_root = (struct vmcs *)((intptr_t)vmcs_root + 1);
	report(vmcs_clear(tmp_root) == 1, "test vmclear with unaligned vmcs");

	/* gpa bits beyond physical address width are set */
	tmp_root = (struct vmcs *)((intptr_t)vmcs_root |
				   ((u64)1 << (width+1)));
	report(vmcs_clear(tmp_root) == 1,
	       "test vmclear with vmcs address bits set beyond physical address width");

	/* Pass VMXON region */
	tmp_root = (struct vmcs *)bsp_vmxon_region;
	report(vmcs_clear(tmp_root) == 1, "test vmclear with vmxon region");

	/* Valid VMCS */
	report(vmcs_clear(vmcs_root) == 0,
	       "test vmclear with valid vmcs region");

	test_vmclear_flushing();
}

static void __attribute__((__used__)) guest_main(void)
{
	if (current->v2)
		v2_guest_main();
	else
		current->guest_main();
}

/* guest_entry */
asm(
	".align	4, 0x90\n\t"
	".globl	entry_guest\n\t"
	"guest_entry:\n\t"
	"	call	guest_main\n\t"
	"	mov	$1, %edi\n\t"
	"	call	hypercall\n\t"
);

/* EPT paging structure related functions */
/* split_large_ept_entry: Split a 2M/1G large page into 512 smaller PTEs.
		@ptep : large page table entry to split
		@level : level of ptep (2 or 3)
 */
static void split_large_ept_entry(unsigned long *ptep, int level)
{
	unsigned long *new_pt;
	unsigned long gpa;
	unsigned long pte;
	unsigned long prototype;
	int i;

	pte = *ptep;
	assert(pte & EPT_PRESENT);
	assert(pte & EPT_LARGE_PAGE);
	assert(level == 2 || level == 3);

	new_pt = alloc_page();
	assert(new_pt);

	prototype = pte & ~EPT_ADDR_MASK;
	if (level == 2)
		prototype &= ~EPT_LARGE_PAGE;

	gpa = pte & EPT_ADDR_MASK;
	for (i = 0; i < EPT_PGDIR_ENTRIES; i++) {
		new_pt[i] = prototype | gpa;
		gpa += 1ul << EPT_LEVEL_SHIFT(level - 1);
	}

	pte &= ~EPT_LARGE_PAGE;
	pte &= ~EPT_ADDR_MASK;
	pte |= virt_to_phys(new_pt);

	*ptep = pte;
}

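/*
 * Example of the split arithmetic above: splitting a 2M entry (level 2)
 * yields a page table of 512 4K PTEs whose GPAs advance by
 * 1ul << EPT_LEVEL_SHIFT(1), i.e. 4K, each; splitting a 1G entry
 * (level 3) yields 512 2M entries that keep EPT_LARGE_PAGE set in the
 * prototype and advance by 2M each.
 */
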
/* install_ept_entry : Install a page to a given level in EPT
		@pml4 : addr of pml4 table
		@pte_level : level of PTE to set
		@guest_addr : physical address of guest
		@pte : pte value to set
		@pt_page : address of page table, NULL for a new page
 */
void install_ept_entry(unsigned long *pml4,
		       int pte_level,
		       unsigned long guest_addr,
		       unsigned long pte,
		       unsigned long *pt_page)
{
	int level;
	unsigned long *pt = pml4;
	unsigned offset;

	/* EPT only uses 48 bits of GPA. */
	assert(guest_addr < (1ul << 48));

	for (level = EPT_PAGE_LEVEL; level > pte_level; --level) {
		offset = (guest_addr >> EPT_LEVEL_SHIFT(level))
				& EPT_PGDIR_MASK;
		if (!(pt[offset] & (EPT_PRESENT))) {
			unsigned long *new_pt = pt_page;
			if (!new_pt)
				new_pt = alloc_page();
			else
				pt_page = 0;
			memset(new_pt, 0, PAGE_SIZE);
			pt[offset] = virt_to_phys(new_pt)
					| EPT_RA | EPT_WA | EPT_EA;
		} else if (pt[offset] & EPT_LARGE_PAGE)
			split_large_ept_entry(&pt[offset], level);
		pt = phys_to_virt(pt[offset] & EPT_ADDR_MASK);
	}
	offset = (guest_addr >> EPT_LEVEL_SHIFT(level)) & EPT_PGDIR_MASK;
	pt[offset] = pte;
}

/* Map a page, @perm is the permission of the page */
void install_ept(unsigned long *pml4,
		 unsigned long phys,
		 unsigned long guest_addr,
		 u64 perm)
{
	install_ept_entry(pml4, 1, guest_addr, (phys & PAGE_MASK) | perm, 0);
}

/* Map a 1G-size page */
void install_1g_ept(unsigned long *pml4,
		    unsigned long phys,
		    unsigned long guest_addr,
		    u64 perm)
{
	install_ept_entry(pml4, 3, guest_addr,
			  (phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
}

/* Map a 2M-size page */
void install_2m_ept(unsigned long *pml4,
		    unsigned long phys,
		    unsigned long guest_addr,
		    u64 perm)
{
	install_ept_entry(pml4, 2, guest_addr,
			  (phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
}

/* setup_ept_range : Setup a range of 1:1 mapped page to EPT paging structure.
		@start : start address of guest page
		@len : length of address to be mapped
		@map_1g : whether 1G page map is used
		@map_2m : whether 2M page map is used
		@perm : permission for every page
 */
void setup_ept_range(unsigned long *pml4, unsigned long start,
		     unsigned long len, int map_1g, int map_2m, u64 perm)
{
	u64 phys = start;
	u64 max = (u64)len + (u64)start;

	if (map_1g) {
		while (phys + PAGE_SIZE_1G <= max) {
			install_1g_ept(pml4, phys, phys, perm);
			phys += PAGE_SIZE_1G;
		}
	}
	if (map_2m) {
		while (phys + PAGE_SIZE_2M <= max) {
			install_2m_ept(pml4, phys, phys, perm);
			phys += PAGE_SIZE_2M;
		}
	}
	while (phys + PAGE_SIZE <= max) {
		install_ept(pml4, phys, phys, perm);
		phys += PAGE_SIZE;
	}
}

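/*
 * Usage sketch (the address range is illustrative):
 *
 *	setup_ept_range(pml4, 0, 2ul << 30, 0, 0,
 *			EPT_RA | EPT_WA | EPT_EA);
 *
 * identity-maps the first 2G of GPA space with 4K pages only; passing
 * map_1g/map_2m = 1 instead lets large pages cover as much of the range
 * as size and alignment allow, with 4K pages filling the remainder.
 */
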
/* get_ept_pte : Get the PTE of a given level in EPT,
   @level == 1 means get the last (4K) level */
bool get_ept_pte(unsigned long *pml4, unsigned long guest_addr, int level,
		 unsigned long *pte)
{
	int l;
	unsigned long *pt = pml4, iter_pte;
	unsigned offset;

	assert(level >= 1 && level <= 4);

	for (l = EPT_PAGE_LEVEL; ; --l) {
		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
		iter_pte = pt[offset];
		if (l == level)
			break;
		if (l < 4 && (iter_pte & EPT_LARGE_PAGE))
			return false;
		if (!(iter_pte & (EPT_PRESENT)))
			return false;
		pt = (unsigned long *)(iter_pte & EPT_ADDR_MASK);
	}
	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
	if (pte)
		*pte = pt[offset];
	return true;
}

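/*
 * Usage sketch: read the final (4K-level) translation of a GPA and test a
 * permission bit. get_ept_pte() returns false if the walk hits a
 * non-present entry, or a large-page entry above the requested level:
 *
 *	unsigned long pte;
 *
 *	if (get_ept_pte(pml4, gpa, 1, &pte) && (pte & EPT_WA))
 *		;	// gpa is mapped writable at 4K granularity
 */
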
static void clear_ept_ad_pte(unsigned long *pml4, unsigned long guest_addr)
{
	int l;
	unsigned long *pt = pml4;
	u64 pte;
	unsigned offset;

	for (l = EPT_PAGE_LEVEL; ; --l) {
		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
		pt[offset] &= ~(EPT_ACCESS_FLAG|EPT_DIRTY_FLAG);
		pte = pt[offset];
		if (l == 1 || (l < 4 && (pte & EPT_LARGE_PAGE)))
			break;
		pt = (unsigned long *)(pte & EPT_ADDR_MASK);
	}
}

/* clear_ept_ad : Clear EPT A/D bits for the page table walk and the
   final GPA of a guest address. */
void clear_ept_ad(unsigned long *pml4, u64 guest_cr3,
		  unsigned long guest_addr)
{
	int l;
	unsigned long *pt = (unsigned long *)guest_cr3, gpa;
	u64 pte, offset_in_page;
	unsigned offset;

	for (l = EPT_PAGE_LEVEL; ; --l) {
		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;

		clear_ept_ad_pte(pml4, (u64) &pt[offset]);
		pte = pt[offset];
		if (l == 1 || (l < 4 && (pte & PT_PAGE_SIZE_MASK)))
			break;
		if (!(pte & PT_PRESENT_MASK))
			return;
		pt = (unsigned long *)(pte & PT_ADDR_MASK);
	}

	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
	offset_in_page = guest_addr & ((1 << EPT_LEVEL_SHIFT(l)) - 1);
	gpa = (pt[offset] & PT_ADDR_MASK) | (guest_addr & offset_in_page);
	clear_ept_ad_pte(pml4, gpa);
}

/* check_ept_ad : Check the content of EPT A/D bits for the page table
   walk and the final GPA of a guest address. */
void check_ept_ad(unsigned long *pml4, u64 guest_cr3,
		  unsigned long guest_addr, int expected_gpa_ad,
		  int expected_pt_ad)
{
	int l;
	unsigned long *pt = (unsigned long *)guest_cr3, gpa;
	u64 ept_pte, pte, offset_in_page;
	unsigned offset;
	bool bad_pt_ad = false;

	for (l = EPT_PAGE_LEVEL; ; --l) {
		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;

		if (!get_ept_pte(pml4, (u64) &pt[offset], 1, &ept_pte)) {
			printf("EPT - guest level %d page table is not mapped.\n", l);
			return;
		}

		if (!bad_pt_ad) {
			bad_pt_ad |= (ept_pte & (EPT_ACCESS_FLAG|EPT_DIRTY_FLAG)) != expected_pt_ad;
			if (bad_pt_ad)
				report_fail("EPT - guest level %d page table A=%d/D=%d",
					    l,
					    !!(expected_pt_ad & EPT_ACCESS_FLAG),
					    !!(expected_pt_ad & EPT_DIRTY_FLAG));
		}

		pte = pt[offset];
		if (l == 1 || (l < 4 && (pte & PT_PAGE_SIZE_MASK)))
			break;
		if (!(pte & PT_PRESENT_MASK))
			return;
		pt = (unsigned long *)(pte & PT_ADDR_MASK);
	}

	if (!bad_pt_ad)
		report_pass("EPT - guest page table structures A=%d/D=%d",
			    !!(expected_pt_ad & EPT_ACCESS_FLAG),
			    !!(expected_pt_ad & EPT_DIRTY_FLAG));

	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
	offset_in_page = guest_addr & ((1 << EPT_LEVEL_SHIFT(l)) - 1);
	gpa = (pt[offset] & PT_ADDR_MASK) | (guest_addr & offset_in_page);

	if (!get_ept_pte(pml4, gpa, 1, &ept_pte)) {
		report_fail("EPT - guest physical address is not mapped");
		return;
	}
	report((ept_pte & (EPT_ACCESS_FLAG | EPT_DIRTY_FLAG)) == expected_gpa_ad,
	       "EPT - guest physical address A=%d/D=%d",
	       !!(expected_gpa_ad & EPT_ACCESS_FLAG),
	       !!(expected_gpa_ad & EPT_DIRTY_FLAG));
}

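/*
 * Usage sketch (the EPT A/D tests follow this pattern; @data_page is
 * illustrative): clear the bits, let L2 touch the page, then verify that
 * both the guest page-table walk and the final GPA were marked:
 *
 *	clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page);
 *	// ... run L2 so it writes to data_page ...
 *	check_ept_ad(pml4, guest_cr3, (unsigned long)data_page,
 *		     EPT_ACCESS_FLAG | EPT_DIRTY_FLAG,
 *		     EPT_ACCESS_FLAG | EPT_DIRTY_FLAG);
 */
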
set_ept_pte(unsigned long * pml4,unsigned long guest_addr,int level,u64 pte_val)1077dff740c0SPeter Feiner void set_ept_pte(unsigned long *pml4, unsigned long guest_addr,
10786884af61SArthur Chunqi Li int level, u64 pte_val)
10796884af61SArthur Chunqi Li {
10806884af61SArthur Chunqi Li int l;
10816884af61SArthur Chunqi Li unsigned long *pt = pml4;
10826884af61SArthur Chunqi Li unsigned offset;
10836884af61SArthur Chunqi Li
1084dff740c0SPeter Feiner assert(level >= 1 && level <= 4);
1085dff740c0SPeter Feiner
10862ca6f1f3SPaolo Bonzini for (l = EPT_PAGE_LEVEL; ; --l) {
1087a969e087SPeter Feiner offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
10882ca6f1f3SPaolo Bonzini if (l == level)
10892ca6f1f3SPaolo Bonzini break;
1090dff740c0SPeter Feiner assert(pt[offset] & EPT_PRESENT);
109100b5c590SPeter Feiner pt = (unsigned long *)(pt[offset] & EPT_ADDR_MASK);
10926884af61SArthur Chunqi Li }
1093a969e087SPeter Feiner offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
10946884af61SArthur Chunqi Li pt[offset] = pte_val;
10956884af61SArthur Chunqi Li }
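
/*
 * Illustrative sketch (assumption, not framework code): to provoke an EPT
 * violation on a 4K mapping, a test could clear the permission bits of the
 * level-1 PTE (and then flush the stale translation):
 *
 *	u64 pte;
 *
 *	if (get_ept_pte(pml4, gpa, 1, &pte))
 *		set_ept_pte(pml4, gpa, 1, pte & ~EPT_PRESENT);
 */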
10966884af61SArthur Chunqi Li
10979d7eaa29SArthur Chunqi Li static void init_vmcs_ctrl(void)
10989d7eaa29SArthur Chunqi Li {
10999d7eaa29SArthur Chunqi Li /* 26.2 CHECKS ON VMX CONTROLS AND HOST-STATE AREA */
11009d7eaa29SArthur Chunqi Li /* 26.2.1.1 */
11019d7eaa29SArthur Chunqi Li vmcs_write(PIN_CONTROLS, ctrl_pin);
11029d7eaa29SArthur Chunqi Li /* Disable VMEXIT of IO instruction */
11039d7eaa29SArthur Chunqi Li vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
11049d7eaa29SArthur Chunqi Li if (ctrl_cpu_rev[0].set & CPU_SECONDARY) {
11056884af61SArthur Chunqi Li ctrl_cpu[1] = (ctrl_cpu[1] | ctrl_cpu_rev[1].set) &
11066884af61SArthur Chunqi Li ctrl_cpu_rev[1].clr;
11079d7eaa29SArthur Chunqi Li vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
11089d7eaa29SArthur Chunqi Li }
11099d7eaa29SArthur Chunqi Li vmcs_write(CR3_TARGET_COUNT, 0);
11109d7eaa29SArthur Chunqi Li vmcs_write(VPID, ++vpid_cnt);
11119d7eaa29SArthur Chunqi Li }
11129d7eaa29SArthur Chunqi Li
11139d7eaa29SArthur Chunqi Li static void init_vmcs_host(void)
11149d7eaa29SArthur Chunqi Li {
11159d7eaa29SArthur Chunqi Li /* 26.2 CHECKS ON VMX CONTROLS AND HOST-STATE AREA */
11169d7eaa29SArthur Chunqi Li /* 26.2.1.2 */
11179d7eaa29SArthur Chunqi Li vmcs_write(HOST_EFER, rdmsr(MSR_EFER));
11189d7eaa29SArthur Chunqi Li
11199d7eaa29SArthur Chunqi Li /* 26.2.1.3 */
11209d7eaa29SArthur Chunqi Li vmcs_write(ENT_CONTROLS, ctrl_enter);
11219d7eaa29SArthur Chunqi Li vmcs_write(EXI_CONTROLS, ctrl_exit);
11229d7eaa29SArthur Chunqi Li
11239d7eaa29SArthur Chunqi Li /* 26.2.2 */
11249d7eaa29SArthur Chunqi Li vmcs_write(HOST_CR0, read_cr0());
11259d7eaa29SArthur Chunqi Li vmcs_write(HOST_CR3, read_cr3());
11269d7eaa29SArthur Chunqi Li vmcs_write(HOST_CR4, read_cr4());
11279d7eaa29SArthur Chunqi Li vmcs_write(HOST_SYSENTER_EIP, (u64)(&entry_sysenter));
112869d8fe0eSPaolo Bonzini vmcs_write(HOST_SYSENTER_CS, KERNEL_CS);
1129*184ee0d5SSean Christopherson if (ctrl_exit_rev.clr & EXI_LOAD_PAT)
1130*184ee0d5SSean Christopherson vmcs_write(HOST_PAT, rdmsr(MSR_IA32_CR_PAT));
11319d7eaa29SArthur Chunqi Li
11329d7eaa29SArthur Chunqi Li /* 26.2.3 */
113369d8fe0eSPaolo Bonzini vmcs_write(HOST_SEL_CS, KERNEL_CS);
113469d8fe0eSPaolo Bonzini vmcs_write(HOST_SEL_SS, KERNEL_DS);
113569d8fe0eSPaolo Bonzini vmcs_write(HOST_SEL_DS, KERNEL_DS);
113669d8fe0eSPaolo Bonzini vmcs_write(HOST_SEL_ES, KERNEL_DS);
113769d8fe0eSPaolo Bonzini vmcs_write(HOST_SEL_FS, KERNEL_DS);
113869d8fe0eSPaolo Bonzini vmcs_write(HOST_SEL_GS, KERNEL_DS);
113969d8fe0eSPaolo Bonzini vmcs_write(HOST_SEL_TR, TSS_MAIN);
1140a7f32d87SPaolo Bonzini vmcs_write(HOST_BASE_TR, get_gdt_entry_base(get_tss_descr()));
11415ed10141SPaolo Bonzini vmcs_write(HOST_BASE_GDTR, gdt_descr.base);
1142337166aaSJan Kiszka vmcs_write(HOST_BASE_IDTR, idt_descr.base);
11439d7eaa29SArthur Chunqi Li vmcs_write(HOST_BASE_FS, 0);
1144624778fdSSean Christopherson vmcs_write(HOST_BASE_GS, rdmsr(MSR_GS_BASE));
11459d7eaa29SArthur Chunqi Li
11469d7eaa29SArthur Chunqi Li /* Set other vmcs area */
11479d7eaa29SArthur Chunqi Li vmcs_write(PF_ERROR_MASK, 0);
11489d7eaa29SArthur Chunqi Li vmcs_write(PF_ERROR_MATCH, 0);
11499d7eaa29SArthur Chunqi Li vmcs_write(VMCS_LINK_PTR, ~0ul);
11509d7eaa29SArthur Chunqi Li vmcs_write(VMCS_LINK_PTR_HI, ~0ul);
11519d7eaa29SArthur Chunqi Li vmcs_write(HOST_RIP, (u64)(&vmx_return));
11529d7eaa29SArthur Chunqi Li }
11539d7eaa29SArthur Chunqi Li
11549d7eaa29SArthur Chunqi Li static void init_vmcs_guest(void)
11559d7eaa29SArthur Chunqi Li {
1156a7f32d87SPaolo Bonzini gdt_entry_t *tss_descr = get_tss_descr();
1157a7f32d87SPaolo Bonzini
11589d7eaa29SArthur Chunqi Li /* 26.3 CHECKING AND LOADING GUEST STATE */
11599d7eaa29SArthur Chunqi Li ulong guest_cr0, guest_cr4, guest_cr3;
11609d7eaa29SArthur Chunqi Li /* 26.3.1.1 */
11619d7eaa29SArthur Chunqi Li guest_cr0 = read_cr0();
11629d7eaa29SArthur Chunqi Li guest_cr4 = read_cr4();
11639d7eaa29SArthur Chunqi Li guest_cr3 = read_cr3();
11649d7eaa29SArthur Chunqi Li if (ctrl_enter & ENT_GUEST_64) {
11659d7eaa29SArthur Chunqi Li guest_cr0 |= X86_CR0_PG;
11669d7eaa29SArthur Chunqi Li guest_cr4 |= X86_CR4_PAE;
11679d7eaa29SArthur Chunqi Li }
11689d7eaa29SArthur Chunqi Li if ((ctrl_enter & ENT_GUEST_64) == 0)
11699d7eaa29SArthur Chunqi Li guest_cr4 &= (~X86_CR4_PCIDE);
11709d7eaa29SArthur Chunqi Li if (guest_cr0 & X86_CR0_PG)
11719d7eaa29SArthur Chunqi Li guest_cr0 |= X86_CR0_PE;
11729d7eaa29SArthur Chunqi Li vmcs_write(GUEST_CR0, guest_cr0);
11739d7eaa29SArthur Chunqi Li vmcs_write(GUEST_CR3, guest_cr3);
11749d7eaa29SArthur Chunqi Li vmcs_write(GUEST_CR4, guest_cr4);
117569d8fe0eSPaolo Bonzini vmcs_write(GUEST_SYSENTER_CS, KERNEL_CS);
1176342db9a1SAaron Lewis vmcs_write(GUEST_SYSENTER_ESP, guest_syscall_stack_top);
11779d7eaa29SArthur Chunqi Li vmcs_write(GUEST_SYSENTER_EIP, (u64)(&entry_sysenter));
11789d7eaa29SArthur Chunqi Li vmcs_write(GUEST_DR7, 0);
11799d7eaa29SArthur Chunqi Li vmcs_write(GUEST_EFER, rdmsr(MSR_EFER));
11809d7eaa29SArthur Chunqi Li
11819d7eaa29SArthur Chunqi Li /* 26.3.1.2 */
118269d8fe0eSPaolo Bonzini vmcs_write(GUEST_SEL_CS, KERNEL_CS);
118369d8fe0eSPaolo Bonzini vmcs_write(GUEST_SEL_SS, KERNEL_DS);
118469d8fe0eSPaolo Bonzini vmcs_write(GUEST_SEL_DS, KERNEL_DS);
118569d8fe0eSPaolo Bonzini vmcs_write(GUEST_SEL_ES, KERNEL_DS);
118669d8fe0eSPaolo Bonzini vmcs_write(GUEST_SEL_FS, KERNEL_DS);
118769d8fe0eSPaolo Bonzini vmcs_write(GUEST_SEL_GS, KERNEL_DS);
118869d8fe0eSPaolo Bonzini vmcs_write(GUEST_SEL_TR, TSS_MAIN);
11899d7eaa29SArthur Chunqi Li vmcs_write(GUEST_SEL_LDTR, 0);
11909d7eaa29SArthur Chunqi Li
11919d7eaa29SArthur Chunqi Li vmcs_write(GUEST_BASE_CS, 0);
11929d7eaa29SArthur Chunqi Li vmcs_write(GUEST_BASE_ES, 0);
11939d7eaa29SArthur Chunqi Li vmcs_write(GUEST_BASE_SS, 0);
11949d7eaa29SArthur Chunqi Li vmcs_write(GUEST_BASE_DS, 0);
11959d7eaa29SArthur Chunqi Li vmcs_write(GUEST_BASE_FS, 0);
1196624778fdSSean Christopherson vmcs_write(GUEST_BASE_GS, rdmsr(MSR_GS_BASE));
1197a7f32d87SPaolo Bonzini vmcs_write(GUEST_BASE_TR, get_gdt_entry_base(tss_descr));
11989d7eaa29SArthur Chunqi Li vmcs_write(GUEST_BASE_LDTR, 0);
11999d7eaa29SArthur Chunqi Li
12009d7eaa29SArthur Chunqi Li vmcs_write(GUEST_LIMIT_CS, 0xFFFFFFFF);
12019d7eaa29SArthur Chunqi Li vmcs_write(GUEST_LIMIT_DS, 0xFFFFFFFF);
12029d7eaa29SArthur Chunqi Li vmcs_write(GUEST_LIMIT_ES, 0xFFFFFFFF);
12039d7eaa29SArthur Chunqi Li vmcs_write(GUEST_LIMIT_SS, 0xFFFFFFFF);
12049d7eaa29SArthur Chunqi Li vmcs_write(GUEST_LIMIT_FS, 0xFFFFFFFF);
12059d7eaa29SArthur Chunqi Li vmcs_write(GUEST_LIMIT_GS, 0xFFFFFFFF);
12069d7eaa29SArthur Chunqi Li vmcs_write(GUEST_LIMIT_LDTR, 0xffff);
1207a7f32d87SPaolo Bonzini vmcs_write(GUEST_LIMIT_TR, get_gdt_entry_limit(tss_descr));
12089d7eaa29SArthur Chunqi Li
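	/*
	 * The access-rights values below follow the VMX AR-byte layout:
	 * 0xa09b = G | L | P | S | type 0xb (64-bit accessed code segment),
	 * 0xc093 = G | D/B | P | S | type 0x3 (accessed read/write data).
	 */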
12099d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_CS, 0xa09b);
12109d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_DS, 0xc093);
12119d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_ES, 0xc093);
12129d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_FS, 0xc093);
12139d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_GS, 0xc093);
12149d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_SS, 0xc093);
12159d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_LDTR, 0x82);
12169d7eaa29SArthur Chunqi Li vmcs_write(GUEST_AR_TR, 0x8b);
12179d7eaa29SArthur Chunqi Li
12189d7eaa29SArthur Chunqi Li /* 26.3.1.3 */
12195ed10141SPaolo Bonzini vmcs_write(GUEST_BASE_GDTR, gdt_descr.base);
1220337166aaSJan Kiszka vmcs_write(GUEST_BASE_IDTR, idt_descr.base);
12215ed10141SPaolo Bonzini vmcs_write(GUEST_LIMIT_GDTR, gdt_descr.limit);
1222337166aaSJan Kiszka vmcs_write(GUEST_LIMIT_IDTR, idt_descr.limit);
12239d7eaa29SArthur Chunqi Li
12249d7eaa29SArthur Chunqi Li /* 26.3.1.4 */
12259d7eaa29SArthur Chunqi Li vmcs_write(GUEST_RIP, (u64)(&guest_entry));
1226342db9a1SAaron Lewis vmcs_write(GUEST_RSP, guest_stack_top);
1227a12e1d61SKrish Sadhukhan vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
12289d7eaa29SArthur Chunqi Li
12299d7eaa29SArthur Chunqi Li /* 26.3.1.5 */
123017ba0dd0SJan Kiszka vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
12319d7eaa29SArthur Chunqi Li vmcs_write(GUEST_INTR_STATE, 0);
12329d7eaa29SArthur Chunqi Li }
12339d7eaa29SArthur Chunqi Li
12341c320e18SYadong Qi int init_vmcs(struct vmcs **vmcs)
12359d7eaa29SArthur Chunqi Li {
12369d7eaa29SArthur Chunqi Li *vmcs = alloc_page();
12370903962dSYang Weijiang (*vmcs)->hdr.revision_id = basic_msr.revision;
12389d7eaa29SArthur Chunqi Li /* vmclear first to init vmcs */
12399d7eaa29SArthur Chunqi Li if (vmcs_clear(*vmcs)) {
12409d7eaa29SArthur Chunqi Li printf("%s : vmcs_clear error\n", __func__);
12419d7eaa29SArthur Chunqi Li return 1;
12429d7eaa29SArthur Chunqi Li }
12439d7eaa29SArthur Chunqi Li
12449d7eaa29SArthur Chunqi Li if (make_vmcs_current(*vmcs)) {
12459d7eaa29SArthur Chunqi Li printf("%s : make_vmcs_current error\n", __func__);
12469d7eaa29SArthur Chunqi Li return 1;
12479d7eaa29SArthur Chunqi Li }
12489d7eaa29SArthur Chunqi Li
12499d7eaa29SArthur Chunqi Li /* All settings of the pin/exit/enter/cpu
12509d7eaa29SArthur Chunqi Li control fields should be placed here */
12519d7eaa29SArthur Chunqi Li ctrl_pin |= PIN_EXTINT | PIN_NMI | PIN_VIRT_NMI;
1252*184ee0d5SSean Christopherson ctrl_exit = EXI_LOAD_EFER | EXI_HOST_64 | EXI_LOAD_PAT;
12539d7eaa29SArthur Chunqi Li ctrl_enter = (ENT_LOAD_EFER | ENT_GUEST_64);
12549d7eaa29SArthur Chunqi Li /* Disable IO instruction VMEXIT now */
12559d7eaa29SArthur Chunqi Li ctrl_cpu[0] &= (~(CPU_IO | CPU_IO_BITMAP));
12569d7eaa29SArthur Chunqi Li ctrl_cpu[1] = 0;
12579d7eaa29SArthur Chunqi Li
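	/*
	 * Sanitize the requested controls against the capability MSRs:
	 * OR-ing in .set forces on the bits that must be 1, AND-ing with
	 * .clr drops any bit the CPU does not allow to be 1.
	 */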
12589d7eaa29SArthur Chunqi Li ctrl_pin = (ctrl_pin | ctrl_pin_rev.set) & ctrl_pin_rev.clr;
12599d7eaa29SArthur Chunqi Li ctrl_enter = (ctrl_enter | ctrl_enter_rev.set) & ctrl_enter_rev.clr;
12609d7eaa29SArthur Chunqi Li ctrl_exit = (ctrl_exit | ctrl_exit_rev.set) & ctrl_exit_rev.clr;
12619d7eaa29SArthur Chunqi Li ctrl_cpu[0] = (ctrl_cpu[0] | ctrl_cpu_rev[0].set) & ctrl_cpu_rev[0].clr;
12629d7eaa29SArthur Chunqi Li
12639d7eaa29SArthur Chunqi Li init_vmcs_ctrl();
12649d7eaa29SArthur Chunqi Li init_vmcs_host();
12659d7eaa29SArthur Chunqi Li init_vmcs_guest();
12669d7eaa29SArthur Chunqi Li return 0;
12679d7eaa29SArthur Chunqi Li }
12689d7eaa29SArthur Chunqi Li
1269883f3fccSLiran Alon void enable_vmx(void)
1270883f3fccSLiran Alon {
1271883f3fccSLiran Alon bool vmx_enabled =
1272883f3fccSLiran Alon rdmsr(MSR_IA32_FEATURE_CONTROL) &
1273883f3fccSLiran Alon FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
1274883f3fccSLiran Alon
1275883f3fccSLiran Alon if (!vmx_enabled) {
1276883f3fccSLiran Alon wrmsr(MSR_IA32_FEATURE_CONTROL,
1277883f3fccSLiran Alon FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX |
1278883f3fccSLiran Alon FEATURE_CONTROL_LOCKED);
1279883f3fccSLiran Alon }
1280883f3fccSLiran Alon }
1281883f3fccSLiran Alon
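/*
 * Cache the VMX capability MSRs. When IA32_VMX_BASIC bit 55 (basic_msr.ctrl)
 * is set, the TRUE_* variants exist and additionally report which default1
 * control bits may be cleared.
 */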
1282e836e27cSLiran Alon static void init_vmx_caps(void)
12839d7eaa29SArthur Chunqi Li {
12840903962dSYang Weijiang basic_msr.val = rdmsr(MSR_IA32_VMX_BASIC);
12850903962dSYang Weijiang ctrl_pin_rev.val = rdmsr(basic_msr.ctrl ? MSR_IA32_VMX_TRUE_PIN
12869d7eaa29SArthur Chunqi Li : MSR_IA32_VMX_PINBASED_CTLS);
12870903962dSYang Weijiang ctrl_exit_rev.val = rdmsr(basic_msr.ctrl ? MSR_IA32_VMX_TRUE_EXIT
12889d7eaa29SArthur Chunqi Li : MSR_IA32_VMX_EXIT_CTLS);
12890903962dSYang Weijiang ctrl_enter_rev.val = rdmsr(basic_msr.ctrl ? MSR_IA32_VMX_TRUE_ENTRY
12909d7eaa29SArthur Chunqi Li : MSR_IA32_VMX_ENTRY_CTLS);
12910903962dSYang Weijiang ctrl_cpu_rev[0].val = rdmsr(basic_msr.ctrl ? MSR_IA32_VMX_TRUE_PROC
12929d7eaa29SArthur Chunqi Li : MSR_IA32_VMX_PROCBASED_CTLS);
12936884af61SArthur Chunqi Li if ((ctrl_cpu_rev[0].clr & CPU_SECONDARY) != 0)
12949d7eaa29SArthur Chunqi Li ctrl_cpu_rev[1].val = rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
12956884af61SArthur Chunqi Li else
12966884af61SArthur Chunqi Li ctrl_cpu_rev[1].val = 0;
12976884af61SArthur Chunqi Li if ((ctrl_cpu_rev[1].clr & (CPU_EPT | CPU_VPID)) != 0)
12989d7eaa29SArthur Chunqi Li ept_vpid.val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
12996884af61SArthur Chunqi Li else
13006884af61SArthur Chunqi Li ept_vpid.val = 0;
1301e836e27cSLiran Alon }
1302e836e27cSLiran Alon
13034f18f5deSLiran Alon void init_vmx(u64 *vmxon_region)
1304e836e27cSLiran Alon {
1305e836e27cSLiran Alon ulong fix_cr0_set, fix_cr0_clr;
1306e836e27cSLiran Alon ulong fix_cr4_set, fix_cr4_clr;
1307e836e27cSLiran Alon
1308e836e27cSLiran Alon fix_cr0_set = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
1309e836e27cSLiran Alon fix_cr0_clr = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
1310e836e27cSLiran Alon fix_cr4_set = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
1311e836e27cSLiran Alon fix_cr4_clr = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
1312e836e27cSLiran Alon
13139d7eaa29SArthur Chunqi Li write_cr0((read_cr0() & fix_cr0_clr) | fix_cr0_set);
13149d7eaa29SArthur Chunqi Li write_cr4((read_cr4() & fix_cr4_clr) | fix_cr4_set | X86_CR4_VMXE);
13159d7eaa29SArthur Chunqi Li
13160903962dSYang Weijiang *vmxon_region = basic_msr.revision;
131793f10d6fSLiran Alon }
13189d7eaa29SArthur Chunqi Li
131993f10d6fSLiran Alon static void alloc_bsp_vmx_pages(void)
132093f10d6fSLiran Alon {
1321c937d495SLiran Alon bsp_vmxon_region = alloc_page();
1322342db9a1SAaron Lewis guest_stack_top = (uintptr_t)alloc_page() + PAGE_SIZE;
1323342db9a1SAaron Lewis guest_syscall_stack_top = (uintptr_t)alloc_page() + PAGE_SIZE;
132493f10d6fSLiran Alon vmcs_root = alloc_page();
132593f10d6fSLiran Alon }
132693f10d6fSLiran Alon
132793f10d6fSLiran Alon static void init_bsp_vmx(void)
132893f10d6fSLiran Alon {
132993f10d6fSLiran Alon init_vmx_caps();
133093f10d6fSLiran Alon alloc_bsp_vmx_pages();
1331c937d495SLiran Alon init_vmx(bsp_vmxon_region);
13329d7eaa29SArthur Chunqi Li }
13339d7eaa29SArthur Chunqi Li
1334e3f363c4SJan Kiszka static void do_vmxon_off(void *data)
13359d7eaa29SArthur Chunqi Li {
133634946f9bSSean Christopherson TEST_ASSERT(!vmx_on());
133734946f9bSSean Christopherson TEST_ASSERT(!vmx_off());
133803f37ef2SPaolo Bonzini }
13393b127446SJan Kiszka
1340e3f363c4SJan Kiszka static void do_write_feature_control(void *data)
13413b127446SJan Kiszka {
13423b127446SJan Kiszka wrmsr(MSR_IA32_FEATURE_CONTROL, 0);
134303f37ef2SPaolo Bonzini }
13443b127446SJan Kiszka
13453b127446SJan Kiszka static int test_vmx_feature_control(void)
13463b127446SJan Kiszka {
13473b127446SJan Kiszka u64 ia32_feature_control;
13483b127446SJan Kiszka bool vmx_enabled;
13494e38e9dfSLiran Alon bool feature_control_locked;
13503b127446SJan Kiszka
13513b127446SJan Kiszka ia32_feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
13524e38e9dfSLiran Alon vmx_enabled =
13534e38e9dfSLiran Alon ia32_feature_control & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
13544e38e9dfSLiran Alon feature_control_locked =
13554e38e9dfSLiran Alon ia32_feature_control & FEATURE_CONTROL_LOCKED;
13564e38e9dfSLiran Alon
13574e38e9dfSLiran Alon if (vmx_enabled && feature_control_locked) {
13583b127446SJan Kiszka printf("VMX enabled and locked by BIOS\n");
13593b127446SJan Kiszka return 0;
13604e38e9dfSLiran Alon } else if (feature_control_locked) {
13613b127446SJan Kiszka printf("ERROR: VMX locked out by BIOS!?\n");
13623b127446SJan Kiszka return 1;
13633b127446SJan Kiszka }
13643b127446SJan Kiszka
13653b127446SJan Kiszka wrmsr(MSR_IA32_FEATURE_CONTROL, 0);
1366a299895bSThomas Huth report(test_for_exception(GP_VECTOR, &do_vmxon_off, NULL),
1367a299895bSThomas Huth "test vmxon with FEATURE_CONTROL cleared");
13683b127446SJan Kiszka
13694e38e9dfSLiran Alon wrmsr(MSR_IA32_FEATURE_CONTROL, FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX);
1370a299895bSThomas Huth report(test_for_exception(GP_VECTOR, &do_vmxon_off, NULL),
1371a299895bSThomas Huth "test vmxon without FEATURE_CONTROL lock");
13723b127446SJan Kiszka
13734e38e9dfSLiran Alon wrmsr(MSR_IA32_FEATURE_CONTROL,
13744e38e9dfSLiran Alon FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX |
13754e38e9dfSLiran Alon FEATURE_CONTROL_LOCKED);
13764e38e9dfSLiran Alon
13774e38e9dfSLiran Alon ia32_feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
13784e38e9dfSLiran Alon vmx_enabled =
13794e38e9dfSLiran Alon ia32_feature_control & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
1380a299895bSThomas Huth report(vmx_enabled, "test enable VMX in FEATURE_CONTROL");
13813b127446SJan Kiszka
1382a299895bSThomas Huth report(test_for_exception(GP_VECTOR, &do_write_feature_control, NULL),
1383a299895bSThomas Huth "test FEATURE_CONTROL lock bit");
13843b127446SJan Kiszka
13853b127446SJan Kiszka return !vmx_enabled;
13869d7eaa29SArthur Chunqi Li }
13879d7eaa29SArthur Chunqi Li
1388f7b730bcSSean Christopherson
1389f7b730bcSSean Christopherson static void write_cr(int cr_number, unsigned long val)
1390f7b730bcSSean Christopherson {
1391f7b730bcSSean Christopherson if (!cr_number)
1392f7b730bcSSean Christopherson write_cr0(val);
1393f7b730bcSSean Christopherson else
1394f7b730bcSSean Christopherson write_cr4(val);
1395f7b730bcSSean Christopherson }
1396f7b730bcSSean Christopherson
1397f7b730bcSSean Christopherson static int write_cr_safe(int cr_number, unsigned long val)
1398f7b730bcSSean Christopherson {
1399f7b730bcSSean Christopherson if (!cr_number)
1400f7b730bcSSean Christopherson return write_cr0_safe(val);
1401f7b730bcSSean Christopherson else
1402f7b730bcSSean Christopherson return write_cr4_safe(val);
1403f7b730bcSSean Christopherson }
1404f7b730bcSSean Christopherson
1405f7b730bcSSean Christopherson static int test_vmxon_bad_cr(int cr_number, unsigned long orig_cr,
1406f7b730bcSSean Christopherson unsigned long *flexible_bits)
1407f7b730bcSSean Christopherson {
1408f7b730bcSSean Christopherson unsigned long required1, disallowed1, val, bit;
1409fdf81dabSSean Christopherson int ret, i, expected;
1410f7b730bcSSean Christopherson
1411f7b730bcSSean Christopherson if (!cr_number) {
1412f7b730bcSSean Christopherson required1 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
1413f7b730bcSSean Christopherson disallowed1 = ~rdmsr(MSR_IA32_VMX_CR0_FIXED1);
1414f7b730bcSSean Christopherson } else {
1415f7b730bcSSean Christopherson required1 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
1416f7b730bcSSean Christopherson disallowed1 = ~rdmsr(MSR_IA32_VMX_CR4_FIXED1);
1417f7b730bcSSean Christopherson }
1418f7b730bcSSean Christopherson
1419f7b730bcSSean Christopherson *flexible_bits = 0;
1420f7b730bcSSean Christopherson
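	/*
	 * A CR bit must be 1 in VMX operation if it is set in FIXED0, and
	 * must be 0 if it is clear in FIXED1; bits clear in FIXED0 but set
	 * in FIXED1 are flexible.
	 */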
1421f7b730bcSSean Christopherson for (i = 0; i < BITS_PER_LONG; i++) {
1422f7b730bcSSean Christopherson bit = BIT(i);
1423f7b730bcSSean Christopherson
1424f7b730bcSSean Christopherson /*
1425f7b730bcSSean Christopherson * Don't touch bits that will affect the current paging mode,
1426f7b730bcSSean Christopherson * toggling them will send the test into the weeds before it
1427f7b730bcSSean Christopherson * gets to VMXON. nVMX tests are 64-bit only, so CR4.PAE is
1428f7b730bcSSean Christopherson * guaranteed to be '1', i.e. PSE is fair game. PKU/PKS are
1429f7b730bcSSean Christopherson * also fair game as KVM doesn't configure any keys. SMAP and
1430f7b730bcSSean Christopherson * SMEP are off limits because the page tables have the USER
1431f7b730bcSSean Christopherson * bit set at all levels.
1432f7b730bcSSean Christopherson */
1433f7b730bcSSean Christopherson if ((cr_number == 0 && (bit == X86_CR0_PE || bit == X86_CR0_PG)) ||
14348696e6c8SPaolo Bonzini (cr_number == 4 && (bit == X86_CR4_PAE || bit == X86_CR4_SMAP ||
1435b518eb13SYang Weijiang bit == X86_CR4_SMEP || bit == X86_CR4_CET)))
1436f7b730bcSSean Christopherson continue;
1437f7b730bcSSean Christopherson
1438f7b730bcSSean Christopherson if (!(bit & required1) && !(bit & disallowed1)) {
1439f7b730bcSSean Christopherson if (!write_cr_safe(cr_number, orig_cr ^ bit)) {
1440f7b730bcSSean Christopherson *flexible_bits |= bit;
1441f7b730bcSSean Christopherson write_cr(cr_number, orig_cr);
1442f7b730bcSSean Christopherson }
1443f7b730bcSSean Christopherson continue;
1444f7b730bcSSean Christopherson }
1445f7b730bcSSean Christopherson
1446f7b730bcSSean Christopherson assert(!(required1 & disallowed1));
1447f7b730bcSSean Christopherson
1448f7b730bcSSean Christopherson if (required1 & bit)
1449f7b730bcSSean Christopherson val = orig_cr & ~bit;
1450f7b730bcSSean Christopherson else
1451f7b730bcSSean Christopherson val = orig_cr | bit;
1452f7b730bcSSean Christopherson
1453f7b730bcSSean Christopherson if (write_cr_safe(cr_number, val))
1454f7b730bcSSean Christopherson continue;
1455f7b730bcSSean Christopherson
1456fdf81dabSSean Christopherson /*
1457fdf81dabSSean Christopherson * CR0.PE==0 and CR4.VMXE==0 result in #UD, all other invalid
1458fdf81dabSSean Christopherson * CR0/CR4 bits result in #GP. Include CR0.PE even though it's
1459fdf81dabSSean Christopherson * dead code (see above) for completeness.
1460fdf81dabSSean Christopherson */
1461fdf81dabSSean Christopherson if ((cr_number == 0 && bit == X86_CR0_PE) ||
1462fdf81dabSSean Christopherson (cr_number == 4 && bit == X86_CR4_VMXE))
1463fdf81dabSSean Christopherson expected = UD_VECTOR;
1464fdf81dabSSean Christopherson else
1465fdf81dabSSean Christopherson expected = GP_VECTOR;
1466fdf81dabSSean Christopherson
1467f7b730bcSSean Christopherson ret = vmx_on();
1468fdf81dabSSean Christopherson report(ret == expected,
1469fdf81dabSSean Christopherson "VMXON with CR%d bit %d %s should %s, got '%d'",
1470fdf81dabSSean Christopherson cr_number, i, (required1 & bit) ? "cleared" : "set",
1471fdf81dabSSean Christopherson expected == UD_VECTOR ? "#UD" : "#GP", ret);
1472f7b730bcSSean Christopherson
1473f7b730bcSSean Christopherson write_cr(cr_number, orig_cr);
1474f7b730bcSSean Christopherson
1475f7b730bcSSean Christopherson if (ret <= 0)
1476f7b730bcSSean Christopherson return 1;
1477f7b730bcSSean Christopherson }
1478f7b730bcSSean Christopherson return 0;
1479f7b730bcSSean Christopherson }
1480f7b730bcSSean Christopherson
14819d7eaa29SArthur Chunqi Li static int test_vmxon(void)
14829d7eaa29SArthur Chunqi Li {
1483f7b730bcSSean Christopherson unsigned long orig_cr0, flexible_cr0, orig_cr4, flexible_cr4;
1484e2cf1c9dSEduardo Habkost int width = cpuid_maxphyaddr();
1485f7b730bcSSean Christopherson u64 *vmxon_region;
14864dbaa4e6SSean Christopherson int ret;
14879d7eaa29SArthur Chunqi Li
1488f7b730bcSSean Christopherson orig_cr0 = read_cr0();
1489f7b730bcSSean Christopherson if (test_vmxon_bad_cr(0, orig_cr0, &flexible_cr0))
1490f7b730bcSSean Christopherson return 1;
1491f7b730bcSSean Christopherson
1492f7b730bcSSean Christopherson orig_cr4 = read_cr4();
1493f7b730bcSSean Christopherson if (test_vmxon_bad_cr(4, orig_cr4, &flexible_cr4))
1494f7b730bcSSean Christopherson return 1;
1495f7b730bcSSean Christopherson
1496ce21d809SBandan Das /* Unaligned page access */
1497c937d495SLiran Alon vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region + 1);
14984dbaa4e6SSean Christopherson ret = __vmxon_safe(vmxon_region);
14994dbaa4e6SSean Christopherson report(ret < 0, "test vmxon with unaligned vmxon region");
15004dbaa4e6SSean Christopherson if (ret >= 0)
15014dbaa4e6SSean Christopherson return 1;
1502ce21d809SBandan Das
1503ce21d809SBandan Das /* GPA bits beyond physical address width are set */
1504c937d495SLiran Alon vmxon_region = (u64 *)((intptr_t)bsp_vmxon_region | ((u64)1 << (width+1)));
15054dbaa4e6SSean Christopherson ret = __vmxon_safe(vmxon_region);
15064dbaa4e6SSean Christopherson report(ret < 0, "test vmxon with bits set beyond physical address width");
15074dbaa4e6SSean Christopherson if (ret >= 0)
15084dbaa4e6SSean Christopherson return 1;
1509ce21d809SBandan Das
1510912c0d72SThomas Huth /* invalid revision identifier */
1511c937d495SLiran Alon *bsp_vmxon_region = 0xba9da9;
15124dbaa4e6SSean Christopherson ret = vmxon_safe();
15134dbaa4e6SSean Christopherson report(ret < 0, "test vmxon with invalid revision identifier");
15144dbaa4e6SSean Christopherson if (ret >= 0)
15154dbaa4e6SSean Christopherson return 1;
1516ce21d809SBandan Das
1517f7b730bcSSean Christopherson /* and finally a valid region, with valid-but-tweaked cr0/cr4 */
1518f7b730bcSSean Christopherson write_cr0(orig_cr0 ^ flexible_cr0);
1519f7b730bcSSean Christopherson write_cr4(orig_cr4 ^ flexible_cr4);
15200903962dSYang Weijiang *bsp_vmxon_region = basic_msr.revision;
15212171b69bSSean Christopherson ret = vmxon_safe();
1522a299895bSThomas Huth report(!ret, "test vmxon with valid vmxon region");
1523f7b730bcSSean Christopherson write_cr0(orig_cr0);
1524f7b730bcSSean Christopherson write_cr4(orig_cr4);
15259d7eaa29SArthur Chunqi Li return ret;
15269d7eaa29SArthur Chunqi Li }
15279d7eaa29SArthur Chunqi Li
15289d7eaa29SArthur Chunqi Li static void test_vmptrld(void)
15299d7eaa29SArthur Chunqi Li {
1530daeec979SBandan Das struct vmcs *vmcs, *tmp_root;
1531e2cf1c9dSEduardo Habkost int width = cpuid_maxphyaddr();
15329d7eaa29SArthur Chunqi Li
15339d7eaa29SArthur Chunqi Li vmcs = alloc_page();
15340903962dSYang Weijiang vmcs->hdr.revision_id = basic_msr.revision;
1535daeec979SBandan Das
1536daeec979SBandan Das /* Unaligned page access */
1537daeec979SBandan Das tmp_root = (struct vmcs *)((intptr_t)vmcs + 1);
1538a299895bSThomas Huth report(make_vmcs_current(tmp_root) == 1,
1539a299895bSThomas Huth "test vmptrld with unaligned vmcs");
1540daeec979SBandan Das
1541daeec979SBandan Das /* GPA bits beyond physical address width are set */
1542daeec979SBandan Das tmp_root = (struct vmcs *)((intptr_t)vmcs |
1543daeec979SBandan Das ((u64)1 << (width+1)));
1544a299895bSThomas Huth report(make_vmcs_current(tmp_root) == 1,
1545a299895bSThomas Huth "test vmptrld with vmcs address bits set beyond physical address width");
1546daeec979SBandan Das
1547daeec979SBandan Das /* Pass VMXON region */
15481c90aec0SJim Mattson assert(!vmcs_clear(vmcs));
15491c90aec0SJim Mattson assert(!make_vmcs_current(vmcs));
1550c937d495SLiran Alon tmp_root = (struct vmcs *)bsp_vmxon_region;
1551a299895bSThomas Huth report(make_vmcs_current(tmp_root) == 1,
1552a299895bSThomas Huth "test vmptrld with vmxon region");
1553a299895bSThomas Huth report(vmcs_read(VMX_INST_ERROR) == VMXERR_VMPTRLD_VMXON_POINTER,
1554a299895bSThomas Huth "test vmptrld with vmxon region vm-instruction error");
1555daeec979SBandan Das
1556a299895bSThomas Huth report(make_vmcs_current(vmcs) == 0,
1557a299895bSThomas Huth "test vmptrld with valid vmcs region");
15589d7eaa29SArthur Chunqi Li }
15599d7eaa29SArthur Chunqi Li
15609d7eaa29SArthur Chunqi Li static void test_vmptrst(void)
15619d7eaa29SArthur Chunqi Li {
15629d7eaa29SArthur Chunqi Li int ret;
15639d7eaa29SArthur Chunqi Li struct vmcs *vmcs1, *vmcs2;
15649d7eaa29SArthur Chunqi Li
15659d7eaa29SArthur Chunqi Li vmcs1 = alloc_page();
15669d7eaa29SArthur Chunqi Li init_vmcs(&vmcs1);
15679d7eaa29SArthur Chunqi Li ret = vmcs_save(&vmcs2);
1568a299895bSThomas Huth report((!ret) && (vmcs1 == vmcs2), "test vmptrst");
15699d7eaa29SArthur Chunqi Li }
15709d7eaa29SArthur Chunqi Li
157169c8d31cSJan Kiszka struct vmx_ctl_msr {
157269c8d31cSJan Kiszka const char *name;
157369c8d31cSJan Kiszka u32 index, true_index;
157469c8d31cSJan Kiszka u32 default1;
157569c8d31cSJan Kiszka } vmx_ctl_msr[] = {
157669c8d31cSJan Kiszka { "MSR_IA32_VMX_PINBASED_CTLS", MSR_IA32_VMX_PINBASED_CTLS,
157769c8d31cSJan Kiszka MSR_IA32_VMX_TRUE_PIN, 0x16 },
157869c8d31cSJan Kiszka { "MSR_IA32_VMX_PROCBASED_CTLS", MSR_IA32_VMX_PROCBASED_CTLS,
157969c8d31cSJan Kiszka MSR_IA32_VMX_TRUE_PROC, 0x401e172 },
158069c8d31cSJan Kiszka { "MSR_IA32_VMX_PROCBASED_CTLS2", MSR_IA32_VMX_PROCBASED_CTLS2,
158169c8d31cSJan Kiszka MSR_IA32_VMX_PROCBASED_CTLS2, 0 },
158269c8d31cSJan Kiszka { "MSR_IA32_VMX_EXIT_CTLS", MSR_IA32_VMX_EXIT_CTLS,
158369c8d31cSJan Kiszka MSR_IA32_VMX_TRUE_EXIT, 0x36dff },
158469c8d31cSJan Kiszka { "MSR_IA32_VMX_ENTRY_CTLS", MSR_IA32_VMX_ENTRY_CTLS,
158569c8d31cSJan Kiszka MSR_IA32_VMX_TRUE_ENTRY, 0x11ff },
158669c8d31cSJan Kiszka };
158769c8d31cSJan Kiszka
158869c8d31cSJan Kiszka static void test_vmx_caps(void)
158969c8d31cSJan Kiszka {
159069c8d31cSJan Kiszka u64 val, default1, fixed0, fixed1;
159169c8d31cSJan Kiszka union vmx_ctrl_msr ctrl, true_ctrl;
159269c8d31cSJan Kiszka unsigned int n;
159369c8d31cSJan Kiszka bool ok;
159469c8d31cSJan Kiszka
159569c8d31cSJan Kiszka printf("\nTest suite: VMX capability reporting\n");
159669c8d31cSJan Kiszka
15970903962dSYang Weijiang report((basic_msr.revision & (1ul << 31)) == 0 &&
15980903962dSYang Weijiang basic_msr.size > 0 && basic_msr.size <= 4096 &&
15990903962dSYang Weijiang (basic_msr.type == 0 || basic_msr.type == 6) &&
16000903962dSYang Weijiang basic_msr.reserved1 == 0 && basic_msr.reserved2 == 0,
1601a299895bSThomas Huth "MSR_IA32_VMX_BASIC");
160269c8d31cSJan Kiszka
160369c8d31cSJan Kiszka val = rdmsr(MSR_IA32_VMX_MISC);
1604a299895bSThomas Huth report((!(ctrl_cpu_rev[1].clr & CPU_URG) || val & (1ul << 5)) &&
160569c8d31cSJan Kiszka ((val >> 16) & 0x1ff) <= 256 &&
1606a299895bSThomas Huth (val & 0x80007e00) == 0,
1607a299895bSThomas Huth "MSR_IA32_VMX_MISC");
160869c8d31cSJan Kiszka
160969c8d31cSJan Kiszka for (n = 0; n < ARRAY_SIZE(vmx_ctl_msr); n++) {
161069c8d31cSJan Kiszka ctrl.val = rdmsr(vmx_ctl_msr[n].index);
161169c8d31cSJan Kiszka default1 = vmx_ctl_msr[n].default1;
161269c8d31cSJan Kiszka ok = (ctrl.set & default1) == default1;
161369c8d31cSJan Kiszka ok = ok && (ctrl.set & ~ctrl.clr) == 0;
16140903962dSYang Weijiang if (ok && basic_msr.ctrl) {
161569c8d31cSJan Kiszka true_ctrl.val = rdmsr(vmx_ctl_msr[n].true_index);
161669c8d31cSJan Kiszka ok = ctrl.clr == true_ctrl.clr;
161769c8d31cSJan Kiszka ok = ok && ctrl.set == (true_ctrl.set | default1);
161869c8d31cSJan Kiszka }
1619a299895bSThomas Huth report(ok, "%s", vmx_ctl_msr[n].name);
162069c8d31cSJan Kiszka }
162169c8d31cSJan Kiszka
162269c8d31cSJan Kiszka fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
162369c8d31cSJan Kiszka fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
1624a299895bSThomas Huth report(((fixed0 ^ fixed1) & ~fixed1) == 0,
1625a299895bSThomas Huth "MSR_IA32_VMX_CR0_FIXED0/1");
162669c8d31cSJan Kiszka
162769c8d31cSJan Kiszka fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
162869c8d31cSJan Kiszka fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
1629a299895bSThomas Huth report(((fixed0 ^ fixed1) & ~fixed1) == 0,
1630a299895bSThomas Huth "MSR_IA32_VMX_CR4_FIXED0/1");
163169c8d31cSJan Kiszka
163269c8d31cSJan Kiszka val = rdmsr(MSR_IA32_VMX_VMCS_ENUM);
1633a299895bSThomas Huth report((val & VMCS_FIELD_INDEX_MASK) >= 0x2a &&
1634a299895bSThomas Huth (val & 0xfffffffffffffc01Ull) == 0,
1635a299895bSThomas Huth "MSR_IA32_VMX_VMCS_ENUM");
163669c8d31cSJan Kiszka
1637592cb377SSean Christopherson fixed0 = -1ull;
1638c08f83c9SSean Christopherson fixed0 &= ~(EPT_CAP_EXEC_ONLY |
1639592cb377SSean Christopherson EPT_CAP_PWL4 |
1640a434c431SSean Christopherson EPT_CAP_PWL5 |
1641592cb377SSean Christopherson EPT_CAP_UC |
1642592cb377SSean Christopherson EPT_CAP_WB |
1643592cb377SSean Christopherson EPT_CAP_2M_PAGE |
1644592cb377SSean Christopherson EPT_CAP_1G_PAGE |
1645592cb377SSean Christopherson EPT_CAP_INVEPT |
1646592cb377SSean Christopherson EPT_CAP_AD_FLAG |
1647592cb377SSean Christopherson EPT_CAP_ADV_EPT_INFO |
1648592cb377SSean Christopherson EPT_CAP_INVEPT_SINGLE |
1649592cb377SSean Christopherson EPT_CAP_INVEPT_ALL |
1650592cb377SSean Christopherson VPID_CAP_INVVPID |
1651592cb377SSean Christopherson VPID_CAP_INVVPID_ADDR |
1652592cb377SSean Christopherson VPID_CAP_INVVPID_CXTGLB |
1653592cb377SSean Christopherson VPID_CAP_INVVPID_ALL |
1654592cb377SSean Christopherson VPID_CAP_INVVPID_CXTLOC);
1655592cb377SSean Christopherson
165669c8d31cSJan Kiszka val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
1657592cb377SSean Christopherson report((val & fixed0) == 0,
1658a299895bSThomas Huth "MSR_IA32_VMX_EPT_VPID_CAP");
165969c8d31cSJan Kiszka }
166069c8d31cSJan Kiszka
16619d7eaa29SArthur Chunqi Li /* This function can only be called in the guest */
1662f441716dSKrish Sadhukhan void __attribute__((__used__)) hypercall(u32 hypercall_no)
16639d7eaa29SArthur Chunqi Li {
16649d7eaa29SArthur Chunqi Li u64 val = 0;
16659d7eaa29SArthur Chunqi Li val = (hypercall_no & HYPERCALL_MASK) | HYPERCALL_BIT;
16669d7eaa29SArthur Chunqi Li hypercall_field = val;
16679d7eaa29SArthur Chunqi Li asm volatile("vmcall\n\t");
16689d7eaa29SArthur Chunqi Li }
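
/*
 * Illustrative sketch (assumption, not framework code): an L2 guest routine
 * can end the test by issuing the VMEXIT hypercall; "example_guest_main" is
 * a made-up name.
 *
 *	static void example_guest_main(void)
 *	{
 *		report_pass("L2 ran");
 *		hypercall(HYPERCALL_VMEXIT);
 *	}
 */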
16699d7eaa29SArthur Chunqi Li
167038e505ddSSean Christopherson static bool is_hypercall(union exit_reason exit_reason)
16719d7eaa29SArthur Chunqi Li {
167238e505ddSSean Christopherson return exit_reason.basic == VMX_VMCALL &&
167338e505ddSSean Christopherson (hypercall_field & HYPERCALL_BIT);
16749d7eaa29SArthur Chunqi Li }
16759d7eaa29SArthur Chunqi Li
16767db17e21SThomas Huth static int handle_hypercall(void)
16779d7eaa29SArthur Chunqi Li {
16789d7eaa29SArthur Chunqi Li ulong hypercall_no;
16799d7eaa29SArthur Chunqi Li
16809d7eaa29SArthur Chunqi Li hypercall_no = hypercall_field & HYPERCALL_MASK;
16819d7eaa29SArthur Chunqi Li hypercall_field = 0;
16829d7eaa29SArthur Chunqi Li switch (hypercall_no) {
16839d7eaa29SArthur Chunqi Li case HYPERCALL_VMEXIT:
16849d7eaa29SArthur Chunqi Li return VMX_TEST_VMEXIT;
1685794c67a9SPeter Feiner case HYPERCALL_VMABORT:
1686794c67a9SPeter Feiner return VMX_TEST_VMABORT;
1687794c67a9SPeter Feiner case HYPERCALL_VMSKIP:
1688794c67a9SPeter Feiner return VMX_TEST_VMSKIP;
16899d7eaa29SArthur Chunqi Li default:
1690b006d7ebSAndrew Jones printf("ERROR : Invalid hypercall number : %ld\n", hypercall_no);
16919d7eaa29SArthur Chunqi Li }
16929d7eaa29SArthur Chunqi Li return VMX_TEST_EXIT;
16939d7eaa29SArthur Chunqi Li }
16949d7eaa29SArthur Chunqi Li
1695794c67a9SPeter Feiner static void continue_abort(void)
1696794c67a9SPeter Feiner {
1697794c67a9SPeter Feiner assert(!in_guest);
1698794c67a9SPeter Feiner printf("Host was here when guest aborted:\n");
1699794c67a9SPeter Feiner dump_stack();
1700794c67a9SPeter Feiner longjmp(abort_target, 1);
1701794c67a9SPeter Feiner abort();
1702794c67a9SPeter Feiner }
1703794c67a9SPeter Feiner
1704794c67a9SPeter Feiner void __abort_test(void)
1705794c67a9SPeter Feiner {
1706794c67a9SPeter Feiner if (in_guest)
1707794c67a9SPeter Feiner hypercall(HYPERCALL_VMABORT);
1708794c67a9SPeter Feiner else
1709794c67a9SPeter Feiner longjmp(abort_target, 1);
1710794c67a9SPeter Feiner abort();
1711794c67a9SPeter Feiner }
1712794c67a9SPeter Feiner
1713794c67a9SPeter Feiner static void continue_skip(void)
1714794c67a9SPeter Feiner {
1715794c67a9SPeter Feiner assert(!in_guest);
1716794c67a9SPeter Feiner longjmp(abort_target, 1);
1717794c67a9SPeter Feiner abort();
1718794c67a9SPeter Feiner }
1719794c67a9SPeter Feiner
1720794c67a9SPeter Feiner void test_skip(const char *msg)
1721794c67a9SPeter Feiner {
1722794c67a9SPeter Feiner printf("%s skipping test: %s\n", in_guest ? "Guest" : "Host", msg);
1723794c67a9SPeter Feiner if (in_guest)
1724794c67a9SPeter Feiner hypercall(HYPERCALL_VMABORT);
1725794c67a9SPeter Feiner else
1726794c67a9SPeter Feiner longjmp(abort_target, 1);
1727794c67a9SPeter Feiner abort();
1728794c67a9SPeter Feiner }
1729794c67a9SPeter Feiner
1730e0e2af90SSean Christopherson static int exit_handler(union exit_reason exit_reason)
17319d7eaa29SArthur Chunqi Li {
17329d7eaa29SArthur Chunqi Li int ret;
17339d7eaa29SArthur Chunqi Li
17349d7eaa29SArthur Chunqi Li current->exits++;
17351d9284d0SArthur Chunqi Li regs.rflags = vmcs_read(GUEST_RFLAGS);
173638e505ddSSean Christopherson if (is_hypercall(exit_reason))
17379d7eaa29SArthur Chunqi Li ret = handle_hypercall();
17389d7eaa29SArthur Chunqi Li else
1739e0e2af90SSean Christopherson ret = current->exit_handler(exit_reason);
17401d9284d0SArthur Chunqi Li vmcs_write(GUEST_RFLAGS, regs.rflags);
17413b50efe3SPeter Feiner
17429d7eaa29SArthur Chunqi Li return ret;
17439d7eaa29SArthur Chunqi Li }
17443b50efe3SPeter Feiner
17453b50efe3SPeter Feiner /*
17460e0ea94bSSean Christopherson * Tries to enter the guest, populates @result with VM-Fail, VM-Exit, entered,
17470e0ea94bSSean Christopherson * etc...
1748c76ddf06SPeter Feiner */
1749273abd51SBill Wendling static noinline void vmx_enter_guest(struct vmentry_result *result)
17509d7eaa29SArthur Chunqi Li {
17510e0ea94bSSean Christopherson memset(result, 0, sizeof(*result));
17524e809db5SPeter Feiner
1753794c67a9SPeter Feiner in_guest = 1;
17549d7eaa29SArthur Chunqi Li asm volatile (
1755897d8365SPeter Feiner "mov %[HOST_RSP], %%rdi\n\t"
1756897d8365SPeter Feiner "vmwrite %%rsp, %%rdi\n\t"
17579d7eaa29SArthur Chunqi Li LOAD_GPR_C
175844417388SPaolo Bonzini "cmpb $0, %[launched]\n\t"
17599d7eaa29SArthur Chunqi Li "jne 1f\n\t"
17609d7eaa29SArthur Chunqi Li "vmlaunch\n\t"
17619d7eaa29SArthur Chunqi Li "jmp 2f\n\t"
17629d7eaa29SArthur Chunqi Li "1: "
17639d7eaa29SArthur Chunqi Li "vmresume\n\t"
17649d7eaa29SArthur Chunqi Li "2: "
1765f37cf4e2SPeter Feiner SAVE_GPR_C
1766897d8365SPeter Feiner "pushf\n\t"
1767897d8365SPeter Feiner "pop %%rdi\n\t"
17680e0ea94bSSean Christopherson "mov %%rdi, %[vm_fail_flags]\n\t"
17690e0ea94bSSean Christopherson "movl $1, %[vm_fail]\n\t"
1770f37cf4e2SPeter Feiner "jmp 3f\n\t"
17719d7eaa29SArthur Chunqi Li "vmx_return:\n\t"
17729d7eaa29SArthur Chunqi Li SAVE_GPR_C
1773f37cf4e2SPeter Feiner "3: \n\t"
17740e0ea94bSSean Christopherson : [vm_fail]"+m"(result->vm_fail),
17750e0ea94bSSean Christopherson [vm_fail_flags]"=m"(result->flags)
1776897d8365SPeter Feiner : [launched]"m"(launched), [HOST_RSP]"i"(HOST_RSP)
1777897d8365SPeter Feiner : "rdi", "memory", "cc"
17789d7eaa29SArthur Chunqi Li );
1779794c67a9SPeter Feiner in_guest = 0;
17803b50efe3SPeter Feiner
17810e0ea94bSSean Christopherson result->vmlaunch = !launched;
17820e0ea94bSSean Christopherson result->instr = launched ? "vmresume" : "vmlaunch";
17830e0ea94bSSean Christopherson result->exit_reason.full = result->vm_fail ? 0xdead :
17840e0ea94bSSean Christopherson vmcs_read(EXI_REASON);
17850e0ea94bSSean Christopherson result->entered = !result->vm_fail &&
17860e0ea94bSSean Christopherson !result->exit_reason.failed_vmentry;
1787c76ddf06SPeter Feiner }
1788c76ddf06SPeter Feiner
17897db17e21SThomas Huth static int vmx_run(void)
1790c76ddf06SPeter Feiner {
17910e0ea94bSSean Christopherson struct vmentry_result result;
1792c76ddf06SPeter Feiner u32 ret;
1793c76ddf06SPeter Feiner
17940e0ea94bSSean Christopherson while (1) {
17950e0ea94bSSean Christopherson vmx_enter_guest(&result);
17960e0ea94bSSean Christopherson if (result.entered) {
17973b50efe3SPeter Feiner /*
17983b50efe3SPeter Feiner * VMCS isn't in "launched" state if there's been any
17993b50efe3SPeter Feiner * entry failure (early or otherwise).
18003b50efe3SPeter Feiner */
18019d7eaa29SArthur Chunqi Li launched = 1;
1802e0e2af90SSean Christopherson ret = exit_handler(result.exit_reason);
1803db6f75d8SSean Christopherson } else if (current->entry_failure_handler) {
18040e0ea94bSSean Christopherson ret = current->entry_failure_handler(&result);
18053b50efe3SPeter Feiner } else {
1806db6f75d8SSean Christopherson ret = VMX_TEST_EXIT;
18079d7eaa29SArthur Chunqi Li }
18083b50efe3SPeter Feiner
18099d7eaa29SArthur Chunqi Li switch (ret) {
18103b50efe3SPeter Feiner case VMX_TEST_RESUME:
18113b50efe3SPeter Feiner continue;
18129d7eaa29SArthur Chunqi Li case VMX_TEST_VMEXIT:
1813794c67a9SPeter Feiner guest_finished = 1;
18149d7eaa29SArthur Chunqi Li return 0;
18153b50efe3SPeter Feiner case VMX_TEST_EXIT:
18169d7eaa29SArthur Chunqi Li break;
18179d7eaa29SArthur Chunqi Li default:
18183b50efe3SPeter Feiner printf("ERROR : Invalid %s_handler return val %d.\n",
18190e0ea94bSSean Christopherson result.entered ? "exit" : "entry_failure",
18203b50efe3SPeter Feiner ret);
18219d7eaa29SArthur Chunqi Li break;
18229d7eaa29SArthur Chunqi Li }
18233b50efe3SPeter Feiner
18240e0ea94bSSean Christopherson if (result.entered)
1825ef5d77a0SSean Christopherson print_vmexit_info(result.exit_reason);
18263b50efe3SPeter Feiner else
18270e0ea94bSSean Christopherson print_vmentry_failure_info(&result);
18283b50efe3SPeter Feiner abort();
18293b50efe3SPeter Feiner }
18309d7eaa29SArthur Chunqi Li }
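
/*
 * Illustrative sketch (assumption, not framework code): a minimal V1
 * exit_handler for the loop above; HYPERCALL_VMEXIT is already consumed
 * by exit_handler() before this callback runs.
 *
 *	static int example_exit_handler(union exit_reason exit_reason)
 *	{
 *		if (exit_reason.basic == VMX_CPUID) {
 *			vmcs_write(GUEST_RIP, vmcs_read(GUEST_RIP) +
 *					      vmcs_read(EXI_INST_LEN));
 *			return VMX_TEST_RESUME;
 *		}
 *		return VMX_TEST_VMEXIT;
 *	}
 */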
18319d7eaa29SArthur Chunqi Li
1832794c67a9SPeter Feiner static void run_teardown_step(struct test_teardown_step *step)
1833794c67a9SPeter Feiner {
1834794c67a9SPeter Feiner step->func(step->data);
1835794c67a9SPeter Feiner }
1836794c67a9SPeter Feiner
18379d7eaa29SArthur Chunqi Li static int test_run(struct vmx_test *test)
18389d7eaa29SArthur Chunqi Li {
1839794c67a9SPeter Feiner int r;
1840794c67a9SPeter Feiner
1841794c67a9SPeter Feiner /* Validate V2 interface. */
1842794c67a9SPeter Feiner if (test->v2) {
1843794c67a9SPeter Feiner int ret = 0;
1844794c67a9SPeter Feiner if (test->init || test->guest_main || test->exit_handler ||
1845794c67a9SPeter Feiner test->syscall_handler) {
1846198dfd0eSJanis Schoetterl-Glausch report_fail("V2 test cannot specify V1 callbacks.");
1847794c67a9SPeter Feiner ret = 1;
1848794c67a9SPeter Feiner }
1849794c67a9SPeter Feiner if (ret)
1850794c67a9SPeter Feiner return ret;
1851794c67a9SPeter Feiner }
1852794c67a9SPeter Feiner
18539d7eaa29SArthur Chunqi Li if (test->name == NULL)
18549d7eaa29SArthur Chunqi Li test->name = "(no name)";
18559d7eaa29SArthur Chunqi Li if (vmx_on()) {
18569d7eaa29SArthur Chunqi Li printf("%s : vmxon failed.\n", __func__);
18579d7eaa29SArthur Chunqi Li return 1;
18589d7eaa29SArthur Chunqi Li }
1859794c67a9SPeter Feiner
18609d7eaa29SArthur Chunqi Li init_vmcs(&(test->vmcs));
18619d7eaa29SArthur Chunqi Li /* Calling test->init directly is ok here; init_vmcs has done
18629d7eaa29SArthur Chunqi Li vmcs init, vmclear and vmptrld. */
1863c592c151SJan Kiszka if (test->init && test->init(test->vmcs) != VMX_TEST_START)
1864a0e30e71SPaolo Bonzini goto out;
1865794c67a9SPeter Feiner teardown_count = 0;
1866794c67a9SPeter Feiner v2_guest_main = NULL;
18679d7eaa29SArthur Chunqi Li test->exits = 0;
18689d7eaa29SArthur Chunqi Li current = test;
18699d7eaa29SArthur Chunqi Li regs = test->guest_regs;
1870a12e1d61SKrish Sadhukhan vmcs_write(GUEST_RFLAGS, regs.rflags | X86_EFLAGS_FIXED);
18719d7eaa29SArthur Chunqi Li launched = 0;
1872794c67a9SPeter Feiner guest_finished = 0;
18739d7eaa29SArthur Chunqi Li printf("\nTest suite: %s\n", test->name);
1874794c67a9SPeter Feiner
1875794c67a9SPeter Feiner r = setjmp(abort_target);
1876794c67a9SPeter Feiner if (r) {
1877794c67a9SPeter Feiner assert(!in_guest);
1878794c67a9SPeter Feiner goto out;
1879794c67a9SPeter Feiner }
1880794c67a9SPeter Feiner
1881794c67a9SPeter Feiner
1882794c67a9SPeter Feiner if (test->v2)
1883794c67a9SPeter Feiner test->v2();
1884794c67a9SPeter Feiner else
18859d7eaa29SArthur Chunqi Li vmx_run();
1886794c67a9SPeter Feiner
1887794c67a9SPeter Feiner while (teardown_count > 0)
1888794c67a9SPeter Feiner run_teardown_step(&teardown_steps[--teardown_count]);
1889794c67a9SPeter Feiner
1890794c67a9SPeter Feiner if (launched && !guest_finished)
1891198dfd0eSJanis Schoetterl-Glausch report_fail("Guest didn't run to completion.");
1892794c67a9SPeter Feiner
1893a0e30e71SPaolo Bonzini out:
18949d7eaa29SArthur Chunqi Li if (vmx_off()) {
18959d7eaa29SArthur Chunqi Li printf("%s : vmxoff failed.\n", __func__);
18969d7eaa29SArthur Chunqi Li return 1;
18979d7eaa29SArthur Chunqi Li }
18989d7eaa29SArthur Chunqi Li return 0;
18999d7eaa29SArthur Chunqi Li }
19009d7eaa29SArthur Chunqi Li
1901794c67a9SPeter Feiner /*
1902794c67a9SPeter Feiner * Add a teardown step. Executed after the test's main function returns.
1903794c67a9SPeter Feiner  * Teardown steps are executed in reverse order.
1904794c67a9SPeter Feiner */
1905794c67a9SPeter Feiner void test_add_teardown(test_teardown_func func, void *data)
1906794c67a9SPeter Feiner {
1907794c67a9SPeter Feiner struct test_teardown_step *step;
1908794c67a9SPeter Feiner
1909794c67a9SPeter Feiner TEST_ASSERT_MSG(teardown_count < MAX_TEST_TEARDOWN_STEPS,
1910794c67a9SPeter Feiner "There are already %d teardown steps.",
1911794c67a9SPeter Feiner teardown_count);
1912794c67a9SPeter Feiner step = &teardown_steps[teardown_count++];
1913794c67a9SPeter Feiner step->func = func;
1914794c67a9SPeter Feiner step->data = data;
1915794c67a9SPeter Feiner }
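
/*
 * Illustrative sketch (assumption): a V2 test that allocates a page could
 * register its release so it happens even if the test aborts;
 * "free_page_teardown" is a hypothetical wrapper matching
 * test_teardown_func.
 *
 *	test_add_teardown(free_page_teardown, page);
 */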
1916794c67a9SPeter Feiner
191754132d57SAaron Lewis static void __test_set_guest(test_guest_func func)
191854132d57SAaron Lewis {
191954132d57SAaron Lewis assert(current->v2);
192054132d57SAaron Lewis v2_guest_main = func;
192154132d57SAaron Lewis }
192254132d57SAaron Lewis
1923794c67a9SPeter Feiner /*
1924794c67a9SPeter Feiner * Set the target of the first enter_guest call. Can only be called once per
1925794c67a9SPeter Feiner  * test. Must be called before the first enter_guest call.
1926794c67a9SPeter Feiner */
1927794c67a9SPeter Feiner void test_set_guest(test_guest_func func)
1928794c67a9SPeter Feiner {
1929794c67a9SPeter Feiner TEST_ASSERT_MSG(!v2_guest_main, "Already set guest func.");
193054132d57SAaron Lewis __test_set_guest(func);
193154132d57SAaron Lewis }
193254132d57SAaron Lewis
193354132d57SAaron Lewis /*
193454132d57SAaron Lewis * Set the target of the enter_guest call and reset the RIP so 'func' will
193554132d57SAaron Lewis * start from the beginning. This can be called multiple times per test.
193654132d57SAaron Lewis */
193754132d57SAaron Lewis void test_override_guest(test_guest_func func)
193854132d57SAaron Lewis {
193954132d57SAaron Lewis __test_set_guest(func);
194054132d57SAaron Lewis init_vmcs_guest();
1941794c67a9SPeter Feiner }
1942794c67a9SPeter Feiner
1943e57cd644SAaron Lewis void test_set_guest_finished(void)
1944e57cd644SAaron Lewis {
1945e57cd644SAaron Lewis guest_finished = 1;
1946e57cd644SAaron Lewis }
1947e57cd644SAaron Lewis
194838e505ddSSean Christopherson static void check_for_guest_termination(union exit_reason exit_reason)
19494ce739beSMarc Orr {
195038e505ddSSean Christopherson if (is_hypercall(exit_reason)) {
19514ce739beSMarc Orr int ret;
19524ce739beSMarc Orr
19534ce739beSMarc Orr ret = handle_hypercall();
19544ce739beSMarc Orr switch (ret) {
19554ce739beSMarc Orr case VMX_TEST_VMEXIT:
19564ce739beSMarc Orr guest_finished = 1;
19574ce739beSMarc Orr break;
19584ce739beSMarc Orr case VMX_TEST_VMABORT:
19594ce739beSMarc Orr continue_abort();
19604ce739beSMarc Orr break;
19614ce739beSMarc Orr case VMX_TEST_VMSKIP:
19624ce739beSMarc Orr continue_skip();
19634ce739beSMarc Orr break;
19644ce739beSMarc Orr default:
19654ce739beSMarc Orr printf("ERROR : Invalid handle_hypercall return %d.\n",
19664ce739beSMarc Orr ret);
19674ce739beSMarc Orr abort();
19684ce739beSMarc Orr }
19694ce739beSMarc Orr }
19704ce739beSMarc Orr }
19714ce739beSMarc Orr
1972794c67a9SPeter Feiner /*
1973794c67a9SPeter Feiner  * Enters the guest (or launches it for the first time). It is an error to
197474f7e9b2SKrish Sadhukhan  * call this once the guest has returned (i.e., run past the end of its guest() function).
1975794c67a9SPeter Feiner */
1976fdd5a394SSean Christopherson void __enter_guest(u8 abort_flag, struct vmentry_result *result)
1977794c67a9SPeter Feiner {
1978794c67a9SPeter Feiner TEST_ASSERT_MSG(v2_guest_main,
1979794c67a9SPeter Feiner "Never called test_set_guest_func!");
1980794c67a9SPeter Feiner
1981794c67a9SPeter Feiner TEST_ASSERT_MSG(!guest_finished,
1982794c67a9SPeter Feiner "Called enter_guest() after guest returned.");
1983794c67a9SPeter Feiner
19840e0ea94bSSean Christopherson vmx_enter_guest(result);
198574f7e9b2SKrish Sadhukhan
19860e0ea94bSSean Christopherson if (result->vm_fail) {
19870e0ea94bSSean Christopherson if (abort_flag & ABORT_ON_EARLY_VMENTRY_FAIL)
19880e0ea94bSSean Christopherson goto do_abort;
19890e0ea94bSSean Christopherson return;
19900e0ea94bSSean Christopherson }
19910e0ea94bSSean Christopherson if (result->exit_reason.failed_vmentry) {
19920e0ea94bSSean Christopherson if ((abort_flag & ABORT_ON_INVALID_GUEST_STATE) ||
19930e0ea94bSSean Christopherson result->exit_reason.basic != VMX_FAIL_STATE)
19940e0ea94bSSean Christopherson goto do_abort;
19950e0ea94bSSean Christopherson return;
1996794c67a9SPeter Feiner }
1997794c67a9SPeter Feiner
1998794c67a9SPeter Feiner launched = 1;
199938e505ddSSean Christopherson check_for_guest_termination(result->exit_reason);
20000e0ea94bSSean Christopherson return;
20010e0ea94bSSean Christopherson
20020e0ea94bSSean Christopherson do_abort:
20030e0ea94bSSean Christopherson print_vmentry_failure_info(result);
20040e0ea94bSSean Christopherson abort();
200574f7e9b2SKrish Sadhukhan }
2006794c67a9SPeter Feiner
20074ce739beSMarc Orr void enter_guest_with_bad_controls(void)
20084ce739beSMarc Orr {
20090e0ea94bSSean Christopherson struct vmentry_result result;
20104ce739beSMarc Orr
20114ce739beSMarc Orr TEST_ASSERT_MSG(v2_guest_main,
20124ce739beSMarc Orr "Never called test_set_guest_func!");
20134ce739beSMarc Orr
20144ce739beSMarc Orr TEST_ASSERT_MSG(!guest_finished,
20154ce739beSMarc Orr "Called enter_guest() after guest returned.");
20164ce739beSMarc Orr
20170e0ea94bSSean Christopherson __enter_guest(ABORT_ON_INVALID_GUEST_STATE, &result);
20180e0ea94bSSean Christopherson report(result.vm_fail, "VM-Fail occurred as expected");
20190e0ea94bSSean Christopherson report((result.flags & VMX_ENTRY_FLAGS) == X86_EFLAGS_ZF,
20200e0ea94bSSean Christopherson "FLAGS set correctly on VM-Fail");
2021a299895bSThomas Huth report(vmcs_read(VMX_INST_ERROR) == VMXERR_ENTRY_INVALID_CONTROL_FIELD,
2022a299895bSThomas Huth "VM-Inst Error # is %d (VM entry with invalid control field(s))",
20234ce739beSMarc Orr VMXERR_ENTRY_INVALID_CONTROL_FIELD);
2024794c67a9SPeter Feiner }
2025794c67a9SPeter Feiner
202674f7e9b2SKrish Sadhukhan void enter_guest(void)
202774f7e9b2SKrish Sadhukhan {
20280e0ea94bSSean Christopherson struct vmentry_result result;
202974f7e9b2SKrish Sadhukhan
203074f7e9b2SKrish Sadhukhan __enter_guest(ABORT_ON_EARLY_VMENTRY_FAIL |
20310e0ea94bSSean Christopherson ABORT_ON_INVALID_GUEST_STATE, &result);
203274f7e9b2SKrish Sadhukhan }
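
/*
 * Illustrative sketch (assumption, not part of this file): a typical V2
 * test body pairs test_set_guest() with enter_guest(); the names are made
 * up for the example.
 *
 *	static void example_guest(void)
 *	{
 *		report_pass("ran in L2");
 *	}
 *
 *	static void example_v2(void)
 *	{
 *		test_set_guest(example_guest);
 *		enter_guest();
 *	}
 */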
203374f7e9b2SKrish Sadhukhan
20343ee34093SArthur Chunqi Li extern struct vmx_test vmx_tests[];
20359d7eaa29SArthur Chunqi Li
2036875b97b3SPeter Feiner static bool
2037875b97b3SPeter Feiner test_wanted(const char *name, const char *filters[], int filter_count)
20388029cac7SPeter Feiner {
2039875b97b3SPeter Feiner int i;
2040875b97b3SPeter Feiner bool positive = false;
2041875b97b3SPeter Feiner bool match = false;
2042875b97b3SPeter Feiner char clean_name[strlen(name) + 1];
2043875b97b3SPeter Feiner char *c;
20448029cac7SPeter Feiner const char *n;
20458029cac7SPeter Feiner
2048875b97b3SPeter Feiner /* Replace spaces with underscores. */
2049875b97b3SPeter Feiner n = name;
2050875b97b3SPeter Feiner c = &clean_name[0];
2051875b97b3SPeter Feiner do *c++ = (*n == ' ') ? '_' : *n;
2052875b97b3SPeter Feiner while (*n++);
2053875b97b3SPeter Feiner
2054875b97b3SPeter Feiner for (i = 0; i < filter_count; i++) {
2055875b97b3SPeter Feiner const char *filter = filters[i];
2056875b97b3SPeter Feiner
2057875b97b3SPeter Feiner if (filter[0] == '-') {
2058875b97b3SPeter Feiner if (simple_glob(clean_name, filter + 1))
2059875b97b3SPeter Feiner return false;
2060875b97b3SPeter Feiner } else {
2061875b97b3SPeter Feiner positive = true;
2062875b97b3SPeter Feiner match |= simple_glob(clean_name, filter);
2063875b97b3SPeter Feiner }
2064875b97b3SPeter Feiner }
2065875b97b3SPeter Feiner
2066875b97b3SPeter Feiner if (!positive || match) {
2067875b97b3SPeter Feiner matched++;
2068875b97b3SPeter Feiner return true;
2069875b97b3SPeter Feiner } else {
20708029cac7SPeter Feiner return false;
20718029cac7SPeter Feiner }
20728029cac7SPeter Feiner }
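
/*
 * Example (illustrative): with filters { "vmx_*", "-vmx_ept*" }, a test
 * named "vmx ept violation" becomes "vmx_ept_violation", matches the
 * negative glob and is excluded, while "vmx basic" is selected.
 */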
20738029cac7SPeter Feiner
2074875b97b3SPeter Feiner int main(int argc, const char *argv[])
20759d7eaa29SArthur Chunqi Li {
20763ee34093SArthur Chunqi Li int i = 0;
20779d7eaa29SArthur Chunqi Li
20789d7eaa29SArthur Chunqi Li setup_vm();
20793ee34093SArthur Chunqi Li hypercall_field = 0;
20809d7eaa29SArthur Chunqi Li
20817371c622SVitaly Kuznetsov /* We want xAPIC mode to test MMIO passthrough from L1 (us) to L2. */
208274e79380SPaolo Bonzini smp_reset_apic();
20837371c622SVitaly Kuznetsov
2084c04259ffSDavid Matlack argv++;
2085c04259ffSDavid Matlack argc--;
2086c04259ffSDavid Matlack
2087badc98caSKrish Sadhukhan if (!this_cpu_has(X86_FEATURE_VMX)) {
20883b127446SJan Kiszka printf("WARNING: vmx not supported, add '-cpu host'\n");
20899d7eaa29SArthur Chunqi Li goto exit;
20909d7eaa29SArthur Chunqi Li }
209193f10d6fSLiran Alon init_bsp_vmx();
2092c04259ffSDavid Matlack if (test_wanted("test_vmx_feature_control", argv, argc)) {
2093c04259ffSDavid Matlack /* Sets MSR_IA32_FEATURE_CONTROL to 0x5 */
20943b127446SJan Kiszka if (test_vmx_feature_control() != 0)
20953b127446SJan Kiszka goto exit;
2096c04259ffSDavid Matlack } else {
2097883f3fccSLiran Alon enable_vmx();
2098c04259ffSDavid Matlack }
2099c04259ffSDavid Matlack
2100c04259ffSDavid Matlack if (test_wanted("test_vmxon", argv, argc)) {
2101c04259ffSDavid Matlack /* Enables VMX */
21029d7eaa29SArthur Chunqi Li if (test_vmxon() != 0)
21039d7eaa29SArthur Chunqi Li goto exit;
2104c04259ffSDavid Matlack } else {
2105c04259ffSDavid Matlack if (vmx_on()) {
2106198dfd0eSJanis Schoetterl-Glausch report_fail("vmxon");
2107c04259ffSDavid Matlack goto exit;
2108c04259ffSDavid Matlack }
2109c04259ffSDavid Matlack }
2110c04259ffSDavid Matlack
2111c04259ffSDavid Matlack if (test_wanted("test_vmptrld", argv, argc))
21129d7eaa29SArthur Chunqi Li test_vmptrld();
2113c04259ffSDavid Matlack if (test_wanted("test_vmclear", argv, argc))
21149d7eaa29SArthur Chunqi Li test_vmclear();
2115c04259ffSDavid Matlack if (test_wanted("test_vmptrst", argv, argc))
21169d7eaa29SArthur Chunqi Li test_vmptrst();
2117ecd5b431SDavid Matlack if (test_wanted("test_vmwrite_vmread", argv, argc))
2118ecd5b431SDavid Matlack test_vmwrite_vmread();
211959161cfaSJim Mattson if (test_wanted("test_vmcs_high", argv, argc))
212059161cfaSJim Mattson test_vmcs_high();
21216b72cf76SDavid Matlack if (test_wanted("test_vmcs_lifecycle", argv, argc))
21226b72cf76SDavid Matlack test_vmcs_lifecycle();
2123c04259ffSDavid Matlack if (test_wanted("test_vmx_caps", argv, argc))
212469c8d31cSJan Kiszka test_vmx_caps();
21253652250bSSimon Smith if (test_wanted("test_vmread_flags_touch", argv, argc))
21263652250bSSimon Smith test_vmread_flags_touch();
21273652250bSSimon Smith if (test_wanted("test_vmwrite_flags_touch", argv, argc))
21283652250bSSimon Smith test_vmwrite_flags_touch();
21299d7eaa29SArthur Chunqi Li
213034439b1aSPeter Feiner /* Balance vmxon from test_vmxon. */
213134439b1aSPeter Feiner vmx_off();
213234439b1aSPeter Feiner
213334439b1aSPeter Feiner for (; vmx_tests[i].name != NULL; i++) {
2134c04259ffSDavid Matlack if (!test_wanted(vmx_tests[i].name, argv, argc))
21358029cac7SPeter Feiner continue;
21369d7eaa29SArthur Chunqi Li if (test_run(&vmx_tests[i]))
21379d7eaa29SArthur Chunqi Li goto exit;
21388029cac7SPeter Feiner }
21398029cac7SPeter Feiner
21408029cac7SPeter Feiner if (!matched)
2141a299895bSThomas Huth report(matched, "command line didn't match any tests!");
21429d7eaa29SArthur Chunqi Li
21439d7eaa29SArthur Chunqi Li exit:
2144f3cdd159SJan Kiszka return report_summary();
21459d7eaa29SArthur Chunqi Li }