xref: /kvm-unit-tests/x86/vmx.c (revision b4a405c3f289562c67ef2b9d34b69a4326c01c84)
17ada359dSArthur Chunqi Li /*
27ada359dSArthur Chunqi Li  * x86/vmx.c : Framework for testing nested virtualization
37ada359dSArthur Chunqi Li  *	This is a framework to test nested VMX for KVM, which
47ada359dSArthur Chunqi Li  * 	started as a project of GSoC 2013. All test cases should
57ada359dSArthur Chunqi Li  *	be located in x86/vmx_tests.c and framework related
67ada359dSArthur Chunqi Li  *	functions should be in this file.
77ada359dSArthur Chunqi Li  *
87ada359dSArthur Chunqi Li  * How to write test cases?
97ada359dSArthur Chunqi Li  *	Add callbacks of test suite in variant "vmx_tests". You can
107ada359dSArthur Chunqi Li  *	write:
117ada359dSArthur Chunqi Li  *		1. init function used for initializing test suite
127ada359dSArthur Chunqi Li  *		2. main function for codes running in L2 guest,
137ada359dSArthur Chunqi Li  *		3. exit_handler to handle vmexit of L2 to L1
147ada359dSArthur Chunqi Li  *		4. syscall handler to handle L2 syscall vmexit
157ada359dSArthur Chunqi Li  *		5. vmenter fail handler to handle direct failure of vmenter
167ada359dSArthur Chunqi Li  *		6. guest_regs is loaded when vmenter and saved when
177ada359dSArthur Chunqi Li  *			vmexit, you can read and set it in exit_handler
187ada359dSArthur Chunqi Li  *	If no special function is needed for a test suite, use
 *	corresponding basic_* functions as callback. More handlers
207ada359dSArthur Chunqi Li  *	can be added to "vmx_tests", see details of "struct vmx_test"
217ada359dSArthur Chunqi Li  *	and function test_run().
227ada359dSArthur Chunqi Li  *
237ada359dSArthur Chunqi Li  * Currently, vmx test framework only set up one VCPU and one
247ada359dSArthur Chunqi Li  * concurrent guest test environment with same paging for L2 and
257ada359dSArthur Chunqi Li  * L1. For usage of EPT, only 1:1 mapped paging is used from VFN
267ada359dSArthur Chunqi Li  * to PFN.
277ada359dSArthur Chunqi Li  *
287ada359dSArthur Chunqi Li  * Author : Arthur Chunqi Li <yzt356@gmail.com>
297ada359dSArthur Chunqi Li  */
307ada359dSArthur Chunqi Li 
319d7eaa29SArthur Chunqi Li #include "libcflat.h"
329d7eaa29SArthur Chunqi Li #include "processor.h"
339d7eaa29SArthur Chunqi Li #include "vm.h"
349d7eaa29SArthur Chunqi Li #include "desc.h"
359d7eaa29SArthur Chunqi Li #include "vmx.h"
369d7eaa29SArthur Chunqi Li #include "msr.h"
379d7eaa29SArthur Chunqi Li #include "smp.h"
389d7eaa29SArthur Chunqi Li 
/* Framework-wide VMX state. */
u64 *vmxon_region;			/* memory handed to VMXON */
struct vmcs *vmcs_root;			/* framework's primary VMCS */
u32 vpid_cnt;
void *guest_stack, *guest_syscall_stack;
u32 ctrl_pin, ctrl_enter, ctrl_exit, ctrl_cpu[2];
struct regs regs;			/* guest GPRs saved/loaded around vmexit/vmentry */

struct vmx_test *current;		/* test whose callbacks are currently dispatched */

#define MAX_TEST_TEARDOWN_STEPS 10

/* One deferred cleanup action registered by a v2 test. */
struct test_teardown_step {
	test_teardown_func func;
	void *data;
};

static int teardown_count;
static struct test_teardown_step teardown_steps[MAX_TEST_TEARDOWN_STEPS];

/* Guest body used by v2-style tests (see guest_main()). */
static test_guest_func v2_guest_main;

u64 hypercall_field;
bool launched;				/* true once VMLAUNCH has succeeded */
static int matched;
static int guest_finished;
static int in_guest;

/* Capability MSR contents, read once at setup. */
union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_cpu_rev[2];
union vmx_ctrl_msr ctrl_exit_rev;
union vmx_ctrl_msr ctrl_enter_rev;
union vmx_ept_vpid  ept_vpid;

extern struct descriptor_table_ptr gdt64_desc;
extern struct descriptor_table_ptr idt_descr;
extern struct descriptor_table_ptr tss_descr;
extern void *vmx_return;
extern void *entry_sysenter;
extern void *guest_entry;

/* Test stage counter; accessed via vmx_{set,get,inc}_test_stage(). */
static volatile u32 stage;

/* NOTE(review): presumably a setjmp/longjmp target for aborting a test
 * run — the setjmp site is outside this chunk; confirm before relying. */
static jmp_buf abort_target;
83794c67a9SPeter Feiner 
/*
 * Describes one VMCS field for the VMWRITE/VMREAD tests: the field
 * encoding plus a mask of the bits expected to be writable.  A zero
 * mask marks a read-only field.
 */
struct vmcs_field {
	u64 mask;
	u64 encoding;
};

/* Mask covering the low _bits bits. */
#define MASK(_bits) GENMASK_ULL((_bits) - 1, 0)
/* Mask sized like a natural-width (unsigned long) VMCS field. */
#define MASK_NATURAL MASK(sizeof(unsigned long) * 8)
91ecd5b431SDavid Matlack 
/*
 * Table of VMCS fields exercised by the VMWRITE/VMREAD tests, grouped
 * by field width (16-bit, 64-bit, 32-bit, natural-width).  Entries
 * with a zero mask are read-only fields that are only VMREAD-checked.
 */
static struct vmcs_field vmcs_fields[] = {
	{ MASK(16), VPID },
	{ MASK(16), PINV },
	{ MASK(16), EPTP_IDX },

	{ MASK(16), GUEST_SEL_ES },
	{ MASK(16), GUEST_SEL_CS },
	{ MASK(16), GUEST_SEL_SS },
	{ MASK(16), GUEST_SEL_DS },
	{ MASK(16), GUEST_SEL_FS },
	{ MASK(16), GUEST_SEL_GS },
	{ MASK(16), GUEST_SEL_LDTR },
	{ MASK(16), GUEST_SEL_TR },
	{ MASK(16), GUEST_INT_STATUS },

	{ MASK(16), HOST_SEL_ES },
	{ MASK(16), HOST_SEL_CS },
	{ MASK(16), HOST_SEL_SS },
	{ MASK(16), HOST_SEL_DS },
	{ MASK(16), HOST_SEL_FS },
	{ MASK(16), HOST_SEL_GS },
	{ MASK(16), HOST_SEL_TR },

	{ MASK(64), IO_BITMAP_A },
	{ MASK(64), IO_BITMAP_B },
	{ MASK(64), MSR_BITMAP },
	{ MASK(64), EXIT_MSR_ST_ADDR },
	{ MASK(64), EXIT_MSR_LD_ADDR },
	{ MASK(64), ENTER_MSR_LD_ADDR },
	{ MASK(64), VMCS_EXEC_PTR },
	{ MASK(64), TSC_OFFSET },
	{ MASK(64), APIC_VIRT_ADDR },
	{ MASK(64), APIC_ACCS_ADDR },
	{ MASK(64), EPTP },

	{ 0 /* read-only */, INFO_PHYS_ADDR },

	{ MASK(64), VMCS_LINK_PTR },
	{ MASK(64), GUEST_DEBUGCTL },
	{ MASK(64), GUEST_EFER },
	{ MASK(64), GUEST_PAT },
	{ MASK(64), GUEST_PERF_GLOBAL_CTRL },
	{ MASK(64), GUEST_PDPTE },

	{ MASK(64), HOST_PAT },
	{ MASK(64), HOST_EFER },
	{ MASK(64), HOST_PERF_GLOBAL_CTRL },

	{ MASK(32), PIN_CONTROLS },
	{ MASK(32), CPU_EXEC_CTRL0 },
	{ MASK(32), EXC_BITMAP },
	{ MASK(32), PF_ERROR_MASK },
	{ MASK(32), PF_ERROR_MATCH },
	{ MASK(32), CR3_TARGET_COUNT },
	{ MASK(32), EXI_CONTROLS },
	{ MASK(32), EXI_MSR_ST_CNT },
	{ MASK(32), EXI_MSR_LD_CNT },
	{ MASK(32), ENT_CONTROLS },
	{ MASK(32), ENT_MSR_LD_CNT },
	{ MASK(32), ENT_INTR_INFO },
	{ MASK(32), ENT_INTR_ERROR },
	{ MASK(32), ENT_INST_LEN },
	{ MASK(32), TPR_THRESHOLD },
	{ MASK(32), CPU_EXEC_CTRL1 },

	{ 0 /* read-only */, VMX_INST_ERROR },
	{ 0 /* read-only */, EXI_REASON },
	{ 0 /* read-only */, EXI_INTR_INFO },
	{ 0 /* read-only */, EXI_INTR_ERROR },
	{ 0 /* read-only */, IDT_VECT_INFO },
	{ 0 /* read-only */, IDT_VECT_ERROR },
	{ 0 /* read-only */, EXI_INST_LEN },
	{ 0 /* read-only */, EXI_INST_INFO },

	{ MASK(32), GUEST_LIMIT_ES },
	{ MASK(32), GUEST_LIMIT_CS },
	{ MASK(32), GUEST_LIMIT_SS },
	{ MASK(32), GUEST_LIMIT_DS },
	{ MASK(32), GUEST_LIMIT_FS },
	{ MASK(32), GUEST_LIMIT_GS },
	{ MASK(32), GUEST_LIMIT_LDTR },
	{ MASK(32), GUEST_LIMIT_TR },
	{ MASK(32), GUEST_LIMIT_GDTR },
	{ MASK(32), GUEST_LIMIT_IDTR },
	/* Access-rights fields: only the defined AR bits are writable. */
	{ 0x1d0ff, GUEST_AR_ES },
	{ 0x1f0ff, GUEST_AR_CS },
	{ 0x1d0ff, GUEST_AR_SS },
	{ 0x1d0ff, GUEST_AR_DS },
	{ 0x1d0ff, GUEST_AR_FS },
	{ 0x1d0ff, GUEST_AR_GS },
	{ 0x1d0ff, GUEST_AR_LDTR },
	{ 0x1d0ff, GUEST_AR_TR },
	{ MASK(32), GUEST_INTR_STATE },
	{ MASK(32), GUEST_ACTV_STATE },
	{ MASK(32), GUEST_SMBASE },
	{ MASK(32), GUEST_SYSENTER_CS },
	{ MASK(32), PREEMPT_TIMER_VALUE },

	{ MASK(32), HOST_SYSENTER_CS },

	{ MASK_NATURAL, CR0_MASK },
	{ MASK_NATURAL, CR4_MASK },
	{ MASK_NATURAL, CR0_READ_SHADOW },
	{ MASK_NATURAL, CR4_READ_SHADOW },
	{ MASK_NATURAL, CR3_TARGET_0 },
	{ MASK_NATURAL, CR3_TARGET_1 },
	{ MASK_NATURAL, CR3_TARGET_2 },
	{ MASK_NATURAL, CR3_TARGET_3 },

	{ 0 /* read-only */, EXI_QUALIFICATION },
	{ 0 /* read-only */, IO_RCX },
	{ 0 /* read-only */, IO_RSI },
	{ 0 /* read-only */, IO_RDI },
	{ 0 /* read-only */, IO_RIP },
	{ 0 /* read-only */, GUEST_LINEAR_ADDRESS },

	{ MASK_NATURAL, GUEST_CR0 },
	{ MASK_NATURAL, GUEST_CR3 },
	{ MASK_NATURAL, GUEST_CR4 },
	{ MASK_NATURAL, GUEST_BASE_ES },
	{ MASK_NATURAL, GUEST_BASE_CS },
	{ MASK_NATURAL, GUEST_BASE_SS },
	{ MASK_NATURAL, GUEST_BASE_DS },
	{ MASK_NATURAL, GUEST_BASE_FS },
	{ MASK_NATURAL, GUEST_BASE_GS },
	{ MASK_NATURAL, GUEST_BASE_LDTR },
	{ MASK_NATURAL, GUEST_BASE_TR },
	{ MASK_NATURAL, GUEST_BASE_GDTR },
	{ MASK_NATURAL, GUEST_BASE_IDTR },
	{ MASK_NATURAL, GUEST_DR7 },
	{ MASK_NATURAL, GUEST_RSP },
	{ MASK_NATURAL, GUEST_RIP },
	{ MASK_NATURAL, GUEST_RFLAGS },
	{ MASK_NATURAL, GUEST_PENDING_DEBUG },
	{ MASK_NATURAL, GUEST_SYSENTER_ESP },
	{ MASK_NATURAL, GUEST_SYSENTER_EIP },

	{ MASK_NATURAL, HOST_CR0 },
	{ MASK_NATURAL, HOST_CR3 },
	{ MASK_NATURAL, HOST_CR4 },
	{ MASK_NATURAL, HOST_BASE_FS },
	{ MASK_NATURAL, HOST_BASE_GS },
	{ MASK_NATURAL, HOST_BASE_TR },
	{ MASK_NATURAL, HOST_BASE_GDTR },
	{ MASK_NATURAL, HOST_BASE_IDTR },
	{ MASK_NATURAL, HOST_SYSENTER_ESP },
	{ MASK_NATURAL, HOST_SYSENTER_EIP },
	{ MASK_NATURAL, HOST_RSP },
	{ MASK_NATURAL, HOST_RIP },
};
242ecd5b431SDavid Matlack 
243ecd5b431SDavid Matlack static inline u64 vmcs_field_value(struct vmcs_field *f, u8 cookie)
244ecd5b431SDavid Matlack {
245ecd5b431SDavid Matlack 	u64 value;
246ecd5b431SDavid Matlack 
247ecd5b431SDavid Matlack 	/* Incorporate the cookie and the field encoding into the value. */
248ecd5b431SDavid Matlack 	value = cookie;
249ecd5b431SDavid Matlack 	value |= (f->encoding << 8);
250ecd5b431SDavid Matlack 	value |= 0xdeadbeefull << 32;
251ecd5b431SDavid Matlack 
252ecd5b431SDavid Matlack 	return value & f->mask;
253ecd5b431SDavid Matlack }
254ecd5b431SDavid Matlack 
255ecd5b431SDavid Matlack static void set_vmcs_field(struct vmcs_field *f, u8 cookie)
256ecd5b431SDavid Matlack {
257ecd5b431SDavid Matlack 	vmcs_write(f->encoding, vmcs_field_value(f, cookie));
258ecd5b431SDavid Matlack }
259ecd5b431SDavid Matlack 
260ecd5b431SDavid Matlack static bool check_vmcs_field(struct vmcs_field *f, u8 cookie)
261ecd5b431SDavid Matlack {
262ecd5b431SDavid Matlack 	u64 expected;
263ecd5b431SDavid Matlack 	u64 actual;
264ecd5b431SDavid Matlack 	int ret;
265ecd5b431SDavid Matlack 
266ecd5b431SDavid Matlack 	ret = vmcs_read_checking(f->encoding, &actual);
267ecd5b431SDavid Matlack 	assert(!(ret & X86_EFLAGS_CF));
268ecd5b431SDavid Matlack 	/* Skip VMCS fields that aren't recognized by the CPU */
269ecd5b431SDavid Matlack 	if (ret & X86_EFLAGS_ZF)
270ecd5b431SDavid Matlack 		return true;
271ecd5b431SDavid Matlack 
272ecd5b431SDavid Matlack 	expected = vmcs_field_value(f, cookie);
273ecd5b431SDavid Matlack 	actual &= f->mask;
274ecd5b431SDavid Matlack 
275ecd5b431SDavid Matlack 	if (expected == actual)
276ecd5b431SDavid Matlack 		return true;
277ecd5b431SDavid Matlack 
278d4ab68adSDavid Matlack 	printf("FAIL: VMWRITE/VMREAD %lx (expected: %lx, actual: %lx)\n",
279ecd5b431SDavid Matlack 	       f->encoding, (unsigned long) expected, (unsigned long) actual);
280ecd5b431SDavid Matlack 
281ecd5b431SDavid Matlack 	return false;
282ecd5b431SDavid Matlack }
283ecd5b431SDavid Matlack 
284ecd5b431SDavid Matlack static void set_all_vmcs_fields(u8 cookie)
285ecd5b431SDavid Matlack {
286ecd5b431SDavid Matlack 	int i;
287ecd5b431SDavid Matlack 
288ecd5b431SDavid Matlack 	for (i = 0; i < ARRAY_SIZE(vmcs_fields); i++)
289ecd5b431SDavid Matlack 		set_vmcs_field(&vmcs_fields[i], cookie);
290ecd5b431SDavid Matlack }
291ecd5b431SDavid Matlack 
292ecd5b431SDavid Matlack static bool check_all_vmcs_fields(u8 cookie)
293ecd5b431SDavid Matlack {
294ecd5b431SDavid Matlack 	bool pass = true;
295ecd5b431SDavid Matlack 	int i;
296ecd5b431SDavid Matlack 
297ecd5b431SDavid Matlack 	for (i = 0; i < ARRAY_SIZE(vmcs_fields); i++) {
298ecd5b431SDavid Matlack 		if (!check_vmcs_field(&vmcs_fields[i], cookie))
299ecd5b431SDavid Matlack 			pass = false;
300ecd5b431SDavid Matlack 	}
301ecd5b431SDavid Matlack 
302ecd5b431SDavid Matlack 	return pass;
303ecd5b431SDavid Matlack }
304ecd5b431SDavid Matlack 
/*
 * Basic VMWRITE/VMREAD round-trip test: load a fresh VMCS, write a
 * pattern (cookie 0x42) into every known field, and check that each
 * field reads back the same pattern.
 */
void test_vmwrite_vmread(void)
{
	struct vmcs *vmcs = alloc_page();

	memset(vmcs, 0, PAGE_SIZE);
	/* A VMCS must carry the processor's revision id before VMPTRLD. */
	vmcs->revision_id = basic.revision;
	assert(!vmcs_clear(vmcs));
	assert(!make_vmcs_current(vmcs));

	set_all_vmcs_fields(0x42);
	report("VMWRITE/VMREAD", check_all_vmcs_fields(0x42));

	/* VMCLEAR before freeing so the CPU no longer caches this VMCS. */
	assert(!vmcs_clear(vmcs));
	free_page(vmcs);
}
320ecd5b431SDavid Matlack 
/*
 * Exercise the VMCS activity lifecycle with two VMCSs: VMCLEAR and
 * VMPTRLD in various orders, verifying that VMWRITEs always target the
 * current VMCS and that each VMCS's contents survive current/active
 * transitions.  The cookie value encodes which VMCS's contents are
 * expected.
 */
void test_vmcs_lifecycle(void)
{
	struct vmcs *vmcs[2] = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		vmcs[i] = alloc_page();
		memset(vmcs[i], 0, PAGE_SIZE);
		vmcs[i]->revision_id = basic.revision;
	}

#define VMPTRLD(_i) do { \
	assert(_i < ARRAY_SIZE(vmcs)); \
	assert(!make_vmcs_current(vmcs[_i])); \
	printf("VMPTRLD VMCS%d\n", (_i)); \
} while (0)

#define VMCLEAR(_i) do { \
	assert(_i < ARRAY_SIZE(vmcs)); \
	assert(!vmcs_clear(vmcs[_i])); \
	printf("VMCLEAR VMCS%d\n", (_i)); \
} while (0)

	VMCLEAR(0);
	VMPTRLD(0);
	set_all_vmcs_fields(0);
	report("current:VMCS0 active:[VMCS0]", check_all_vmcs_fields(0));

	/* VMCLEAR + reload of the same VMCS must preserve its contents. */
	VMCLEAR(0);
	VMPTRLD(0);
	report("current:VMCS0 active:[VMCS0]", check_all_vmcs_fields(0));

	/* Clearing a different VMCS must not disturb the current one. */
	VMCLEAR(1);
	report("current:VMCS0 active:[VMCS0]", check_all_vmcs_fields(0));

	VMPTRLD(1);
	set_all_vmcs_fields(1);
	report("current:VMCS1 active:[VMCS0,VCMS1]", check_all_vmcs_fields(1));

	/* Switching between active VMCSs selects each one's own data. */
	VMPTRLD(0);
	report("current:VMCS0 active:[VMCS0,VCMS1]", check_all_vmcs_fields(0));
	VMPTRLD(1);
	report("current:VMCS1 active:[VMCS0,VCMS1]", check_all_vmcs_fields(1));
	VMPTRLD(1);
	report("current:VMCS1 active:[VMCS0,VCMS1]", check_all_vmcs_fields(1));

	VMCLEAR(0);
	report("current:VMCS1 active:[VCMS1]", check_all_vmcs_fields(1));

	/* VMPTRLD should not erase VMWRITEs to the current VMCS */
	set_all_vmcs_fields(2);
	VMPTRLD(1);
	report("current:VMCS1 active:[VCMS1]", check_all_vmcs_fields(2));

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		VMCLEAR(i);
		free_page(vmcs[i]);
	}

#undef VMPTRLD
#undef VMCLEAR
}
3836b72cf76SDavid Matlack 
/*
 * Set the test stage counter.  The compiler barriers keep the store
 * to the (volatile) shared stage variable from being reordered with
 * surrounding code.
 */
void vmx_set_test_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
390ffb1a9e0SJan Kiszka 
/* Read the test stage counter; barriers order the load as written. */
u32 vmx_get_test_stage(void)
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
400ffb1a9e0SJan Kiszka 
/* Advance the test stage counter by one, with ordering barriers. */
void vmx_inc_test_stage(void)
{
	barrier();
	stage++;
	barrier();
}
407ffb1a9e0SJan Kiszka 
/*
 * entry_sysenter: SYSENTER landing pad used while in the L2 guest.
 * Saves the guest GPRs, passes the low nibble of RAX as the syscall
 * number to syscall_handler(), restores the GPRs and re-enters the
 * guest with VMRESUME.
 */
asm(
	".align	4, 0x90\n\t"
	".globl	entry_sysenter\n\t"
	"entry_sysenter:\n\t"
	SAVE_GPR
	"	and	$0xf, %rax\n\t"
	"	mov	%rax, %rdi\n\t"
	"	call	syscall_handler\n\t"
	LOAD_GPR
	"	vmresume\n\t"
);
4209d7eaa29SArthur Chunqi Li 
4219d7eaa29SArthur Chunqi Li static void __attribute__((__used__)) syscall_handler(u64 syscall_no)
4229d7eaa29SArthur Chunqi Li {
423d5315e3dSJan Kiszka 	if (current->syscall_handler)
4249d7eaa29SArthur Chunqi Li 		current->syscall_handler(syscall_no);
4259d7eaa29SArthur Chunqi Li }
4269d7eaa29SArthur Chunqi Li 
/*
 * Human-readable names for basic VM-exit reasons, indexed by reason
 * number.  Gaps (unassigned reason numbers) are NULL and rendered as
 * "(unused)" by exit_reason_description().
 */
static const char * const exit_reason_descriptions[] = {
	[VMX_EXC_NMI]		= "VMX_EXC_NMI",
	[VMX_EXTINT]		= "VMX_EXTINT",
	[VMX_TRIPLE_FAULT]	= "VMX_TRIPLE_FAULT",
	[VMX_INIT]		= "VMX_INIT",
	[VMX_SIPI]		= "VMX_SIPI",
	[VMX_SMI_IO]		= "VMX_SMI_IO",
	[VMX_SMI_OTHER]		= "VMX_SMI_OTHER",
	[VMX_INTR_WINDOW]	= "VMX_INTR_WINDOW",
	[VMX_NMI_WINDOW]	= "VMX_NMI_WINDOW",
	[VMX_TASK_SWITCH]	= "VMX_TASK_SWITCH",
	[VMX_CPUID]		= "VMX_CPUID",
	[VMX_GETSEC]		= "VMX_GETSEC",
	[VMX_HLT]		= "VMX_HLT",
	[VMX_INVD]		= "VMX_INVD",
	[VMX_INVLPG]		= "VMX_INVLPG",
	[VMX_RDPMC]		= "VMX_RDPMC",
	[VMX_RDTSC]		= "VMX_RDTSC",
	[VMX_RSM]		= "VMX_RSM",
	[VMX_VMCALL]		= "VMX_VMCALL",
	[VMX_VMCLEAR]		= "VMX_VMCLEAR",
	[VMX_VMLAUNCH]		= "VMX_VMLAUNCH",
	[VMX_VMPTRLD]		= "VMX_VMPTRLD",
	[VMX_VMPTRST]		= "VMX_VMPTRST",
	[VMX_VMREAD]		= "VMX_VMREAD",
	[VMX_VMRESUME]		= "VMX_VMRESUME",
	[VMX_VMWRITE]		= "VMX_VMWRITE",
	[VMX_VMXOFF]		= "VMX_VMXOFF",
	[VMX_VMXON]		= "VMX_VMXON",
	[VMX_CR]		= "VMX_CR",
	[VMX_DR]		= "VMX_DR",
	[VMX_IO]		= "VMX_IO",
	[VMX_RDMSR]		= "VMX_RDMSR",
	[VMX_WRMSR]		= "VMX_WRMSR",
	[VMX_FAIL_STATE]	= "VMX_FAIL_STATE",
	[VMX_FAIL_MSR]		= "VMX_FAIL_MSR",
	[VMX_MWAIT]		= "VMX_MWAIT",
	[VMX_MTF]		= "VMX_MTF",
	[VMX_MONITOR]		= "VMX_MONITOR",
	[VMX_PAUSE]		= "VMX_PAUSE",
	[VMX_FAIL_MCHECK]	= "VMX_FAIL_MCHECK",
	[VMX_TPR_THRESHOLD]	= "VMX_TPR_THRESHOLD",
	[VMX_APIC_ACCESS]	= "VMX_APIC_ACCESS",
	[VMX_GDTR_IDTR]		= "VMX_GDTR_IDTR",
	[VMX_LDTR_TR]		= "VMX_LDTR_TR",
	[VMX_EPT_VIOLATION]	= "VMX_EPT_VIOLATION",
	[VMX_EPT_MISCONFIG]	= "VMX_EPT_MISCONFIG",
	[VMX_INVEPT]		= "VMX_INVEPT",
	[VMX_PREEMPT]		= "VMX_PREEMPT",
	[VMX_INVVPID]		= "VMX_INVVPID",
	[VMX_WBINVD]		= "VMX_WBINVD",
	[VMX_XSETBV]		= "VMX_XSETBV",
	[VMX_APIC_WRITE]	= "VMX_APIC_WRITE",
	[VMX_RDRAND]		= "VMX_RDRAND",
	[VMX_INVPCID]		= "VMX_INVPCID",
	[VMX_VMFUNC]		= "VMX_VMFUNC",
	[VMX_RDSEED]		= "VMX_RDSEED",
	[VMX_PML_FULL]		= "VMX_PML_FULL",
	[VMX_XSAVES]		= "VMX_XSAVES",
	[VMX_XRSTORS]		= "VMX_XRSTORS",
};
4887e207ec1SPeter Feiner 
4897e207ec1SPeter Feiner const char *exit_reason_description(u64 reason)
4907e207ec1SPeter Feiner {
4917e207ec1SPeter Feiner 	if (reason >= ARRAY_SIZE(exit_reason_descriptions))
4927e207ec1SPeter Feiner 		return "(unknown)";
4937e207ec1SPeter Feiner 	return exit_reason_descriptions[reason] ? : "(unused)";
4947e207ec1SPeter Feiner }
4957e207ec1SPeter Feiner 
4963ee34093SArthur Chunqi Li void print_vmexit_info()
4979d7eaa29SArthur Chunqi Li {
4989d7eaa29SArthur Chunqi Li 	u64 guest_rip, guest_rsp;
4999d7eaa29SArthur Chunqi Li 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
5009d7eaa29SArthur Chunqi Li 	ulong exit_qual = vmcs_read(EXI_QUALIFICATION);
5019d7eaa29SArthur Chunqi Li 	guest_rip = vmcs_read(GUEST_RIP);
5029d7eaa29SArthur Chunqi Li 	guest_rsp = vmcs_read(GUEST_RSP);
5039d7eaa29SArthur Chunqi Li 	printf("VMEXIT info:\n");
504b006d7ebSAndrew Jones 	printf("\tvmexit reason = %ld\n", reason);
505fd6aada0SRadim Krčmář 	printf("\texit qualification = %#lx\n", exit_qual);
506b006d7ebSAndrew Jones 	printf("\tBit 31 of reason = %lx\n", (vmcs_read(EXI_REASON) >> 31) & 1);
507fd6aada0SRadim Krčmář 	printf("\tguest_rip = %#lx\n", guest_rip);
508fd6aada0SRadim Krčmář 	printf("\tRAX=%#lx    RBX=%#lx    RCX=%#lx    RDX=%#lx\n",
5099d7eaa29SArthur Chunqi Li 		regs.rax, regs.rbx, regs.rcx, regs.rdx);
510fd6aada0SRadim Krčmář 	printf("\tRSP=%#lx    RBP=%#lx    RSI=%#lx    RDI=%#lx\n",
5119d7eaa29SArthur Chunqi Li 		guest_rsp, regs.rbp, regs.rsi, regs.rdi);
512fd6aada0SRadim Krčmář 	printf("\tR8 =%#lx    R9 =%#lx    R10=%#lx    R11=%#lx\n",
5139d7eaa29SArthur Chunqi Li 		regs.r8, regs.r9, regs.r10, regs.r11);
514fd6aada0SRadim Krčmář 	printf("\tR12=%#lx    R13=%#lx    R14=%#lx    R15=%#lx\n",
5159d7eaa29SArthur Chunqi Li 		regs.r12, regs.r13, regs.r14, regs.r15);
5169d7eaa29SArthur Chunqi Li }
5179d7eaa29SArthur Chunqi Li 
/*
 * Decode and print a VM-entry failure.  "Early" failures are those
 * where VMLAUNCH/VMRESUME itself failed, reported through RFLAGS
 * (CF: invalid current-VMCS pointer, ZF: VM-instruction error).
 * Late failures are VM-exits whose exit reason has the
 * VMX_ENTRY_FAILURE bit set.
 */
void
print_vmentry_failure_info(struct vmentry_failure *failure) {
	if (failure->early) {
		printf("Early %s failure: ", failure->instr);
		switch (failure->flags & VMX_ENTRY_FLAGS) {
		case X86_EFLAGS_CF:
			printf("current-VMCS pointer is not valid.\n");
			break;
		case X86_EFLAGS_ZF:
			printf("error number is %ld. See Intel 30.4.\n",
			       vmcs_read(VMX_INST_ERROR));
			break;
		default:
			printf("unexpected flags %lx!\n", failure->flags);
		}
	} else {
		u64 reason = vmcs_read(EXI_REASON);
		u64 qual = vmcs_read(EXI_QUALIFICATION);

		printf("Non-early %s failure (reason=%#lx, qual=%#lx): ",
			failure->instr, reason, qual);

		/* The low byte of the exit reason is the basic reason. */
		switch (reason & 0xff) {
		case VMX_FAIL_STATE:
			printf("invalid guest state\n");
			break;
		case VMX_FAIL_MSR:
			printf("MSR loading\n");
			break;
		case VMX_FAIL_MCHECK:
			printf("machine-check event\n");
			break;
		default:
			printf("unexpected basic exit reason %ld\n",
			       reason & 0xff);
		}

		if (!(reason & VMX_ENTRY_FAILURE))
			printf("\tVMX_ENTRY_FAILURE BIT NOT SET!\n");

		if (reason & 0x7fff0000)
			printf("\tRESERVED BITS SET!\n");
	}
}
5623b50efe3SPeter Feiner 
/*
 * VMCLEAR should ensure all VMCS state is flushed to the VMCS
 * region in memory, so a byte-for-byte copy of a cleared VMCS is
 * itself a usable VMCS with identical contents.
 */
static void test_vmclear_flushing(void)
{
	struct vmcs *vmcs[3] = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		vmcs[i] = alloc_page();
		memset(vmcs[i], 0, PAGE_SIZE);
	}

	vmcs[0]->revision_id = basic.revision;
	assert(!vmcs_clear(vmcs[0]));
	assert(!make_vmcs_current(vmcs[0]));
	set_all_vmcs_fields(0x86);

	/* Clear the *current* VMCS, copy it, and load the copy. */
	assert(!vmcs_clear(vmcs[0]));
	memcpy(vmcs[1], vmcs[0], basic.size);
	assert(!make_vmcs_current(vmcs[1]));
	report("test vmclear flush (current VMCS)", check_all_vmcs_fields(0x86));

	/* Same, but clear a VMCS that is active yet not current. */
	set_all_vmcs_fields(0x87);
	assert(!make_vmcs_current(vmcs[0]));
	assert(!vmcs_clear(vmcs[1]));
	memcpy(vmcs[2], vmcs[1], basic.size);
	assert(!make_vmcs_current(vmcs[2]));
	report("test vmclear flush (!current VMCS)", check_all_vmcs_fields(0x87));

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		assert(!vmcs_clear(vmcs[i]));
		free_page(vmcs[i]);
	}
}
5993b50efe3SPeter Feiner 
/*
 * Exercise VMCLEAR with invalid operands (which must fail, i.e.
 * vmcs_clear() returns 1) and with a valid VMCS (which must succeed),
 * then run the flush-to-memory test.
 */
static void test_vmclear(void)
{
	struct vmcs *tmp_root;
	int width = cpuid_maxphyaddr();

	/*
	 * Note- The tests below do not necessarily have a
	 * valid VMCS, but that's ok since the invalid vmcs
	 * is only used for a specific test and is discarded
	 * without touching its contents
	 */

	/* Unaligned page access */
	tmp_root = (struct vmcs *)((intptr_t)vmcs_root + 1);
	report("test vmclear with unaligned vmcs",
	       vmcs_clear(tmp_root) == 1);

	/* gpa bits beyond physical address width are set*/
	tmp_root = (struct vmcs *)((intptr_t)vmcs_root |
				   ((u64)1 << (width+1)));
	report("test vmclear with vmcs address bits set beyond physical address width",
	       vmcs_clear(tmp_root) == 1);

	/* Pass VMXON region */
	tmp_root = (struct vmcs *)vmxon_region;
	report("test vmclear with vmxon region",
	       vmcs_clear(tmp_root) == 1);

	/* Valid VMCS */
	report("test vmclear with valid vmcs region", vmcs_clear(vmcs_root) == 0);

	test_vmclear_flushing();
}
6339d7eaa29SArthur Chunqi Li 
6349d7eaa29SArthur Chunqi Li static void __attribute__((__used__)) guest_main(void)
6359d7eaa29SArthur Chunqi Li {
636794c67a9SPeter Feiner 	if (current->v2)
637794c67a9SPeter Feiner 		v2_guest_main();
638794c67a9SPeter Feiner 	else
6399d7eaa29SArthur Chunqi Li 		current->guest_main();
6409d7eaa29SArthur Chunqi Li }
6419d7eaa29SArthur Chunqi Li 
6429d7eaa29SArthur Chunqi Li /* guest_entry */
6439d7eaa29SArthur Chunqi Li asm(
6449d7eaa29SArthur Chunqi Li 	".align	4, 0x90\n\t"
6459d7eaa29SArthur Chunqi Li 	".globl	entry_guest\n\t"
6469d7eaa29SArthur Chunqi Li 	"guest_entry:\n\t"
6479d7eaa29SArthur Chunqi Li 	"	call guest_main\n\t"
6489d7eaa29SArthur Chunqi Li 	"	mov $1, %edi\n\t"
6499d7eaa29SArthur Chunqi Li 	"	call hypercall\n\t"
6509d7eaa29SArthur Chunqi Li );
6519d7eaa29SArthur Chunqi Li 
6526884af61SArthur Chunqi Li /* EPT paging structure related functions */
65369c531c8SPeter Feiner /* split_large_ept_entry: Split a 2M/1G large page into 512 smaller PTEs.
65469c531c8SPeter Feiner 		@ptep : large page table entry to split
65569c531c8SPeter Feiner 		@level : level of ptep (2 or 3)
65669c531c8SPeter Feiner  */
65769c531c8SPeter Feiner static void split_large_ept_entry(unsigned long *ptep, int level)
65869c531c8SPeter Feiner {
65969c531c8SPeter Feiner 	unsigned long *new_pt;
66069c531c8SPeter Feiner 	unsigned long gpa;
66169c531c8SPeter Feiner 	unsigned long pte;
66269c531c8SPeter Feiner 	unsigned long prototype;
66369c531c8SPeter Feiner 	int i;
66469c531c8SPeter Feiner 
66569c531c8SPeter Feiner 	pte = *ptep;
66669c531c8SPeter Feiner 	assert(pte & EPT_PRESENT);
66769c531c8SPeter Feiner 	assert(pte & EPT_LARGE_PAGE);
66869c531c8SPeter Feiner 	assert(level == 2 || level == 3);
66969c531c8SPeter Feiner 
67069c531c8SPeter Feiner 	new_pt = alloc_page();
67169c531c8SPeter Feiner 	assert(new_pt);
67269c531c8SPeter Feiner 	memset(new_pt, 0, PAGE_SIZE);
67369c531c8SPeter Feiner 
67469c531c8SPeter Feiner 	prototype = pte & ~EPT_ADDR_MASK;
67569c531c8SPeter Feiner 	if (level == 2)
67669c531c8SPeter Feiner 		prototype &= ~EPT_LARGE_PAGE;
67769c531c8SPeter Feiner 
67869c531c8SPeter Feiner 	gpa = pte & EPT_ADDR_MASK;
67969c531c8SPeter Feiner 	for (i = 0; i < EPT_PGDIR_ENTRIES; i++) {
68069c531c8SPeter Feiner 		new_pt[i] = prototype | gpa;
68169c531c8SPeter Feiner 		gpa += 1ul << EPT_LEVEL_SHIFT(level - 1);
68269c531c8SPeter Feiner 	}
68369c531c8SPeter Feiner 
68469c531c8SPeter Feiner 	pte &= ~EPT_LARGE_PAGE;
68569c531c8SPeter Feiner 	pte &= ~EPT_ADDR_MASK;
68669c531c8SPeter Feiner 	pte |= virt_to_phys(new_pt);
68769c531c8SPeter Feiner 
68869c531c8SPeter Feiner 	*ptep = pte;
68969c531c8SPeter Feiner }
69069c531c8SPeter Feiner 
6916884af61SArthur Chunqi Li /* install_ept_entry : Install a page to a given level in EPT
6926884af61SArthur Chunqi Li 		@pml4 : addr of pml4 table
6936884af61SArthur Chunqi Li 		@pte_level : level of PTE to set
6946884af61SArthur Chunqi Li 		@guest_addr : physical address of guest
6956884af61SArthur Chunqi Li 		@pte : pte value to set
6966884af61SArthur Chunqi Li 		@pt_page : address of page table, NULL for a new page
6976884af61SArthur Chunqi Li  */
6986884af61SArthur Chunqi Li void install_ept_entry(unsigned long *pml4,
6996884af61SArthur Chunqi Li 		int pte_level,
7006884af61SArthur Chunqi Li 		unsigned long guest_addr,
7016884af61SArthur Chunqi Li 		unsigned long pte,
7026884af61SArthur Chunqi Li 		unsigned long *pt_page)
7036884af61SArthur Chunqi Li {
7046884af61SArthur Chunqi Li 	int level;
7056884af61SArthur Chunqi Li 	unsigned long *pt = pml4;
7066884af61SArthur Chunqi Li 	unsigned offset;
7076884af61SArthur Chunqi Li 
708dff740c0SPeter Feiner 	/* EPT only uses 48 bits of GPA. */
709dff740c0SPeter Feiner 	assert(guest_addr < (1ul << 48));
710dff740c0SPeter Feiner 
7116884af61SArthur Chunqi Li 	for (level = EPT_PAGE_LEVEL; level > pte_level; --level) {
712a969e087SPeter Feiner 		offset = (guest_addr >> EPT_LEVEL_SHIFT(level))
7136884af61SArthur Chunqi Li 				& EPT_PGDIR_MASK;
7146884af61SArthur Chunqi Li 		if (!(pt[offset] & (EPT_PRESENT))) {
7156884af61SArthur Chunqi Li 			unsigned long *new_pt = pt_page;
7166884af61SArthur Chunqi Li 			if (!new_pt)
7176884af61SArthur Chunqi Li 				new_pt = alloc_page();
7186884af61SArthur Chunqi Li 			else
7196884af61SArthur Chunqi Li 				pt_page = 0;
7206884af61SArthur Chunqi Li 			memset(new_pt, 0, PAGE_SIZE);
7216884af61SArthur Chunqi Li 			pt[offset] = virt_to_phys(new_pt)
7226884af61SArthur Chunqi Li 					| EPT_RA | EPT_WA | EPT_EA;
72369c531c8SPeter Feiner 		} else if (pt[offset] & EPT_LARGE_PAGE)
72469c531c8SPeter Feiner 			split_large_ept_entry(&pt[offset], level);
72500b5c590SPeter Feiner 		pt = phys_to_virt(pt[offset] & EPT_ADDR_MASK);
7266884af61SArthur Chunqi Li 	}
727a969e087SPeter Feiner 	offset = (guest_addr >> EPT_LEVEL_SHIFT(level)) & EPT_PGDIR_MASK;
7286884af61SArthur Chunqi Li 	pt[offset] = pte;
7296884af61SArthur Chunqi Li }
7306884af61SArthur Chunqi Li 
7316884af61SArthur Chunqi Li /* Map a page, @perm is the permission of the page */
7326884af61SArthur Chunqi Li void install_ept(unsigned long *pml4,
7336884af61SArthur Chunqi Li 		unsigned long phys,
7346884af61SArthur Chunqi Li 		unsigned long guest_addr,
7356884af61SArthur Chunqi Li 		u64 perm)
7366884af61SArthur Chunqi Li {
7376884af61SArthur Chunqi Li 	install_ept_entry(pml4, 1, guest_addr, (phys & PAGE_MASK) | perm, 0);
7386884af61SArthur Chunqi Li }
7396884af61SArthur Chunqi Li 
7406884af61SArthur Chunqi Li /* Map a 1G-size page */
7416884af61SArthur Chunqi Li void install_1g_ept(unsigned long *pml4,
7426884af61SArthur Chunqi Li 		unsigned long phys,
7436884af61SArthur Chunqi Li 		unsigned long guest_addr,
7446884af61SArthur Chunqi Li 		u64 perm)
7456884af61SArthur Chunqi Li {
7466884af61SArthur Chunqi Li 	install_ept_entry(pml4, 3, guest_addr,
7476884af61SArthur Chunqi Li 			(phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
7486884af61SArthur Chunqi Li }
7496884af61SArthur Chunqi Li 
7506884af61SArthur Chunqi Li /* Map a 2M-size page */
7516884af61SArthur Chunqi Li void install_2m_ept(unsigned long *pml4,
7526884af61SArthur Chunqi Li 		unsigned long phys,
7536884af61SArthur Chunqi Li 		unsigned long guest_addr,
7546884af61SArthur Chunqi Li 		u64 perm)
7556884af61SArthur Chunqi Li {
7566884af61SArthur Chunqi Li 	install_ept_entry(pml4, 2, guest_addr,
7576884af61SArthur Chunqi Li 			(phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
7586884af61SArthur Chunqi Li }
7596884af61SArthur Chunqi Li 
7606884af61SArthur Chunqi Li /* setup_ept_range : Setup a range of 1:1 mapped page to EPT paging structure.
7616884af61SArthur Chunqi Li 		@start : start address of guest page
7626884af61SArthur Chunqi Li 		@len : length of address to be mapped
7636884af61SArthur Chunqi Li 		@map_1g : whether 1G page map is used
7646884af61SArthur Chunqi Li 		@map_2m : whether 2M page map is used
7656884af61SArthur Chunqi Li 		@perm : permission for every page
7666884af61SArthur Chunqi Li  */
767b947e241SJan Kiszka void setup_ept_range(unsigned long *pml4, unsigned long start,
7686884af61SArthur Chunqi Li 		     unsigned long len, int map_1g, int map_2m, u64 perm)
7696884af61SArthur Chunqi Li {
7706884af61SArthur Chunqi Li 	u64 phys = start;
7716884af61SArthur Chunqi Li 	u64 max = (u64)len + (u64)start;
7726884af61SArthur Chunqi Li 
7736884af61SArthur Chunqi Li 	if (map_1g) {
7746884af61SArthur Chunqi Li 		while (phys + PAGE_SIZE_1G <= max) {
7756884af61SArthur Chunqi Li 			install_1g_ept(pml4, phys, phys, perm);
7766884af61SArthur Chunqi Li 			phys += PAGE_SIZE_1G;
7776884af61SArthur Chunqi Li 		}
7786884af61SArthur Chunqi Li 	}
7796884af61SArthur Chunqi Li 	if (map_2m) {
7806884af61SArthur Chunqi Li 		while (phys + PAGE_SIZE_2M <= max) {
7816884af61SArthur Chunqi Li 			install_2m_ept(pml4, phys, phys, perm);
7826884af61SArthur Chunqi Li 			phys += PAGE_SIZE_2M;
7836884af61SArthur Chunqi Li 		}
7846884af61SArthur Chunqi Li 	}
7856884af61SArthur Chunqi Li 	while (phys + PAGE_SIZE <= max) {
7866884af61SArthur Chunqi Li 		install_ept(pml4, phys, phys, perm);
7876884af61SArthur Chunqi Li 		phys += PAGE_SIZE;
7886884af61SArthur Chunqi Li 	}
7896884af61SArthur Chunqi Li }
7906884af61SArthur Chunqi Li 
7916884af61SArthur Chunqi Li /* get_ept_pte : Get the PTE of a given level in EPT,
7926884af61SArthur Chunqi Li     @level == 1 means get the latest level*/
793*b4a405c3SRadim Krčmář bool get_ept_pte(unsigned long *pml4, unsigned long guest_addr, int level,
794*b4a405c3SRadim Krčmář 		unsigned long *pte)
7956884af61SArthur Chunqi Li {
7966884af61SArthur Chunqi Li 	int l;
797*b4a405c3SRadim Krčmář 	unsigned long *pt = pml4, iter_pte;
7986884af61SArthur Chunqi Li 	unsigned offset;
7996884af61SArthur Chunqi Li 
800dff740c0SPeter Feiner 	assert(level >= 1 && level <= 4);
801dff740c0SPeter Feiner 
8022ca6f1f3SPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
803a969e087SPeter Feiner 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
804*b4a405c3SRadim Krčmář 		iter_pte = pt[offset];
805*b4a405c3SRadim Krčmář 		if (!(iter_pte & (EPT_PRESENT)))
806*b4a405c3SRadim Krčmář 			return false;
8076884af61SArthur Chunqi Li 		if (l == level)
8082ca6f1f3SPaolo Bonzini 			break;
809*b4a405c3SRadim Krčmář 		if (l < 4 && (iter_pte & EPT_LARGE_PAGE))
810*b4a405c3SRadim Krčmář 			return false;
811*b4a405c3SRadim Krčmář 		pt = (unsigned long *)(iter_pte & EPT_ADDR_MASK);
8126884af61SArthur Chunqi Li 	}
813a969e087SPeter Feiner 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
814*b4a405c3SRadim Krčmář 	if (pte)
815*b4a405c3SRadim Krčmář 		*pte = pt[offset];
816*b4a405c3SRadim Krčmář 	return true;
8176884af61SArthur Chunqi Li }
8186884af61SArthur Chunqi Li 
819521820dbSPaolo Bonzini static void clear_ept_ad_pte(unsigned long *pml4, unsigned long guest_addr)
820521820dbSPaolo Bonzini {
821521820dbSPaolo Bonzini 	int l;
822521820dbSPaolo Bonzini 	unsigned long *pt = pml4;
823521820dbSPaolo Bonzini 	u64 pte;
824521820dbSPaolo Bonzini 	unsigned offset;
825521820dbSPaolo Bonzini 
826521820dbSPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
827521820dbSPaolo Bonzini 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
828521820dbSPaolo Bonzini 		pt[offset] &= ~(EPT_ACCESS_FLAG|EPT_DIRTY_FLAG);
829521820dbSPaolo Bonzini 		pte = pt[offset];
830521820dbSPaolo Bonzini 		if (l == 1 || (l < 4 && (pte & EPT_LARGE_PAGE)))
831521820dbSPaolo Bonzini 			break;
832521820dbSPaolo Bonzini 		pt = (unsigned long *)(pte & EPT_ADDR_MASK);
833521820dbSPaolo Bonzini 	}
834521820dbSPaolo Bonzini }
835521820dbSPaolo Bonzini 
836521820dbSPaolo Bonzini /* clear_ept_ad : Clear EPT A/D bits for the page table walk and the
837521820dbSPaolo Bonzini    final GPA of a guest address.  */
838521820dbSPaolo Bonzini void clear_ept_ad(unsigned long *pml4, u64 guest_cr3,
839521820dbSPaolo Bonzini 		  unsigned long guest_addr)
840521820dbSPaolo Bonzini {
841521820dbSPaolo Bonzini 	int l;
842521820dbSPaolo Bonzini 	unsigned long *pt = (unsigned long *)guest_cr3, gpa;
843521820dbSPaolo Bonzini 	u64 pte, offset_in_page;
844521820dbSPaolo Bonzini 	unsigned offset;
845521820dbSPaolo Bonzini 
846521820dbSPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
847521820dbSPaolo Bonzini 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
848521820dbSPaolo Bonzini 
849521820dbSPaolo Bonzini 		clear_ept_ad_pte(pml4, (u64) &pt[offset]);
850521820dbSPaolo Bonzini 		pte = pt[offset];
851521820dbSPaolo Bonzini 		if (l == 1 || (l < 4 && (pte & PT_PAGE_SIZE_MASK)))
852521820dbSPaolo Bonzini 			break;
853521820dbSPaolo Bonzini 		if (!(pte & PT_PRESENT_MASK))
854521820dbSPaolo Bonzini 			return;
855521820dbSPaolo Bonzini 		pt = (unsigned long *)(pte & PT_ADDR_MASK);
856521820dbSPaolo Bonzini 	}
857521820dbSPaolo Bonzini 
858521820dbSPaolo Bonzini 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
859521820dbSPaolo Bonzini 	offset_in_page = guest_addr & ((1 << EPT_LEVEL_SHIFT(l)) - 1);
860521820dbSPaolo Bonzini 	gpa = (pt[offset] & PT_ADDR_MASK) | (guest_addr & offset_in_page);
861521820dbSPaolo Bonzini 	clear_ept_ad_pte(pml4, gpa);
862521820dbSPaolo Bonzini }
863521820dbSPaolo Bonzini 
864521820dbSPaolo Bonzini /* check_ept_ad : Check the content of EPT A/D bits for the page table
865521820dbSPaolo Bonzini    walk and the final GPA of a guest address.  */
866521820dbSPaolo Bonzini void check_ept_ad(unsigned long *pml4, u64 guest_cr3,
867521820dbSPaolo Bonzini 		  unsigned long guest_addr, int expected_gpa_ad,
868521820dbSPaolo Bonzini 		  int expected_pt_ad)
869521820dbSPaolo Bonzini {
870521820dbSPaolo Bonzini 	int l;
871521820dbSPaolo Bonzini 	unsigned long *pt = (unsigned long *)guest_cr3, gpa;
872521820dbSPaolo Bonzini 	u64 ept_pte, pte, offset_in_page;
873521820dbSPaolo Bonzini 	unsigned offset;
874521820dbSPaolo Bonzini 	bool bad_pt_ad = false;
875521820dbSPaolo Bonzini 
876521820dbSPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
877521820dbSPaolo Bonzini 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
878521820dbSPaolo Bonzini 
879*b4a405c3SRadim Krčmář 		if (!get_ept_pte(pml4, (u64) &pt[offset], 1, &ept_pte)) {
880*b4a405c3SRadim Krčmář 			printf("EPT - guest level %d page table is not mapped.\n", l);
881521820dbSPaolo Bonzini 			return;
882*b4a405c3SRadim Krčmář 		}
883521820dbSPaolo Bonzini 
884521820dbSPaolo Bonzini 		if (!bad_pt_ad) {
885521820dbSPaolo Bonzini 			bad_pt_ad |= (ept_pte & (EPT_ACCESS_FLAG|EPT_DIRTY_FLAG)) != expected_pt_ad;
886521820dbSPaolo Bonzini 			if (bad_pt_ad)
887521820dbSPaolo Bonzini 				report("EPT - guest level %d page table A=%d/D=%d",
888521820dbSPaolo Bonzini 				       false, l,
889521820dbSPaolo Bonzini 				       !!(expected_pt_ad & EPT_ACCESS_FLAG),
890521820dbSPaolo Bonzini 				       !!(expected_pt_ad & EPT_DIRTY_FLAG));
891521820dbSPaolo Bonzini 		}
892521820dbSPaolo Bonzini 
893521820dbSPaolo Bonzini 		pte = pt[offset];
894521820dbSPaolo Bonzini 		if (l == 1 || (l < 4 && (pte & PT_PAGE_SIZE_MASK)))
895521820dbSPaolo Bonzini 			break;
896521820dbSPaolo Bonzini 		if (!(pte & PT_PRESENT_MASK))
897521820dbSPaolo Bonzini 			return;
898521820dbSPaolo Bonzini 		pt = (unsigned long *)(pte & PT_ADDR_MASK);
899521820dbSPaolo Bonzini 	}
900521820dbSPaolo Bonzini 
901521820dbSPaolo Bonzini 	if (!bad_pt_ad)
902521820dbSPaolo Bonzini 		report("EPT - guest page table structures A=%d/D=%d",
903521820dbSPaolo Bonzini 		       true,
904521820dbSPaolo Bonzini 		       !!(expected_pt_ad & EPT_ACCESS_FLAG),
905521820dbSPaolo Bonzini 		       !!(expected_pt_ad & EPT_DIRTY_FLAG));
906521820dbSPaolo Bonzini 
907521820dbSPaolo Bonzini 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
908521820dbSPaolo Bonzini 	offset_in_page = guest_addr & ((1 << EPT_LEVEL_SHIFT(l)) - 1);
909521820dbSPaolo Bonzini 	gpa = (pt[offset] & PT_ADDR_MASK) | (guest_addr & offset_in_page);
910521820dbSPaolo Bonzini 
911*b4a405c3SRadim Krčmář 	if (!get_ept_pte(pml4, gpa, 1, &ept_pte)) {
912*b4a405c3SRadim Krčmář 		report("EPT - guest physical address is not mapped", false);
913*b4a405c3SRadim Krčmář 		return;
914*b4a405c3SRadim Krčmář 	}
915521820dbSPaolo Bonzini 	report("EPT - guest physical address A=%d/D=%d",
916521820dbSPaolo Bonzini 	       (ept_pte & (EPT_ACCESS_FLAG|EPT_DIRTY_FLAG)) == expected_gpa_ad,
917521820dbSPaolo Bonzini 	       !!(expected_gpa_ad & EPT_ACCESS_FLAG),
918521820dbSPaolo Bonzini 	       !!(expected_gpa_ad & EPT_DIRTY_FLAG));
919521820dbSPaolo Bonzini }
920521820dbSPaolo Bonzini 
921521820dbSPaolo Bonzini 
9222f888fccSBandan Das void ept_sync(int type, u64 eptp)
9232f888fccSBandan Das {
9242f888fccSBandan Das 	switch (type) {
9252f888fccSBandan Das 	case INVEPT_SINGLE:
9262f888fccSBandan Das 		if (ept_vpid.val & EPT_CAP_INVEPT_SINGLE) {
9272f888fccSBandan Das 			invept(INVEPT_SINGLE, eptp);
9282f888fccSBandan Das 			break;
9292f888fccSBandan Das 		}
9302f888fccSBandan Das 		/* else fall through */
9312f888fccSBandan Das 	case INVEPT_GLOBAL:
9322f888fccSBandan Das 		if (ept_vpid.val & EPT_CAP_INVEPT_ALL) {
9332f888fccSBandan Das 			invept(INVEPT_GLOBAL, eptp);
9342f888fccSBandan Das 			break;
9352f888fccSBandan Das 		}
9362f888fccSBandan Das 		/* else fall through */
9372f888fccSBandan Das 	default:
9382f888fccSBandan Das 		printf("WARNING: invept is not supported!\n");
9392f888fccSBandan Das 	}
9402f888fccSBandan Das }
9412f888fccSBandan Das 
942dff740c0SPeter Feiner void set_ept_pte(unsigned long *pml4, unsigned long guest_addr,
9436884af61SArthur Chunqi Li 		 int level, u64 pte_val)
9446884af61SArthur Chunqi Li {
9456884af61SArthur Chunqi Li 	int l;
9466884af61SArthur Chunqi Li 	unsigned long *pt = pml4;
9476884af61SArthur Chunqi Li 	unsigned offset;
9486884af61SArthur Chunqi Li 
949dff740c0SPeter Feiner 	assert(level >= 1 && level <= 4);
950dff740c0SPeter Feiner 
9512ca6f1f3SPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
952a969e087SPeter Feiner 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
9532ca6f1f3SPaolo Bonzini 		if (l == level)
9542ca6f1f3SPaolo Bonzini 			break;
955dff740c0SPeter Feiner 		assert(pt[offset] & EPT_PRESENT);
95600b5c590SPeter Feiner 		pt = (unsigned long *)(pt[offset] & EPT_ADDR_MASK);
9576884af61SArthur Chunqi Li 	}
958a969e087SPeter Feiner 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
9596884af61SArthur Chunqi Li 	pt[offset] = pte_val;
9606884af61SArthur Chunqi Li }
9616884af61SArthur Chunqi Li 
9628ab53b95SPeter Feiner bool ept_2m_supported(void)
9638ab53b95SPeter Feiner {
9648ab53b95SPeter Feiner 	return ept_vpid.val & EPT_CAP_2M_PAGE;
9658ab53b95SPeter Feiner }
9668ab53b95SPeter Feiner 
9678ab53b95SPeter Feiner bool ept_1g_supported(void)
9688ab53b95SPeter Feiner {
9698ab53b95SPeter Feiner 	return ept_vpid.val & EPT_CAP_1G_PAGE;
9708ab53b95SPeter Feiner }
9718ab53b95SPeter Feiner 
9728ab53b95SPeter Feiner bool ept_huge_pages_supported(int level)
9738ab53b95SPeter Feiner {
9748ab53b95SPeter Feiner 	if (level == 2)
9758ab53b95SPeter Feiner 		return ept_2m_supported();
9768ab53b95SPeter Feiner 	else if (level == 3)
9778ab53b95SPeter Feiner 		return ept_1g_supported();
9788ab53b95SPeter Feiner 	else
9798ab53b95SPeter Feiner 		return false;
9808ab53b95SPeter Feiner }
9818ab53b95SPeter Feiner 
9828ab53b95SPeter Feiner bool ept_execute_only_supported(void)
9838ab53b95SPeter Feiner {
9848ab53b95SPeter Feiner 	return ept_vpid.val & EPT_CAP_WT;
9858ab53b95SPeter Feiner }
9868ab53b95SPeter Feiner 
9878ab53b95SPeter Feiner bool ept_ad_bits_supported(void)
9888ab53b95SPeter Feiner {
9898ab53b95SPeter Feiner 	return ept_vpid.val & EPT_CAP_AD_FLAG;
9908ab53b95SPeter Feiner }
9918ab53b95SPeter Feiner 
992b093c6ceSWanpeng Li void vpid_sync(int type, u16 vpid)
993b093c6ceSWanpeng Li {
994b093c6ceSWanpeng Li 	switch(type) {
995aedfd771SJim Mattson 	case INVVPID_CONTEXT_GLOBAL:
996aedfd771SJim Mattson 		if (ept_vpid.val & VPID_CAP_INVVPID_CXTGLB) {
997aedfd771SJim Mattson 			invvpid(INVVPID_CONTEXT_GLOBAL, vpid, 0);
998b093c6ceSWanpeng Li 			break;
999b093c6ceSWanpeng Li 		}
1000b093c6ceSWanpeng Li 	case INVVPID_ALL:
1001b093c6ceSWanpeng Li 		if (ept_vpid.val & VPID_CAP_INVVPID_ALL) {
1002b093c6ceSWanpeng Li 			invvpid(INVVPID_ALL, vpid, 0);
1003b093c6ceSWanpeng Li 			break;
1004b093c6ceSWanpeng Li 		}
1005b093c6ceSWanpeng Li 	default:
1006b093c6ceSWanpeng Li 		printf("WARNING: invvpid is not supported\n");
1007b093c6ceSWanpeng Li 	}
1008b093c6ceSWanpeng Li }
10096884af61SArthur Chunqi Li 
10109d7eaa29SArthur Chunqi Li static void init_vmcs_ctrl(void)
10119d7eaa29SArthur Chunqi Li {
10129d7eaa29SArthur Chunqi Li 	/* 26.2 CHECKS ON VMX CONTROLS AND HOST-STATE AREA */
10139d7eaa29SArthur Chunqi Li 	/* 26.2.1.1 */
10149d7eaa29SArthur Chunqi Li 	vmcs_write(PIN_CONTROLS, ctrl_pin);
10159d7eaa29SArthur Chunqi Li 	/* Disable VMEXIT of IO instruction */
10169d7eaa29SArthur Chunqi Li 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
10179d7eaa29SArthur Chunqi Li 	if (ctrl_cpu_rev[0].set & CPU_SECONDARY) {
10186884af61SArthur Chunqi Li 		ctrl_cpu[1] = (ctrl_cpu[1] | ctrl_cpu_rev[1].set) &
10196884af61SArthur Chunqi Li 			ctrl_cpu_rev[1].clr;
10209d7eaa29SArthur Chunqi Li 		vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
10219d7eaa29SArthur Chunqi Li 	}
10229d7eaa29SArthur Chunqi Li 	vmcs_write(CR3_TARGET_COUNT, 0);
10239d7eaa29SArthur Chunqi Li 	vmcs_write(VPID, ++vpid_cnt);
10249d7eaa29SArthur Chunqi Li }
10259d7eaa29SArthur Chunqi Li 
10269d7eaa29SArthur Chunqi Li static void init_vmcs_host(void)
10279d7eaa29SArthur Chunqi Li {
10289d7eaa29SArthur Chunqi Li 	/* 26.2 CHECKS ON VMX CONTROLS AND HOST-STATE AREA */
10299d7eaa29SArthur Chunqi Li 	/* 26.2.1.2 */
10309d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_EFER, rdmsr(MSR_EFER));
10319d7eaa29SArthur Chunqi Li 
10329d7eaa29SArthur Chunqi Li 	/* 26.2.1.3 */
10339d7eaa29SArthur Chunqi Li 	vmcs_write(ENT_CONTROLS, ctrl_enter);
10349d7eaa29SArthur Chunqi Li 	vmcs_write(EXI_CONTROLS, ctrl_exit);
10359d7eaa29SArthur Chunqi Li 
10369d7eaa29SArthur Chunqi Li 	/* 26.2.2 */
10379d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_CR0, read_cr0());
10389d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_CR3, read_cr3());
10399d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_CR4, read_cr4());
10409d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_SYSENTER_EIP, (u64)(&entry_sysenter));
104169d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SYSENTER_CS,  KERNEL_CS);
10429d7eaa29SArthur Chunqi Li 
10439d7eaa29SArthur Chunqi Li 	/* 26.2.3 */
104469d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_CS, KERNEL_CS);
104569d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_SS, KERNEL_DS);
104669d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_DS, KERNEL_DS);
104769d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_ES, KERNEL_DS);
104869d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_FS, KERNEL_DS);
104969d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_GS, KERNEL_DS);
105069d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_TR, TSS_MAIN);
1051337166aaSJan Kiszka 	vmcs_write(HOST_BASE_TR, tss_descr.base);
1052337166aaSJan Kiszka 	vmcs_write(HOST_BASE_GDTR, gdt64_desc.base);
1053337166aaSJan Kiszka 	vmcs_write(HOST_BASE_IDTR, idt_descr.base);
10549d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_BASE_FS, 0);
10559d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_BASE_GS, 0);
10569d7eaa29SArthur Chunqi Li 
10579d7eaa29SArthur Chunqi Li 	/* Set other vmcs area */
10589d7eaa29SArthur Chunqi Li 	vmcs_write(PF_ERROR_MASK, 0);
10599d7eaa29SArthur Chunqi Li 	vmcs_write(PF_ERROR_MATCH, 0);
10609d7eaa29SArthur Chunqi Li 	vmcs_write(VMCS_LINK_PTR, ~0ul);
10619d7eaa29SArthur Chunqi Li 	vmcs_write(VMCS_LINK_PTR_HI, ~0ul);
10629d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_RIP, (u64)(&vmx_return));
10639d7eaa29SArthur Chunqi Li }
10649d7eaa29SArthur Chunqi Li 
10659d7eaa29SArthur Chunqi Li static void init_vmcs_guest(void)
10669d7eaa29SArthur Chunqi Li {
10679d7eaa29SArthur Chunqi Li 	/* 26.3 CHECKING AND LOADING GUEST STATE */
10689d7eaa29SArthur Chunqi Li 	ulong guest_cr0, guest_cr4, guest_cr3;
10699d7eaa29SArthur Chunqi Li 	/* 26.3.1.1 */
10709d7eaa29SArthur Chunqi Li 	guest_cr0 = read_cr0();
10719d7eaa29SArthur Chunqi Li 	guest_cr4 = read_cr4();
10729d7eaa29SArthur Chunqi Li 	guest_cr3 = read_cr3();
10739d7eaa29SArthur Chunqi Li 	if (ctrl_enter & ENT_GUEST_64) {
10749d7eaa29SArthur Chunqi Li 		guest_cr0 |= X86_CR0_PG;
10759d7eaa29SArthur Chunqi Li 		guest_cr4 |= X86_CR4_PAE;
10769d7eaa29SArthur Chunqi Li 	}
10779d7eaa29SArthur Chunqi Li 	if ((ctrl_enter & ENT_GUEST_64) == 0)
10789d7eaa29SArthur Chunqi Li 		guest_cr4 &= (~X86_CR4_PCIDE);
10799d7eaa29SArthur Chunqi Li 	if (guest_cr0 & X86_CR0_PG)
10809d7eaa29SArthur Chunqi Li 		guest_cr0 |= X86_CR0_PE;
10819d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_CR0, guest_cr0);
10829d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_CR3, guest_cr3);
10839d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_CR4, guest_cr4);
108469d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SYSENTER_CS,  KERNEL_CS);
10859d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_SYSENTER_ESP,
10869d7eaa29SArthur Chunqi Li 		(u64)(guest_syscall_stack + PAGE_SIZE - 1));
10879d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_SYSENTER_EIP, (u64)(&entry_sysenter));
10889d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_DR7, 0);
10899d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_EFER, rdmsr(MSR_EFER));
10909d7eaa29SArthur Chunqi Li 
10919d7eaa29SArthur Chunqi Li 	/* 26.3.1.2 */
109269d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_CS, KERNEL_CS);
109369d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_SS, KERNEL_DS);
109469d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_DS, KERNEL_DS);
109569d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_ES, KERNEL_DS);
109669d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_FS, KERNEL_DS);
109769d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_GS, KERNEL_DS);
109869d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_TR, TSS_MAIN);
10999d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_SEL_LDTR, 0);
11009d7eaa29SArthur Chunqi Li 
11019d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_CS, 0);
11029d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_ES, 0);
11039d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_SS, 0);
11049d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_DS, 0);
11059d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_FS, 0);
11069d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_GS, 0);
1107337166aaSJan Kiszka 	vmcs_write(GUEST_BASE_TR, tss_descr.base);
11089d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_LDTR, 0);
11099d7eaa29SArthur Chunqi Li 
11109d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_CS, 0xFFFFFFFF);
11119d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_DS, 0xFFFFFFFF);
11129d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_ES, 0xFFFFFFFF);
11139d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_SS, 0xFFFFFFFF);
11149d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_FS, 0xFFFFFFFF);
11159d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_GS, 0xFFFFFFFF);
11169d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_LDTR, 0xffff);
1117337166aaSJan Kiszka 	vmcs_write(GUEST_LIMIT_TR, tss_descr.limit);
11189d7eaa29SArthur Chunqi Li 
11199d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_CS, 0xa09b);
11209d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_DS, 0xc093);
11219d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_ES, 0xc093);
11229d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_FS, 0xc093);
11239d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_GS, 0xc093);
11249d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_SS, 0xc093);
11259d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_LDTR, 0x82);
11269d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_TR, 0x8b);
11279d7eaa29SArthur Chunqi Li 
11289d7eaa29SArthur Chunqi Li 	/* 26.3.1.3 */
1129337166aaSJan Kiszka 	vmcs_write(GUEST_BASE_GDTR, gdt64_desc.base);
1130337166aaSJan Kiszka 	vmcs_write(GUEST_BASE_IDTR, idt_descr.base);
1131337166aaSJan Kiszka 	vmcs_write(GUEST_LIMIT_GDTR, gdt64_desc.limit);
1132337166aaSJan Kiszka 	vmcs_write(GUEST_LIMIT_IDTR, idt_descr.limit);
11339d7eaa29SArthur Chunqi Li 
11349d7eaa29SArthur Chunqi Li 	/* 26.3.1.4 */
11359d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_RIP, (u64)(&guest_entry));
11369d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_RSP, (u64)(guest_stack + PAGE_SIZE - 1));
11379d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_RFLAGS, 0x2);
11389d7eaa29SArthur Chunqi Li 
11399d7eaa29SArthur Chunqi Li 	/* 26.3.1.5 */
114017ba0dd0SJan Kiszka 	vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
11419d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_INTR_STATE, 0);
11429d7eaa29SArthur Chunqi Li }
11439d7eaa29SArthur Chunqi Li 
11449d7eaa29SArthur Chunqi Li static int init_vmcs(struct vmcs **vmcs)
11459d7eaa29SArthur Chunqi Li {
11469d7eaa29SArthur Chunqi Li 	*vmcs = alloc_page();
11479d7eaa29SArthur Chunqi Li 	memset(*vmcs, 0, PAGE_SIZE);
11489d7eaa29SArthur Chunqi Li 	(*vmcs)->revision_id = basic.revision;
11499d7eaa29SArthur Chunqi Li 	/* vmclear first to init vmcs */
11509d7eaa29SArthur Chunqi Li 	if (vmcs_clear(*vmcs)) {
11519d7eaa29SArthur Chunqi Li 		printf("%s : vmcs_clear error\n", __func__);
11529d7eaa29SArthur Chunqi Li 		return 1;
11539d7eaa29SArthur Chunqi Li 	}
11549d7eaa29SArthur Chunqi Li 
11559d7eaa29SArthur Chunqi Li 	if (make_vmcs_current(*vmcs)) {
11569d7eaa29SArthur Chunqi Li 		printf("%s : make_vmcs_current error\n", __func__);
11579d7eaa29SArthur Chunqi Li 		return 1;
11589d7eaa29SArthur Chunqi Li 	}
11599d7eaa29SArthur Chunqi Li 
11609d7eaa29SArthur Chunqi Li 	/* All settings to pin/exit/enter/cpu
11619d7eaa29SArthur Chunqi Li 	   control fields should be placed here */
11629d7eaa29SArthur Chunqi Li 	ctrl_pin |= PIN_EXTINT | PIN_NMI | PIN_VIRT_NMI;
11639d7eaa29SArthur Chunqi Li 	ctrl_exit = EXI_LOAD_EFER | EXI_HOST_64;
11649d7eaa29SArthur Chunqi Li 	ctrl_enter = (ENT_LOAD_EFER | ENT_GUEST_64);
11659d7eaa29SArthur Chunqi Li 	/* DIsable IO instruction VMEXIT now */
11669d7eaa29SArthur Chunqi Li 	ctrl_cpu[0] &= (~(CPU_IO | CPU_IO_BITMAP));
11679d7eaa29SArthur Chunqi Li 	ctrl_cpu[1] = 0;
11689d7eaa29SArthur Chunqi Li 
11699d7eaa29SArthur Chunqi Li 	ctrl_pin = (ctrl_pin | ctrl_pin_rev.set) & ctrl_pin_rev.clr;
11709d7eaa29SArthur Chunqi Li 	ctrl_enter = (ctrl_enter | ctrl_enter_rev.set) & ctrl_enter_rev.clr;
11719d7eaa29SArthur Chunqi Li 	ctrl_exit = (ctrl_exit | ctrl_exit_rev.set) & ctrl_exit_rev.clr;
11729d7eaa29SArthur Chunqi Li 	ctrl_cpu[0] = (ctrl_cpu[0] | ctrl_cpu_rev[0].set) & ctrl_cpu_rev[0].clr;
11739d7eaa29SArthur Chunqi Li 
11749d7eaa29SArthur Chunqi Li 	init_vmcs_ctrl();
11759d7eaa29SArthur Chunqi Li 	init_vmcs_host();
11769d7eaa29SArthur Chunqi Li 	init_vmcs_guest();
11779d7eaa29SArthur Chunqi Li 	return 0;
11789d7eaa29SArthur Chunqi Li }
11799d7eaa29SArthur Chunqi Li 
11809d7eaa29SArthur Chunqi Li static void init_vmx(void)
11819d7eaa29SArthur Chunqi Li {
11823ee34093SArthur Chunqi Li 	ulong fix_cr0_set, fix_cr0_clr;
11833ee34093SArthur Chunqi Li 	ulong fix_cr4_set, fix_cr4_clr;
11843ee34093SArthur Chunqi Li 
11859d7eaa29SArthur Chunqi Li 	vmxon_region = alloc_page();
11869d7eaa29SArthur Chunqi Li 	memset(vmxon_region, 0, PAGE_SIZE);
11879d7eaa29SArthur Chunqi Li 
11889d7eaa29SArthur Chunqi Li 	fix_cr0_set =  rdmsr(MSR_IA32_VMX_CR0_FIXED0);
11899d7eaa29SArthur Chunqi Li 	fix_cr0_clr =  rdmsr(MSR_IA32_VMX_CR0_FIXED1);
11909d7eaa29SArthur Chunqi Li 	fix_cr4_set =  rdmsr(MSR_IA32_VMX_CR4_FIXED0);
11919d7eaa29SArthur Chunqi Li 	fix_cr4_clr = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
11929d7eaa29SArthur Chunqi Li 	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
11939d7eaa29SArthur Chunqi Li 	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PIN
11949d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_PINBASED_CTLS);
11959d7eaa29SArthur Chunqi Li 	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT
11969d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_EXIT_CTLS);
11979d7eaa29SArthur Chunqi Li 	ctrl_enter_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_ENTRY
11989d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_ENTRY_CTLS);
11999d7eaa29SArthur Chunqi Li 	ctrl_cpu_rev[0].val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PROC
12009d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_PROCBASED_CTLS);
12016884af61SArthur Chunqi Li 	if ((ctrl_cpu_rev[0].clr & CPU_SECONDARY) != 0)
12029d7eaa29SArthur Chunqi Li 		ctrl_cpu_rev[1].val = rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
12036884af61SArthur Chunqi Li 	else
12046884af61SArthur Chunqi Li 		ctrl_cpu_rev[1].val = 0;
12056884af61SArthur Chunqi Li 	if ((ctrl_cpu_rev[1].clr & (CPU_EPT | CPU_VPID)) != 0)
12069d7eaa29SArthur Chunqi Li 		ept_vpid.val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
12076884af61SArthur Chunqi Li 	else
12086884af61SArthur Chunqi Li 		ept_vpid.val = 0;
12099d7eaa29SArthur Chunqi Li 
12109d7eaa29SArthur Chunqi Li 	write_cr0((read_cr0() & fix_cr0_clr) | fix_cr0_set);
12119d7eaa29SArthur Chunqi Li 	write_cr4((read_cr4() & fix_cr4_clr) | fix_cr4_set | X86_CR4_VMXE);
12129d7eaa29SArthur Chunqi Li 
12139d7eaa29SArthur Chunqi Li 	*vmxon_region = basic.revision;
12149d7eaa29SArthur Chunqi Li 
12159d7eaa29SArthur Chunqi Li 	guest_stack = alloc_page();
12169d7eaa29SArthur Chunqi Li 	memset(guest_stack, 0, PAGE_SIZE);
12179d7eaa29SArthur Chunqi Li 	guest_syscall_stack = alloc_page();
12189d7eaa29SArthur Chunqi Li 	memset(guest_syscall_stack, 0, PAGE_SIZE);
12199d7eaa29SArthur Chunqi Li }
12209d7eaa29SArthur Chunqi Li 
/*
 * Callback for test_for_exception(): enter and immediately leave VMX
 * operation.  Used to check that VMXON faults (#GP) when
 * MSR_IA32_FEATURE_CONTROL forbids it.  @data is unused.
 */
static void do_vmxon_off(void *data)
{
	vmx_on();
	vmx_off();
}
12263b127446SJan Kiszka 
/*
 * Callback for test_for_exception(): attempt to clear
 * MSR_IA32_FEATURE_CONTROL.  Expected to #GP once the lock bit is set.
 * @data is unused.
 */
static void do_write_feature_control(void *data)
{
	wrmsr(MSR_IA32_FEATURE_CONTROL, 0);
}
12313b127446SJan Kiszka 
12323b127446SJan Kiszka static int test_vmx_feature_control(void)
12333b127446SJan Kiszka {
12343b127446SJan Kiszka 	u64 ia32_feature_control;
12353b127446SJan Kiszka 	bool vmx_enabled;
12363b127446SJan Kiszka 
12373b127446SJan Kiszka 	ia32_feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
12383b127446SJan Kiszka 	vmx_enabled = ((ia32_feature_control & 0x5) == 0x5);
12393b127446SJan Kiszka 	if ((ia32_feature_control & 0x5) == 0x5) {
12403b127446SJan Kiszka 		printf("VMX enabled and locked by BIOS\n");
12413b127446SJan Kiszka 		return 0;
12423b127446SJan Kiszka 	} else if (ia32_feature_control & 0x1) {
12433b127446SJan Kiszka 		printf("ERROR: VMX locked out by BIOS!?\n");
12443b127446SJan Kiszka 		return 1;
12453b127446SJan Kiszka 	}
12463b127446SJan Kiszka 
12473b127446SJan Kiszka 	wrmsr(MSR_IA32_FEATURE_CONTROL, 0);
12483b127446SJan Kiszka 	report("test vmxon with FEATURE_CONTROL cleared",
1249e3f363c4SJan Kiszka 	       test_for_exception(GP_VECTOR, &do_vmxon_off, NULL));
12503b127446SJan Kiszka 
12513b127446SJan Kiszka 	wrmsr(MSR_IA32_FEATURE_CONTROL, 0x4);
12523b127446SJan Kiszka 	report("test vmxon without FEATURE_CONTROL lock",
1253e3f363c4SJan Kiszka 	       test_for_exception(GP_VECTOR, &do_vmxon_off, NULL));
12543b127446SJan Kiszka 
12553b127446SJan Kiszka 	wrmsr(MSR_IA32_FEATURE_CONTROL, 0x5);
12563b127446SJan Kiszka 	vmx_enabled = ((rdmsr(MSR_IA32_FEATURE_CONTROL) & 0x5) == 0x5);
12573b127446SJan Kiszka 	report("test enable VMX in FEATURE_CONTROL", vmx_enabled);
12583b127446SJan Kiszka 
12593b127446SJan Kiszka 	report("test FEATURE_CONTROL lock bit",
1260e3f363c4SJan Kiszka 	       test_for_exception(GP_VECTOR, &do_write_feature_control, NULL));
12613b127446SJan Kiszka 
12623b127446SJan Kiszka 	return !vmx_enabled;
12639d7eaa29SArthur Chunqi Li }
12649d7eaa29SArthur Chunqi Li 
12659d7eaa29SArthur Chunqi Li static int test_vmxon(void)
12669d7eaa29SArthur Chunqi Li {
1267ce21d809SBandan Das 	int ret, ret1;
1268ce21d809SBandan Das 	u64 *tmp_region = vmxon_region;
1269e2cf1c9dSEduardo Habkost 	int width = cpuid_maxphyaddr();
12709d7eaa29SArthur Chunqi Li 
1271ce21d809SBandan Das 	/* Unaligned page access */
1272ce21d809SBandan Das 	vmxon_region = (u64 *)((intptr_t)vmxon_region + 1);
1273ce21d809SBandan Das 	ret1 = vmx_on();
1274ce21d809SBandan Das 	report("test vmxon with unaligned vmxon region", ret1);
1275ce21d809SBandan Das 	if (!ret1) {
1276ce21d809SBandan Das 		ret = 1;
1277ce21d809SBandan Das 		goto out;
1278ce21d809SBandan Das 	}
1279ce21d809SBandan Das 
1280ce21d809SBandan Das 	/* gpa bits beyond physical address width are set*/
1281ce21d809SBandan Das 	vmxon_region = (u64 *)((intptr_t)tmp_region | ((u64)1 << (width+1)));
1282ce21d809SBandan Das 	ret1 = vmx_on();
1283ce21d809SBandan Das 	report("test vmxon with bits set beyond physical address width", ret1);
1284ce21d809SBandan Das 	if (!ret1) {
1285ce21d809SBandan Das 		ret = 1;
1286ce21d809SBandan Das 		goto out;
1287ce21d809SBandan Das 	}
1288ce21d809SBandan Das 
1289ce21d809SBandan Das 	/* invalid revision indentifier */
1290ce21d809SBandan Das 	vmxon_region = tmp_region;
1291ce21d809SBandan Das 	*vmxon_region = 0xba9da9;
1292ce21d809SBandan Das 	ret1 = vmx_on();
1293ce21d809SBandan Das 	report("test vmxon with invalid revision identifier", ret1);
1294ce21d809SBandan Das 	if (!ret1) {
1295ce21d809SBandan Das 		ret = 1;
1296ce21d809SBandan Das 		goto out;
1297ce21d809SBandan Das 	}
1298ce21d809SBandan Das 
1299ce21d809SBandan Das 	/* and finally a valid region */
1300ce21d809SBandan Das 	*vmxon_region = basic.revision;
13019d7eaa29SArthur Chunqi Li 	ret = vmx_on();
1302ce21d809SBandan Das 	report("test vmxon with valid vmxon region", !ret);
1303ce21d809SBandan Das 
1304ce21d809SBandan Das out:
13059d7eaa29SArthur Chunqi Li 	return ret;
13069d7eaa29SArthur Chunqi Li }
13079d7eaa29SArthur Chunqi Li 
13089d7eaa29SArthur Chunqi Li static void test_vmptrld(void)
13099d7eaa29SArthur Chunqi Li {
1310daeec979SBandan Das 	struct vmcs *vmcs, *tmp_root;
1311e2cf1c9dSEduardo Habkost 	int width = cpuid_maxphyaddr();
13129d7eaa29SArthur Chunqi Li 
13139d7eaa29SArthur Chunqi Li 	vmcs = alloc_page();
13149d7eaa29SArthur Chunqi Li 	vmcs->revision_id = basic.revision;
1315daeec979SBandan Das 
1316daeec979SBandan Das 	/* Unaligned page access */
1317daeec979SBandan Das 	tmp_root = (struct vmcs *)((intptr_t)vmcs + 1);
1318daeec979SBandan Das 	report("test vmptrld with unaligned vmcs",
13199c305952SPaolo Bonzini 	       make_vmcs_current(tmp_root) == 1);
1320daeec979SBandan Das 
1321daeec979SBandan Das 	/* gpa bits beyond physical address width are set*/
1322daeec979SBandan Das 	tmp_root = (struct vmcs *)((intptr_t)vmcs |
1323daeec979SBandan Das 				   ((u64)1 << (width+1)));
1324daeec979SBandan Das 	report("test vmptrld with vmcs address bits set beyond physical address width",
13259c305952SPaolo Bonzini 	       make_vmcs_current(tmp_root) == 1);
1326daeec979SBandan Das 
1327daeec979SBandan Das 	/* Pass VMXON region */
1328799a84f8SGanShun 	make_vmcs_current(vmcs);
1329daeec979SBandan Das 	tmp_root = (struct vmcs *)vmxon_region;
1330daeec979SBandan Das 	report("test vmptrld with vmxon region",
13319c305952SPaolo Bonzini 	       make_vmcs_current(tmp_root) == 1);
1332799a84f8SGanShun 	report("test vmptrld with vmxon region vm-instruction error",
1333799a84f8SGanShun 	       vmcs_read(VMX_INST_ERROR) == VMXERR_VMPTRLD_VMXON_POINTER);
1334daeec979SBandan Das 
1335daeec979SBandan Das 	report("test vmptrld with valid vmcs region", make_vmcs_current(vmcs) == 0);
13369d7eaa29SArthur Chunqi Li }
13379d7eaa29SArthur Chunqi Li 
13389d7eaa29SArthur Chunqi Li static void test_vmptrst(void)
13399d7eaa29SArthur Chunqi Li {
13409d7eaa29SArthur Chunqi Li 	int ret;
13419d7eaa29SArthur Chunqi Li 	struct vmcs *vmcs1, *vmcs2;
13429d7eaa29SArthur Chunqi Li 
13439d7eaa29SArthur Chunqi Li 	vmcs1 = alloc_page();
13449d7eaa29SArthur Chunqi Li 	memset(vmcs1, 0, PAGE_SIZE);
13459d7eaa29SArthur Chunqi Li 	init_vmcs(&vmcs1);
13469d7eaa29SArthur Chunqi Li 	ret = vmcs_save(&vmcs2);
13479d7eaa29SArthur Chunqi Li 	report("test vmptrst", (!ret) && (vmcs1 == vmcs2));
13489d7eaa29SArthur Chunqi Li }
13499d7eaa29SArthur Chunqi Li 
/*
 * Table of VMX control MSRs, their TRUE_* counterparts, and the
 * "default1" control bits (bits that read as 1 in the non-TRUE MSR).
 * Consumed by test_vmx_caps() below.
 */
struct vmx_ctl_msr {
	const char *name;
	u32 index, true_index;	/* plain MSR and its TRUE_* variant */
	u32 default1;		/* default1 class bits for this control */
} vmx_ctl_msr[] = {
	{ "MSR_IA32_VMX_PINBASED_CTLS", MSR_IA32_VMX_PINBASED_CTLS,
	  MSR_IA32_VMX_TRUE_PIN, 0x16 },
	{ "MSR_IA32_VMX_PROCBASED_CTLS", MSR_IA32_VMX_PROCBASED_CTLS,
	  MSR_IA32_VMX_TRUE_PROC, 0x401e172 },
	/* PROCBASED_CTLS2 has no TRUE_* variant: it pairs with itself. */
	{ "MSR_IA32_VMX_PROCBASED_CTLS2", MSR_IA32_VMX_PROCBASED_CTLS2,
	  MSR_IA32_VMX_PROCBASED_CTLS2, 0 },
	{ "MSR_IA32_VMX_EXIT_CTLS", MSR_IA32_VMX_EXIT_CTLS,
	  MSR_IA32_VMX_TRUE_EXIT, 0x36dff },
	{ "MSR_IA32_VMX_ENTRY_CTLS", MSR_IA32_VMX_ENTRY_CTLS,
	  MSR_IA32_VMX_TRUE_ENTRY, 0x11ff },
};
136669c8d31cSJan Kiszka 
/*
 * Sanity-check the VMX capability MSRs: reserved bits, default1 bits,
 * and consistency between each control MSR and its TRUE_* variant.
 */
static void test_vmx_caps(void)
{
	u64 val, default1, fixed0, fixed1;
	union vmx_ctrl_msr ctrl, true_ctrl;
	unsigned int n;
	bool ok;

	printf("\nTest suite: VMX capability reporting\n");

	report("MSR_IA32_VMX_BASIC",
	       (basic.revision & (1ul << 31)) == 0 &&
	       basic.size > 0 && basic.size <= 4096 &&
	       (basic.type == 0 || basic.type == 6) &&
	       basic.reserved1 == 0 && basic.reserved2 == 0);

	/*
	 * MISC: bit 5 is required when unrestricted guest is supported;
	 * bits 16..24 bound the number of CR3 target values; the checked
	 * high/low bits must read as zero.
	 */
	val = rdmsr(MSR_IA32_VMX_MISC);
	report("MSR_IA32_VMX_MISC",
	       (!(ctrl_cpu_rev[1].clr & CPU_URG) || val & (1ul << 5)) &&
	       ((val >> 16) & 0x1ff) <= 256 &&
	       (val & 0xc0007e00) == 0);

	for (n = 0; n < ARRAY_SIZE(vmx_ctl_msr); n++) {
		ctrl.val = rdmsr(vmx_ctl_msr[n].index);
		default1 = vmx_ctl_msr[n].default1;
		/* default1 bits must be reported in the must-be-1 half,
		   and no must-be-1 bit may be outside the may-be-1 mask. */
		ok = (ctrl.set & default1) == default1;
		ok = ok && (ctrl.set & ~ctrl.clr) == 0;
		if (ok && basic.ctrl) {
			/* TRUE_* MSR: identical may-be-1 mask, must-be-1
			   mask minus the default1 bits. */
			true_ctrl.val = rdmsr(vmx_ctl_msr[n].true_index);
			ok = ctrl.clr == true_ctrl.clr;
			ok = ok && ctrl.set == (true_ctrl.set | default1);
		}
		report("%s", ok, vmx_ctl_msr[n].name);
	}

	/* Any bit fixed to 1 (fixed0) must also be allowed to be 1. */
	fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
	report("MSR_IA32_VMX_IA32_VMX_CR0_FIXED0/1",
	       ((fixed0 ^ fixed1) & ~fixed1) == 0);

	fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
	report("MSR_IA32_VMX_IA32_VMX_CR4_FIXED0/1",
	       ((fixed0 ^ fixed1) & ~fixed1) == 0);

	val = rdmsr(MSR_IA32_VMX_VMCS_ENUM);
	report("MSR_IA32_VMX_VMCS_ENUM",
	       (val & 0x3e) >= 0x2a &&
	       (val & 0xfffffffffffffc01Ull) == 0);

	/* Only defined EPT/VPID capability bits may be set. */
	val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
	report("MSR_IA32_VMX_EPT_VPID_CAP",
	       (val & 0xfffff07ef98cbebeUll) == 0);
}
142069c8d31cSJan Kiszka 
14219d7eaa29SArthur Chunqi Li /* This function can only be called in guest */
14229d7eaa29SArthur Chunqi Li static void __attribute__((__used__)) hypercall(u32 hypercall_no)
14239d7eaa29SArthur Chunqi Li {
14249d7eaa29SArthur Chunqi Li 	u64 val = 0;
14259d7eaa29SArthur Chunqi Li 	val = (hypercall_no & HYPERCALL_MASK) | HYPERCALL_BIT;
14269d7eaa29SArthur Chunqi Li 	hypercall_field = val;
14279d7eaa29SArthur Chunqi Li 	asm volatile("vmcall\n\t");
14289d7eaa29SArthur Chunqi Li }
14299d7eaa29SArthur Chunqi Li 
14309d7eaa29SArthur Chunqi Li static bool is_hypercall()
14319d7eaa29SArthur Chunqi Li {
14329d7eaa29SArthur Chunqi Li 	ulong reason, hyper_bit;
14339d7eaa29SArthur Chunqi Li 
14349d7eaa29SArthur Chunqi Li 	reason = vmcs_read(EXI_REASON) & 0xff;
14359d7eaa29SArthur Chunqi Li 	hyper_bit = hypercall_field & HYPERCALL_BIT;
14369d7eaa29SArthur Chunqi Li 	if (reason == VMX_VMCALL && hyper_bit)
14379d7eaa29SArthur Chunqi Li 		return true;
14389d7eaa29SArthur Chunqi Li 	return false;
14399d7eaa29SArthur Chunqi Li }
14409d7eaa29SArthur Chunqi Li 
14419d7eaa29SArthur Chunqi Li static int handle_hypercall()
14429d7eaa29SArthur Chunqi Li {
14439d7eaa29SArthur Chunqi Li 	ulong hypercall_no;
14449d7eaa29SArthur Chunqi Li 
14459d7eaa29SArthur Chunqi Li 	hypercall_no = hypercall_field & HYPERCALL_MASK;
14469d7eaa29SArthur Chunqi Li 	hypercall_field = 0;
14479d7eaa29SArthur Chunqi Li 	switch (hypercall_no) {
14489d7eaa29SArthur Chunqi Li 	case HYPERCALL_VMEXIT:
14499d7eaa29SArthur Chunqi Li 		return VMX_TEST_VMEXIT;
1450794c67a9SPeter Feiner 	case HYPERCALL_VMABORT:
1451794c67a9SPeter Feiner 		return VMX_TEST_VMABORT;
1452794c67a9SPeter Feiner 	case HYPERCALL_VMSKIP:
1453794c67a9SPeter Feiner 		return VMX_TEST_VMSKIP;
14549d7eaa29SArthur Chunqi Li 	default:
1455b006d7ebSAndrew Jones 		printf("ERROR : Invalid hypercall number : %ld\n", hypercall_no);
14569d7eaa29SArthur Chunqi Li 	}
14579d7eaa29SArthur Chunqi Li 	return VMX_TEST_EXIT;
14589d7eaa29SArthur Chunqi Li }
14599d7eaa29SArthur Chunqi Li 
/*
 * Host-side continuation of a guest-initiated abort (HYPERCALL_VMABORT):
 * dump where the host was and jump back to the test_run() abort target.
 * Never returns.
 */
static void continue_abort(void)
{
	assert(!in_guest);
	printf("Host was here when guest aborted:\n");
	dump_stack();
	longjmp(abort_target, 1);
	abort();	/* not reached; longjmp does not return */
}
1468794c67a9SPeter Feiner 
1469794c67a9SPeter Feiner void __abort_test(void)
1470794c67a9SPeter Feiner {
1471794c67a9SPeter Feiner 	if (in_guest)
1472794c67a9SPeter Feiner 		hypercall(HYPERCALL_VMABORT);
1473794c67a9SPeter Feiner 	else
1474794c67a9SPeter Feiner 		longjmp(abort_target, 1);
1475794c67a9SPeter Feiner 	abort();
1476794c67a9SPeter Feiner }
1477794c67a9SPeter Feiner 
/*
 * Host-side continuation of a guest-initiated skip (HYPERCALL_VMSKIP):
 * jump back to the test_run() abort target.  Never returns.
 */
static void continue_skip(void)
{
	assert(!in_guest);
	longjmp(abort_target, 1);
	abort();	/* not reached; longjmp does not return */
}
1484794c67a9SPeter Feiner 
1485794c67a9SPeter Feiner void test_skip(const char *msg)
1486794c67a9SPeter Feiner {
1487794c67a9SPeter Feiner 	printf("%s skipping test: %s\n", in_guest ? "Guest" : "Host", msg);
1488794c67a9SPeter Feiner 	if (in_guest)
1489794c67a9SPeter Feiner 		hypercall(HYPERCALL_VMABORT);
1490794c67a9SPeter Feiner 	else
1491794c67a9SPeter Feiner 		longjmp(abort_target, 1);
1492794c67a9SPeter Feiner 	abort();
1493794c67a9SPeter Feiner }
1494794c67a9SPeter Feiner 
14959d7eaa29SArthur Chunqi Li static int exit_handler()
14969d7eaa29SArthur Chunqi Li {
14979d7eaa29SArthur Chunqi Li 	int ret;
14989d7eaa29SArthur Chunqi Li 
14999d7eaa29SArthur Chunqi Li 	current->exits++;
15001d9284d0SArthur Chunqi Li 	regs.rflags = vmcs_read(GUEST_RFLAGS);
15019d7eaa29SArthur Chunqi Li 	if (is_hypercall())
15029d7eaa29SArthur Chunqi Li 		ret = handle_hypercall();
15039d7eaa29SArthur Chunqi Li 	else
15049d7eaa29SArthur Chunqi Li 		ret = current->exit_handler();
15051d9284d0SArthur Chunqi Li 	vmcs_write(GUEST_RFLAGS, regs.rflags);
15063b50efe3SPeter Feiner 
15079d7eaa29SArthur Chunqi Li 	return ret;
15089d7eaa29SArthur Chunqi Li }
15093b50efe3SPeter Feiner 
/*
 * Called if vmlaunch or vmresume fails.  @failure describes the failed
 * entry: whether it was an early failure ("VMX controls and host-state
 * area", SDM 26.2), which instruction was used, and the host RFLAGS at
 * the point of failure.
 *
 * Dispatches to the current test's entry_failure_handler when it
 * registered one; otherwise asks the framework to terminate the test.
 */
static int
entry_failure_handler(struct vmentry_failure *failure)
{
	if (current->entry_failure_handler)
		return current->entry_failure_handler(failure);
	else
		return VMX_TEST_EXIT;
}
15249d7eaa29SArthur Chunqi Li 
1525c76ddf06SPeter Feiner /*
1526c76ddf06SPeter Feiner  * Tries to enter the guest. Returns true iff entry succeeded. Otherwise,
1527c76ddf06SPeter Feiner  * populates @failure.
1528c76ddf06SPeter Feiner  */
1529c76ddf06SPeter Feiner static bool vmx_enter_guest(struct vmentry_failure *failure)
15309d7eaa29SArthur Chunqi Li {
1531c76ddf06SPeter Feiner 	failure->early = 0;
15324e809db5SPeter Feiner 
1533794c67a9SPeter Feiner 	in_guest = 1;
15349d7eaa29SArthur Chunqi Li 	asm volatile (
1535897d8365SPeter Feiner 		"mov %[HOST_RSP], %%rdi\n\t"
1536897d8365SPeter Feiner 		"vmwrite %%rsp, %%rdi\n\t"
15379d7eaa29SArthur Chunqi Li 		LOAD_GPR_C
153844417388SPaolo Bonzini 		"cmpb $0, %[launched]\n\t"
15399d7eaa29SArthur Chunqi Li 		"jne 1f\n\t"
15409d7eaa29SArthur Chunqi Li 		"vmlaunch\n\t"
15419d7eaa29SArthur Chunqi Li 		"jmp 2f\n\t"
15429d7eaa29SArthur Chunqi Li 		"1: "
15439d7eaa29SArthur Chunqi Li 		"vmresume\n\t"
15449d7eaa29SArthur Chunqi Li 		"2: "
1545f37cf4e2SPeter Feiner 		SAVE_GPR_C
1546897d8365SPeter Feiner 		"pushf\n\t"
1547897d8365SPeter Feiner 		"pop %%rdi\n\t"
1548c76ddf06SPeter Feiner 		"mov %%rdi, %[failure_flags]\n\t"
1549c76ddf06SPeter Feiner 		"movl $1, %[failure_flags]\n\t"
1550f37cf4e2SPeter Feiner 		"jmp 3f\n\t"
15519d7eaa29SArthur Chunqi Li 		"vmx_return:\n\t"
15529d7eaa29SArthur Chunqi Li 		SAVE_GPR_C
1553f37cf4e2SPeter Feiner 		"3: \n\t"
1554c76ddf06SPeter Feiner 		: [failure_early]"+m"(failure->early),
1555c76ddf06SPeter Feiner 		  [failure_flags]"=m"(failure->flags)
1556897d8365SPeter Feiner 		: [launched]"m"(launched), [HOST_RSP]"i"(HOST_RSP)
1557897d8365SPeter Feiner 		: "rdi", "memory", "cc"
15589d7eaa29SArthur Chunqi Li 	);
1559794c67a9SPeter Feiner 	in_guest = 0;
15603b50efe3SPeter Feiner 
1561c76ddf06SPeter Feiner 	failure->vmlaunch = !launched;
1562c76ddf06SPeter Feiner 	failure->instr = launched ? "vmresume" : "vmlaunch";
1563c76ddf06SPeter Feiner 
1564c76ddf06SPeter Feiner 	return !failure->early && !(vmcs_read(EXI_REASON) & VMX_ENTRY_FAILURE);
1565c76ddf06SPeter Feiner }
1566c76ddf06SPeter Feiner 
/*
 * V1 test driver: repeatedly enter the guest and act on the handler
 * verdict.  Returns 0 when the guest requests a final VMEXIT; aborts
 * (after printing diagnostics) on VMX_TEST_EXIT or an invalid verdict.
 */
static int vmx_run()
{
	while (1) {
		u32 ret;
		bool entered;
		struct vmentry_failure failure;

		entered = vmx_enter_guest(&failure);

		if (entered) {
			/*
			 * VMCS isn't in "launched" state if there's been any
			 * entry failure (early or otherwise).
			 */
			launched = 1;
			ret = exit_handler();
		} else {
			ret = entry_failure_handler(&failure);
		}

		switch (ret) {
		case VMX_TEST_RESUME:
			continue;
		case VMX_TEST_VMEXIT:
			guest_finished = 1;
			return 0;
		case VMX_TEST_EXIT:
			/* fall through to diagnostics + abort below */
			break;
		default:
			printf("ERROR : Invalid %s_handler return val %d.\n",
			       entered ? "exit" : "entry_failure",
			       ret);
			break;
		}

		/* Fatal path: describe what happened, then abort. */
		if (entered)
			print_vmexit_info();
		else
			print_vmentry_failure_info(&failure);
		abort();
	}
}
16099d7eaa29SArthur Chunqi Li 
/* Execute a single registered teardown callback with its payload. */
static void run_teardown_step(struct test_teardown_step *step)
{
	step->func(step->data);
}
1614794c67a9SPeter Feiner 
/*
 * Run one test suite: enter VMX operation, build a fresh VMCS, run
 * either the V2 entry point or the V1 vmx_run() loop, then run teardown
 * steps and leave VMX operation.  abort_target is armed here so
 * __abort_test()/test_skip() can bail out of the test body.
 *
 * Returns 1 if vmxon/vmxoff failed, 0 otherwise.
 */
static int test_run(struct vmx_test *test)
{
	int r;

	/* Validate V2 interface. */
	if (test->v2) {
		int ret = 0;
		if (test->init || test->guest_main || test->exit_handler ||
		    test->syscall_handler) {
			report("V2 test cannot specify V1 callbacks.", 0);
			ret = 1;
		}
		if (ret)
			return ret;
	}

	if (test->name == NULL)
		test->name = "(no name)";
	if (vmx_on()) {
		printf("%s : vmxon failed.\n", __func__);
		return 1;
	}

	init_vmcs(&(test->vmcs));
	/* Directly call test->init is ok here, init_vmcs has done
	   vmcs init, vmclear and vmptrld*/
	if (test->init && test->init(test->vmcs) != VMX_TEST_START)
		goto out;
	/* Reset per-test framework state. */
	teardown_count = 0;
	v2_guest_main = NULL;
	test->exits = 0;
	current = test;
	regs = test->guest_regs;
	/* Bit 1 of RFLAGS is reserved and must be set. */
	vmcs_write(GUEST_RFLAGS, regs.rflags | 0x2);
	launched = 0;
	guest_finished = 0;
	printf("\nTest suite: %s\n", test->name);

	/* Aborts/skips from the test body longjmp back to here. */
	r = setjmp(abort_target);
	if (r) {
		assert(!in_guest);
		goto out;
	}


	if (test->v2)
		test->v2();
	else
		vmx_run();

	/* Teardown steps run in reverse registration order. */
	while (teardown_count > 0)
		run_teardown_step(&teardown_steps[--teardown_count]);

	if (launched && !guest_finished)
		report("Guest didn't run to completion.", 0);

out:
	if (vmx_off()) {
		printf("%s : vmxoff failed.\n", __func__);
		return 1;
	}
	return 0;
}
16789d7eaa29SArthur Chunqi Li 
1679794c67a9SPeter Feiner /*
1680794c67a9SPeter Feiner  * Add a teardown step. Executed after the test's main function returns.
1681794c67a9SPeter Feiner  * Teardown steps executed in reverse order.
1682794c67a9SPeter Feiner  */
1683794c67a9SPeter Feiner void test_add_teardown(test_teardown_func func, void *data)
1684794c67a9SPeter Feiner {
1685794c67a9SPeter Feiner 	struct test_teardown_step *step;
1686794c67a9SPeter Feiner 
1687794c67a9SPeter Feiner 	TEST_ASSERT_MSG(teardown_count < MAX_TEST_TEARDOWN_STEPS,
1688794c67a9SPeter Feiner 			"There are already %d teardown steps.",
1689794c67a9SPeter Feiner 			teardown_count);
1690794c67a9SPeter Feiner 	step = &teardown_steps[teardown_count++];
1691794c67a9SPeter Feiner 	step->func = func;
1692794c67a9SPeter Feiner 	step->data = data;
1693794c67a9SPeter Feiner }
1694794c67a9SPeter Feiner 
/*
 * Set the target of the first enter_guest call. Can only be called once per
 * test. Must be called before first enter_guest call.  V2 tests only.
 */
void test_set_guest(test_guest_func func)
{
	assert(current->v2);
	TEST_ASSERT_MSG(!v2_guest_main, "Already set guest func.");
	v2_guest_main = func;
}
1705794c67a9SPeter Feiner 
/*
 * Enters the guest (or launches it for the first time). Error to call once the
 * guest has returned (i.e., run past the end of its guest() function). Also
 * aborts if guest entry fails.
 */
void enter_guest(void)
{
	struct vmentry_failure failure;

	TEST_ASSERT_MSG(v2_guest_main,
			"Never called test_set_guest_func!");

	TEST_ASSERT_MSG(!guest_finished,
			"Called enter_guest() after guest returned.");

	if (!vmx_enter_guest(&failure)) {
		print_vmentry_failure_info(&failure);
		abort();
	}

	/* A successful entry leaves the VMCS in "launched" state. */
	launched = 1;

	/* Service framework hypercalls (exit/abort/skip) from the guest. */
	if (is_hypercall()) {
		int ret;

		ret = handle_hypercall();
		switch (ret) {
		case VMX_TEST_VMEXIT:
			guest_finished = 1;
			break;
		case VMX_TEST_VMABORT:
			continue_abort();
			break;
		case VMX_TEST_VMSKIP:
			continue_skip();
			break;
		default:
			printf("ERROR : Invalid handle_hypercall return %d.\n",
			       ret);
			abort();
		}
	}
}
1749794c67a9SPeter Feiner 
17503ee34093SArthur Chunqi Li extern struct vmx_test vmx_tests[];
17519d7eaa29SArthur Chunqi Li 
1752875b97b3SPeter Feiner static bool
1753875b97b3SPeter Feiner test_wanted(const char *name, const char *filters[], int filter_count)
17548029cac7SPeter Feiner {
1755875b97b3SPeter Feiner 	int i;
1756875b97b3SPeter Feiner 	bool positive = false;
1757875b97b3SPeter Feiner 	bool match = false;
1758875b97b3SPeter Feiner 	char clean_name[strlen(name) + 1];
1759875b97b3SPeter Feiner 	char *c;
17608029cac7SPeter Feiner 	const char *n;
17618029cac7SPeter Feiner 
1762875b97b3SPeter Feiner 	/* Replace spaces with underscores. */
1763875b97b3SPeter Feiner 	n = name;
1764875b97b3SPeter Feiner 	c = &clean_name[0];
1765875b97b3SPeter Feiner 	do *c++ = (*n == ' ') ? '_' : *n;
1766875b97b3SPeter Feiner 	while (*n++);
1767875b97b3SPeter Feiner 
1768875b97b3SPeter Feiner 	for (i = 0; i < filter_count; i++) {
1769875b97b3SPeter Feiner 		const char *filter = filters[i];
1770875b97b3SPeter Feiner 
1771875b97b3SPeter Feiner 		if (filter[0] == '-') {
1772875b97b3SPeter Feiner 			if (simple_glob(clean_name, filter + 1))
1773875b97b3SPeter Feiner 				return false;
1774875b97b3SPeter Feiner 		} else {
1775875b97b3SPeter Feiner 			positive = true;
1776875b97b3SPeter Feiner 			match |= simple_glob(clean_name, filter);
1777875b97b3SPeter Feiner 		}
1778875b97b3SPeter Feiner 	}
1779875b97b3SPeter Feiner 
1780875b97b3SPeter Feiner 	if (!positive || match) {
1781875b97b3SPeter Feiner 		matched++;
1782875b97b3SPeter Feiner 		return true;
1783875b97b3SPeter Feiner 	} else {
17848029cac7SPeter Feiner 		return false;
17858029cac7SPeter Feiner 	}
17868029cac7SPeter Feiner }
17878029cac7SPeter Feiner 
/*
 * Entry point: set up the VM and IDT, verify VMX support, run the
 * framework self-tests that are selected on the command line, then run
 * each selected test suite from vmx_tests[].
 */
int main(int argc, const char *argv[])
{
	int i = 0;

	setup_vm();
	setup_idt();
	hypercall_field = 0;

	/* Skip argv[0]; the remaining arguments are test filters. */
	argv++;
	argc--;

	/* CPUID.1:ECX bit 5 advertises VMX. */
	if (!(cpuid(1).c & (1 << 5))) {
		printf("WARNING: vmx not supported, add '-cpu host'\n");
		goto exit;
	}
	init_vmx();
	if (test_wanted("test_vmx_feature_control", argv, argc)) {
		/* Sets MSR_IA32_FEATURE_CONTROL to 0x5 */
		if (test_vmx_feature_control() != 0)
			goto exit;
	} else {
		/* Enable and lock VMX ourselves if the BIOS didn't. */
		if ((rdmsr(MSR_IA32_FEATURE_CONTROL) & 0x5) != 0x5)
			wrmsr(MSR_IA32_FEATURE_CONTROL, 0x5);
	}

	if (test_wanted("test_vmxon", argv, argc)) {
		/* Enables VMX */
		if (test_vmxon() != 0)
			goto exit;
	} else {
		if (vmx_on()) {
			report("vmxon", 0);
			goto exit;
		}
	}

	/* Framework self-tests; all run inside VMX operation. */
	if (test_wanted("test_vmptrld", argv, argc))
		test_vmptrld();
	if (test_wanted("test_vmclear", argv, argc))
		test_vmclear();
	if (test_wanted("test_vmptrst", argv, argc))
		test_vmptrst();
	if (test_wanted("test_vmwrite_vmread", argv, argc))
		test_vmwrite_vmread();
	if (test_wanted("test_vmcs_lifecycle", argv, argc))
		test_vmcs_lifecycle();
	if (test_wanted("test_vmx_caps", argv, argc))
		test_vmx_caps();

	/* Balance vmxon from test_vmxon. */
	vmx_off();

	/* test_run() does its own vmxon/vmxoff per suite. */
	for (; vmx_tests[i].name != NULL; i++) {
		if (!test_wanted(vmx_tests[i].name, argv, argc))
			continue;
		if (test_run(&vmx_tests[i]))
			goto exit;
	}

	if (!matched)
		report("command line didn't match any tests!", matched);

exit:
	return report_summary();
}
1853