xref: /kvm-unit-tests/x86/vmx.c (revision 794c67a93e0fbfed31ea308a4fbd043af96ec9aa)
17ada359dSArthur Chunqi Li /*
27ada359dSArthur Chunqi Li  * x86/vmx.c : Framework for testing nested virtualization
37ada359dSArthur Chunqi Li  *	This is a framework to test nested VMX for KVM, which
47ada359dSArthur Chunqi Li  * 	started as a project of GSoC 2013. All test cases should
57ada359dSArthur Chunqi Li  *	be located in x86/vmx_tests.c and framework related
67ada359dSArthur Chunqi Li  *	functions should be in this file.
77ada359dSArthur Chunqi Li  *
87ada359dSArthur Chunqi Li  * How to write test cases?
 *	Add the callbacks for a test suite to the array "vmx_tests". You
 *	can write:
117ada359dSArthur Chunqi Li  *		1. init function used for initializing test suite
127ada359dSArthur Chunqi Li  *		2. main function for codes running in L2 guest,
137ada359dSArthur Chunqi Li  *		3. exit_handler to handle vmexit of L2 to L1
147ada359dSArthur Chunqi Li  *		4. syscall handler to handle L2 syscall vmexit
157ada359dSArthur Chunqi Li  *		5. vmenter fail handler to handle direct failure of vmenter
167ada359dSArthur Chunqi Li  *		6. guest_regs is loaded when vmenter and saved when
177ada359dSArthur Chunqi Li  *			vmexit, you can read and set it in exit_handler
 *	If no special function is needed for a test suite, use the
 *	corresponding basic_* functions as callbacks. More handlers
207ada359dSArthur Chunqi Li  *	can be added to "vmx_tests", see details of "struct vmx_test"
217ada359dSArthur Chunqi Li  *	and function test_run().
227ada359dSArthur Chunqi Li  *
 * Currently, the vmx test framework only sets up one VCPU and one
 * concurrent guest test environment, with the same paging for L2
 * and L1. When EPT is used, only identity (1:1) mapping from VFN
 * to PFN is used.
277ada359dSArthur Chunqi Li  *
287ada359dSArthur Chunqi Li  * Author : Arthur Chunqi Li <yzt356@gmail.com>
297ada359dSArthur Chunqi Li  */
307ada359dSArthur Chunqi Li 
319d7eaa29SArthur Chunqi Li #include "libcflat.h"
329d7eaa29SArthur Chunqi Li #include "processor.h"
339d7eaa29SArthur Chunqi Li #include "vm.h"
349d7eaa29SArthur Chunqi Li #include "desc.h"
359d7eaa29SArthur Chunqi Li #include "vmx.h"
369d7eaa29SArthur Chunqi Li #include "msr.h"
379d7eaa29SArthur Chunqi Li #include "smp.h"
389d7eaa29SArthur Chunqi Li 
/* Page handed to VMXON; its first dword must hold the VMCS revision id. */
u64 *vmxon_region;
/* Root VMCS used to run the L2 guest. */
struct vmcs *vmcs_root;
u32 vpid_cnt;
/* L2 guest stacks: regular stack and the stack used for SYSENTER handling. */
void *guest_stack, *guest_syscall_stack;
/* Working values for the VMCS pin/entry/exit/processor-based controls. */
u32 ctrl_pin, ctrl_enter, ctrl_exit, ctrl_cpu[2];
/* L2 guest GPRs: loaded on vmentry, saved on vmexit (see file header). */
struct regs regs;

/* The test suite currently being executed (entry from "vmx_tests"). */
struct vmx_test *current;

#define MAX_TEST_TEARDOWN_STEPS 10

/* One deferred cleanup action registered by a test. */
struct test_teardown_step {
	test_teardown_func func;
	void *data;
};

/* Number of registered teardown steps and their storage. */
static int teardown_count;
static struct test_teardown_step teardown_steps[MAX_TEST_TEARDOWN_STEPS];

/* Guest entry point for v2-style tests. */
static test_guest_func v2_guest_main;

u64 hypercall_field;
/* Whether the guest has already been launched (VMLAUNCH vs VMRESUME path). */
bool launched;
static int matched;
static int guest_finished;
static int in_guest;

/* Cached VMX capability MSR contents, decoded via the unions below. */
union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_cpu_rev[2];
union vmx_ctrl_msr ctrl_exit_rev;
union vmx_ctrl_msr ctrl_enter_rev;
union vmx_ept_vpid  ept_vpid;

/* Symbols provided by assembly stubs / elsewhere in the harness. */
extern struct descriptor_table_ptr gdt64_desc;
extern struct descriptor_table_ptr idt_descr;
extern struct descriptor_table_ptr tss_descr;
extern void *vmx_return;
extern void *entry_sysenter;
extern void *guest_entry;

/* Test stage counter shared between L1 and L2; see vmx_*_test_stage(). */
static volatile u32 stage;

/* longjmp() target for aborting a test run; NOTE(review): set elsewhere in
 * this file — confirm against the rest of the framework. */
static jmp_buf abort_target;
83*794c67a9SPeter Feiner 
/*
 * Description of a VMCS field for the VMWRITE/VMREAD tests: the field's
 * encoding plus a mask of the bits the field can hold.  Read-only fields
 * use a zero mask, so check_vmcs_field() trivially accepts whatever the
 * CPU reports for them.
 */
struct vmcs_field {
	u64 mask;
	u64 encoding;
};

/* Mask covering the low _bits bits of a value. */
#define MASK(_bits) GENMASK_ULL((_bits) - 1, 0)
/* Mask the width of an unsigned long, for natural-width VMCS fields. */
#define MASK_NATURAL MASK(sizeof(unsigned long) * 8)

/* Every VMCS field exercised by the tests, with its writable-bit mask. */
static struct vmcs_field vmcs_fields[] = {
	/* 16-bit control fields */
	{ MASK(16), VPID },
	{ MASK(16), PINV },
	{ MASK(16), EPTP_IDX },

	/* 16-bit guest-state fields (segment selectors, int status) */
	{ MASK(16), GUEST_SEL_ES },
	{ MASK(16), GUEST_SEL_CS },
	{ MASK(16), GUEST_SEL_SS },
	{ MASK(16), GUEST_SEL_DS },
	{ MASK(16), GUEST_SEL_FS },
	{ MASK(16), GUEST_SEL_GS },
	{ MASK(16), GUEST_SEL_LDTR },
	{ MASK(16), GUEST_SEL_TR },
	{ MASK(16), GUEST_INT_STATUS },

	/* 16-bit host-state fields (segment selectors) */
	{ MASK(16), HOST_SEL_ES },
	{ MASK(16), HOST_SEL_CS },
	{ MASK(16), HOST_SEL_SS },
	{ MASK(16), HOST_SEL_DS },
	{ MASK(16), HOST_SEL_FS },
	{ MASK(16), HOST_SEL_GS },
	{ MASK(16), HOST_SEL_TR },

	/* 64-bit control fields */
	{ MASK(64), IO_BITMAP_A },
	{ MASK(64), IO_BITMAP_B },
	{ MASK(64), MSR_BITMAP },
	{ MASK(64), EXIT_MSR_ST_ADDR },
	{ MASK(64), EXIT_MSR_LD_ADDR },
	{ MASK(64), ENTER_MSR_LD_ADDR },
	{ MASK(64), VMCS_EXEC_PTR },
	{ MASK(64), TSC_OFFSET },
	{ MASK(64), APIC_VIRT_ADDR },
	{ MASK(64), APIC_ACCS_ADDR },
	{ MASK(64), EPTP },

	{ 0 /* read-only */, INFO_PHYS_ADDR },

	/* 64-bit guest-state fields */
	{ MASK(64), VMCS_LINK_PTR },
	{ MASK(64), GUEST_DEBUGCTL },
	{ MASK(64), GUEST_EFER },
	{ MASK(64), GUEST_PAT },
	{ MASK(64), GUEST_PERF_GLOBAL_CTRL },
	{ MASK(64), GUEST_PDPTE },

	/* 64-bit host-state fields */
	{ MASK(64), HOST_PAT },
	{ MASK(64), HOST_EFER },
	{ MASK(64), HOST_PERF_GLOBAL_CTRL },

	/* 32-bit control fields */
	{ MASK(32), PIN_CONTROLS },
	{ MASK(32), CPU_EXEC_CTRL0 },
	{ MASK(32), EXC_BITMAP },
	{ MASK(32), PF_ERROR_MASK },
	{ MASK(32), PF_ERROR_MATCH },
	{ MASK(32), CR3_TARGET_COUNT },
	{ MASK(32), EXI_CONTROLS },
	{ MASK(32), EXI_MSR_ST_CNT },
	{ MASK(32), EXI_MSR_LD_CNT },
	{ MASK(32), ENT_CONTROLS },
	{ MASK(32), ENT_MSR_LD_CNT },
	{ MASK(32), ENT_INTR_INFO },
	{ MASK(32), ENT_INTR_ERROR },
	{ MASK(32), ENT_INST_LEN },
	{ MASK(32), TPR_THRESHOLD },
	{ MASK(32), CPU_EXEC_CTRL1 },

	/* 32-bit read-only data fields */
	{ 0 /* read-only */, VMX_INST_ERROR },
	{ 0 /* read-only */, EXI_REASON },
	{ 0 /* read-only */, EXI_INTR_INFO },
	{ 0 /* read-only */, EXI_INTR_ERROR },
	{ 0 /* read-only */, IDT_VECT_INFO },
	{ 0 /* read-only */, IDT_VECT_ERROR },
	{ 0 /* read-only */, EXI_INST_LEN },
	{ 0 /* read-only */, EXI_INST_INFO },

	/* 32-bit guest-state fields */
	{ MASK(32), GUEST_LIMIT_ES },
	{ MASK(32), GUEST_LIMIT_CS },
	{ MASK(32), GUEST_LIMIT_SS },
	{ MASK(32), GUEST_LIMIT_DS },
	{ MASK(32), GUEST_LIMIT_FS },
	{ MASK(32), GUEST_LIMIT_GS },
	{ MASK(32), GUEST_LIMIT_LDTR },
	{ MASK(32), GUEST_LIMIT_TR },
	{ MASK(32), GUEST_LIMIT_GDTR },
	{ MASK(32), GUEST_LIMIT_IDTR },
	/* Access-rights fields: mask covers only the defined AR bits. */
	{ 0x1d0ff, GUEST_AR_ES },
	{ 0x1f0ff, GUEST_AR_CS },
	{ 0x1d0ff, GUEST_AR_SS },
	{ 0x1d0ff, GUEST_AR_DS },
	{ 0x1d0ff, GUEST_AR_FS },
	{ 0x1d0ff, GUEST_AR_GS },
	{ 0x1d0ff, GUEST_AR_LDTR },
	{ 0x1d0ff, GUEST_AR_TR },
	{ MASK(32), GUEST_INTR_STATE },
	{ MASK(32), GUEST_ACTV_STATE },
	{ MASK(32), GUEST_SMBASE },
	{ MASK(32), GUEST_SYSENTER_CS },
	{ MASK(32), PREEMPT_TIMER_VALUE },

	/* 32-bit host-state field */
	{ MASK(32), HOST_SYSENTER_CS },

	/* Natural-width control fields */
	{ MASK_NATURAL, CR0_MASK },
	{ MASK_NATURAL, CR4_MASK },
	{ MASK_NATURAL, CR0_READ_SHADOW },
	{ MASK_NATURAL, CR4_READ_SHADOW },
	{ MASK_NATURAL, CR3_TARGET_0 },
	{ MASK_NATURAL, CR3_TARGET_1 },
	{ MASK_NATURAL, CR3_TARGET_2 },
	{ MASK_NATURAL, CR3_TARGET_3 },

	/* Natural-width read-only data fields */
	{ 0 /* read-only */, EXI_QUALIFICATION },
	{ 0 /* read-only */, IO_RCX },
	{ 0 /* read-only */, IO_RSI },
	{ 0 /* read-only */, IO_RDI },
	{ 0 /* read-only */, IO_RIP },
	{ 0 /* read-only */, GUEST_LINEAR_ADDRESS },

	/* Natural-width guest-state fields */
	{ MASK_NATURAL, GUEST_CR0 },
	{ MASK_NATURAL, GUEST_CR3 },
	{ MASK_NATURAL, GUEST_CR4 },
	{ MASK_NATURAL, GUEST_BASE_ES },
	{ MASK_NATURAL, GUEST_BASE_CS },
	{ MASK_NATURAL, GUEST_BASE_SS },
	{ MASK_NATURAL, GUEST_BASE_DS },
	{ MASK_NATURAL, GUEST_BASE_FS },
	{ MASK_NATURAL, GUEST_BASE_GS },
	{ MASK_NATURAL, GUEST_BASE_LDTR },
	{ MASK_NATURAL, GUEST_BASE_TR },
	{ MASK_NATURAL, GUEST_BASE_GDTR },
	{ MASK_NATURAL, GUEST_BASE_IDTR },
	{ MASK_NATURAL, GUEST_DR7 },
	{ MASK_NATURAL, GUEST_RSP },
	{ MASK_NATURAL, GUEST_RIP },
	{ MASK_NATURAL, GUEST_RFLAGS },
	{ MASK_NATURAL, GUEST_PENDING_DEBUG },
	{ MASK_NATURAL, GUEST_SYSENTER_ESP },
	{ MASK_NATURAL, GUEST_SYSENTER_EIP },

	/* Natural-width host-state fields */
	{ MASK_NATURAL, HOST_CR0 },
	{ MASK_NATURAL, HOST_CR3 },
	{ MASK_NATURAL, HOST_CR4 },
	{ MASK_NATURAL, HOST_BASE_FS },
	{ MASK_NATURAL, HOST_BASE_GS },
	{ MASK_NATURAL, HOST_BASE_TR },
	{ MASK_NATURAL, HOST_BASE_GDTR },
	{ MASK_NATURAL, HOST_BASE_IDTR },
	{ MASK_NATURAL, HOST_SYSENTER_ESP },
	{ MASK_NATURAL, HOST_SYSENTER_EIP },
	{ MASK_NATURAL, HOST_RSP },
	{ MASK_NATURAL, HOST_RIP },
};
242ecd5b431SDavid Matlack 
243ecd5b431SDavid Matlack static inline u64 vmcs_field_value(struct vmcs_field *f, u8 cookie)
244ecd5b431SDavid Matlack {
245ecd5b431SDavid Matlack 	u64 value;
246ecd5b431SDavid Matlack 
247ecd5b431SDavid Matlack 	/* Incorporate the cookie and the field encoding into the value. */
248ecd5b431SDavid Matlack 	value = cookie;
249ecd5b431SDavid Matlack 	value |= (f->encoding << 8);
250ecd5b431SDavid Matlack 	value |= 0xdeadbeefull << 32;
251ecd5b431SDavid Matlack 
252ecd5b431SDavid Matlack 	return value & f->mask;
253ecd5b431SDavid Matlack }
254ecd5b431SDavid Matlack 
255ecd5b431SDavid Matlack static void set_vmcs_field(struct vmcs_field *f, u8 cookie)
256ecd5b431SDavid Matlack {
257ecd5b431SDavid Matlack 	vmcs_write(f->encoding, vmcs_field_value(f, cookie));
258ecd5b431SDavid Matlack }
259ecd5b431SDavid Matlack 
260ecd5b431SDavid Matlack static bool check_vmcs_field(struct vmcs_field *f, u8 cookie)
261ecd5b431SDavid Matlack {
262ecd5b431SDavid Matlack 	u64 expected;
263ecd5b431SDavid Matlack 	u64 actual;
264ecd5b431SDavid Matlack 	int ret;
265ecd5b431SDavid Matlack 
266ecd5b431SDavid Matlack 	ret = vmcs_read_checking(f->encoding, &actual);
267ecd5b431SDavid Matlack 	assert(!(ret & X86_EFLAGS_CF));
268ecd5b431SDavid Matlack 	/* Skip VMCS fields that aren't recognized by the CPU */
269ecd5b431SDavid Matlack 	if (ret & X86_EFLAGS_ZF)
270ecd5b431SDavid Matlack 		return true;
271ecd5b431SDavid Matlack 
272ecd5b431SDavid Matlack 	expected = vmcs_field_value(f, cookie);
273ecd5b431SDavid Matlack 	actual &= f->mask;
274ecd5b431SDavid Matlack 
275ecd5b431SDavid Matlack 	if (expected == actual)
276ecd5b431SDavid Matlack 		return true;
277ecd5b431SDavid Matlack 
278d4ab68adSDavid Matlack 	printf("FAIL: VMWRITE/VMREAD %lx (expected: %lx, actual: %lx)\n",
279ecd5b431SDavid Matlack 	       f->encoding, (unsigned long) expected, (unsigned long) actual);
280ecd5b431SDavid Matlack 
281ecd5b431SDavid Matlack 	return false;
282ecd5b431SDavid Matlack }
283ecd5b431SDavid Matlack 
284ecd5b431SDavid Matlack static void set_all_vmcs_fields(u8 cookie)
285ecd5b431SDavid Matlack {
286ecd5b431SDavid Matlack 	int i;
287ecd5b431SDavid Matlack 
288ecd5b431SDavid Matlack 	for (i = 0; i < ARRAY_SIZE(vmcs_fields); i++)
289ecd5b431SDavid Matlack 		set_vmcs_field(&vmcs_fields[i], cookie);
290ecd5b431SDavid Matlack }
291ecd5b431SDavid Matlack 
292ecd5b431SDavid Matlack static bool check_all_vmcs_fields(u8 cookie)
293ecd5b431SDavid Matlack {
294ecd5b431SDavid Matlack 	bool pass = true;
295ecd5b431SDavid Matlack 	int i;
296ecd5b431SDavid Matlack 
297ecd5b431SDavid Matlack 	for (i = 0; i < ARRAY_SIZE(vmcs_fields); i++) {
298ecd5b431SDavid Matlack 		if (!check_vmcs_field(&vmcs_fields[i], cookie))
299ecd5b431SDavid Matlack 			pass = false;
300ecd5b431SDavid Matlack 	}
301ecd5b431SDavid Matlack 
302ecd5b431SDavid Matlack 	return pass;
303ecd5b431SDavid Matlack }
304ecd5b431SDavid Matlack 
305ecd5b431SDavid Matlack void test_vmwrite_vmread(void)
306ecd5b431SDavid Matlack {
307ecd5b431SDavid Matlack 	struct vmcs *vmcs = alloc_page();
308ecd5b431SDavid Matlack 
309ecd5b431SDavid Matlack 	memset(vmcs, 0, PAGE_SIZE);
310ecd5b431SDavid Matlack 	vmcs->revision_id = basic.revision;
311ecd5b431SDavid Matlack 	assert(!vmcs_clear(vmcs));
312ecd5b431SDavid Matlack 	assert(!make_vmcs_current(vmcs));
313ecd5b431SDavid Matlack 
314ecd5b431SDavid Matlack 	set_all_vmcs_fields(0x42);
315ecd5b431SDavid Matlack 	report("VMWRITE/VMREAD", check_all_vmcs_fields(0x42));
316ecd5b431SDavid Matlack 
317ecd5b431SDavid Matlack 	assert(!vmcs_clear(vmcs));
318ecd5b431SDavid Matlack 	free_page(vmcs);
319ecd5b431SDavid Matlack }
320ecd5b431SDavid Matlack 
/*
 * Exercise the VMCS activity lifecycle: VMCLEAR/VMPTRLD sequences over
 * two VMCSs, verifying after each step that VMWRITEs stick to the right
 * VMCS regardless of which one is current/active.  (The "VCMS" spelling
 * in some report strings is historical and left as-is.)
 */
void test_vmcs_lifecycle(void)
{
	struct vmcs *vmcs[2] = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		vmcs[i] = alloc_page();
		memset(vmcs[i], 0, PAGE_SIZE);
		vmcs[i]->revision_id = basic.revision;
	}

/* Make vmcs[_i] the current VMCS, asserting success. */
#define VMPTRLD(_i) do { \
	assert(_i < ARRAY_SIZE(vmcs)); \
	assert(!make_vmcs_current(vmcs[_i])); \
	printf("VMPTRLD VMCS%d\n", (_i)); \
} while (0)

/* VMCLEAR vmcs[_i] (flush + deactivate), asserting success. */
#define VMCLEAR(_i) do { \
	assert(_i < ARRAY_SIZE(vmcs)); \
	assert(!vmcs_clear(vmcs[_i])); \
	printf("VMCLEAR VMCS%d\n", (_i)); \
} while (0)

	VMCLEAR(0);
	VMPTRLD(0);
	set_all_vmcs_fields(0);
	report("current:VMCS0 active:[VMCS0]", check_all_vmcs_fields(0));

	/* VMCLEAR + reload of the same VMCS must preserve its contents. */
	VMCLEAR(0);
	VMPTRLD(0);
	report("current:VMCS0 active:[VMCS0]", check_all_vmcs_fields(0));

	/* Clearing the non-current VMCS must not disturb the current one. */
	VMCLEAR(1);
	report("current:VMCS0 active:[VMCS0]", check_all_vmcs_fields(0));

	VMPTRLD(1);
	set_all_vmcs_fields(1);
	report("current:VMCS1 active:[VMCS0,VCMS1]", check_all_vmcs_fields(1));

	/* Switching between two active VMCSs preserves each one's fields. */
	VMPTRLD(0);
	report("current:VMCS0 active:[VMCS0,VCMS1]", check_all_vmcs_fields(0));
	VMPTRLD(1);
	report("current:VMCS1 active:[VMCS0,VCMS1]", check_all_vmcs_fields(1));
	VMPTRLD(1);
	report("current:VMCS1 active:[VMCS0,VCMS1]", check_all_vmcs_fields(1));

	VMCLEAR(0);
	report("current:VMCS1 active:[VCMS1]", check_all_vmcs_fields(1));

	/* VMPTRLD should not erase VMWRITEs to the current VMCS */
	set_all_vmcs_fields(2);
	VMPTRLD(1);
	report("current:VMCS1 active:[VCMS1]", check_all_vmcs_fields(2));

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		VMCLEAR(i);
		free_page(vmcs[i]);
	}

#undef VMPTRLD
#undef VMCLEAR
}
3836b72cf76SDavid Matlack 
384ffb1a9e0SJan Kiszka void vmx_set_test_stage(u32 s)
385ffb1a9e0SJan Kiszka {
386ffb1a9e0SJan Kiszka 	barrier();
387ffb1a9e0SJan Kiszka 	stage = s;
388ffb1a9e0SJan Kiszka 	barrier();
389ffb1a9e0SJan Kiszka }
390ffb1a9e0SJan Kiszka 
391ffb1a9e0SJan Kiszka u32 vmx_get_test_stage(void)
392ffb1a9e0SJan Kiszka {
393ffb1a9e0SJan Kiszka 	u32 s;
394ffb1a9e0SJan Kiszka 
395ffb1a9e0SJan Kiszka 	barrier();
396ffb1a9e0SJan Kiszka 	s = stage;
397ffb1a9e0SJan Kiszka 	barrier();
398ffb1a9e0SJan Kiszka 	return s;
399ffb1a9e0SJan Kiszka }
400ffb1a9e0SJan Kiszka 
401ffb1a9e0SJan Kiszka void vmx_inc_test_stage(void)
402ffb1a9e0SJan Kiszka {
403ffb1a9e0SJan Kiszka 	barrier();
404ffb1a9e0SJan Kiszka 	stage++;
405ffb1a9e0SJan Kiszka 	barrier();
406ffb1a9e0SJan Kiszka }
407ffb1a9e0SJan Kiszka 
/*
 * entry_sysenter: assembly entry point for SYSENTER from the guest
 * (exported via the extern declaration above).  Saves the guest GPRs,
 * passes the low nibble of RAX to syscall_handler() as the syscall
 * number, restores the GPRs and resumes the guest with VMRESUME.
 */
asm(
	".align	4, 0x90\n\t"
	".globl	entry_sysenter\n\t"
	"entry_sysenter:\n\t"
	SAVE_GPR
	"	and	$0xf, %rax\n\t"
	"	mov	%rax, %rdi\n\t"
	"	call	syscall_handler\n\t"
	LOAD_GPR
	"	vmresume\n\t"
);
4209d7eaa29SArthur Chunqi Li 
4219d7eaa29SArthur Chunqi Li static void __attribute__((__used__)) syscall_handler(u64 syscall_no)
4229d7eaa29SArthur Chunqi Li {
423d5315e3dSJan Kiszka 	if (current->syscall_handler)
4249d7eaa29SArthur Chunqi Li 		current->syscall_handler(syscall_no);
4259d7eaa29SArthur Chunqi Li }
4269d7eaa29SArthur Chunqi Li 
/*
 * Execute VMXON on vmxon_region.  CF and ZF are deliberately set in
 * RFLAGS before the instruction, so "setbe" (which tests CF|ZF) reports
 * failure unless VMXON itself cleared both flags; returns 0 on success,
 * non-zero on VMfailInvalid/VMfailValid.
 */
static inline int vmx_on()
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
	asm volatile ("push %1; popf; vmxon %2; setbe %0\n\t"
		      : "=q" (ret) : "q" (rflags), "m" (vmxon_region) : "cc");
	return ret;
}
4359d7eaa29SArthur Chunqi Li 
/*
 * Execute VMXOFF.  As in vmx_on(), CF and ZF are preset so that "setbe"
 * only reports success if VMXOFF actually cleared them; returns 0 on
 * success, non-zero on failure.
 */
static inline int vmx_off()
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile("push %1; popf; vmxoff; setbe %0\n\t"
		     : "=q"(ret) : "q" (rflags) : "cc");
	return ret;
}
4459d7eaa29SArthur Chunqi Li 
/*
 * Human-readable names for VM-exit basic reason numbers, indexed by
 * reason.  Gaps (reasons with no entry) read back as NULL and are
 * rendered as "(unused)" by exit_reason_description().
 */
static const char * const exit_reason_descriptions[] = {
	[VMX_EXC_NMI]		= "VMX_EXC_NMI",
	[VMX_EXTINT]		= "VMX_EXTINT",
	[VMX_TRIPLE_FAULT]	= "VMX_TRIPLE_FAULT",
	[VMX_INIT]		= "VMX_INIT",
	[VMX_SIPI]		= "VMX_SIPI",
	[VMX_SMI_IO]		= "VMX_SMI_IO",
	[VMX_SMI_OTHER]		= "VMX_SMI_OTHER",
	[VMX_INTR_WINDOW]	= "VMX_INTR_WINDOW",
	[VMX_NMI_WINDOW]	= "VMX_NMI_WINDOW",
	[VMX_TASK_SWITCH]	= "VMX_TASK_SWITCH",
	[VMX_CPUID]		= "VMX_CPUID",
	[VMX_GETSEC]		= "VMX_GETSEC",
	[VMX_HLT]		= "VMX_HLT",
	[VMX_INVD]		= "VMX_INVD",
	[VMX_INVLPG]		= "VMX_INVLPG",
	[VMX_RDPMC]		= "VMX_RDPMC",
	[VMX_RDTSC]		= "VMX_RDTSC",
	[VMX_RSM]		= "VMX_RSM",
	[VMX_VMCALL]		= "VMX_VMCALL",
	[VMX_VMCLEAR]		= "VMX_VMCLEAR",
	[VMX_VMLAUNCH]		= "VMX_VMLAUNCH",
	[VMX_VMPTRLD]		= "VMX_VMPTRLD",
	[VMX_VMPTRST]		= "VMX_VMPTRST",
	[VMX_VMREAD]		= "VMX_VMREAD",
	[VMX_VMRESUME]		= "VMX_VMRESUME",
	[VMX_VMWRITE]		= "VMX_VMWRITE",
	[VMX_VMXOFF]		= "VMX_VMXOFF",
	[VMX_VMXON]		= "VMX_VMXON",
	[VMX_CR]		= "VMX_CR",
	[VMX_DR]		= "VMX_DR",
	[VMX_IO]		= "VMX_IO",
	[VMX_RDMSR]		= "VMX_RDMSR",
	[VMX_WRMSR]		= "VMX_WRMSR",
	[VMX_FAIL_STATE]	= "VMX_FAIL_STATE",
	[VMX_FAIL_MSR]		= "VMX_FAIL_MSR",
	[VMX_MWAIT]		= "VMX_MWAIT",
	[VMX_MTF]		= "VMX_MTF",
	[VMX_MONITOR]		= "VMX_MONITOR",
	[VMX_PAUSE]		= "VMX_PAUSE",
	[VMX_FAIL_MCHECK]	= "VMX_FAIL_MCHECK",
	[VMX_TPR_THRESHOLD]	= "VMX_TPR_THRESHOLD",
	[VMX_APIC_ACCESS]	= "VMX_APIC_ACCESS",
	[VMX_GDTR_IDTR]		= "VMX_GDTR_IDTR",
	[VMX_LDTR_TR]		= "VMX_LDTR_TR",
	[VMX_EPT_VIOLATION]	= "VMX_EPT_VIOLATION",
	[VMX_EPT_MISCONFIG]	= "VMX_EPT_MISCONFIG",
	[VMX_INVEPT]		= "VMX_INVEPT",
	[VMX_PREEMPT]		= "VMX_PREEMPT",
	[VMX_INVVPID]		= "VMX_INVVPID",
	[VMX_WBINVD]		= "VMX_WBINVD",
	[VMX_XSETBV]		= "VMX_XSETBV",
	[VMX_APIC_WRITE]	= "VMX_APIC_WRITE",
	[VMX_RDRAND]		= "VMX_RDRAND",
	[VMX_INVPCID]		= "VMX_INVPCID",
	[VMX_VMFUNC]		= "VMX_VMFUNC",
	[VMX_RDSEED]		= "VMX_RDSEED",
	[VMX_PML_FULL]		= "VMX_PML_FULL",
	[VMX_XSAVES]		= "VMX_XSAVES",
	[VMX_XRSTORS]		= "VMX_XRSTORS",
};
5077e207ec1SPeter Feiner 
5087e207ec1SPeter Feiner const char *exit_reason_description(u64 reason)
5097e207ec1SPeter Feiner {
5107e207ec1SPeter Feiner 	if (reason >= ARRAY_SIZE(exit_reason_descriptions))
5117e207ec1SPeter Feiner 		return "(unknown)";
5127e207ec1SPeter Feiner 	return exit_reason_descriptions[reason] ? : "(unused)";
5137e207ec1SPeter Feiner }
5147e207ec1SPeter Feiner 
/*
 * Dump diagnostic state for the most recent VM exit: basic exit reason,
 * qualification, bit 31 of the exit reason (entry-failure flag), guest
 * RIP, and the guest GPRs saved in "regs" (RSP comes from the VMCS).
 */
void print_vmexit_info()
{
	u64 guest_rip, guest_rsp;
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	ulong exit_qual = vmcs_read(EXI_QUALIFICATION);
	guest_rip = vmcs_read(GUEST_RIP);
	guest_rsp = vmcs_read(GUEST_RSP);
	printf("VMEXIT info:\n");
	printf("\tvmexit reason = %ld\n", reason);
	printf("\texit qualification = 0x%lx\n", exit_qual);
	printf("\tBit 31 of reason = %lx\n", (vmcs_read(EXI_REASON) >> 31) & 1);
	printf("\tguest_rip = 0x%lx\n", guest_rip);
	printf("\tRAX=0x%lx    RBX=0x%lx    RCX=0x%lx    RDX=0x%lx\n",
		regs.rax, regs.rbx, regs.rcx, regs.rdx);
	printf("\tRSP=0x%lx    RBP=0x%lx    RSI=0x%lx    RDI=0x%lx\n",
		guest_rsp, regs.rbp, regs.rsi, regs.rdi);
	printf("\tR8 =0x%lx    R9 =0x%lx    R10=0x%lx    R11=0x%lx\n",
		regs.r8, regs.r9, regs.r10, regs.r11);
	printf("\tR12=0x%lx    R13=0x%lx    R14=0x%lx    R15=0x%lx\n",
		regs.r12, regs.r13, regs.r14, regs.r15);
}
5369d7eaa29SArthur Chunqi Li 
/*
 * Explain a failed VM entry.  "Early" failures are VMLAUNCH/VMRESUME
 * instruction failures reported via RFLAGS (CF: no valid current VMCS;
 * ZF: VMfailValid, error code in VMX_INST_ERROR).  Late failures are
 * entries that started but exited immediately; their basic exit reason
 * should be one of the VMX_FAIL_* values with VMX_ENTRY_FAILURE set.
 */
void
print_vmentry_failure_info(struct vmentry_failure *failure) {
	if (failure->early) {
		printf("Early %s failure: ", failure->instr);
		switch (failure->flags & VMX_ENTRY_FLAGS) {
		case X86_EFLAGS_CF:
			printf("current-VMCS pointer is not valid.\n");
			break;
		case X86_EFLAGS_ZF:
			printf("error number is %ld. See Intel 30.4.\n",
			       vmcs_read(VMX_INST_ERROR));
			break;
		default:
			printf("unexpected flags %lx!\n", failure->flags);
		}
	} else {
		u64 reason = vmcs_read(EXI_REASON);
		u64 qual = vmcs_read(EXI_QUALIFICATION);

		printf("Non-early %s failure (reason=0x%lx, qual=0x%lx): ",
			failure->instr, reason, qual);

		switch (reason & 0xff) {
		case VMX_FAIL_STATE:
			printf("invalid guest state\n");
			break;
		case VMX_FAIL_MSR:
			printf("MSR loading\n");
			break;
		case VMX_FAIL_MCHECK:
			printf("machine-check event\n");
			break;
		default:
			printf("unexpected basic exit reason %ld\n",
			       reason & 0xff);
		}

		if (!(reason & VMX_ENTRY_FAILURE))
			printf("\tVMX_ENTRY_FAILURE BIT NOT SET!\n");

		if (reason & 0x7fff0000)
			printf("\tRESERVED BITS SET!\n");
	}
}
5813b50efe3SPeter Feiner 
/*
 * VMCLEAR should ensure that all VMCS state is flushed to the VMCS
 * region in memory.
 */
static void test_vmclear_flushing(void)
{
	struct vmcs *vmcs[3] = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		vmcs[i] = alloc_page();
		memset(vmcs[i], 0, PAGE_SIZE);
	}

	/* Populate vmcs[0] with a known pattern, then VMCLEAR it. */
	vmcs[0]->revision_id = basic.revision;
	assert(!vmcs_clear(vmcs[0]));
	assert(!make_vmcs_current(vmcs[0]));
	set_all_vmcs_fields(0x86);

	/*
	 * After VMCLEAR the in-memory region must contain the full state:
	 * a byte copy of vmcs[0] loaded as vmcs[1] must check out.
	 */
	assert(!vmcs_clear(vmcs[0]));
	memcpy(vmcs[1], vmcs[0], basic.size);
	assert(!make_vmcs_current(vmcs[1]));
	report("test vmclear flush (current VMCS)", check_all_vmcs_fields(0x86));

	/*
	 * Same check for VMCLEAR of a VMCS that is NOT current: write a
	 * new pattern to vmcs[1], make vmcs[0] current, clear vmcs[1],
	 * and verify a byte copy of it (vmcs[2]) still checks out.
	 */
	set_all_vmcs_fields(0x87);
	assert(!make_vmcs_current(vmcs[0]));
	assert(!vmcs_clear(vmcs[1]));
	memcpy(vmcs[2], vmcs[1], basic.size);
	assert(!make_vmcs_current(vmcs[2]));
	report("test vmclear flush (!current VMCS)", check_all_vmcs_fields(0x87));

	for (i = 0; i < ARRAY_SIZE(vmcs); i++) {
		assert(!vmcs_clear(vmcs[i]));
		free_page(vmcs[i]);
	}
}
6183b50efe3SPeter Feiner 
6199d7eaa29SArthur Chunqi Li static void test_vmclear(void)
6209d7eaa29SArthur Chunqi Li {
621daeec979SBandan Das 	struct vmcs *tmp_root;
622e2cf1c9dSEduardo Habkost 	int width = cpuid_maxphyaddr();
623daeec979SBandan Das 
624daeec979SBandan Das 	/*
625daeec979SBandan Das 	 * Note- The tests below do not necessarily have a
626daeec979SBandan Das 	 * valid VMCS, but that's ok since the invalid vmcs
627daeec979SBandan Das 	 * is only used for a specific test and is discarded
628daeec979SBandan Das 	 * without touching its contents
629daeec979SBandan Das 	 */
630daeec979SBandan Das 
631daeec979SBandan Das 	/* Unaligned page access */
632daeec979SBandan Das 	tmp_root = (struct vmcs *)((intptr_t)vmcs_root + 1);
633daeec979SBandan Das 	report("test vmclear with unaligned vmcs",
634daeec979SBandan Das 	       vmcs_clear(tmp_root) == 1);
635daeec979SBandan Das 
636daeec979SBandan Das 	/* gpa bits beyond physical address width are set*/
637daeec979SBandan Das 	tmp_root = (struct vmcs *)((intptr_t)vmcs_root |
638daeec979SBandan Das 				   ((u64)1 << (width+1)));
639daeec979SBandan Das 	report("test vmclear with vmcs address bits set beyond physical address width",
640daeec979SBandan Das 	       vmcs_clear(tmp_root) == 1);
641daeec979SBandan Das 
642daeec979SBandan Das 	/* Pass VMXON region */
643daeec979SBandan Das 	tmp_root = (struct vmcs *)vmxon_region;
644daeec979SBandan Das 	report("test vmclear with vmxon region",
645daeec979SBandan Das 	       vmcs_clear(tmp_root) == 1);
646daeec979SBandan Das 
647daeec979SBandan Das 	/* Valid VMCS */
648daeec979SBandan Das 	report("test vmclear with valid vmcs region", vmcs_clear(vmcs_root) == 0);
649daeec979SBandan Das 
6502f6828d7SDavid Matlack 	test_vmclear_flushing();
6519d7eaa29SArthur Chunqi Li }
6529d7eaa29SArthur Chunqi Li 
6539d7eaa29SArthur Chunqi Li static void __attribute__((__used__)) guest_main(void)
6549d7eaa29SArthur Chunqi Li {
655*794c67a9SPeter Feiner 	if (current->v2)
656*794c67a9SPeter Feiner 		v2_guest_main();
657*794c67a9SPeter Feiner 	else
6589d7eaa29SArthur Chunqi Li 		current->guest_main();
6599d7eaa29SArthur Chunqi Li }
6609d7eaa29SArthur Chunqi Li 
6619d7eaa29SArthur Chunqi Li /* guest_entry */
6629d7eaa29SArthur Chunqi Li asm(
6639d7eaa29SArthur Chunqi Li 	".align	4, 0x90\n\t"
6649d7eaa29SArthur Chunqi Li 	".globl	entry_guest\n\t"
6659d7eaa29SArthur Chunqi Li 	"guest_entry:\n\t"
6669d7eaa29SArthur Chunqi Li 	"	call guest_main\n\t"
6679d7eaa29SArthur Chunqi Li 	"	mov $1, %edi\n\t"
6689d7eaa29SArthur Chunqi Li 	"	call hypercall\n\t"
6699d7eaa29SArthur Chunqi Li );
6709d7eaa29SArthur Chunqi Li 
6716884af61SArthur Chunqi Li /* EPT paging structure related functions */
67269c531c8SPeter Feiner /* split_large_ept_entry: Split a 2M/1G large page into 512 smaller PTEs.
67369c531c8SPeter Feiner 		@ptep : large page table entry to split
67469c531c8SPeter Feiner 		@level : level of ptep (2 or 3)
67569c531c8SPeter Feiner  */
67669c531c8SPeter Feiner static void split_large_ept_entry(unsigned long *ptep, int level)
67769c531c8SPeter Feiner {
67869c531c8SPeter Feiner 	unsigned long *new_pt;
67969c531c8SPeter Feiner 	unsigned long gpa;
68069c531c8SPeter Feiner 	unsigned long pte;
68169c531c8SPeter Feiner 	unsigned long prototype;
68269c531c8SPeter Feiner 	int i;
68369c531c8SPeter Feiner 
68469c531c8SPeter Feiner 	pte = *ptep;
68569c531c8SPeter Feiner 	assert(pte & EPT_PRESENT);
68669c531c8SPeter Feiner 	assert(pte & EPT_LARGE_PAGE);
68769c531c8SPeter Feiner 	assert(level == 2 || level == 3);
68869c531c8SPeter Feiner 
68969c531c8SPeter Feiner 	new_pt = alloc_page();
69069c531c8SPeter Feiner 	assert(new_pt);
69169c531c8SPeter Feiner 	memset(new_pt, 0, PAGE_SIZE);
69269c531c8SPeter Feiner 
69369c531c8SPeter Feiner 	prototype = pte & ~EPT_ADDR_MASK;
69469c531c8SPeter Feiner 	if (level == 2)
69569c531c8SPeter Feiner 		prototype &= ~EPT_LARGE_PAGE;
69669c531c8SPeter Feiner 
69769c531c8SPeter Feiner 	gpa = pte & EPT_ADDR_MASK;
69869c531c8SPeter Feiner 	for (i = 0; i < EPT_PGDIR_ENTRIES; i++) {
69969c531c8SPeter Feiner 		new_pt[i] = prototype | gpa;
70069c531c8SPeter Feiner 		gpa += 1ul << EPT_LEVEL_SHIFT(level - 1);
70169c531c8SPeter Feiner 	}
70269c531c8SPeter Feiner 
70369c531c8SPeter Feiner 	pte &= ~EPT_LARGE_PAGE;
70469c531c8SPeter Feiner 	pte &= ~EPT_ADDR_MASK;
70569c531c8SPeter Feiner 	pte |= virt_to_phys(new_pt);
70669c531c8SPeter Feiner 
70769c531c8SPeter Feiner 	*ptep = pte;
70869c531c8SPeter Feiner }
70969c531c8SPeter Feiner 
7106884af61SArthur Chunqi Li /* install_ept_entry : Install a page to a given level in EPT
7116884af61SArthur Chunqi Li 		@pml4 : addr of pml4 table
7126884af61SArthur Chunqi Li 		@pte_level : level of PTE to set
7136884af61SArthur Chunqi Li 		@guest_addr : physical address of guest
7146884af61SArthur Chunqi Li 		@pte : pte value to set
7156884af61SArthur Chunqi Li 		@pt_page : address of page table, NULL for a new page
7166884af61SArthur Chunqi Li  */
7176884af61SArthur Chunqi Li void install_ept_entry(unsigned long *pml4,
7186884af61SArthur Chunqi Li 		int pte_level,
7196884af61SArthur Chunqi Li 		unsigned long guest_addr,
7206884af61SArthur Chunqi Li 		unsigned long pte,
7216884af61SArthur Chunqi Li 		unsigned long *pt_page)
7226884af61SArthur Chunqi Li {
7236884af61SArthur Chunqi Li 	int level;
7246884af61SArthur Chunqi Li 	unsigned long *pt = pml4;
7256884af61SArthur Chunqi Li 	unsigned offset;
7266884af61SArthur Chunqi Li 
7276884af61SArthur Chunqi Li 	for (level = EPT_PAGE_LEVEL; level > pte_level; --level) {
728a969e087SPeter Feiner 		offset = (guest_addr >> EPT_LEVEL_SHIFT(level))
7296884af61SArthur Chunqi Li 				& EPT_PGDIR_MASK;
7306884af61SArthur Chunqi Li 		if (!(pt[offset] & (EPT_PRESENT))) {
7316884af61SArthur Chunqi Li 			unsigned long *new_pt = pt_page;
7326884af61SArthur Chunqi Li 			if (!new_pt)
7336884af61SArthur Chunqi Li 				new_pt = alloc_page();
7346884af61SArthur Chunqi Li 			else
7356884af61SArthur Chunqi Li 				pt_page = 0;
7366884af61SArthur Chunqi Li 			memset(new_pt, 0, PAGE_SIZE);
7376884af61SArthur Chunqi Li 			pt[offset] = virt_to_phys(new_pt)
7386884af61SArthur Chunqi Li 					| EPT_RA | EPT_WA | EPT_EA;
73969c531c8SPeter Feiner 		} else if (pt[offset] & EPT_LARGE_PAGE)
74069c531c8SPeter Feiner 			split_large_ept_entry(&pt[offset], level);
74100b5c590SPeter Feiner 		pt = phys_to_virt(pt[offset] & EPT_ADDR_MASK);
7426884af61SArthur Chunqi Li 	}
743a969e087SPeter Feiner 	offset = (guest_addr >> EPT_LEVEL_SHIFT(level)) & EPT_PGDIR_MASK;
7446884af61SArthur Chunqi Li 	pt[offset] = pte;
7456884af61SArthur Chunqi Li }
7466884af61SArthur Chunqi Li 
7476884af61SArthur Chunqi Li /* Map a page, @perm is the permission of the page */
7486884af61SArthur Chunqi Li void install_ept(unsigned long *pml4,
7496884af61SArthur Chunqi Li 		unsigned long phys,
7506884af61SArthur Chunqi Li 		unsigned long guest_addr,
7516884af61SArthur Chunqi Li 		u64 perm)
7526884af61SArthur Chunqi Li {
7536884af61SArthur Chunqi Li 	install_ept_entry(pml4, 1, guest_addr, (phys & PAGE_MASK) | perm, 0);
7546884af61SArthur Chunqi Li }
7556884af61SArthur Chunqi Li 
7566884af61SArthur Chunqi Li /* Map a 1G-size page */
7576884af61SArthur Chunqi Li void install_1g_ept(unsigned long *pml4,
7586884af61SArthur Chunqi Li 		unsigned long phys,
7596884af61SArthur Chunqi Li 		unsigned long guest_addr,
7606884af61SArthur Chunqi Li 		u64 perm)
7616884af61SArthur Chunqi Li {
7626884af61SArthur Chunqi Li 	install_ept_entry(pml4, 3, guest_addr,
7636884af61SArthur Chunqi Li 			(phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
7646884af61SArthur Chunqi Li }
7656884af61SArthur Chunqi Li 
7666884af61SArthur Chunqi Li /* Map a 2M-size page */
7676884af61SArthur Chunqi Li void install_2m_ept(unsigned long *pml4,
7686884af61SArthur Chunqi Li 		unsigned long phys,
7696884af61SArthur Chunqi Li 		unsigned long guest_addr,
7706884af61SArthur Chunqi Li 		u64 perm)
7716884af61SArthur Chunqi Li {
7726884af61SArthur Chunqi Li 	install_ept_entry(pml4, 2, guest_addr,
7736884af61SArthur Chunqi Li 			(phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
7746884af61SArthur Chunqi Li }
7756884af61SArthur Chunqi Li 
7766884af61SArthur Chunqi Li /* setup_ept_range : Setup a range of 1:1 mapped page to EPT paging structure.
7776884af61SArthur Chunqi Li 		@start : start address of guest page
7786884af61SArthur Chunqi Li 		@len : length of address to be mapped
7796884af61SArthur Chunqi Li 		@map_1g : whether 1G page map is used
7806884af61SArthur Chunqi Li 		@map_2m : whether 2M page map is used
7816884af61SArthur Chunqi Li 		@perm : permission for every page
7826884af61SArthur Chunqi Li  */
783b947e241SJan Kiszka void setup_ept_range(unsigned long *pml4, unsigned long start,
7846884af61SArthur Chunqi Li 		     unsigned long len, int map_1g, int map_2m, u64 perm)
7856884af61SArthur Chunqi Li {
7866884af61SArthur Chunqi Li 	u64 phys = start;
7876884af61SArthur Chunqi Li 	u64 max = (u64)len + (u64)start;
7886884af61SArthur Chunqi Li 
7896884af61SArthur Chunqi Li 	if (map_1g) {
7906884af61SArthur Chunqi Li 		while (phys + PAGE_SIZE_1G <= max) {
7916884af61SArthur Chunqi Li 			install_1g_ept(pml4, phys, phys, perm);
7926884af61SArthur Chunqi Li 			phys += PAGE_SIZE_1G;
7936884af61SArthur Chunqi Li 		}
7946884af61SArthur Chunqi Li 	}
7956884af61SArthur Chunqi Li 	if (map_2m) {
7966884af61SArthur Chunqi Li 		while (phys + PAGE_SIZE_2M <= max) {
7976884af61SArthur Chunqi Li 			install_2m_ept(pml4, phys, phys, perm);
7986884af61SArthur Chunqi Li 			phys += PAGE_SIZE_2M;
7996884af61SArthur Chunqi Li 		}
8006884af61SArthur Chunqi Li 	}
8016884af61SArthur Chunqi Li 	while (phys + PAGE_SIZE <= max) {
8026884af61SArthur Chunqi Li 		install_ept(pml4, phys, phys, perm);
8036884af61SArthur Chunqi Li 		phys += PAGE_SIZE;
8046884af61SArthur Chunqi Li 	}
8056884af61SArthur Chunqi Li }
8066884af61SArthur Chunqi Li 
8076884af61SArthur Chunqi Li /* get_ept_pte : Get the PTE of a given level in EPT,
8086884af61SArthur Chunqi Li     @level == 1 means get the latest level*/
8096884af61SArthur Chunqi Li unsigned long get_ept_pte(unsigned long *pml4,
8106884af61SArthur Chunqi Li 		unsigned long guest_addr, int level)
8116884af61SArthur Chunqi Li {
8126884af61SArthur Chunqi Li 	int l;
8136884af61SArthur Chunqi Li 	unsigned long *pt = pml4, pte;
8146884af61SArthur Chunqi Li 	unsigned offset;
8156884af61SArthur Chunqi Li 
8162ca6f1f3SPaolo Bonzini 	if (level < 1 || level > 3)
8172ca6f1f3SPaolo Bonzini 		return -1;
8182ca6f1f3SPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
819a969e087SPeter Feiner 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
8206884af61SArthur Chunqi Li 		pte = pt[offset];
8216884af61SArthur Chunqi Li 		if (!(pte & (EPT_PRESENT)))
8226884af61SArthur Chunqi Li 			return 0;
8236884af61SArthur Chunqi Li 		if (l == level)
8242ca6f1f3SPaolo Bonzini 			break;
8256884af61SArthur Chunqi Li 		if (l < 4 && (pte & EPT_LARGE_PAGE))
8266884af61SArthur Chunqi Li 			return pte;
82700b5c590SPeter Feiner 		pt = (unsigned long *)(pte & EPT_ADDR_MASK);
8286884af61SArthur Chunqi Li 	}
829a969e087SPeter Feiner 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
8306884af61SArthur Chunqi Li 	pte = pt[offset];
8316884af61SArthur Chunqi Li 	return pte;
8326884af61SArthur Chunqi Li }
8336884af61SArthur Chunqi Li 
834521820dbSPaolo Bonzini static void clear_ept_ad_pte(unsigned long *pml4, unsigned long guest_addr)
835521820dbSPaolo Bonzini {
836521820dbSPaolo Bonzini 	int l;
837521820dbSPaolo Bonzini 	unsigned long *pt = pml4;
838521820dbSPaolo Bonzini 	u64 pte;
839521820dbSPaolo Bonzini 	unsigned offset;
840521820dbSPaolo Bonzini 
841521820dbSPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
842521820dbSPaolo Bonzini 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
843521820dbSPaolo Bonzini 		pt[offset] &= ~(EPT_ACCESS_FLAG|EPT_DIRTY_FLAG);
844521820dbSPaolo Bonzini 		pte = pt[offset];
845521820dbSPaolo Bonzini 		if (l == 1 || (l < 4 && (pte & EPT_LARGE_PAGE)))
846521820dbSPaolo Bonzini 			break;
847521820dbSPaolo Bonzini 		pt = (unsigned long *)(pte & EPT_ADDR_MASK);
848521820dbSPaolo Bonzini 	}
849521820dbSPaolo Bonzini }
850521820dbSPaolo Bonzini 
851521820dbSPaolo Bonzini /* clear_ept_ad : Clear EPT A/D bits for the page table walk and the
852521820dbSPaolo Bonzini    final GPA of a guest address.  */
853521820dbSPaolo Bonzini void clear_ept_ad(unsigned long *pml4, u64 guest_cr3,
854521820dbSPaolo Bonzini 		  unsigned long guest_addr)
855521820dbSPaolo Bonzini {
856521820dbSPaolo Bonzini 	int l;
857521820dbSPaolo Bonzini 	unsigned long *pt = (unsigned long *)guest_cr3, gpa;
858521820dbSPaolo Bonzini 	u64 pte, offset_in_page;
859521820dbSPaolo Bonzini 	unsigned offset;
860521820dbSPaolo Bonzini 
861521820dbSPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
862521820dbSPaolo Bonzini 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
863521820dbSPaolo Bonzini 
864521820dbSPaolo Bonzini 		clear_ept_ad_pte(pml4, (u64) &pt[offset]);
865521820dbSPaolo Bonzini 		pte = pt[offset];
866521820dbSPaolo Bonzini 		if (l == 1 || (l < 4 && (pte & PT_PAGE_SIZE_MASK)))
867521820dbSPaolo Bonzini 			break;
868521820dbSPaolo Bonzini 		if (!(pte & PT_PRESENT_MASK))
869521820dbSPaolo Bonzini 			return;
870521820dbSPaolo Bonzini 		pt = (unsigned long *)(pte & PT_ADDR_MASK);
871521820dbSPaolo Bonzini 	}
872521820dbSPaolo Bonzini 
873521820dbSPaolo Bonzini 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
874521820dbSPaolo Bonzini 	offset_in_page = guest_addr & ((1 << EPT_LEVEL_SHIFT(l)) - 1);
875521820dbSPaolo Bonzini 	gpa = (pt[offset] & PT_ADDR_MASK) | (guest_addr & offset_in_page);
876521820dbSPaolo Bonzini 	clear_ept_ad_pte(pml4, gpa);
877521820dbSPaolo Bonzini }
878521820dbSPaolo Bonzini 
879521820dbSPaolo Bonzini /* check_ept_ad : Check the content of EPT A/D bits for the page table
880521820dbSPaolo Bonzini    walk and the final GPA of a guest address.  */
881521820dbSPaolo Bonzini void check_ept_ad(unsigned long *pml4, u64 guest_cr3,
882521820dbSPaolo Bonzini 		  unsigned long guest_addr, int expected_gpa_ad,
883521820dbSPaolo Bonzini 		  int expected_pt_ad)
884521820dbSPaolo Bonzini {
885521820dbSPaolo Bonzini 	int l;
886521820dbSPaolo Bonzini 	unsigned long *pt = (unsigned long *)guest_cr3, gpa;
887521820dbSPaolo Bonzini 	u64 ept_pte, pte, offset_in_page;
888521820dbSPaolo Bonzini 	unsigned offset;
889521820dbSPaolo Bonzini 	bool bad_pt_ad = false;
890521820dbSPaolo Bonzini 
891521820dbSPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
892521820dbSPaolo Bonzini 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
893521820dbSPaolo Bonzini 
894521820dbSPaolo Bonzini 		ept_pte = get_ept_pte(pml4, (u64) &pt[offset], 1);
895521820dbSPaolo Bonzini 		if (ept_pte == 0)
896521820dbSPaolo Bonzini 			return;
897521820dbSPaolo Bonzini 
898521820dbSPaolo Bonzini 		if (!bad_pt_ad) {
899521820dbSPaolo Bonzini 			bad_pt_ad |= (ept_pte & (EPT_ACCESS_FLAG|EPT_DIRTY_FLAG)) != expected_pt_ad;
900521820dbSPaolo Bonzini 			if (bad_pt_ad)
901521820dbSPaolo Bonzini 				report("EPT - guest level %d page table A=%d/D=%d",
902521820dbSPaolo Bonzini 				       false, l,
903521820dbSPaolo Bonzini 				       !!(expected_pt_ad & EPT_ACCESS_FLAG),
904521820dbSPaolo Bonzini 				       !!(expected_pt_ad & EPT_DIRTY_FLAG));
905521820dbSPaolo Bonzini 		}
906521820dbSPaolo Bonzini 
907521820dbSPaolo Bonzini 		pte = pt[offset];
908521820dbSPaolo Bonzini 		if (l == 1 || (l < 4 && (pte & PT_PAGE_SIZE_MASK)))
909521820dbSPaolo Bonzini 			break;
910521820dbSPaolo Bonzini 		if (!(pte & PT_PRESENT_MASK))
911521820dbSPaolo Bonzini 			return;
912521820dbSPaolo Bonzini 		pt = (unsigned long *)(pte & PT_ADDR_MASK);
913521820dbSPaolo Bonzini 	}
914521820dbSPaolo Bonzini 
915521820dbSPaolo Bonzini 	if (!bad_pt_ad)
916521820dbSPaolo Bonzini 		report("EPT - guest page table structures A=%d/D=%d",
917521820dbSPaolo Bonzini 		       true,
918521820dbSPaolo Bonzini 		       !!(expected_pt_ad & EPT_ACCESS_FLAG),
919521820dbSPaolo Bonzini 		       !!(expected_pt_ad & EPT_DIRTY_FLAG));
920521820dbSPaolo Bonzini 
921521820dbSPaolo Bonzini 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
922521820dbSPaolo Bonzini 	offset_in_page = guest_addr & ((1 << EPT_LEVEL_SHIFT(l)) - 1);
923521820dbSPaolo Bonzini 	gpa = (pt[offset] & PT_ADDR_MASK) | (guest_addr & offset_in_page);
924521820dbSPaolo Bonzini 
925521820dbSPaolo Bonzini 	ept_pte = get_ept_pte(pml4, gpa, 1);
926521820dbSPaolo Bonzini 	report("EPT - guest physical address A=%d/D=%d",
927521820dbSPaolo Bonzini 	       (ept_pte & (EPT_ACCESS_FLAG|EPT_DIRTY_FLAG)) == expected_gpa_ad,
928521820dbSPaolo Bonzini 	       !!(expected_gpa_ad & EPT_ACCESS_FLAG),
929521820dbSPaolo Bonzini 	       !!(expected_gpa_ad & EPT_DIRTY_FLAG));
930521820dbSPaolo Bonzini }
931521820dbSPaolo Bonzini 
932521820dbSPaolo Bonzini 
9332f888fccSBandan Das void ept_sync(int type, u64 eptp)
9342f888fccSBandan Das {
9352f888fccSBandan Das 	switch (type) {
9362f888fccSBandan Das 	case INVEPT_SINGLE:
9372f888fccSBandan Das 		if (ept_vpid.val & EPT_CAP_INVEPT_SINGLE) {
9382f888fccSBandan Das 			invept(INVEPT_SINGLE, eptp);
9392f888fccSBandan Das 			break;
9402f888fccSBandan Das 		}
9412f888fccSBandan Das 		/* else fall through */
9422f888fccSBandan Das 	case INVEPT_GLOBAL:
9432f888fccSBandan Das 		if (ept_vpid.val & EPT_CAP_INVEPT_ALL) {
9442f888fccSBandan Das 			invept(INVEPT_GLOBAL, eptp);
9452f888fccSBandan Das 			break;
9462f888fccSBandan Das 		}
9472f888fccSBandan Das 		/* else fall through */
9482f888fccSBandan Das 	default:
9492f888fccSBandan Das 		printf("WARNING: invept is not supported!\n");
9502f888fccSBandan Das 	}
9512f888fccSBandan Das }
9522f888fccSBandan Das 
9536884af61SArthur Chunqi Li int set_ept_pte(unsigned long *pml4, unsigned long guest_addr,
9546884af61SArthur Chunqi Li 		int level, u64 pte_val)
9556884af61SArthur Chunqi Li {
9566884af61SArthur Chunqi Li 	int l;
9576884af61SArthur Chunqi Li 	unsigned long *pt = pml4;
9586884af61SArthur Chunqi Li 	unsigned offset;
9596884af61SArthur Chunqi Li 
9606884af61SArthur Chunqi Li 	if (level < 1 || level > 3)
9616884af61SArthur Chunqi Li 		return -1;
9622ca6f1f3SPaolo Bonzini 	for (l = EPT_PAGE_LEVEL; ; --l) {
963a969e087SPeter Feiner 		offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
9642ca6f1f3SPaolo Bonzini 		if (l == level)
9652ca6f1f3SPaolo Bonzini 			break;
9666884af61SArthur Chunqi Li 		if (!(pt[offset] & (EPT_PRESENT)))
9676884af61SArthur Chunqi Li 			return -1;
96800b5c590SPeter Feiner 		pt = (unsigned long *)(pt[offset] & EPT_ADDR_MASK);
9696884af61SArthur Chunqi Li 	}
970a969e087SPeter Feiner 	offset = (guest_addr >> EPT_LEVEL_SHIFT(l)) & EPT_PGDIR_MASK;
9716884af61SArthur Chunqi Li 	pt[offset] = pte_val;
9726884af61SArthur Chunqi Li 	return 0;
9736884af61SArthur Chunqi Li }
9746884af61SArthur Chunqi Li 
975b093c6ceSWanpeng Li void vpid_sync(int type, u16 vpid)
976b093c6ceSWanpeng Li {
977b093c6ceSWanpeng Li 	switch(type) {
978b093c6ceSWanpeng Li 	case INVVPID_SINGLE:
979b093c6ceSWanpeng Li 		if (ept_vpid.val & VPID_CAP_INVVPID_SINGLE) {
980b093c6ceSWanpeng Li 			invvpid(INVVPID_SINGLE, vpid, 0);
981b093c6ceSWanpeng Li 			break;
982b093c6ceSWanpeng Li 		}
983b093c6ceSWanpeng Li 	case INVVPID_ALL:
984b093c6ceSWanpeng Li 		if (ept_vpid.val & VPID_CAP_INVVPID_ALL) {
985b093c6ceSWanpeng Li 			invvpid(INVVPID_ALL, vpid, 0);
986b093c6ceSWanpeng Li 			break;
987b093c6ceSWanpeng Li 		}
988b093c6ceSWanpeng Li 	default:
989b093c6ceSWanpeng Li 		printf("WARNING: invvpid is not supported\n");
990b093c6ceSWanpeng Li 	}
991b093c6ceSWanpeng Li }
9926884af61SArthur Chunqi Li 
9939d7eaa29SArthur Chunqi Li static void init_vmcs_ctrl(void)
9949d7eaa29SArthur Chunqi Li {
9959d7eaa29SArthur Chunqi Li 	/* 26.2 CHECKS ON VMX CONTROLS AND HOST-STATE AREA */
9969d7eaa29SArthur Chunqi Li 	/* 26.2.1.1 */
9979d7eaa29SArthur Chunqi Li 	vmcs_write(PIN_CONTROLS, ctrl_pin);
9989d7eaa29SArthur Chunqi Li 	/* Disable VMEXIT of IO instruction */
9999d7eaa29SArthur Chunqi Li 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
10009d7eaa29SArthur Chunqi Li 	if (ctrl_cpu_rev[0].set & CPU_SECONDARY) {
10016884af61SArthur Chunqi Li 		ctrl_cpu[1] = (ctrl_cpu[1] | ctrl_cpu_rev[1].set) &
10026884af61SArthur Chunqi Li 			ctrl_cpu_rev[1].clr;
10039d7eaa29SArthur Chunqi Li 		vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
10049d7eaa29SArthur Chunqi Li 	}
10059d7eaa29SArthur Chunqi Li 	vmcs_write(CR3_TARGET_COUNT, 0);
10069d7eaa29SArthur Chunqi Li 	vmcs_write(VPID, ++vpid_cnt);
10079d7eaa29SArthur Chunqi Li }
10089d7eaa29SArthur Chunqi Li 
10099d7eaa29SArthur Chunqi Li static void init_vmcs_host(void)
10109d7eaa29SArthur Chunqi Li {
10119d7eaa29SArthur Chunqi Li 	/* 26.2 CHECKS ON VMX CONTROLS AND HOST-STATE AREA */
10129d7eaa29SArthur Chunqi Li 	/* 26.2.1.2 */
10139d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_EFER, rdmsr(MSR_EFER));
10149d7eaa29SArthur Chunqi Li 
10159d7eaa29SArthur Chunqi Li 	/* 26.2.1.3 */
10169d7eaa29SArthur Chunqi Li 	vmcs_write(ENT_CONTROLS, ctrl_enter);
10179d7eaa29SArthur Chunqi Li 	vmcs_write(EXI_CONTROLS, ctrl_exit);
10189d7eaa29SArthur Chunqi Li 
10199d7eaa29SArthur Chunqi Li 	/* 26.2.2 */
10209d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_CR0, read_cr0());
10219d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_CR3, read_cr3());
10229d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_CR4, read_cr4());
10239d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_SYSENTER_EIP, (u64)(&entry_sysenter));
102469d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SYSENTER_CS,  KERNEL_CS);
10259d7eaa29SArthur Chunqi Li 
10269d7eaa29SArthur Chunqi Li 	/* 26.2.3 */
102769d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_CS, KERNEL_CS);
102869d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_SS, KERNEL_DS);
102969d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_DS, KERNEL_DS);
103069d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_ES, KERNEL_DS);
103169d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_FS, KERNEL_DS);
103269d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_GS, KERNEL_DS);
103369d8fe0eSPaolo Bonzini 	vmcs_write(HOST_SEL_TR, TSS_MAIN);
1034337166aaSJan Kiszka 	vmcs_write(HOST_BASE_TR, tss_descr.base);
1035337166aaSJan Kiszka 	vmcs_write(HOST_BASE_GDTR, gdt64_desc.base);
1036337166aaSJan Kiszka 	vmcs_write(HOST_BASE_IDTR, idt_descr.base);
10379d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_BASE_FS, 0);
10389d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_BASE_GS, 0);
10399d7eaa29SArthur Chunqi Li 
10409d7eaa29SArthur Chunqi Li 	/* Set other vmcs area */
10419d7eaa29SArthur Chunqi Li 	vmcs_write(PF_ERROR_MASK, 0);
10429d7eaa29SArthur Chunqi Li 	vmcs_write(PF_ERROR_MATCH, 0);
10439d7eaa29SArthur Chunqi Li 	vmcs_write(VMCS_LINK_PTR, ~0ul);
10449d7eaa29SArthur Chunqi Li 	vmcs_write(VMCS_LINK_PTR_HI, ~0ul);
10459d7eaa29SArthur Chunqi Li 	vmcs_write(HOST_RIP, (u64)(&vmx_return));
10469d7eaa29SArthur Chunqi Li }
10479d7eaa29SArthur Chunqi Li 
10489d7eaa29SArthur Chunqi Li static void init_vmcs_guest(void)
10499d7eaa29SArthur Chunqi Li {
10509d7eaa29SArthur Chunqi Li 	/* 26.3 CHECKING AND LOADING GUEST STATE */
10519d7eaa29SArthur Chunqi Li 	ulong guest_cr0, guest_cr4, guest_cr3;
10529d7eaa29SArthur Chunqi Li 	/* 26.3.1.1 */
10539d7eaa29SArthur Chunqi Li 	guest_cr0 = read_cr0();
10549d7eaa29SArthur Chunqi Li 	guest_cr4 = read_cr4();
10559d7eaa29SArthur Chunqi Li 	guest_cr3 = read_cr3();
10569d7eaa29SArthur Chunqi Li 	if (ctrl_enter & ENT_GUEST_64) {
10579d7eaa29SArthur Chunqi Li 		guest_cr0 |= X86_CR0_PG;
10589d7eaa29SArthur Chunqi Li 		guest_cr4 |= X86_CR4_PAE;
10599d7eaa29SArthur Chunqi Li 	}
10609d7eaa29SArthur Chunqi Li 	if ((ctrl_enter & ENT_GUEST_64) == 0)
10619d7eaa29SArthur Chunqi Li 		guest_cr4 &= (~X86_CR4_PCIDE);
10629d7eaa29SArthur Chunqi Li 	if (guest_cr0 & X86_CR0_PG)
10639d7eaa29SArthur Chunqi Li 		guest_cr0 |= X86_CR0_PE;
10649d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_CR0, guest_cr0);
10659d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_CR3, guest_cr3);
10669d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_CR4, guest_cr4);
106769d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SYSENTER_CS,  KERNEL_CS);
10689d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_SYSENTER_ESP,
10699d7eaa29SArthur Chunqi Li 		(u64)(guest_syscall_stack + PAGE_SIZE - 1));
10709d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_SYSENTER_EIP, (u64)(&entry_sysenter));
10719d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_DR7, 0);
10729d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_EFER, rdmsr(MSR_EFER));
10739d7eaa29SArthur Chunqi Li 
10749d7eaa29SArthur Chunqi Li 	/* 26.3.1.2 */
107569d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_CS, KERNEL_CS);
107669d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_SS, KERNEL_DS);
107769d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_DS, KERNEL_DS);
107869d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_ES, KERNEL_DS);
107969d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_FS, KERNEL_DS);
108069d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_GS, KERNEL_DS);
108169d8fe0eSPaolo Bonzini 	vmcs_write(GUEST_SEL_TR, TSS_MAIN);
10829d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_SEL_LDTR, 0);
10839d7eaa29SArthur Chunqi Li 
10849d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_CS, 0);
10859d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_ES, 0);
10869d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_SS, 0);
10879d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_DS, 0);
10889d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_FS, 0);
10899d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_GS, 0);
1090337166aaSJan Kiszka 	vmcs_write(GUEST_BASE_TR, tss_descr.base);
10919d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_BASE_LDTR, 0);
10929d7eaa29SArthur Chunqi Li 
10939d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_CS, 0xFFFFFFFF);
10949d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_DS, 0xFFFFFFFF);
10959d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_ES, 0xFFFFFFFF);
10969d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_SS, 0xFFFFFFFF);
10979d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_FS, 0xFFFFFFFF);
10989d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_GS, 0xFFFFFFFF);
10999d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_LIMIT_LDTR, 0xffff);
1100337166aaSJan Kiszka 	vmcs_write(GUEST_LIMIT_TR, tss_descr.limit);
11019d7eaa29SArthur Chunqi Li 
11029d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_CS, 0xa09b);
11039d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_DS, 0xc093);
11049d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_ES, 0xc093);
11059d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_FS, 0xc093);
11069d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_GS, 0xc093);
11079d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_SS, 0xc093);
11089d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_LDTR, 0x82);
11099d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_AR_TR, 0x8b);
11109d7eaa29SArthur Chunqi Li 
11119d7eaa29SArthur Chunqi Li 	/* 26.3.1.3 */
1112337166aaSJan Kiszka 	vmcs_write(GUEST_BASE_GDTR, gdt64_desc.base);
1113337166aaSJan Kiszka 	vmcs_write(GUEST_BASE_IDTR, idt_descr.base);
1114337166aaSJan Kiszka 	vmcs_write(GUEST_LIMIT_GDTR, gdt64_desc.limit);
1115337166aaSJan Kiszka 	vmcs_write(GUEST_LIMIT_IDTR, idt_descr.limit);
11169d7eaa29SArthur Chunqi Li 
11179d7eaa29SArthur Chunqi Li 	/* 26.3.1.4 */
11189d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_RIP, (u64)(&guest_entry));
11199d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_RSP, (u64)(guest_stack + PAGE_SIZE - 1));
11209d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_RFLAGS, 0x2);
11219d7eaa29SArthur Chunqi Li 
11229d7eaa29SArthur Chunqi Li 	/* 26.3.1.5 */
112317ba0dd0SJan Kiszka 	vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
11249d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_INTR_STATE, 0);
11259d7eaa29SArthur Chunqi Li }
11269d7eaa29SArthur Chunqi Li 
11279d7eaa29SArthur Chunqi Li static int init_vmcs(struct vmcs **vmcs)
11289d7eaa29SArthur Chunqi Li {
11299d7eaa29SArthur Chunqi Li 	*vmcs = alloc_page();
11309d7eaa29SArthur Chunqi Li 	memset(*vmcs, 0, PAGE_SIZE);
11319d7eaa29SArthur Chunqi Li 	(*vmcs)->revision_id = basic.revision;
11329d7eaa29SArthur Chunqi Li 	/* vmclear first to init vmcs */
11339d7eaa29SArthur Chunqi Li 	if (vmcs_clear(*vmcs)) {
11349d7eaa29SArthur Chunqi Li 		printf("%s : vmcs_clear error\n", __func__);
11359d7eaa29SArthur Chunqi Li 		return 1;
11369d7eaa29SArthur Chunqi Li 	}
11379d7eaa29SArthur Chunqi Li 
11389d7eaa29SArthur Chunqi Li 	if (make_vmcs_current(*vmcs)) {
11399d7eaa29SArthur Chunqi Li 		printf("%s : make_vmcs_current error\n", __func__);
11409d7eaa29SArthur Chunqi Li 		return 1;
11419d7eaa29SArthur Chunqi Li 	}
11429d7eaa29SArthur Chunqi Li 
11439d7eaa29SArthur Chunqi Li 	/* All settings to pin/exit/enter/cpu
11449d7eaa29SArthur Chunqi Li 	   control fields should be placed here */
11459d7eaa29SArthur Chunqi Li 	ctrl_pin |= PIN_EXTINT | PIN_NMI | PIN_VIRT_NMI;
11469d7eaa29SArthur Chunqi Li 	ctrl_exit = EXI_LOAD_EFER | EXI_HOST_64;
11479d7eaa29SArthur Chunqi Li 	ctrl_enter = (ENT_LOAD_EFER | ENT_GUEST_64);
11489d7eaa29SArthur Chunqi Li 	/* DIsable IO instruction VMEXIT now */
11499d7eaa29SArthur Chunqi Li 	ctrl_cpu[0] &= (~(CPU_IO | CPU_IO_BITMAP));
11509d7eaa29SArthur Chunqi Li 	ctrl_cpu[1] = 0;
11519d7eaa29SArthur Chunqi Li 
11529d7eaa29SArthur Chunqi Li 	ctrl_pin = (ctrl_pin | ctrl_pin_rev.set) & ctrl_pin_rev.clr;
11539d7eaa29SArthur Chunqi Li 	ctrl_enter = (ctrl_enter | ctrl_enter_rev.set) & ctrl_enter_rev.clr;
11549d7eaa29SArthur Chunqi Li 	ctrl_exit = (ctrl_exit | ctrl_exit_rev.set) & ctrl_exit_rev.clr;
11559d7eaa29SArthur Chunqi Li 	ctrl_cpu[0] = (ctrl_cpu[0] | ctrl_cpu_rev[0].set) & ctrl_cpu_rev[0].clr;
11569d7eaa29SArthur Chunqi Li 
11579d7eaa29SArthur Chunqi Li 	init_vmcs_ctrl();
11589d7eaa29SArthur Chunqi Li 	init_vmcs_host();
11599d7eaa29SArthur Chunqi Li 	init_vmcs_guest();
11609d7eaa29SArthur Chunqi Li 	return 0;
11619d7eaa29SArthur Chunqi Li }
11629d7eaa29SArthur Chunqi Li 
11639d7eaa29SArthur Chunqi Li static void init_vmx(void)
11649d7eaa29SArthur Chunqi Li {
11653ee34093SArthur Chunqi Li 	ulong fix_cr0_set, fix_cr0_clr;
11663ee34093SArthur Chunqi Li 	ulong fix_cr4_set, fix_cr4_clr;
11673ee34093SArthur Chunqi Li 
11689d7eaa29SArthur Chunqi Li 	vmxon_region = alloc_page();
11699d7eaa29SArthur Chunqi Li 	memset(vmxon_region, 0, PAGE_SIZE);
11709d7eaa29SArthur Chunqi Li 
11719d7eaa29SArthur Chunqi Li 	fix_cr0_set =  rdmsr(MSR_IA32_VMX_CR0_FIXED0);
11729d7eaa29SArthur Chunqi Li 	fix_cr0_clr =  rdmsr(MSR_IA32_VMX_CR0_FIXED1);
11739d7eaa29SArthur Chunqi Li 	fix_cr4_set =  rdmsr(MSR_IA32_VMX_CR4_FIXED0);
11749d7eaa29SArthur Chunqi Li 	fix_cr4_clr = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
11759d7eaa29SArthur Chunqi Li 	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
11769d7eaa29SArthur Chunqi Li 	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PIN
11779d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_PINBASED_CTLS);
11789d7eaa29SArthur Chunqi Li 	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT
11799d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_EXIT_CTLS);
11809d7eaa29SArthur Chunqi Li 	ctrl_enter_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_ENTRY
11819d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_ENTRY_CTLS);
11829d7eaa29SArthur Chunqi Li 	ctrl_cpu_rev[0].val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PROC
11839d7eaa29SArthur Chunqi Li 			: MSR_IA32_VMX_PROCBASED_CTLS);
11846884af61SArthur Chunqi Li 	if ((ctrl_cpu_rev[0].clr & CPU_SECONDARY) != 0)
11859d7eaa29SArthur Chunqi Li 		ctrl_cpu_rev[1].val = rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
11866884af61SArthur Chunqi Li 	else
11876884af61SArthur Chunqi Li 		ctrl_cpu_rev[1].val = 0;
11886884af61SArthur Chunqi Li 	if ((ctrl_cpu_rev[1].clr & (CPU_EPT | CPU_VPID)) != 0)
11899d7eaa29SArthur Chunqi Li 		ept_vpid.val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
11906884af61SArthur Chunqi Li 	else
11916884af61SArthur Chunqi Li 		ept_vpid.val = 0;
11929d7eaa29SArthur Chunqi Li 
11939d7eaa29SArthur Chunqi Li 	write_cr0((read_cr0() & fix_cr0_clr) | fix_cr0_set);
11949d7eaa29SArthur Chunqi Li 	write_cr4((read_cr4() & fix_cr4_clr) | fix_cr4_set | X86_CR4_VMXE);
11959d7eaa29SArthur Chunqi Li 
11969d7eaa29SArthur Chunqi Li 	*vmxon_region = basic.revision;
11979d7eaa29SArthur Chunqi Li 
11989d7eaa29SArthur Chunqi Li 	guest_stack = alloc_page();
11999d7eaa29SArthur Chunqi Li 	memset(guest_stack, 0, PAGE_SIZE);
12009d7eaa29SArthur Chunqi Li 	guest_syscall_stack = alloc_page();
12019d7eaa29SArthur Chunqi Li 	memset(guest_syscall_stack, 0, PAGE_SIZE);
12029d7eaa29SArthur Chunqi Li }
12039d7eaa29SArthur Chunqi Li 
1204e3f363c4SJan Kiszka static void do_vmxon_off(void *data)
12059d7eaa29SArthur Chunqi Li {
12063b127446SJan Kiszka 	vmx_on();
12073b127446SJan Kiszka 	vmx_off();
120803f37ef2SPaolo Bonzini }
12093b127446SJan Kiszka 
1210e3f363c4SJan Kiszka static void do_write_feature_control(void *data)
12113b127446SJan Kiszka {
12123b127446SJan Kiszka 	wrmsr(MSR_IA32_FEATURE_CONTROL, 0);
121303f37ef2SPaolo Bonzini }
12143b127446SJan Kiszka 
12153b127446SJan Kiszka static int test_vmx_feature_control(void)
12163b127446SJan Kiszka {
12173b127446SJan Kiszka 	u64 ia32_feature_control;
12183b127446SJan Kiszka 	bool vmx_enabled;
12193b127446SJan Kiszka 
12203b127446SJan Kiszka 	ia32_feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
12213b127446SJan Kiszka 	vmx_enabled = ((ia32_feature_control & 0x5) == 0x5);
12223b127446SJan Kiszka 	if ((ia32_feature_control & 0x5) == 0x5) {
12233b127446SJan Kiszka 		printf("VMX enabled and locked by BIOS\n");
12243b127446SJan Kiszka 		return 0;
12253b127446SJan Kiszka 	} else if (ia32_feature_control & 0x1) {
12263b127446SJan Kiszka 		printf("ERROR: VMX locked out by BIOS!?\n");
12273b127446SJan Kiszka 		return 1;
12283b127446SJan Kiszka 	}
12293b127446SJan Kiszka 
12303b127446SJan Kiszka 	wrmsr(MSR_IA32_FEATURE_CONTROL, 0);
12313b127446SJan Kiszka 	report("test vmxon with FEATURE_CONTROL cleared",
1232e3f363c4SJan Kiszka 	       test_for_exception(GP_VECTOR, &do_vmxon_off, NULL));
12333b127446SJan Kiszka 
12343b127446SJan Kiszka 	wrmsr(MSR_IA32_FEATURE_CONTROL, 0x4);
12353b127446SJan Kiszka 	report("test vmxon without FEATURE_CONTROL lock",
1236e3f363c4SJan Kiszka 	       test_for_exception(GP_VECTOR, &do_vmxon_off, NULL));
12373b127446SJan Kiszka 
12383b127446SJan Kiszka 	wrmsr(MSR_IA32_FEATURE_CONTROL, 0x5);
12393b127446SJan Kiszka 	vmx_enabled = ((rdmsr(MSR_IA32_FEATURE_CONTROL) & 0x5) == 0x5);
12403b127446SJan Kiszka 	report("test enable VMX in FEATURE_CONTROL", vmx_enabled);
12413b127446SJan Kiszka 
12423b127446SJan Kiszka 	report("test FEATURE_CONTROL lock bit",
1243e3f363c4SJan Kiszka 	       test_for_exception(GP_VECTOR, &do_write_feature_control, NULL));
12443b127446SJan Kiszka 
12453b127446SJan Kiszka 	return !vmx_enabled;
12469d7eaa29SArthur Chunqi Li }
12479d7eaa29SArthur Chunqi Li 
12489d7eaa29SArthur Chunqi Li static int test_vmxon(void)
12499d7eaa29SArthur Chunqi Li {
1250ce21d809SBandan Das 	int ret, ret1;
1251ce21d809SBandan Das 	u64 *tmp_region = vmxon_region;
1252e2cf1c9dSEduardo Habkost 	int width = cpuid_maxphyaddr();
12539d7eaa29SArthur Chunqi Li 
1254ce21d809SBandan Das 	/* Unaligned page access */
1255ce21d809SBandan Das 	vmxon_region = (u64 *)((intptr_t)vmxon_region + 1);
1256ce21d809SBandan Das 	ret1 = vmx_on();
1257ce21d809SBandan Das 	report("test vmxon with unaligned vmxon region", ret1);
1258ce21d809SBandan Das 	if (!ret1) {
1259ce21d809SBandan Das 		ret = 1;
1260ce21d809SBandan Das 		goto out;
1261ce21d809SBandan Das 	}
1262ce21d809SBandan Das 
1263ce21d809SBandan Das 	/* gpa bits beyond physical address width are set*/
1264ce21d809SBandan Das 	vmxon_region = (u64 *)((intptr_t)tmp_region | ((u64)1 << (width+1)));
1265ce21d809SBandan Das 	ret1 = vmx_on();
1266ce21d809SBandan Das 	report("test vmxon with bits set beyond physical address width", ret1);
1267ce21d809SBandan Das 	if (!ret1) {
1268ce21d809SBandan Das 		ret = 1;
1269ce21d809SBandan Das 		goto out;
1270ce21d809SBandan Das 	}
1271ce21d809SBandan Das 
1272ce21d809SBandan Das 	/* invalid revision indentifier */
1273ce21d809SBandan Das 	vmxon_region = tmp_region;
1274ce21d809SBandan Das 	*vmxon_region = 0xba9da9;
1275ce21d809SBandan Das 	ret1 = vmx_on();
1276ce21d809SBandan Das 	report("test vmxon with invalid revision identifier", ret1);
1277ce21d809SBandan Das 	if (!ret1) {
1278ce21d809SBandan Das 		ret = 1;
1279ce21d809SBandan Das 		goto out;
1280ce21d809SBandan Das 	}
1281ce21d809SBandan Das 
1282ce21d809SBandan Das 	/* and finally a valid region */
1283ce21d809SBandan Das 	*vmxon_region = basic.revision;
12849d7eaa29SArthur Chunqi Li 	ret = vmx_on();
1285ce21d809SBandan Das 	report("test vmxon with valid vmxon region", !ret);
1286ce21d809SBandan Das 
1287ce21d809SBandan Das out:
12889d7eaa29SArthur Chunqi Li 	return ret;
12899d7eaa29SArthur Chunqi Li }
12909d7eaa29SArthur Chunqi Li 
12919d7eaa29SArthur Chunqi Li static void test_vmptrld(void)
12929d7eaa29SArthur Chunqi Li {
1293daeec979SBandan Das 	struct vmcs *vmcs, *tmp_root;
1294e2cf1c9dSEduardo Habkost 	int width = cpuid_maxphyaddr();
12959d7eaa29SArthur Chunqi Li 
12969d7eaa29SArthur Chunqi Li 	vmcs = alloc_page();
12979d7eaa29SArthur Chunqi Li 	vmcs->revision_id = basic.revision;
1298daeec979SBandan Das 
1299daeec979SBandan Das 	/* Unaligned page access */
1300daeec979SBandan Das 	tmp_root = (struct vmcs *)((intptr_t)vmcs + 1);
1301daeec979SBandan Das 	report("test vmptrld with unaligned vmcs",
13029c305952SPaolo Bonzini 	       make_vmcs_current(tmp_root) == 1);
1303daeec979SBandan Das 
1304daeec979SBandan Das 	/* gpa bits beyond physical address width are set*/
1305daeec979SBandan Das 	tmp_root = (struct vmcs *)((intptr_t)vmcs |
1306daeec979SBandan Das 				   ((u64)1 << (width+1)));
1307daeec979SBandan Das 	report("test vmptrld with vmcs address bits set beyond physical address width",
13089c305952SPaolo Bonzini 	       make_vmcs_current(tmp_root) == 1);
1309daeec979SBandan Das 
1310daeec979SBandan Das 	/* Pass VMXON region */
1311799a84f8SGanShun 	make_vmcs_current(vmcs);
1312daeec979SBandan Das 	tmp_root = (struct vmcs *)vmxon_region;
1313daeec979SBandan Das 	report("test vmptrld with vmxon region",
13149c305952SPaolo Bonzini 	       make_vmcs_current(tmp_root) == 1);
1315799a84f8SGanShun 	report("test vmptrld with vmxon region vm-instruction error",
1316799a84f8SGanShun 	       vmcs_read(VMX_INST_ERROR) == VMXERR_VMPTRLD_VMXON_POINTER);
1317daeec979SBandan Das 
1318daeec979SBandan Das 	report("test vmptrld with valid vmcs region", make_vmcs_current(vmcs) == 0);
13199d7eaa29SArthur Chunqi Li }
13209d7eaa29SArthur Chunqi Li 
13219d7eaa29SArthur Chunqi Li static void test_vmptrst(void)
13229d7eaa29SArthur Chunqi Li {
13239d7eaa29SArthur Chunqi Li 	int ret;
13249d7eaa29SArthur Chunqi Li 	struct vmcs *vmcs1, *vmcs2;
13259d7eaa29SArthur Chunqi Li 
13269d7eaa29SArthur Chunqi Li 	vmcs1 = alloc_page();
13279d7eaa29SArthur Chunqi Li 	memset(vmcs1, 0, PAGE_SIZE);
13289d7eaa29SArthur Chunqi Li 	init_vmcs(&vmcs1);
13299d7eaa29SArthur Chunqi Li 	ret = vmcs_save(&vmcs2);
13309d7eaa29SArthur Chunqi Li 	report("test vmptrst", (!ret) && (vmcs1 == vmcs2));
13319d7eaa29SArthur Chunqi Li }
13329d7eaa29SArthur Chunqi Li 
133369c8d31cSJan Kiszka struct vmx_ctl_msr {
133469c8d31cSJan Kiszka 	const char *name;
133569c8d31cSJan Kiszka 	u32 index, true_index;
133669c8d31cSJan Kiszka 	u32 default1;
133769c8d31cSJan Kiszka } vmx_ctl_msr[] = {
133869c8d31cSJan Kiszka 	{ "MSR_IA32_VMX_PINBASED_CTLS", MSR_IA32_VMX_PINBASED_CTLS,
133969c8d31cSJan Kiszka 	  MSR_IA32_VMX_TRUE_PIN, 0x16 },
134069c8d31cSJan Kiszka 	{ "MSR_IA32_VMX_PROCBASED_CTLS", MSR_IA32_VMX_PROCBASED_CTLS,
134169c8d31cSJan Kiszka 	  MSR_IA32_VMX_TRUE_PROC, 0x401e172 },
134269c8d31cSJan Kiszka 	{ "MSR_IA32_VMX_PROCBASED_CTLS2", MSR_IA32_VMX_PROCBASED_CTLS2,
134369c8d31cSJan Kiszka 	  MSR_IA32_VMX_PROCBASED_CTLS2, 0 },
134469c8d31cSJan Kiszka 	{ "MSR_IA32_VMX_EXIT_CTLS", MSR_IA32_VMX_EXIT_CTLS,
134569c8d31cSJan Kiszka 	  MSR_IA32_VMX_TRUE_EXIT, 0x36dff },
134669c8d31cSJan Kiszka 	{ "MSR_IA32_VMX_ENTRY_CTLS", MSR_IA32_VMX_ENTRY_CTLS,
134769c8d31cSJan Kiszka 	  MSR_IA32_VMX_TRUE_ENTRY, 0x11ff },
134869c8d31cSJan Kiszka };
134969c8d31cSJan Kiszka 
135069c8d31cSJan Kiszka static void test_vmx_caps(void)
135169c8d31cSJan Kiszka {
135269c8d31cSJan Kiszka 	u64 val, default1, fixed0, fixed1;
135369c8d31cSJan Kiszka 	union vmx_ctrl_msr ctrl, true_ctrl;
135469c8d31cSJan Kiszka 	unsigned int n;
135569c8d31cSJan Kiszka 	bool ok;
135669c8d31cSJan Kiszka 
135769c8d31cSJan Kiszka 	printf("\nTest suite: VMX capability reporting\n");
135869c8d31cSJan Kiszka 
135969c8d31cSJan Kiszka 	report("MSR_IA32_VMX_BASIC",
136069c8d31cSJan Kiszka 	       (basic.revision & (1ul << 31)) == 0 &&
136169c8d31cSJan Kiszka 	       basic.size > 0 && basic.size <= 4096 &&
136269c8d31cSJan Kiszka 	       (basic.type == 0 || basic.type == 6) &&
136369c8d31cSJan Kiszka 	       basic.reserved1 == 0 && basic.reserved2 == 0);
136469c8d31cSJan Kiszka 
136569c8d31cSJan Kiszka 	val = rdmsr(MSR_IA32_VMX_MISC);
136669c8d31cSJan Kiszka 	report("MSR_IA32_VMX_MISC",
136769c8d31cSJan Kiszka 	       (!(ctrl_cpu_rev[1].clr & CPU_URG) || val & (1ul << 5)) &&
136869c8d31cSJan Kiszka 	       ((val >> 16) & 0x1ff) <= 256 &&
136969c8d31cSJan Kiszka 	       (val & 0xc0007e00) == 0);
137069c8d31cSJan Kiszka 
137169c8d31cSJan Kiszka 	for (n = 0; n < ARRAY_SIZE(vmx_ctl_msr); n++) {
137269c8d31cSJan Kiszka 		ctrl.val = rdmsr(vmx_ctl_msr[n].index);
137369c8d31cSJan Kiszka 		default1 = vmx_ctl_msr[n].default1;
137469c8d31cSJan Kiszka 		ok = (ctrl.set & default1) == default1;
137569c8d31cSJan Kiszka 		ok = ok && (ctrl.set & ~ctrl.clr) == 0;
137669c8d31cSJan Kiszka 		if (ok && basic.ctrl) {
137769c8d31cSJan Kiszka 			true_ctrl.val = rdmsr(vmx_ctl_msr[n].true_index);
137869c8d31cSJan Kiszka 			ok = ctrl.clr == true_ctrl.clr;
137969c8d31cSJan Kiszka 			ok = ok && ctrl.set == (true_ctrl.set | default1);
138069c8d31cSJan Kiszka 		}
138169c8d31cSJan Kiszka 		report(vmx_ctl_msr[n].name, ok);
138269c8d31cSJan Kiszka 	}
138369c8d31cSJan Kiszka 
138469c8d31cSJan Kiszka 	fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
138569c8d31cSJan Kiszka 	fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
138669c8d31cSJan Kiszka 	report("MSR_IA32_VMX_IA32_VMX_CR0_FIXED0/1",
138769c8d31cSJan Kiszka 	       ((fixed0 ^ fixed1) & ~fixed1) == 0);
138869c8d31cSJan Kiszka 
138969c8d31cSJan Kiszka 	fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
139069c8d31cSJan Kiszka 	fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
139169c8d31cSJan Kiszka 	report("MSR_IA32_VMX_IA32_VMX_CR4_FIXED0/1",
139269c8d31cSJan Kiszka 	       ((fixed0 ^ fixed1) & ~fixed1) == 0);
139369c8d31cSJan Kiszka 
139469c8d31cSJan Kiszka 	val = rdmsr(MSR_IA32_VMX_VMCS_ENUM);
139569c8d31cSJan Kiszka 	report("MSR_IA32_VMX_VMCS_ENUM",
139669c8d31cSJan Kiszka 	       (val & 0x3e) >= 0x2a &&
139769c8d31cSJan Kiszka 	       (val & 0xfffffffffffffc01Ull) == 0);
139869c8d31cSJan Kiszka 
139969c8d31cSJan Kiszka 	val = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
140069c8d31cSJan Kiszka 	report("MSR_IA32_VMX_EPT_VPID_CAP",
1401625f52abSPaolo Bonzini 	       (val & 0xfffff07ef98cbebeUll) == 0);
140269c8d31cSJan Kiszka }
140369c8d31cSJan Kiszka 
14049d7eaa29SArthur Chunqi Li /* This function can only be called in guest */
14059d7eaa29SArthur Chunqi Li static void __attribute__((__used__)) hypercall(u32 hypercall_no)
14069d7eaa29SArthur Chunqi Li {
14079d7eaa29SArthur Chunqi Li 	u64 val = 0;
14089d7eaa29SArthur Chunqi Li 	val = (hypercall_no & HYPERCALL_MASK) | HYPERCALL_BIT;
14099d7eaa29SArthur Chunqi Li 	hypercall_field = val;
14109d7eaa29SArthur Chunqi Li 	asm volatile("vmcall\n\t");
14119d7eaa29SArthur Chunqi Li }
14129d7eaa29SArthur Chunqi Li 
14139d7eaa29SArthur Chunqi Li static bool is_hypercall()
14149d7eaa29SArthur Chunqi Li {
14159d7eaa29SArthur Chunqi Li 	ulong reason, hyper_bit;
14169d7eaa29SArthur Chunqi Li 
14179d7eaa29SArthur Chunqi Li 	reason = vmcs_read(EXI_REASON) & 0xff;
14189d7eaa29SArthur Chunqi Li 	hyper_bit = hypercall_field & HYPERCALL_BIT;
14199d7eaa29SArthur Chunqi Li 	if (reason == VMX_VMCALL && hyper_bit)
14209d7eaa29SArthur Chunqi Li 		return true;
14219d7eaa29SArthur Chunqi Li 	return false;
14229d7eaa29SArthur Chunqi Li }
14239d7eaa29SArthur Chunqi Li 
14249d7eaa29SArthur Chunqi Li static int handle_hypercall()
14259d7eaa29SArthur Chunqi Li {
14269d7eaa29SArthur Chunqi Li 	ulong hypercall_no;
14279d7eaa29SArthur Chunqi Li 
14289d7eaa29SArthur Chunqi Li 	hypercall_no = hypercall_field & HYPERCALL_MASK;
14299d7eaa29SArthur Chunqi Li 	hypercall_field = 0;
14309d7eaa29SArthur Chunqi Li 	switch (hypercall_no) {
14319d7eaa29SArthur Chunqi Li 	case HYPERCALL_VMEXIT:
14329d7eaa29SArthur Chunqi Li 		return VMX_TEST_VMEXIT;
1433*794c67a9SPeter Feiner 	case HYPERCALL_VMABORT:
1434*794c67a9SPeter Feiner 		return VMX_TEST_VMABORT;
1435*794c67a9SPeter Feiner 	case HYPERCALL_VMSKIP:
1436*794c67a9SPeter Feiner 		return VMX_TEST_VMSKIP;
14379d7eaa29SArthur Chunqi Li 	default:
1438b006d7ebSAndrew Jones 		printf("ERROR : Invalid hypercall number : %ld\n", hypercall_no);
14399d7eaa29SArthur Chunqi Li 	}
14409d7eaa29SArthur Chunqi Li 	return VMX_TEST_EXIT;
14419d7eaa29SArthur Chunqi Li }
14429d7eaa29SArthur Chunqi Li 
1443*794c67a9SPeter Feiner static void continue_abort(void)
1444*794c67a9SPeter Feiner {
1445*794c67a9SPeter Feiner 	assert(!in_guest);
1446*794c67a9SPeter Feiner 	printf("Host was here when guest aborted:\n");
1447*794c67a9SPeter Feiner 	dump_stack();
1448*794c67a9SPeter Feiner 	longjmp(abort_target, 1);
1449*794c67a9SPeter Feiner 	abort();
1450*794c67a9SPeter Feiner }
1451*794c67a9SPeter Feiner 
1452*794c67a9SPeter Feiner void __abort_test(void)
1453*794c67a9SPeter Feiner {
1454*794c67a9SPeter Feiner 	if (in_guest)
1455*794c67a9SPeter Feiner 		hypercall(HYPERCALL_VMABORT);
1456*794c67a9SPeter Feiner 	else
1457*794c67a9SPeter Feiner 		longjmp(abort_target, 1);
1458*794c67a9SPeter Feiner 	abort();
1459*794c67a9SPeter Feiner }
1460*794c67a9SPeter Feiner 
1461*794c67a9SPeter Feiner static void continue_skip(void)
1462*794c67a9SPeter Feiner {
1463*794c67a9SPeter Feiner 	assert(!in_guest);
1464*794c67a9SPeter Feiner 	longjmp(abort_target, 1);
1465*794c67a9SPeter Feiner 	abort();
1466*794c67a9SPeter Feiner }
1467*794c67a9SPeter Feiner 
1468*794c67a9SPeter Feiner void test_skip(const char *msg)
1469*794c67a9SPeter Feiner {
1470*794c67a9SPeter Feiner 	printf("%s skipping test: %s\n", in_guest ? "Guest" : "Host", msg);
1471*794c67a9SPeter Feiner 	if (in_guest)
1472*794c67a9SPeter Feiner 		hypercall(HYPERCALL_VMABORT);
1473*794c67a9SPeter Feiner 	else
1474*794c67a9SPeter Feiner 		longjmp(abort_target, 1);
1475*794c67a9SPeter Feiner 	abort();
1476*794c67a9SPeter Feiner }
1477*794c67a9SPeter Feiner 
14789d7eaa29SArthur Chunqi Li static int exit_handler()
14799d7eaa29SArthur Chunqi Li {
14809d7eaa29SArthur Chunqi Li 	int ret;
14819d7eaa29SArthur Chunqi Li 
14829d7eaa29SArthur Chunqi Li 	current->exits++;
14831d9284d0SArthur Chunqi Li 	regs.rflags = vmcs_read(GUEST_RFLAGS);
14849d7eaa29SArthur Chunqi Li 	if (is_hypercall())
14859d7eaa29SArthur Chunqi Li 		ret = handle_hypercall();
14869d7eaa29SArthur Chunqi Li 	else
14879d7eaa29SArthur Chunqi Li 		ret = current->exit_handler();
14881d9284d0SArthur Chunqi Li 	vmcs_write(GUEST_RFLAGS, regs.rflags);
14893b50efe3SPeter Feiner 
14909d7eaa29SArthur Chunqi Li 	return ret;
14919d7eaa29SArthur Chunqi Li }
14923b50efe3SPeter Feiner 
14933b50efe3SPeter Feiner /*
14943b50efe3SPeter Feiner  * Called if vmlaunch or vmresume fails.
14953b50efe3SPeter Feiner  *	@early    - failure due to "VMX controls and host-state area" (26.2)
14963b50efe3SPeter Feiner  *	@vmlaunch - was this a vmlaunch or vmresume
14973b50efe3SPeter Feiner  *	@rflags   - host rflags
14983b50efe3SPeter Feiner  */
14993b50efe3SPeter Feiner static int
15003b50efe3SPeter Feiner entry_failure_handler(struct vmentry_failure *failure)
15013b50efe3SPeter Feiner {
15023b50efe3SPeter Feiner 	if (current->entry_failure_handler)
15033b50efe3SPeter Feiner 		return current->entry_failure_handler(failure);
15043b50efe3SPeter Feiner 	else
15053b50efe3SPeter Feiner 		return VMX_TEST_EXIT;
15069d7eaa29SArthur Chunqi Li }
15079d7eaa29SArthur Chunqi Li 
1508c76ddf06SPeter Feiner /*
1509c76ddf06SPeter Feiner  * Tries to enter the guest. Returns true iff entry succeeded. Otherwise,
1510c76ddf06SPeter Feiner  * populates @failure.
1511c76ddf06SPeter Feiner  */
1512c76ddf06SPeter Feiner static bool vmx_enter_guest(struct vmentry_failure *failure)
15139d7eaa29SArthur Chunqi Li {
1514c76ddf06SPeter Feiner 	failure->early = 0;
15154e809db5SPeter Feiner 
1516*794c67a9SPeter Feiner 	in_guest = 1;
15179d7eaa29SArthur Chunqi Li 	asm volatile (
1518897d8365SPeter Feiner 		"mov %[HOST_RSP], %%rdi\n\t"
1519897d8365SPeter Feiner 		"vmwrite %%rsp, %%rdi\n\t"
15209d7eaa29SArthur Chunqi Li 		LOAD_GPR_C
152144417388SPaolo Bonzini 		"cmpb $0, %[launched]\n\t"
15229d7eaa29SArthur Chunqi Li 		"jne 1f\n\t"
15239d7eaa29SArthur Chunqi Li 		"vmlaunch\n\t"
15249d7eaa29SArthur Chunqi Li 		"jmp 2f\n\t"
15259d7eaa29SArthur Chunqi Li 		"1: "
15269d7eaa29SArthur Chunqi Li 		"vmresume\n\t"
15279d7eaa29SArthur Chunqi Li 		"2: "
1528f37cf4e2SPeter Feiner 		SAVE_GPR_C
1529897d8365SPeter Feiner 		"pushf\n\t"
1530897d8365SPeter Feiner 		"pop %%rdi\n\t"
1531c76ddf06SPeter Feiner 		"mov %%rdi, %[failure_flags]\n\t"
1532c76ddf06SPeter Feiner 		"movl $1, %[failure_flags]\n\t"
1533f37cf4e2SPeter Feiner 		"jmp 3f\n\t"
15349d7eaa29SArthur Chunqi Li 		"vmx_return:\n\t"
15359d7eaa29SArthur Chunqi Li 		SAVE_GPR_C
1536f37cf4e2SPeter Feiner 		"3: \n\t"
1537c76ddf06SPeter Feiner 		: [failure_early]"+m"(failure->early),
1538c76ddf06SPeter Feiner 		  [failure_flags]"=m"(failure->flags)
1539897d8365SPeter Feiner 		: [launched]"m"(launched), [HOST_RSP]"i"(HOST_RSP)
1540897d8365SPeter Feiner 		: "rdi", "memory", "cc"
15419d7eaa29SArthur Chunqi Li 	);
1542*794c67a9SPeter Feiner 	in_guest = 0;
15433b50efe3SPeter Feiner 
1544c76ddf06SPeter Feiner 	failure->vmlaunch = !launched;
1545c76ddf06SPeter Feiner 	failure->instr = launched ? "vmresume" : "vmlaunch";
1546c76ddf06SPeter Feiner 
1547c76ddf06SPeter Feiner 	return !failure->early && !(vmcs_read(EXI_REASON) & VMX_ENTRY_FAILURE);
1548c76ddf06SPeter Feiner }
1549c76ddf06SPeter Feiner 
1550c76ddf06SPeter Feiner static int vmx_run()
1551c76ddf06SPeter Feiner {
1552c76ddf06SPeter Feiner 	while (1) {
1553c76ddf06SPeter Feiner 		u32 ret;
1554c76ddf06SPeter Feiner 		bool entered;
1555c76ddf06SPeter Feiner 		struct vmentry_failure failure;
1556c76ddf06SPeter Feiner 
1557c76ddf06SPeter Feiner 		entered = vmx_enter_guest(&failure);
15583b50efe3SPeter Feiner 
15593b50efe3SPeter Feiner 		if (entered) {
15603b50efe3SPeter Feiner 			/*
15613b50efe3SPeter Feiner 			 * VMCS isn't in "launched" state if there's been any
15623b50efe3SPeter Feiner 			 * entry failure (early or otherwise).
15633b50efe3SPeter Feiner 			 */
15649d7eaa29SArthur Chunqi Li 			launched = 1;
15659d7eaa29SArthur Chunqi Li 			ret = exit_handler();
15663b50efe3SPeter Feiner 		} else {
15673b50efe3SPeter Feiner 			ret = entry_failure_handler(&failure);
15689d7eaa29SArthur Chunqi Li 		}
15693b50efe3SPeter Feiner 
15709d7eaa29SArthur Chunqi Li 		switch (ret) {
15713b50efe3SPeter Feiner 		case VMX_TEST_RESUME:
15723b50efe3SPeter Feiner 			continue;
15739d7eaa29SArthur Chunqi Li 		case VMX_TEST_VMEXIT:
1574*794c67a9SPeter Feiner 			guest_finished = 1;
15759d7eaa29SArthur Chunqi Li 			return 0;
15763b50efe3SPeter Feiner 		case VMX_TEST_EXIT:
15779d7eaa29SArthur Chunqi Li 			break;
15789d7eaa29SArthur Chunqi Li 		default:
15793b50efe3SPeter Feiner 			printf("ERROR : Invalid %s_handler return val %d.\n",
15803b50efe3SPeter Feiner 			       entered ? "exit" : "entry_failure",
15813b50efe3SPeter Feiner 			       ret);
15829d7eaa29SArthur Chunqi Li 			break;
15839d7eaa29SArthur Chunqi Li 		}
15843b50efe3SPeter Feiner 
15853b50efe3SPeter Feiner 		if (entered)
15863b50efe3SPeter Feiner 			print_vmexit_info();
15873b50efe3SPeter Feiner 		else
15883b50efe3SPeter Feiner 			print_vmentry_failure_info(&failure);
15893b50efe3SPeter Feiner 		abort();
15903b50efe3SPeter Feiner 	}
15919d7eaa29SArthur Chunqi Li }
15929d7eaa29SArthur Chunqi Li 
1593*794c67a9SPeter Feiner static void run_teardown_step(struct test_teardown_step *step)
1594*794c67a9SPeter Feiner {
1595*794c67a9SPeter Feiner 	step->func(step->data);
1596*794c67a9SPeter Feiner }
1597*794c67a9SPeter Feiner 
15989d7eaa29SArthur Chunqi Li static int test_run(struct vmx_test *test)
15999d7eaa29SArthur Chunqi Li {
1600*794c67a9SPeter Feiner 	int r;
1601*794c67a9SPeter Feiner 
1602*794c67a9SPeter Feiner 	/* Validate V2 interface. */
1603*794c67a9SPeter Feiner 	if (test->v2) {
1604*794c67a9SPeter Feiner 		int ret = 0;
1605*794c67a9SPeter Feiner 		if (test->init || test->guest_main || test->exit_handler ||
1606*794c67a9SPeter Feiner 		    test->syscall_handler) {
1607*794c67a9SPeter Feiner 			report("V2 test cannot specify V1 callbacks.", 0);
1608*794c67a9SPeter Feiner 			ret = 1;
1609*794c67a9SPeter Feiner 		}
1610*794c67a9SPeter Feiner 		if (ret)
1611*794c67a9SPeter Feiner 			return ret;
1612*794c67a9SPeter Feiner 	}
1613*794c67a9SPeter Feiner 
16149d7eaa29SArthur Chunqi Li 	if (test->name == NULL)
16159d7eaa29SArthur Chunqi Li 		test->name = "(no name)";
16169d7eaa29SArthur Chunqi Li 	if (vmx_on()) {
16179d7eaa29SArthur Chunqi Li 		printf("%s : vmxon failed.\n", __func__);
16189d7eaa29SArthur Chunqi Li 		return 1;
16199d7eaa29SArthur Chunqi Li 	}
1620*794c67a9SPeter Feiner 
16219d7eaa29SArthur Chunqi Li 	init_vmcs(&(test->vmcs));
16229d7eaa29SArthur Chunqi Li 	/* Directly call test->init is ok here, init_vmcs has done
16239d7eaa29SArthur Chunqi Li 	   vmcs init, vmclear and vmptrld*/
1624c592c151SJan Kiszka 	if (test->init && test->init(test->vmcs) != VMX_TEST_START)
1625a0e30e71SPaolo Bonzini 		goto out;
1626*794c67a9SPeter Feiner 	teardown_count = 0;
1627*794c67a9SPeter Feiner 	v2_guest_main = NULL;
16289d7eaa29SArthur Chunqi Li 	test->exits = 0;
16299d7eaa29SArthur Chunqi Li 	current = test;
16309d7eaa29SArthur Chunqi Li 	regs = test->guest_regs;
16319d7eaa29SArthur Chunqi Li 	vmcs_write(GUEST_RFLAGS, regs.rflags | 0x2);
16329d7eaa29SArthur Chunqi Li 	launched = 0;
1633*794c67a9SPeter Feiner 	guest_finished = 0;
16349d7eaa29SArthur Chunqi Li 	printf("\nTest suite: %s\n", test->name);
1635*794c67a9SPeter Feiner 
1636*794c67a9SPeter Feiner 	r = setjmp(abort_target);
1637*794c67a9SPeter Feiner 	if (r) {
1638*794c67a9SPeter Feiner 		assert(!in_guest);
1639*794c67a9SPeter Feiner 		goto out;
1640*794c67a9SPeter Feiner 	}
1641*794c67a9SPeter Feiner 
1642*794c67a9SPeter Feiner 
1643*794c67a9SPeter Feiner 	if (test->v2)
1644*794c67a9SPeter Feiner 		test->v2();
1645*794c67a9SPeter Feiner 	else
16469d7eaa29SArthur Chunqi Li 		vmx_run();
1647*794c67a9SPeter Feiner 
1648*794c67a9SPeter Feiner 	while (teardown_count > 0)
1649*794c67a9SPeter Feiner 		run_teardown_step(&teardown_steps[--teardown_count]);
1650*794c67a9SPeter Feiner 
1651*794c67a9SPeter Feiner 	if (launched && !guest_finished)
1652*794c67a9SPeter Feiner 		report("Guest didn't run to completion.", 0);
1653*794c67a9SPeter Feiner 
1654a0e30e71SPaolo Bonzini out:
16559d7eaa29SArthur Chunqi Li 	if (vmx_off()) {
16569d7eaa29SArthur Chunqi Li 		printf("%s : vmxoff failed.\n", __func__);
16579d7eaa29SArthur Chunqi Li 		return 1;
16589d7eaa29SArthur Chunqi Li 	}
16599d7eaa29SArthur Chunqi Li 	return 0;
16609d7eaa29SArthur Chunqi Li }
16619d7eaa29SArthur Chunqi Li 
1662*794c67a9SPeter Feiner /*
1663*794c67a9SPeter Feiner  * Add a teardown step. Executed after the test's main function returns.
1664*794c67a9SPeter Feiner  * Teardown steps executed in reverse order.
1665*794c67a9SPeter Feiner  */
1666*794c67a9SPeter Feiner void test_add_teardown(test_teardown_func func, void *data)
1667*794c67a9SPeter Feiner {
1668*794c67a9SPeter Feiner 	struct test_teardown_step *step;
1669*794c67a9SPeter Feiner 
1670*794c67a9SPeter Feiner 	TEST_ASSERT_MSG(teardown_count < MAX_TEST_TEARDOWN_STEPS,
1671*794c67a9SPeter Feiner 			"There are already %d teardown steps.",
1672*794c67a9SPeter Feiner 			teardown_count);
1673*794c67a9SPeter Feiner 	step = &teardown_steps[teardown_count++];
1674*794c67a9SPeter Feiner 	step->func = func;
1675*794c67a9SPeter Feiner 	step->data = data;
1676*794c67a9SPeter Feiner }
1677*794c67a9SPeter Feiner 
1678*794c67a9SPeter Feiner /*
1679*794c67a9SPeter Feiner  * Set the target of the first enter_guest call. Can only be called once per
1680*794c67a9SPeter Feiner  * test. Must be called before first enter_guest call.
1681*794c67a9SPeter Feiner  */
1682*794c67a9SPeter Feiner void test_set_guest(test_guest_func func)
1683*794c67a9SPeter Feiner {
1684*794c67a9SPeter Feiner 	assert(current->v2);
1685*794c67a9SPeter Feiner 	TEST_ASSERT_MSG(!v2_guest_main, "Already set guest func.");
1686*794c67a9SPeter Feiner 	v2_guest_main = func;
1687*794c67a9SPeter Feiner }
1688*794c67a9SPeter Feiner 
1689*794c67a9SPeter Feiner /*
1690*794c67a9SPeter Feiner  * Enters the guest (or launches it for the first time). Error to call once the
1691*794c67a9SPeter Feiner  * guest has returned (i.e., run past the end of its guest() function). Also
1692*794c67a9SPeter Feiner  * aborts if guest entry fails.
1693*794c67a9SPeter Feiner  */
1694*794c67a9SPeter Feiner void enter_guest(void)
1695*794c67a9SPeter Feiner {
1696*794c67a9SPeter Feiner 	struct vmentry_failure failure;
1697*794c67a9SPeter Feiner 
1698*794c67a9SPeter Feiner 	TEST_ASSERT_MSG(v2_guest_main,
1699*794c67a9SPeter Feiner 			"Never called test_set_guest_func!");
1700*794c67a9SPeter Feiner 
1701*794c67a9SPeter Feiner 	TEST_ASSERT_MSG(!guest_finished,
1702*794c67a9SPeter Feiner 			"Called enter_guest() after guest returned.");
1703*794c67a9SPeter Feiner 
1704*794c67a9SPeter Feiner 	if (!vmx_enter_guest(&failure)) {
1705*794c67a9SPeter Feiner 		print_vmentry_failure_info(&failure);
1706*794c67a9SPeter Feiner 		abort();
1707*794c67a9SPeter Feiner 	}
1708*794c67a9SPeter Feiner 
1709*794c67a9SPeter Feiner 	launched = 1;
1710*794c67a9SPeter Feiner 
1711*794c67a9SPeter Feiner 	if (is_hypercall()) {
1712*794c67a9SPeter Feiner 		int ret;
1713*794c67a9SPeter Feiner 
1714*794c67a9SPeter Feiner 		ret = handle_hypercall();
1715*794c67a9SPeter Feiner 		switch (ret) {
1716*794c67a9SPeter Feiner 		case VMX_TEST_VMEXIT:
1717*794c67a9SPeter Feiner 			guest_finished = 1;
1718*794c67a9SPeter Feiner 			break;
1719*794c67a9SPeter Feiner 		case VMX_TEST_VMABORT:
1720*794c67a9SPeter Feiner 			continue_abort();
1721*794c67a9SPeter Feiner 			break;
1722*794c67a9SPeter Feiner 		case VMX_TEST_VMSKIP:
1723*794c67a9SPeter Feiner 			continue_skip();
1724*794c67a9SPeter Feiner 			break;
1725*794c67a9SPeter Feiner 		default:
1726*794c67a9SPeter Feiner 			printf("ERROR : Invalid handle_hypercall return %d.\n",
1727*794c67a9SPeter Feiner 			       ret);
1728*794c67a9SPeter Feiner 			abort();
1729*794c67a9SPeter Feiner 		}
1730*794c67a9SPeter Feiner 	}
1731*794c67a9SPeter Feiner }
1732*794c67a9SPeter Feiner 
17333ee34093SArthur Chunqi Li extern struct vmx_test vmx_tests[];
17349d7eaa29SArthur Chunqi Li 
1735875b97b3SPeter Feiner static bool
1736875b97b3SPeter Feiner test_wanted(const char *name, const char *filters[], int filter_count)
17378029cac7SPeter Feiner {
1738875b97b3SPeter Feiner 	int i;
1739875b97b3SPeter Feiner 	bool positive = false;
1740875b97b3SPeter Feiner 	bool match = false;
1741875b97b3SPeter Feiner 	char clean_name[strlen(name) + 1];
1742875b97b3SPeter Feiner 	char *c;
17438029cac7SPeter Feiner 	const char *n;
17448029cac7SPeter Feiner 
1745875b97b3SPeter Feiner 	/* Replace spaces with underscores. */
1746875b97b3SPeter Feiner 	n = name;
1747875b97b3SPeter Feiner 	c = &clean_name[0];
1748875b97b3SPeter Feiner 	do *c++ = (*n == ' ') ? '_' : *n;
1749875b97b3SPeter Feiner 	while (*n++);
1750875b97b3SPeter Feiner 
1751875b97b3SPeter Feiner 	for (i = 0; i < filter_count; i++) {
1752875b97b3SPeter Feiner 		const char *filter = filters[i];
1753875b97b3SPeter Feiner 
1754875b97b3SPeter Feiner 		if (filter[0] == '-') {
1755875b97b3SPeter Feiner 			if (simple_glob(clean_name, filter + 1))
1756875b97b3SPeter Feiner 				return false;
1757875b97b3SPeter Feiner 		} else {
1758875b97b3SPeter Feiner 			positive = true;
1759875b97b3SPeter Feiner 			match |= simple_glob(clean_name, filter);
1760875b97b3SPeter Feiner 		}
1761875b97b3SPeter Feiner 	}
1762875b97b3SPeter Feiner 
1763875b97b3SPeter Feiner 	if (!positive || match) {
1764875b97b3SPeter Feiner 		matched++;
1765875b97b3SPeter Feiner 		return true;
1766875b97b3SPeter Feiner 	} else {
17678029cac7SPeter Feiner 		return false;
17688029cac7SPeter Feiner 	}
17698029cac7SPeter Feiner }
17708029cac7SPeter Feiner 
1771875b97b3SPeter Feiner int main(int argc, const char *argv[])
17729d7eaa29SArthur Chunqi Li {
17733ee34093SArthur Chunqi Li 	int i = 0;
17749d7eaa29SArthur Chunqi Li 
17759d7eaa29SArthur Chunqi Li 	setup_vm();
17769d7eaa29SArthur Chunqi Li 	setup_idt();
17773ee34093SArthur Chunqi Li 	hypercall_field = 0;
17789d7eaa29SArthur Chunqi Li 
1779c04259ffSDavid Matlack 	argv++;
1780c04259ffSDavid Matlack 	argc--;
1781c04259ffSDavid Matlack 
17823b127446SJan Kiszka 	if (!(cpuid(1).c & (1 << 5))) {
17833b127446SJan Kiszka 		printf("WARNING: vmx not supported, add '-cpu host'\n");
17849d7eaa29SArthur Chunqi Li 		goto exit;
17859d7eaa29SArthur Chunqi Li 	}
17869d7eaa29SArthur Chunqi Li 	init_vmx();
1787c04259ffSDavid Matlack 	if (test_wanted("test_vmx_feature_control", argv, argc)) {
1788c04259ffSDavid Matlack 		/* Sets MSR_IA32_FEATURE_CONTROL to 0x5 */
17893b127446SJan Kiszka 		if (test_vmx_feature_control() != 0)
17903b127446SJan Kiszka 			goto exit;
1791c04259ffSDavid Matlack 	} else {
1792c04259ffSDavid Matlack 		if ((rdmsr(MSR_IA32_FEATURE_CONTROL) & 0x5) != 0x5)
1793c04259ffSDavid Matlack 			wrmsr(MSR_IA32_FEATURE_CONTROL, 0x5);
1794c04259ffSDavid Matlack 	}
1795c04259ffSDavid Matlack 
1796c04259ffSDavid Matlack 	if (test_wanted("test_vmxon", argv, argc)) {
1797c04259ffSDavid Matlack 		/* Enables VMX */
17989d7eaa29SArthur Chunqi Li 		if (test_vmxon() != 0)
17999d7eaa29SArthur Chunqi Li 			goto exit;
1800c04259ffSDavid Matlack 	} else {
1801c04259ffSDavid Matlack 		if (vmx_on()) {
1802c04259ffSDavid Matlack 			report("vmxon", 0);
1803c04259ffSDavid Matlack 			goto exit;
1804c04259ffSDavid Matlack 		}
1805c04259ffSDavid Matlack 	}
1806c04259ffSDavid Matlack 
1807c04259ffSDavid Matlack 	if (test_wanted("test_vmptrld", argv, argc))
18089d7eaa29SArthur Chunqi Li 		test_vmptrld();
1809c04259ffSDavid Matlack 	if (test_wanted("test_vmclear", argv, argc))
18109d7eaa29SArthur Chunqi Li 		test_vmclear();
1811c04259ffSDavid Matlack 	if (test_wanted("test_vmptrst", argv, argc))
18129d7eaa29SArthur Chunqi Li 		test_vmptrst();
1813ecd5b431SDavid Matlack 	if (test_wanted("test_vmwrite_vmread", argv, argc))
1814ecd5b431SDavid Matlack 		test_vmwrite_vmread();
18156b72cf76SDavid Matlack 	if (test_wanted("test_vmcs_lifecycle", argv, argc))
18166b72cf76SDavid Matlack 		test_vmcs_lifecycle();
1817c04259ffSDavid Matlack 	if (test_wanted("test_vmx_caps", argv, argc))
181869c8d31cSJan Kiszka 		test_vmx_caps();
18199d7eaa29SArthur Chunqi Li 
182034439b1aSPeter Feiner 	/* Balance vmxon from test_vmxon. */
182134439b1aSPeter Feiner 	vmx_off();
182234439b1aSPeter Feiner 
182334439b1aSPeter Feiner 	for (; vmx_tests[i].name != NULL; i++) {
1824c04259ffSDavid Matlack 		if (!test_wanted(vmx_tests[i].name, argv, argc))
18258029cac7SPeter Feiner 			continue;
18269d7eaa29SArthur Chunqi Li 		if (test_run(&vmx_tests[i]))
18279d7eaa29SArthur Chunqi Li 			goto exit;
18288029cac7SPeter Feiner 	}
18298029cac7SPeter Feiner 
18308029cac7SPeter Feiner 	if (!matched)
18318029cac7SPeter Feiner 		report("command line didn't match any tests!", matched);
18329d7eaa29SArthur Chunqi Li 
18339d7eaa29SArthur Chunqi Li exit:
1834f3cdd159SJan Kiszka 	return report_summary();
18359d7eaa29SArthur Chunqi Li }
1836