xref: /kvm-unit-tests/x86/vmx_tests.c (revision 55601383cca6221889c5641e4bf6cfdbb855b213)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "fwcfg.h"
11 #include "isr.h"
12 #include "apic.h"
13 #include "types.h"
14 
15 u64 ia32_pat;
16 u64 ia32_efer;
17 void *io_bitmap_a, *io_bitmap_b;
18 u16 ioport;
19 
20 unsigned long *pml4;
21 u64 eptp;
22 void *data_page1, *data_page2;
23 
/* Execute the VMCALL instruction to force a VM exit into the L1 exit handler. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
28 
/*
 * Guest entry point for the basic test: does nothing and returns.
 * Any VM exit it causes is reported as a failure by basic_exit_handler.
 */
void basic_guest_main()
{
}
32 
/*
 * Exit handler for the basic test: every VM exit is unexpected here,
 * so report failure, dump the exit information, and abort the test.
 */
int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}
39 
/*
 * Guest side of the vmlaunch/vmresume test.  Simply reaching this code
 * proves vmlaunch succeeded.  The asm loads 0xABCD into RAX and issues
 * VMCALL; vmenter_exit_handler rewrites guest RAX to 0xFFFF and resumes,
 * so seeing RAX == 0xFFFF with an unchanged RSP afterwards proves
 * vmresume restored guest state correctly.
 */
void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	asm volatile(
		"mov %%rsp, %0\n\t"	/* RSP before the VMCALL exit */
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"	/* RAX as rewritten by the handler */
		"mov %%rsp, %2\n\t"	/* RSP after vmresume */
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}
57 
/*
 * Host side of the vmlaunch/vmresume test.  On the expected VMCALL exit,
 * verify the guest's RAX (0xABCD as set by vmenter_main), replace it with
 * 0xFFFF, skip the VMCALL instruction, and resume.  Any other exit reason
 * fails the test.
 */
int vmenter_exit_handler()
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		regs.rax = 0xFFFF;
		/* VMCALL is 3 bytes long; step the guest past it. */
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
80 
81 u32 preempt_scale;
82 volatile unsigned long long tsc_val;
83 volatile u32 preempt_val;
84 u64 saved_rip;
85 
86 int preemption_timer_init()
87 {
88 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
89 		printf("\tPreemption timer is not supported\n");
90 		return VMX_TEST_EXIT;
91 	}
92 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
93 	preempt_val = 10000000;
94 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
95 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
96 
97 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
98 		printf("\tSave preemption value is not supported\n");
99 
100 	return VMX_TEST_START;
101 }
102 
/*
 * Guest side of the preemption-timer test, driven by test stages:
 *  - stages 0/1 (only if EXI_SAVE_PREEMPT is supported): vmcalls that let
 *    the host check the timer value is kept, then saved, across exits;
 *  - stage 1: busy-wait for a timer-fired exit; if the timer takes more
 *    than 10x its programmed deadline, signal failure via stage 2 + vmcall;
 *  - stage 3 (set by the host): hlt so the timer must fire while halted;
 *  - stage 5: final vmcall so the host can test a zero-value timer.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		vmx_set_test_stage(0);
		vmcall();
		if (vmx_get_test_stage() == 1)
			vmcall();
	}
	vmx_set_test_stage(1);
	/* Spin until the preemption-timer exit moves the stage to 3. */
	while (vmx_get_test_stage() == 1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			/* Timer never fired: report the timeout via vmcall. */
			vmx_set_test_stage(2);
			vmcall();
		}
	}
	tsc_val = rdtsc();
	asm volatile ("hlt");
	vmcall();
	vmx_set_test_stage(5);
	vmcall();
}
126 
/*
 * Host side of the preemption-timer test.
 *
 * VMX_PREEMPT exits: stages 1/2 check the timer did not fire before its
 * deadline while the guest was running; stage 3 checks the same while the
 * guest was halted; stage 4 checks a zero-valued timer exited before the
 * guest advanced past the recorded RIP.
 *
 * VMX_VMCALL exits: stage 0 checks the timer value was preserved across
 * an exit (then enables EXI_SAVE_PREEMPT); stage 1 checks the saved value
 * counted down; stages 2/3 reaching here mean the expected timer exit
 * never fired; stages 3/4 re-arm the timer with value 0 and record the
 * RIP expected at the next exit; stage 5 means the zero-value timer
 * failed to fire at all.
 */
int preemption_timer_exit_handler()
{
	bool guest_halted;
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			report("busy-wait for preemption timer",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			guest_halted =
				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
			report("preemption timer during hlt",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val && guest_halted);
			vmx_set_test_stage(4);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
			/* Leave hlt: put the guest back in the active state. */
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
			return VMX_TEST_RESUME;
		case 4:
			report("preemption timer with 0 value",
			       saved_rip == guest_rip);
			break;
		default:
			printf("Invalid stage.\n");
			print_vmexit_info();
			break;
		}
		break;
	case VMX_VMCALL:
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		switch (vmx_get_test_stage()) {
		case 0:
			report("Keep preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
			vmx_set_test_stage(1);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			/* Enable saving the timer value, capped to what
			 * the hardware allows. */
			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
				EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
			vmcs_write(EXI_CONTROLS, ctrl_exit);
			return VMX_TEST_RESUME;
		case 1:
			report("Save preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
			return VMX_TEST_RESUME;
		case 2:
			report("busy-wait for preemption timer", 0);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			report("preemption timer during hlt", 0);
			vmx_set_test_stage(4);
			/* fall through */
		case 4:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
			vmcs_write(PREEMPT_TIMER_VALUE, 0);
			/* A zero-value timer must exit at exactly this RIP. */
			saved_rip = guest_rip + insn_len;
			return VMX_TEST_RESUME;
		case 5:
			report("preemption timer with 0 value (vmcall stage 5)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	/* Test is over: make sure the timer is disabled before exiting. */
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
219 
220 void msr_bmp_init()
221 {
222 	void *msr_bitmap;
223 	u32 ctrl_cpu0;
224 
225 	msr_bitmap = alloc_page();
226 	memset(msr_bitmap, 0x0, PAGE_SIZE);
227 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
228 	ctrl_cpu0 |= CPU_MSR_BITMAP;
229 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
230 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
231 }
232 
233 static int test_ctrl_pat_init()
234 {
235 	u64 ctrl_ent;
236 	u64 ctrl_exi;
237 
238 	msr_bmp_init();
239 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) &&
240 	    !(ctrl_exit_rev.clr & EXI_LOAD_PAT) &&
241 	    !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
242 		printf("\tSave/load PAT is not supported\n");
243 		return 1;
244 	}
245 
246 	ctrl_ent = vmcs_read(ENT_CONTROLS);
247 	ctrl_exi = vmcs_read(EXI_CONTROLS);
248 	ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
249 	ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT);
250 	vmcs_write(ENT_CONTROLS, ctrl_ent);
251 	vmcs_write(EXI_CONTROLS, ctrl_exi);
252 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
253 	vmcs_write(GUEST_PAT, 0x0);
254 	vmcs_write(HOST_PAT, ia32_pat);
255 	return VMX_TEST_START;
256 }
257 
258 static void test_ctrl_pat_main()
259 {
260 	u64 guest_ia32_pat;
261 
262 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
263 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
264 		printf("\tENT_LOAD_PAT is not supported.\n");
265 	else {
266 		if (guest_ia32_pat != 0) {
267 			report("Entry load PAT", 0);
268 			return;
269 		}
270 	}
271 	wrmsr(MSR_IA32_CR_PAT, 0x6);
272 	vmcall();
273 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
274 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
275 		report("Entry load PAT", guest_ia32_pat == ia32_pat);
276 }
277 
278 static int test_ctrl_pat_exit_handler()
279 {
280 	u64 guest_rip;
281 	ulong reason;
282 	u64 guest_pat;
283 
284 	guest_rip = vmcs_read(GUEST_RIP);
285 	reason = vmcs_read(EXI_REASON) & 0xff;
286 	switch (reason) {
287 	case VMX_VMCALL:
288 		guest_pat = vmcs_read(GUEST_PAT);
289 		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
290 			printf("\tEXI_SAVE_PAT is not supported\n");
291 			vmcs_write(GUEST_PAT, 0x6);
292 		} else {
293 			report("Exit save PAT", guest_pat == 0x6);
294 		}
295 		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
296 			printf("\tEXI_LOAD_PAT is not supported\n");
297 		else
298 			report("Exit load PAT", rdmsr(MSR_IA32_CR_PAT) == ia32_pat);
299 		vmcs_write(GUEST_PAT, ia32_pat);
300 		vmcs_write(GUEST_RIP, guest_rip + 3);
301 		return VMX_TEST_RESUME;
302 	default:
303 		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
304 		break;
305 	}
306 	return VMX_TEST_VMEXIT;
307 }
308 
309 static int test_ctrl_efer_init()
310 {
311 	u64 ctrl_ent;
312 	u64 ctrl_exi;
313 
314 	msr_bmp_init();
315 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
316 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
317 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
318 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
319 	ia32_efer = rdmsr(MSR_EFER);
320 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
321 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
322 	return VMX_TEST_START;
323 }
324 
325 static void test_ctrl_efer_main()
326 {
327 	u64 guest_ia32_efer;
328 
329 	guest_ia32_efer = rdmsr(MSR_EFER);
330 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
331 		printf("\tENT_LOAD_EFER is not supported.\n");
332 	else {
333 		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
334 			report("Entry load EFER", 0);
335 			return;
336 		}
337 	}
338 	wrmsr(MSR_EFER, ia32_efer);
339 	vmcall();
340 	guest_ia32_efer = rdmsr(MSR_EFER);
341 	if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
342 		report("Entry load EFER", guest_ia32_efer == ia32_efer);
343 }
344 
345 static int test_ctrl_efer_exit_handler()
346 {
347 	u64 guest_rip;
348 	ulong reason;
349 	u64 guest_efer;
350 
351 	guest_rip = vmcs_read(GUEST_RIP);
352 	reason = vmcs_read(EXI_REASON) & 0xff;
353 	switch (reason) {
354 	case VMX_VMCALL:
355 		guest_efer = vmcs_read(GUEST_EFER);
356 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
357 			printf("\tEXI_SAVE_EFER is not supported\n");
358 			vmcs_write(GUEST_EFER, ia32_efer);
359 		} else {
360 			report("Exit save EFER", guest_efer == ia32_efer);
361 		}
362 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
363 			printf("\tEXI_LOAD_EFER is not supported\n");
364 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
365 		} else {
366 			report("Exit load EFER", rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX));
367 		}
368 		vmcs_write(GUEST_PAT, ia32_efer);
369 		vmcs_write(GUEST_RIP, guest_rip + 3);
370 		return VMX_TEST_RESUME;
371 	default:
372 		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
373 		break;
374 	}
375 	return VMX_TEST_VMEXIT;
376 }
377 
378 u32 guest_cr0, guest_cr4;
379 
380 static void cr_shadowing_main()
381 {
382 	u32 cr0, cr4, tmp;
383 
384 	// Test read through
385 	vmx_set_test_stage(0);
386 	guest_cr0 = read_cr0();
387 	if (vmx_get_test_stage() == 1)
388 		report("Read through CR0", 0);
389 	else
390 		vmcall();
391 	vmx_set_test_stage(1);
392 	guest_cr4 = read_cr4();
393 	if (vmx_get_test_stage() == 2)
394 		report("Read through CR4", 0);
395 	else
396 		vmcall();
397 	// Test write through
398 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
399 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
400 	vmx_set_test_stage(2);
401 	write_cr0(guest_cr0);
402 	if (vmx_get_test_stage() == 3)
403 		report("Write throuth CR0", 0);
404 	else
405 		vmcall();
406 	vmx_set_test_stage(3);
407 	write_cr4(guest_cr4);
408 	if (vmx_get_test_stage() == 4)
409 		report("Write through CR4", 0);
410 	else
411 		vmcall();
412 	// Test read shadow
413 	vmx_set_test_stage(4);
414 	vmcall();
415 	cr0 = read_cr0();
416 	if (vmx_get_test_stage() != 5)
417 		report("Read shadowing CR0", cr0 == guest_cr0);
418 	vmx_set_test_stage(5);
419 	cr4 = read_cr4();
420 	if (vmx_get_test_stage() != 6)
421 		report("Read shadowing CR4", cr4 == guest_cr4);
422 	// Test write shadow (same value with shadow)
423 	vmx_set_test_stage(6);
424 	write_cr0(guest_cr0);
425 	if (vmx_get_test_stage() == 7)
426 		report("Write shadowing CR0 (same value with shadow)", 0);
427 	else
428 		vmcall();
429 	vmx_set_test_stage(7);
430 	write_cr4(guest_cr4);
431 	if (vmx_get_test_stage() == 8)
432 		report("Write shadowing CR4 (same value with shadow)", 0);
433 	else
434 		vmcall();
435 	// Test write shadow (different value)
436 	vmx_set_test_stage(8);
437 	tmp = guest_cr0 ^ X86_CR0_TS;
438 	asm volatile("mov %0, %%rsi\n\t"
439 		"mov %%rsi, %%cr0\n\t"
440 		::"m"(tmp)
441 		:"rsi", "memory", "cc");
442 	report("Write shadowing different X86_CR0_TS", vmx_get_test_stage() == 9);
443 	vmx_set_test_stage(9);
444 	tmp = guest_cr0 ^ X86_CR0_MP;
445 	asm volatile("mov %0, %%rsi\n\t"
446 		"mov %%rsi, %%cr0\n\t"
447 		::"m"(tmp)
448 		:"rsi", "memory", "cc");
449 	report("Write shadowing different X86_CR0_MP", vmx_get_test_stage() == 10);
450 	vmx_set_test_stage(10);
451 	tmp = guest_cr4 ^ X86_CR4_TSD;
452 	asm volatile("mov %0, %%rsi\n\t"
453 		"mov %%rsi, %%cr4\n\t"
454 		::"m"(tmp)
455 		:"rsi", "memory", "cc");
456 	report("Write shadowing different X86_CR4_TSD", vmx_get_test_stage() == 11);
457 	vmx_set_test_stage(11);
458 	tmp = guest_cr4 ^ X86_CR4_DE;
459 	asm volatile("mov %0, %%rsi\n\t"
460 		"mov %%rsi, %%cr4\n\t"
461 		::"m"(tmp)
462 		:"rsi", "memory", "cc");
463 	report("Write shadowing different X86_CR4_DE", vmx_get_test_stage() == 12);
464 }
465 
/*
 * Host side of the CR0/CR4 shadowing test.
 *
 * VMCALL exits: stages 0-3 compare the guest-observed CR values against
 * GUEST_CR0/GUEST_CR4 for the read/write-through cases; stage 4 installs
 * guest/host masks and read shadows for the TS/MP (CR0) and TSD/DE (CR4)
 * bits; stages 6/7 verify the mask-matching writes did not reach the
 * real CRs.
 *
 * VMX_CR exits are expected only for stages 8-11 (writes that differ
 * from the shadow); the exit qualification identifies which register
 * was written (0x600: mov to CR0, 0x604: mov to CR4).  A VMX_CR exit
 * during stages 4-7 means shadowing failed and is reported as such.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("Read through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 1:
			report("Read through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 2:
			report("Write through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 3:
			report("Write through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 4:
			/* Install masks + shadows for the bits under test. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			report("Write shadowing CR0 (same value)",
					guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)));
			break;
		case 7:
			report("Write shadowing CR4 (same value)",
					guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)));
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (vmx_get_test_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			vmx_inc_test_stage();
			break;
		case 5:
			report("Read shadowing CR4", 0);
			vmx_inc_test_stage();
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				vmx_inc_test_stage();
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
562 
563 static int iobmp_init()
564 {
565 	u32 ctrl_cpu0;
566 
567 	io_bitmap_a = alloc_page();
568 	io_bitmap_b = alloc_page();
569 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
570 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
571 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
572 	ctrl_cpu0 |= CPU_IO_BITMAP;
573 	ctrl_cpu0 &= (~CPU_IO);
574 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
575 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
576 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
577 	return VMX_TEST_START;
578 }
579 
/*
 * Guest side of the I/O-bitmap test.  Each stage programs the bitmaps
 * (or, via vmcall, asks the host to toggle CPU_IO/CPU_IO_BITMAP) and
 * performs port I/O; iobmp_exit_handler advances the stage when the
 * expected exit — or absence of one — is observed.  Bitmap A covers
 * ports 0x0000-0x7FFF, bitmap B ports 0x8000-0xFFFF.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	vmx_set_test_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	report("I/O bitmap - I/O pass", vmx_get_test_stage() == 0);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;	/* trap ports 0-7 */
	vmx_set_test_stage(2);
	inb(0x0);
	report("I/O bitmap - trap in", vmx_get_test_stage() == 3);
	vmx_set_test_stage(3);
	outw(0x0, 0x0);
	report("I/O bitmap - trap out", vmx_get_test_stage() == 4);
	vmx_set_test_stage(4);
	inl(0x0);
	report("I/O bitmap - I/O width, long", vmx_get_test_stage() == 5);
	// test low/high IO port
	vmx_set_test_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	report("I/O bitmap - I/O port, low part", vmx_get_test_stage() == 6);
	vmx_set_test_stage(6);
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));	/* port 0x9000 */
	inb(0x9000);
	report("I/O bitmap - I/O port, high part", vmx_get_test_stage() == 7);
	// test partial pass
	vmx_set_test_stage(7);
	inl(0x4FFF);	/* 4-byte access straddling into trapped 0x5000 */
	report("I/O bitmap - partial pass", vmx_get_test_stage() == 8);
	// test overrun
	vmx_set_test_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);	/* access wrapping past the last port */
	report("I/O bitmap - overrun", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting",
	       vmx_get_test_stage() == 9);
	vmx_set_test_stage(10);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting",
	       vmx_get_test_stage() == 11);
}
628 
/*
 * Host side of the I/O-bitmap test.  VMX_IO exits validate the exit
 * qualification (access size, direction, port number) expected for the
 * current stage and advance the stage on a match.  VMX_VMCALL exits for
 * stages 9/10 reconfigure the controls: stage 9 turns on both CPU_IO
 * and CPU_IO_BITMAP (bitmap should win, i.e. unconditional exiting is
 * ignored), stage 10 switches to CPU_IO alone.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (vmx_get_test_stage()) {
		case 0:
		case 1:
			vmx_inc_test_stage();
			break;
		case 2:
			report("I/O bitmap - I/O width, byte",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_BYTE);
			report("I/O bitmap - I/O direction, in", exit_qual & VMX_IO_IN);
			vmx_inc_test_stage();
			break;
		case 3:
			report("I/O bitmap - I/O width, word",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_WORD);
			report("I/O bitmap - I/O direction, out",
					!(exit_qual & VMX_IO_IN));
			vmx_inc_test_stage();
			break;
		case 4:
			report("I/O bitmap - I/O width, long",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_LONG);
			vmx_inc_test_stage();
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				vmx_inc_test_stage();
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				vmx_inc_test_stage();
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				vmx_inc_test_stage();
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				vmx_inc_test_stage();
			break;
		case 9:
		case 10:
			/* Unexpected unconditional exit: turn CPU_IO back off. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 9:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%lx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
723 
724 #define INSN_CPU0		0
725 #define INSN_CPU1		1
726 #define INSN_ALWAYS_TRAP	2
727 
728 #define FIELD_EXIT_QUAL		(1 << 0)
729 #define FIELD_INSN_INFO		(1 << 1)
730 
/*
 * Stub routines for the instruction-intercept test, one per instruction
 * in insn_table[]: each executes its instruction and returns, so the
 * test can trigger (or not trigger) the corresponding VM exit.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: xor %ecx, %ecx; rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t"
	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
#ifdef __x86_64__
	"insn_cr8_load: mov %rax,%cr8;ret\n\t"
	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
#endif
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
749 extern void insn_hlt();
750 extern void insn_invlpg();
751 extern void insn_mwait();
752 extern void insn_rdpmc();
753 extern void insn_rdtsc();
754 extern void insn_cr3_load();
755 extern void insn_cr3_store();
756 #ifdef __x86_64__
757 extern void insn_cr8_load();
758 extern void insn_cr8_store();
759 #endif
760 extern void insn_monitor();
761 extern void insn_pause();
762 extern void insn_wbinvd();
763 extern void insn_cpuid();
764 extern void insn_invd();
765 
766 u32 cur_insn;
767 u64 cr3;
768 
/* One test case for the instruction-intercept test (see insn_table[]). */
struct insn_table {
	const char *name;	/* instruction name used in report messages */
	u32 flag;		/* CPU_EXEC_CTRL bit that enables the intercept */
	void (*insn_func)();	/* stub that executes the instruction */
	u32 type;		/* INSN_CPU0 / INSN_CPU1 / INSN_ALWAYS_TRAP */
	u32 reason;		/* expected VM-exit reason */
	ulong exit_qual;	/* expected exit qualification */
	u32 insn_info;		/* expected VM-exit instruction information */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which field need to be tested, reason is always tested
	u32 test_field;
};
781 
/*
 * Add more test cases of instruction intercept here. Each element in
 * this table is:
 *	name/control flag/insn function/type/exit reason/exit qualification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in exit handler. If set to 0, only "reason" is checked.
 */
/* NULL-name-terminated test-case table; exit reason numbers are the
 * basic-exit-reason encodings (e.g. 12 = HLT, 10 = CPUID). */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
		FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
		FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
		FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
		FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
818 
819 static int insn_intercept_init()
820 {
821 	u32 ctrl_cpu;
822 
823 	ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY;
824 	ctrl_cpu &= ctrl_cpu_rev[0].clr;
825 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu);
826 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set);
827 	cr3 = read_cr3();
828 	return VMX_TEST_START;
829 }
830 
/*
 * Guest side of the instruction-intercept test.  For table entry n:
 * at stage 2n, execute the instruction with the intercept disabled
 * (when the control is optional) and check no exit happened; vmcall to
 * have the host enable the intercept; executing the instruction must
 * then bump the stage to 2n+1 (checked via report); set stage 2n+1 and
 * vmcall so the host disables the intercept again.
 */
static void insn_intercept_main()
{
	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
		vmx_set_test_stage(cur_insn * 2);
		/* Control bit can never be set: intercept untestable. */
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
			printf("\tCPU_CTRL%d.CPU_%s is not supported.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);
			continue;
		}

		/* Control bit can be cleared: run once without intercept. */
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) {
			/* skip hlt, it stalls the guest and is tested below */
			if (insn_table[cur_insn].insn_func != insn_hlt)
				insn_table[cur_insn].insn_func();
			report("execute %s", vmx_get_test_stage() == cur_insn * 2,
					insn_table[cur_insn].name);
		} else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP)
			printf("\tCPU_CTRL%d.CPU_%s always traps.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);

		vmcall();	/* host enables the intercept */

		insn_table[cur_insn].insn_func();
		report("intercept %s", vmx_get_test_stage() == cur_insn * 2 + 1,
				insn_table[cur_insn].name);

		vmx_set_test_stage(cur_insn * 2 + 1);
		vmcall();	/* host disables the intercept */
	}
}
869 
/*
 * Host side of the instruction-intercept test.  A VMCALL toggles the
 * current instruction's execution-control bit: set on even stages,
 * cleared on odd (hardware-required bits are always kept set).  Any
 * other exit is matched against the table's expected reason and, when
 * requested by test_field, the exit qualification and instruction
 * info; on a full match the stage advances to 2n+1.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);

	if (reason == VMX_VMCALL) {
		u32 val = 0;

		if (insn_table[cur_insn].type == INSN_CPU0)
			val = vmcs_read(CPU_EXEC_CTRL0);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			val = vmcs_read(CPU_EXEC_CTRL1);

		/* Odd stage: test done, clear the intercept; even: set it. */
		if (vmx_get_test_stage() & 1)
			val &= ~insn_table[cur_insn].flag;
		else
			val |= insn_table[cur_insn].flag;

		if (insn_table[cur_insn].type == INSN_CPU0)
			vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set);
	} else {
		pass = (cur_insn * 2 == vmx_get_test_stage()) &&
			insn_table[cur_insn].reason == reason;
		if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL &&
		    insn_table[cur_insn].exit_qual != exit_qual)
			pass = false;
		if (insn_table[cur_insn].test_field & FIELD_INSN_INFO &&
		    insn_table[cur_insn].insn_info != insn_info)
			pass = false;
		if (pass)
			vmx_inc_test_stage();
	}
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
917 
918 
919 static int setup_ept()
920 {
921 	int support_2m;
922 	unsigned long end_of_memory;
923 
924 	if (!(ept_vpid.val & EPT_CAP_UC) &&
925 			!(ept_vpid.val & EPT_CAP_WB)) {
926 		printf("\tEPT paging-structure memory type "
927 				"UC&WB are not supported\n");
928 		return 1;
929 	}
930 	if (ept_vpid.val & EPT_CAP_UC)
931 		eptp = EPT_MEM_TYPE_UC;
932 	else
933 		eptp = EPT_MEM_TYPE_WB;
934 	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
935 		printf("\tPWL4 is not supported\n");
936 		return 1;
937 	}
938 	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
939 	pml4 = alloc_page();
940 	memset(pml4, 0, PAGE_SIZE);
941 	eptp |= virt_to_phys(pml4);
942 	vmcs_write(EPTP, eptp);
943 	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
944 	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
945 	if (end_of_memory < (1ul << 32))
946 		end_of_memory = (1ul << 32);
947 	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
948 			EPT_WA | EPT_RA | EPT_EA);
949 	return 0;
950 }
951 
952 static int apic_version;
953 
954 static int ept_init()
955 {
956 	u32 ctrl_cpu[2];
957 
958 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
959 	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
960 		printf("\tEPT is not supported");
961 		return VMX_TEST_EXIT;
962 	}
963 
964 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
965 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
966 	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
967 		& ctrl_cpu_rev[0].clr;
968 	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
969 		& ctrl_cpu_rev[1].clr;
970 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
971 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
972 	if (setup_ept())
973 		return VMX_TEST_EXIT;
974 	data_page1 = alloc_page();
975 	data_page2 = alloc_page();
976 	memset(data_page1, 0x0, PAGE_SIZE);
977 	memset(data_page2, 0x0, PAGE_SIZE);
978 	*((u32 *)data_page1) = MAGIC_VAL_1;
979 	*((u32 *)data_page2) = MAGIC_VAL_2;
980 	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
981 			EPT_RA | EPT_WA | EPT_EA);
982 
983 	apic_version = *((u32 *)0xfee00030UL);
984 	return VMX_TEST_START;
985 }
986 
/*
 * Guest side of the EPT test, stage by stage: basic read/write through
 * the data_page1 -> data_page2 aliased mapping, two misconfiguration
 * cases (write-only entry, reserved memory type), a permission
 * violation, a paging-structure violation, an MMIO read through EPT,
 * and an unsupported-INVEPT check.
 */
static void ept_main()
{
	vmx_set_test_stage(0);
	/* data_page1 is aliased onto data_page2's frame, so both reads
	 * must observe MAGIC_VAL_1. */
	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
			*((u32 *)data_page1) != MAGIC_VAL_1)
		report("EPT basic framework - read", 0);
	else {
		*((u32 *)data_page2) = MAGIC_VAL_3;
		vmcall();
		if (vmx_get_test_stage() == 1) {
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2)
				report("EPT basic framework", 1);
			else
				/* NOTE(review): this branch also reports pass
				 * (remap case) — confirm that is intended. */
				report("EPT basic framework - remap", 1);
		}
	}
	// Test EPT Misconfigurations
	vmx_set_test_stage(1);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	if (vmx_get_test_stage() != 2) {
		report("EPT misconfigurations", 0);
		goto t1;
	}
	vmx_set_test_stage(2);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT misconfigurations", vmx_get_test_stage() == 3);
t1:
	// Test EPT violation
	vmx_set_test_stage(3);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT violation - page permission", vmx_get_test_stage() == 4);
	// Violation caused by EPT paging structure
	vmx_set_test_stage(4);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_2;
	report("EPT violation - paging structure", vmx_get_test_stage() == 5);

	// Test EPT access to L1 MMIO
	vmx_set_test_stage(6);
	report("EPT - MMIO access", *((u32 *)0xfee00030UL) == apic_version);

	// Test invalid operand for INVEPT
	vmcall();
	report("EPT - unsupported INVEPT", vmx_get_test_stage() == 7);
}
1036 
1037 bool invept_test(int type, u64 eptp)
1038 {
1039 	bool ret, supported;
1040 
1041 	supported = ept_vpid.val & (EPT_CAP_INVEPT_SINGLE >> INVEPT_SINGLE << type);
1042 	ret = invept(type, eptp);
1043 
1044 	if (ret == !supported)
1045 		return false;
1046 
1047 	if (!supported)
1048 		printf("WARNING: unsupported invept passed!\n");
1049 	else
1050 		printf("WARNING: invept failed!\n");
1051 
1052 	return true;
1053 }
1054 
1055 static int ept_exit_handler()
1056 {
1057 	u64 guest_rip;
1058 	ulong reason;
1059 	u32 insn_len;
1060 	u32 exit_qual;
1061 	static unsigned long data_page1_pte, data_page1_pte_pte;
1062 
1063 	guest_rip = vmcs_read(GUEST_RIP);
1064 	reason = vmcs_read(EXI_REASON) & 0xff;
1065 	insn_len = vmcs_read(EXI_INST_LEN);
1066 	exit_qual = vmcs_read(EXI_QUALIFICATION);
1067 	switch (reason) {
1068 	case VMX_VMCALL:
1069 		switch (vmx_get_test_stage()) {
1070 		case 0:
1071 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1072 					*((u32 *)data_page2) == MAGIC_VAL_2) {
1073 				vmx_inc_test_stage();
1074 				install_ept(pml4, (unsigned long)data_page2,
1075 						(unsigned long)data_page2,
1076 						EPT_RA | EPT_WA | EPT_EA);
1077 			} else
1078 				report("EPT basic framework - write\n", 0);
1079 			break;
1080 		case 1:
1081 			install_ept(pml4, (unsigned long)data_page1,
1082  				(unsigned long)data_page1, EPT_WA);
1083 			ept_sync(INVEPT_SINGLE, eptp);
1084 			break;
1085 		case 2:
1086 			install_ept(pml4, (unsigned long)data_page1,
1087  				(unsigned long)data_page1,
1088  				EPT_RA | EPT_WA | EPT_EA |
1089  				(2 << EPT_MEM_TYPE_SHIFT));
1090 			ept_sync(INVEPT_SINGLE, eptp);
1091 			break;
1092 		case 3:
1093 			data_page1_pte = get_ept_pte(pml4,
1094 				(unsigned long)data_page1, 1);
1095 			set_ept_pte(pml4, (unsigned long)data_page1,
1096 				1, data_page1_pte & (~EPT_PRESENT));
1097 			ept_sync(INVEPT_SINGLE, eptp);
1098 			break;
1099 		case 4:
1100 			data_page1_pte = get_ept_pte(pml4,
1101 				(unsigned long)data_page1, 2);
1102 			data_page1_pte &= PAGE_MASK;
1103 			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
1104 			set_ept_pte(pml4, data_page1_pte, 2,
1105 				data_page1_pte_pte & (~EPT_PRESENT));
1106 			ept_sync(INVEPT_SINGLE, eptp);
1107 			break;
1108 		case 6:
1109 			if (!invept_test(0, eptp))
1110 				vmx_inc_test_stage();
1111 			break;
1112 		// Should not reach here
1113 		default:
1114 			printf("ERROR - unexpected stage, %d.\n",
1115 			       vmx_get_test_stage());
1116 			print_vmexit_info();
1117 			return VMX_TEST_VMEXIT;
1118 		}
1119 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1120 		return VMX_TEST_RESUME;
1121 	case VMX_EPT_MISCONFIG:
1122 		switch (vmx_get_test_stage()) {
1123 		case 1:
1124 		case 2:
1125 			vmx_inc_test_stage();
1126 			install_ept(pml4, (unsigned long)data_page1,
1127  				(unsigned long)data_page1,
1128  				EPT_RA | EPT_WA | EPT_EA);
1129 			ept_sync(INVEPT_SINGLE, eptp);
1130 			break;
1131 		// Should not reach here
1132 		default:
1133 			printf("ERROR - unexpected stage, %d.\n",
1134 			       vmx_get_test_stage());
1135 			print_vmexit_info();
1136 			return VMX_TEST_VMEXIT;
1137 		}
1138 		return VMX_TEST_RESUME;
1139 	case VMX_EPT_VIOLATION:
1140 		switch(vmx_get_test_stage()) {
1141 		case 3:
1142 			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
1143 					EPT_VLT_PADDR))
1144 				vmx_inc_test_stage();
1145 			set_ept_pte(pml4, (unsigned long)data_page1,
1146 				1, data_page1_pte | (EPT_PRESENT));
1147 			ept_sync(INVEPT_SINGLE, eptp);
1148 			break;
1149 		case 4:
1150 			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
1151 				vmx_inc_test_stage();
1152 			set_ept_pte(pml4, data_page1_pte, 2,
1153 				data_page1_pte_pte | (EPT_PRESENT));
1154 			ept_sync(INVEPT_SINGLE, eptp);
1155 			break;
1156 		default:
1157 			// Should not reach here
1158 			printf("ERROR : unexpected stage, %d\n",
1159 			       vmx_get_test_stage());
1160 			print_vmexit_info();
1161 			return VMX_TEST_VMEXIT;
1162 		}
1163 		return VMX_TEST_RESUME;
1164 	default:
1165 		printf("Unknown exit reason, %ld\n", reason);
1166 		print_vmexit_info();
1167 	}
1168 	return VMX_TEST_VMEXIT;
1169 }
1170 
1171 bool invvpid_test(int type, u16 vpid)
1172 {
1173 	bool ret, supported;
1174 
1175 	supported = ept_vpid.val & (VPID_CAP_INVVPID_SINGLE >> INVVPID_SINGLE << type);
1176 	ret = invvpid(type, vpid, 0);
1177 
1178 	if (ret == !supported)
1179 		return false;
1180 
1181 	if (!supported)
1182 		printf("WARNING: unsupported invvpid passed!\n");
1183 	else
1184 		printf("WARNING: invvpid failed!\n");
1185 
1186 	return true;
1187 }
1188 
1189 static int vpid_init()
1190 {
1191 	u32 ctrl_cpu1;
1192 
1193 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
1194 		!(ctrl_cpu_rev[1].clr & CPU_VPID)) {
1195 		printf("\tVPID is not supported");
1196 		return VMX_TEST_EXIT;
1197 	}
1198 
1199 	ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
1200 	ctrl_cpu1 |= CPU_VPID;
1201 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
1202 	return VMX_TEST_START;
1203 }
1204 
1205 static void vpid_main()
1206 {
1207 	vmx_set_test_stage(0);
1208 	vmcall();
1209 	report("INVVPID SINGLE ADDRESS", vmx_get_test_stage() == 1);
1210 	vmx_set_test_stage(2);
1211 	vmcall();
1212 	report("INVVPID SINGLE", vmx_get_test_stage() == 3);
1213 	vmx_set_test_stage(4);
1214 	vmcall();
1215 	report("INVVPID ALL", vmx_get_test_stage() == 5);
1216 }
1217 
/*
 * Host side of the VPID test: each vmcall from vpid_main arrives with
 * the stage selecting which INVVPID type to exercise.  The stage is
 * advanced only when invvpid_test() matched the capability-reported
 * behavior, which vpid_main then checks.
 */
static int vpid_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;	/* basic exit reason only */
	insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch(vmx_get_test_stage()) {
		case 0:
			if (!invvpid_test(INVVPID_SINGLE_ADDRESS, 1))
				vmx_inc_test_stage();
			break;
		case 2:
			if (!invvpid_test(INVVPID_SINGLE, 1))
				vmx_inc_test_stage();
			break;
		case 4:
			if (!invvpid_test(INVVPID_ALL, 1))
				vmx_inc_test_stage();
			break;
		default:
			printf("ERROR: unexpected stage, %d\n",
					vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		/* Skip the vmcall instruction on resume. */
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1257 
1258 #define TIMER_VECTOR	222
1259 
1260 static volatile bool timer_fired;
1261 
/*
 * LAPIC timer interrupt handler for the interrupt tests: record that
 * the interrupt was delivered and acknowledge it at the local APIC.
 */
static void timer_isr(isr_regs_t *regs)
{
	timer_fired = true;
	apic_write(APIC_EOI, 0);
}
1267 
1268 static int interrupt_init(struct vmcs *vmcs)
1269 {
1270 	msr_bmp_init();
1271 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1272 	handle_irq(TIMER_VECTOR, timer_isr);
1273 	return VMX_TEST_START;
1274 }
1275 
/*
 * Guest side of the interrupt tests.  Each vmcall advances the stage;
 * interrupt_exit_handler uses the stage to toggle external-interrupt
 * exiting (PIN_EXTINT), the HLT activity state, and finally the
 * acknowledge-interrupt-on-exit control between sub-tests.
 */
static void interrupt_main(void)
{
	long long start, loops;

	vmx_set_test_stage(0);

	/* One-shot LAPIC timer on TIMER_VECTOR for all sub-tests. */
	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	/* Stage 0: interrupt delivered directly to the running guest. */
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	/* Stage 1: same, but with external-interrupt exiting enabled. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	/* Stage 2: direct interrupt waking the guest out of hlt. */
	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	/* Stage 3: intercepted interrupt waking the guest out of hlt. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Stage 4: the exit handler puts the guest into the HLT activity
	 * state; a direct interrupt must make it active again. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Stage 5/6: as above, with external-interrupt exiting enabled. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Stage 7: acknowledge-interrupt-on-exit — the host dispatches
	 * the vector taken from the exit information itself. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmx_set_test_stage(7);
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("running a guest with interrupt acknowledgement set", timer_fired);
}
1362 
/*
 * Host side of the interrupt tests.  On vmcall exits the current stage
 * selects the configuration for the next sub-test; the stage is then
 * advanced and the vmcall skipped.  On external-interrupt exits the
 * pending interrupt is either dispatched from the exit information
 * (acknowledge-on-exit) or delivered via a brief interrupt window.
 */
static int interrupt_exit_handler(void)
{
	u64 guest_rip = vmcs_read(GUEST_RIP);
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
		case 2:
		case 5:
			/* Next sub-test intercepts external interrupts. */
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 7:
			/* Final sub-test: also acknowledge on exit. */
			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 1:
		case 3:
			/* Next sub-test takes interrupts in the guest. */
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
			break;
		case 4:
		case 6:
			/* Resume the guest in the HLT activity state. */
			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
			break;
		}
		vmx_inc_test_stage();
		/* Skip the vmcall instruction on resume. */
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EXTINT:
		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
			/* CPU acknowledged the interrupt on exit; dispatch
			 * the handler for the vector from the exit info. */
			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
			handle_external_interrupt(vector);
		} else {
			/* Open a one-instruction interrupt window so the
			 * pending interrupt is delivered to the host. */
			irq_enable();
			asm volatile ("nop");
			irq_disable();
		}
		/* Stages >= 2 may have parked the guest in HLT; wake it. */
		if (vmx_get_test_stage() >= 2)
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}

	return VMX_TEST_VMEXIT;
}
1415 
/*
 * Set up the debug-controls test: give the host known debug-register
 * and IA32_DEBUGCTL values, program different guest values into the
 * VMCS, and enable loading them on VM entry (ENT_LOAD_DBGCTLS) and
 * saving them on VM exit (EXI_SAVE_DBGCTLS).
 */
static int dbgctls_init(struct vmcs *vmcs)
{
	u64 dr7 = 0x402;
	u64 zero = 0;

	msr_bmp_init();
	/* Host debug state: DR0-DR2 = 0, DR7 = 0x402, DEBUGCTL = 0x1. */
	asm volatile(
		"mov %0,%%dr0\n\t"
		"mov %0,%%dr1\n\t"
		"mov %0,%%dr2\n\t"
		"mov %1,%%dr7\n\t"
		: : "r" (zero), "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
	/* Distinct guest values, loaded on VM entry. */
	vmcs_write(GUEST_DR7, 0x404);
	vmcs_write(GUEST_DEBUGCTL, 0x2);

	vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
	vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_SAVE_DBGCTLS);

	return VMX_TEST_START;
}
1437 
/*
 * Guest side of the debug-controls test.
 * First half: verify VM entry loaded the guest DR7 from the VMCS, then
 * dirty DR7/DEBUGCTL and vmcall so the host can check they were saved
 * on exit.  Second half (only if the load/save controls can be
 * cleared): with the controls off, the guest must see the host's own
 * values, and guest changes must not be written back to the VMCS.
 * DEBUGCTL comparisons are commented out (unsupported by KVM).
 */
static void dbgctls_main(void)
{
	u64 dr7, debugctl;

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	(void)debugctl;
	report("Load debug controls", dr7 == 0x404 /* && debugctl == 0x2 */);

	/* Dirty the debug state; the exit must save it to the VMCS. */
	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(0);
	vmcall();
	report("Save debug controls", vmx_get_test_stage() == 1);

	if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS ||
	    ctrl_exit_rev.set & EXI_SAVE_DBGCTLS) {
		printf("\tDebug controls are always loaded/saved\n");
		return;
	}
	/* Stage 2: the host clears the load/save controls. */
	vmx_set_test_stage(2);
	vmcall();

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	(void)debugctl;
	report("Guest=host debug controls", dr7 == 0x402 /* && debugctl == 0x1 */);

	/* Dirty again; without EXI_SAVE_DBGCTLS it must not be saved. */
	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(3);
	vmcall();
	report("Don't save debug controls", vmx_get_test_stage() == 4);
}
1478 
/*
 * Host side of the debug-controls test.  Samples the host's DR7 and
 * DEBUGCTL right after the exit, then checks or reprograms the debug
 * state according to the stage set by dbgctls_main.
 */
static int dbgctls_exit_handler(void)
{
	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
	u32 insn_len = vmcs_read(EXI_INST_LEN);
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u64 dr7, debugctl;

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			/* Exit must reset host DR7/DEBUGCTL and have saved
			 * the guest's dirtied DR7 into the VMCS. */
			if (dr7 == 0x400 && debugctl == 0 &&
			    vmcs_read(GUEST_DR7) == 0x408 /* &&
			    Commented out: KVM does not support DEBUGCTL so far
			    vmcs_read(GUEST_DEBUGCTL) == 0x3 */)
				vmx_inc_test_stage();
			break;
		case 2:
			/* Re-establish known values, then disable the
			 * load-on-entry / save-on-exit controls. */
			dr7 = 0x402;
			asm volatile("mov %0,%%dr7" : : "r" (dr7));
			wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
			vmcs_write(GUEST_DR7, 0x404);
			vmcs_write(GUEST_DEBUGCTL, 0x2);

			vmcs_write(ENT_CONTROLS,
				vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS);
			vmcs_write(EXI_CONTROLS,
				vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS);
			break;
		case 3:
			/* Without EXI_SAVE_DBGCTLS the VMCS must keep the
			 * value written in stage 2, not the guest's 0x408. */
			if (dr7 == 0x400 && debugctl == 0 &&
			    vmcs_read(GUEST_DR7) == 0x404 /* &&
			    Commented out: KVM does not support DEBUGCTL so far
			    vmcs_read(GUEST_DEBUGCTL) == 0x2 */)
				vmx_inc_test_stage();
			break;
		}
		/* Skip the vmcall instruction on resume. */
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1527 
/*
 * One entry of a VMX MSR-load/store area, as pointed to by the
 * ENTER_MSR_LD_ADDR / EXIT_MSR_ST_ADDR / EXIT_MSR_LD_ADDR fields
 * programmed in msr_switch_init below.
 */
struct vmx_msr_entry {
	u32 index;	/* MSR number */
	u32 reserved;	/* cleared to zero in msr_switch_init */
	u64 value;	/* value to load, or slot for the stored value */
} __attribute__((packed));

#define MSR_MAGIC 0x31415926
/* One-page MSR areas shared by init, guest main and exit handler. */
struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load;
1536 
1537 static int msr_switch_init(struct vmcs *vmcs)
1538 {
1539 	msr_bmp_init();
1540 	exit_msr_store = alloc_page();
1541 	exit_msr_load = alloc_page();
1542 	entry_msr_load = alloc_page();
1543 	memset(exit_msr_store, 0, PAGE_SIZE);
1544 	memset(exit_msr_load, 0, PAGE_SIZE);
1545 	memset(entry_msr_load, 0, PAGE_SIZE);
1546 	entry_msr_load[0].index = MSR_KERNEL_GS_BASE;
1547 	entry_msr_load[0].value = MSR_MAGIC;
1548 
1549 	vmx_set_test_stage(1);
1550 	vmcs_write(ENT_MSR_LD_CNT, 1);
1551 	vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load);
1552 	vmcs_write(EXI_MSR_ST_CNT, 1);
1553 	vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store);
1554 	vmcs_write(EXI_MSR_LD_CNT, 1);
1555 	vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load);
1556 	return VMX_TEST_START;
1557 }
1558 
/*
 * Guest side of the MSR-switch test.  On the first pass (stage 1),
 * verify VM entry loaded MSR_KERNEL_GS_BASE from the entry MSR-load
 * area, then change the MSR and arm the exit MSR-store/load areas so
 * the following exit stores the guest value and loads MSR_MAGIC + 2.
 * Every pass ends in a vmcall to the exit handler.
 */
static void msr_switch_main()
{
	if (vmx_get_test_stage() == 1) {
		report("VM entry MSR load",
			rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC);
		vmx_set_test_stage(2);
		wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1);
		exit_msr_store[0].index = MSR_KERNEL_GS_BASE;
		exit_msr_load[0].index = MSR_KERNEL_GS_BASE;
		exit_msr_load[0].value = MSR_MAGIC + 2;
	}
	vmcall();
}
1572 
1573 static int msr_switch_exit_handler()
1574 {
1575 	ulong reason;
1576 
1577 	reason = vmcs_read(EXI_REASON);
1578 	if (reason == VMX_VMCALL && vmx_get_test_stage() == 2) {
1579 		report("VM exit MSR store",
1580 			exit_msr_store[0].value == MSR_MAGIC + 1);
1581 		report("VM exit MSR load",
1582 			rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC + 2);
1583 		vmx_set_test_stage(3);
1584 		entry_msr_load[0].index = MSR_FS_BASE;
1585 		return VMX_TEST_RESUME;
1586 	}
1587 	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
1588 		__func__, vmx_get_test_stage(), reason);
1589 	return VMX_TEST_EXIT;
1590 }
1591 
1592 static int msr_switch_entry_failure(struct vmentry_failure *failure)
1593 {
1594 	ulong reason;
1595 
1596 	if (failure->early) {
1597 		printf("ERROR %s: early exit\n", __func__);
1598 		return VMX_TEST_EXIT;
1599 	}
1600 
1601 	reason = vmcs_read(EXI_REASON);
1602 	if (reason == (VMX_ENTRY_FAILURE | VMX_FAIL_MSR) &&
1603 	    vmx_get_test_stage() == 3) {
1604 		report("VM entry MSR load: try to load FS_BASE",
1605 			vmcs_read(EXI_QUALIFICATION) == 1);
1606 		return VMX_TEST_VMEXIT;
1607 	}
1608 	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
1609 		__func__, vmx_get_test_stage(), reason);
1610 	return VMX_TEST_EXIT;
1611 }
1612 
1613 static int vmmcall_init(struct vmcs *vmcs	)
1614 {
1615 	vmcs_write(EXC_BITMAP, 1 << UD_VECTOR);
1616 	return VMX_TEST_START;
1617 }
1618 
/*
 * Execute the AMD VMMCALL instruction; on Intel VMX it is expected to
 * raise #UD (intercepted via the exception bitmap set in vmmcall_init)
 * rather than fall through, so reaching the report() means failure.
 */
static void vmmcall_main(void)
{
	asm volatile(
		"mov $0xABCD, %%rax\n\t"
		"vmmcall\n\t"
		::: "rax");

	report("VMMCALL", 0);
}
1628 
1629 static int vmmcall_exit_handler()
1630 {
1631 	ulong reason;
1632 
1633 	reason = vmcs_read(EXI_REASON);
1634 	switch (reason) {
1635 	case VMX_VMCALL:
1636 		printf("here\n");
1637 		report("VMMCALL triggers #UD", 0);
1638 		break;
1639 	case VMX_EXC_NMI:
1640 		report("VMMCALL triggers #UD",
1641 		       (vmcs_read(EXI_INTR_INFO) & 0xff) == UD_VECTOR);
1642 		break;
1643 	default:
1644 		printf("Unknown exit reason, %ld\n", reason);
1645 		print_vmexit_info();
1646 	}
1647 
1648 	return VMX_TEST_VMEXIT;
1649 }
1650 
/*
 * Test registration table, terminated by an entry with a NULL name.
 * Fields: name / init / guest_main / exit_handler / syscall_handler /
 * guest_regs, plus an optional seventh entry_failure handler (used by
 * "MSR switch").
 */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ "VPID", vpid_init, vpid_main, vpid_exit_handler, NULL, {0} },
	{ "interrupt", interrupt_init, interrupt_main,
		interrupt_exit_handler, NULL, {0} },
	{ "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler,
		NULL, {0} },
	{ "MSR switch", msr_switch_init, msr_switch_main,
		msr_switch_exit_handler, NULL, {0}, msr_switch_entry_failure },
	{ "vmmcall", vmmcall_init, vmmcall_main, vmmcall_exit_handler, NULL, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1678