xref: /kvm-unit-tests/x86/vmx_tests.c (revision 7ada359d6cc08cd1dbe8a7a279e6c50a5b3583a6)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 
/* PAT/EFER values captured by the host side for later comparison. */
u64 ia32_pat;
u64 ia32_efer;
/* Test-progress counter shared between guest code and host exit handlers. */
volatile u32 stage;
/* I/O bitmap pages: A covers ports 0x0000-0x7FFF, B covers 0x8000-0xFFFF. */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

bool init_fail;			/* set by ept_init() when EPT setup fails */
unsigned long *pml4;		/* root of the EPT paging structure */
u64 eptp;			/* EPT pointer value written to the VMCS */
void *data_page1, *data_page2;	/* scratch pages used by the EPT tests */
23 
/* Force a VM exit into the current test's exit handler via VMCALL. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
28 
/*
 * Store the shared stage counter; the barriers keep the compiler from
 * reordering the store relative to the surrounding test operations.
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
35 
/*
 * Read the shared stage counter; barriers force a fresh, ordered read
 * rather than a value cached across test operations.
 */
static inline u32 get_stage()
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
45 
/* Default init hook: the basic tests need no extra VMCS setup. */
void basic_init()
{
}
49 
/* Guest entry for the "null" test: just prove guest code runs. */
void basic_guest_main()
{
	/* Here is a basic guest_main, print Hello World */
	printf("\tHello World, this is null_guest_main!\n");
}
55 
56 int basic_exit_handler()
57 {
58 	u64 guest_rip;
59 	ulong reason;
60 
61 	guest_rip = vmcs_read(GUEST_RIP);
62 	reason = vmcs_read(EXI_REASON) & 0xff;
63 
64 	switch (reason) {
65 	case VMX_VMCALL:
66 		print_vmexit_info();
67 		vmcs_write(GUEST_RIP, guest_rip + 3);
68 		return VMX_TEST_RESUME;
69 	default:
70 		break;
71 	}
72 	printf("ERROR : Unhandled vmx exit.\n");
73 	print_vmexit_info();
74 	return VMX_TEST_EXIT;
75 }
76 
/* Default syscall hook: guest syscalls are ignored. */
void basic_syscall_handler(u64 syscall_no)
{
}
80 
81 void vmenter_main()
82 {
83 	u64 rax;
84 	u64 rsp, resume_rsp;
85 
86 	report("test vmlaunch", 1);
87 
88 	asm volatile(
89 		"mov %%rsp, %0\n\t"
90 		"mov %3, %%rax\n\t"
91 		"vmcall\n\t"
92 		"mov %%rax, %1\n\t"
93 		"mov %%rsp, %2\n\t"
94 		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
95 		: "g"(0xABCD));
96 	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
97 }
98 
/*
 * Host side of the vmenter test: on the guest's VMCALL, verify the guest
 * put 0xABCD in rax, substitute 0xFFFF for it to observe after vmresume,
 * and skip the 3-byte VMCALL instruction.  Any other exit fails the test.
 */
int vmenter_exit_handler()
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		/* Guest checks for this value after resuming. */
		regs.rax = 0xFFFF;
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
121 
u32 preempt_scale;			/* TSC-to-timer shift from MSR_IA32_VMX_MISC[4:0] */
volatile unsigned long long tsc_val;	/* TSC snapshot taken when the guest starts waiting */
volatile u32 preempt_val;		/* preemption timer value programmed into the VMCS */
125 
126 void preemption_timer_init()
127 {
128 	u32 ctrl_pin;
129 
130 	ctrl_pin = vmcs_read(PIN_CONTROLS) | PIN_PREEMPT;
131 	ctrl_pin &= ctrl_pin_rev.clr;
132 	vmcs_write(PIN_CONTROLS, ctrl_pin);
133 	preempt_val = 10000000;
134 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
135 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
136 }
137 
/*
 * Guest side of the preemption timer test.  If the save-on-exit control
 * is available, stages 0 and 1 (driven via VMCALLs to the host handler)
 * check that the remaining timer value is saved across a VM exit.  The
 * final busy loop waits for the timer-triggered exit; if more than 10x
 * the programmed period elapses (TSC scaled by preempt_scale), the timer
 * is declared broken.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return;
	}
	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
		printf("\tSave preemption value is not supported\n");
	else {
		set_stage(0);
		vmcall();
		/* Stage advanced to 1 only if the stage-0 check passed. */
		if (get_stage() == 1)
			vmcall();
	}
	while (1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			report("Preemption timer", 0);
			break;
		}
	}
}
161 
/*
 * Host side of the preemption timer test.
 *  - VMX_PREEMPT: the timer fired; pass only if at least preempt_val
 *    scaled TSC ticks elapsed since the guest's tsc_val snapshot.
 *  - VMX_VMCALL, stage 0: check the timer value was preserved across the
 *    exit, then enable EXI_SAVE_PREEMPT and advance to stage 1.
 *  - VMX_VMCALL, stage 1: with saving enabled, the saved value must have
 *    decreased below the originally programmed preempt_val.
 */
int preemption_timer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		/* Fail if the timer fired earlier than the programmed delay. */
		if (((rdtsc() - tsc_val) >> preempt_scale) < preempt_val)
			report("Preemption timer", 0);
		else
			report("Preemption timer", 1);
		return VMX_TEST_VMEXIT;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (vmcs_read(PREEMPT_TIMER_VALUE) != preempt_val)
				report("Save preemption value", 0);
			else {
				set_stage(get_stage() + 1);
				/* Turn on saving of the timer on VM exit. */
				ctrl_exit = (vmcs_read(EXI_CONTROLS) |
					EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
				vmcs_write(EXI_CONTROLS, ctrl_exit);
			}
			break;
		case 1:
			if (vmcs_read(PREEMPT_TIMER_VALUE) >= preempt_val)
				report("Save preemption value", 0);
			else
				report("Save preemption value", 1);
			break;
		default:
			printf("Invalid stage.\n");
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		/* Skip the VMCALL that triggered this exit. */
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
210 
211 void msr_bmp_init()
212 {
213 	void *msr_bitmap;
214 	u32 ctrl_cpu0;
215 
216 	msr_bitmap = alloc_page();
217 	memset(msr_bitmap, 0x0, PAGE_SIZE);
218 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
219 	ctrl_cpu0 |= CPU_MSR_BITMAP;
220 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
221 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
222 }
223 
224 static void test_ctrl_pat_init()
225 {
226 	u64 ctrl_ent;
227 	u64 ctrl_exi;
228 
229 	msr_bmp_init();
230 	ctrl_ent = vmcs_read(ENT_CONTROLS);
231 	ctrl_exi = vmcs_read(EXI_CONTROLS);
232 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
233 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
234 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
235 	vmcs_write(GUEST_PAT, 0x0);
236 	vmcs_write(HOST_PAT, ia32_pat);
237 }
238 
239 static void test_ctrl_pat_main()
240 {
241 	u64 guest_ia32_pat;
242 
243 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
244 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
245 		printf("\tENT_LOAD_PAT is not supported.\n");
246 	else {
247 		if (guest_ia32_pat != 0) {
248 			report("Entry load PAT", 0);
249 			return;
250 		}
251 	}
252 	wrmsr(MSR_IA32_CR_PAT, 0x6);
253 	vmcall();
254 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
255 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
256 		if (guest_ia32_pat != ia32_pat) {
257 			report("Entry load PAT", 0);
258 			return;
259 		}
260 		report("Entry load PAT", 1);
261 	}
262 }
263 
/*
 * Host side of the PAT control test.  On the guest's VMCALL, check that
 * the guest's PAT value (0x6) was saved into GUEST_PAT on exit and that
 * the host's ia32_pat was loaded into IA32_PAT, then set GUEST_PAT to
 * ia32_pat so entry-load can be verified by the guest after resuming.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			/*
			 * NOTE(review): this 0x6 is overwritten by the
			 * unconditional GUEST_PAT write below — confirm
			 * whether that write should be in an else path.
			 */
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		/* Guest expects entry-load to restore ia32_pat on resume. */
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
301 
302 static void test_ctrl_efer_init()
303 {
304 	u64 ctrl_ent;
305 	u64 ctrl_exi;
306 
307 	msr_bmp_init();
308 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
309 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
310 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
311 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
312 	ia32_efer = rdmsr(MSR_EFER);
313 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
314 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
315 }
316 
317 static void test_ctrl_efer_main()
318 {
319 	u64 guest_ia32_efer;
320 
321 	guest_ia32_efer = rdmsr(MSR_EFER);
322 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
323 		printf("\tENT_LOAD_EFER is not supported.\n");
324 	else {
325 		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
326 			report("Entry load EFER", 0);
327 			return;
328 		}
329 	}
330 	wrmsr(MSR_EFER, ia32_efer);
331 	vmcall();
332 	guest_ia32_efer = rdmsr(MSR_EFER);
333 	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
334 		if (guest_ia32_efer != ia32_efer) {
335 			report("Entry load EFER", 0);
336 			return;
337 		}
338 		report("Entry load EFER", 1);
339 	}
340 }
341 
342 static int test_ctrl_efer_exit_handler()
343 {
344 	u64 guest_rip;
345 	ulong reason;
346 	u64 guest_efer;
347 
348 	guest_rip = vmcs_read(GUEST_RIP);
349 	reason = vmcs_read(EXI_REASON) & 0xff;
350 	switch (reason) {
351 	case VMX_VMCALL:
352 		guest_efer = vmcs_read(GUEST_EFER);
353 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
354 			printf("\tEXI_SAVE_EFER is not supported\n");
355 			vmcs_write(GUEST_EFER, ia32_efer);
356 		} else {
357 			if (guest_efer == ia32_efer)
358 				report("Exit save EFER", 1);
359 			else
360 				report("Exit save EFER", 0);
361 		}
362 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
363 			printf("\tEXI_LOAD_EFER is not supported\n");
364 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
365 		} else {
366 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
367 				report("Exit load EFER", 1);
368 			else
369 				report("Exit load EFER", 0);
370 		}
371 		vmcs_write(GUEST_PAT, ia32_efer);
372 		vmcs_write(GUEST_RIP, guest_rip + 3);
373 		return VMX_TEST_RESUME;
374 	default:
375 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
376 		break;
377 	}
378 	return VMX_TEST_VMEXIT;
379 }
380 
/* CR0/CR4 values the guest wrote, shared with the exit handler. */
u32 guest_cr0, guest_cr4;
382 
383 static void cr_shadowing_main()
384 {
385 	u32 cr0, cr4, tmp;
386 
387 	// Test read through
388 	set_stage(0);
389 	guest_cr0 = read_cr0();
390 	if (stage == 1)
391 		report("Read through CR0", 0);
392 	else
393 		vmcall();
394 	set_stage(1);
395 	guest_cr4 = read_cr4();
396 	if (stage == 2)
397 		report("Read through CR4", 0);
398 	else
399 		vmcall();
400 	// Test write through
401 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
402 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
403 	set_stage(2);
404 	write_cr0(guest_cr0);
405 	if (stage == 3)
406 		report("Write throuth CR0", 0);
407 	else
408 		vmcall();
409 	set_stage(3);
410 	write_cr4(guest_cr4);
411 	if (stage == 4)
412 		report("Write through CR4", 0);
413 	else
414 		vmcall();
415 	// Test read shadow
416 	set_stage(4);
417 	vmcall();
418 	cr0 = read_cr0();
419 	if (stage != 5) {
420 		if (cr0 == guest_cr0)
421 			report("Read shadowing CR0", 1);
422 		else
423 			report("Read shadowing CR0", 0);
424 	}
425 	set_stage(5);
426 	cr4 = read_cr4();
427 	if (stage != 6) {
428 		if (cr4 == guest_cr4)
429 			report("Read shadowing CR4", 1);
430 		else
431 			report("Read shadowing CR4", 0);
432 	}
433 	// Test write shadow (same value with shadow)
434 	set_stage(6);
435 	write_cr0(guest_cr0);
436 	if (stage == 7)
437 		report("Write shadowing CR0 (same value with shadow)", 0);
438 	else
439 		vmcall();
440 	set_stage(7);
441 	write_cr4(guest_cr4);
442 	if (stage == 8)
443 		report("Write shadowing CR4 (same value with shadow)", 0);
444 	else
445 		vmcall();
446 	// Test write shadow (different value)
447 	set_stage(8);
448 	tmp = guest_cr0 ^ X86_CR0_TS;
449 	asm volatile("mov %0, %%rsi\n\t"
450 		"mov %%rsi, %%cr0\n\t"
451 		::"m"(tmp)
452 		:"rsi", "memory", "cc");
453 	if (stage != 9)
454 		report("Write shadowing different X86_CR0_TS", 0);
455 	else
456 		report("Write shadowing different X86_CR0_TS", 1);
457 	set_stage(9);
458 	tmp = guest_cr0 ^ X86_CR0_MP;
459 	asm volatile("mov %0, %%rsi\n\t"
460 		"mov %%rsi, %%cr0\n\t"
461 		::"m"(tmp)
462 		:"rsi", "memory", "cc");
463 	if (stage != 10)
464 		report("Write shadowing different X86_CR0_MP", 0);
465 	else
466 		report("Write shadowing different X86_CR0_MP", 1);
467 	set_stage(10);
468 	tmp = guest_cr4 ^ X86_CR4_TSD;
469 	asm volatile("mov %0, %%rsi\n\t"
470 		"mov %%rsi, %%cr4\n\t"
471 		::"m"(tmp)
472 		:"rsi", "memory", "cc");
473 	if (stage != 11)
474 		report("Write shadowing different X86_CR4_TSD", 0);
475 	else
476 		report("Write shadowing different X86_CR4_TSD", 1);
477 	set_stage(11);
478 	tmp = guest_cr4 ^ X86_CR4_DE;
479 	asm volatile("mov %0, %%rsi\n\t"
480 		"mov %%rsi, %%cr4\n\t"
481 		::"m"(tmp)
482 		:"rsi", "memory", "cc");
483 	if (stage != 12)
484 		report("Write shadowing different X86_CR4_DE", 0);
485 	else
486 		report("Write shadowing different X86_CR4_DE", 1);
487 }
488 
/*
 * Host side of the CR shadowing test.  VMCALL exits verify the guest CR
 * values propagated into the VMCS for the read/write-through stages, and
 * at stage 4 install the CR0/CR4 guest/host masks and read shadows.
 * VMX_CR exits are expected only for the shadow-mismatch stages (8-11);
 * for stages 4-7 a CR exit means shadowing failed.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (stage) {
		case 0:
			/* Read through: guest saw the real CR0. */
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			/* Write through: guest write reached GUEST_CR0. */
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/*
			 * Program masks and shadows: the shadow holds the
			 * pre-toggle bit values, so subsequent reads should
			 * return guest_cr0/guest_cr4 (the toggled values).
			 */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			/* Write matching shadow: unmasked bits toggled only. */
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (stage) {
		case 4:
			/* A CR exit here means the read was not shadowed. */
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
589 
590 static void iobmp_init()
591 {
592 	u32 ctrl_cpu0;
593 
594 	io_bitmap_a = alloc_page();
595 	io_bitmap_a = alloc_page();
596 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
597 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
598 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
599 	ctrl_cpu0 |= CPU_IO_BITMAP;
600 	ctrl_cpu0 &= (~CPU_IO);
601 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
602 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
603 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
604 }
605 
/*
 * Guest side of the I/O bitmap test.  Each stage performs an I/O access
 * and checks (via the stage counter bumped by iobmp_exit_handler) whether
 * it trapped as expected: clear bitmaps pass, set bits trap with the
 * right width/direction/port in the exit qualification, a 4-byte access
 * straddling a set bit still traps, and an access wrapping past 0xFFFF
 * traps ("overrun").
 * NOTE(review): stage 1 is skipped (set_stage(2) follows stage 0), and
 * the long-width case only reports on failure — verify both are intended.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	/* Bitmap B starts at port 0x8000, so bit 0x1000 covers port 0x9000. */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);

	return;
}
668 
/*
 * Host side of the I/O bitmap test.  On each VMX_IO exit, validate the
 * exit qualification (access size, direction, port number) against what
 * the current stage expects and advance the stage on success, then skip
 * the trapping I/O instruction.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (stage) {
		case 2:
			/* inb(0x0): byte-size IN expected. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			/* outw(0x0, 0x0): word-size OUT expected. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			/* inl(0x0): long-size access expected. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			/* Check the trapped port number for each port stage. */
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		case 0:
		case 1:
			/* Unexpected trap during the pass stages. */
			set_stage(stage + 1);
			/* fallthrough */
		default:
			// Should not reach here
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
743 
/* Interception type of each insn_table entry (compared with ==). */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/*
 * Bit flags for insn_table.test_field, tested with '&' in the exit
 * handler.  Bug fix: FIELD_EXIT_QUAL was 0, so
 * "test_field & FIELD_EXIT_QUAL" could never be true and the exit
 * qualification (e.g. INVLPG's address) was never actually checked.
 */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
751 
/*
 * One tiny stub per interceptable instruction: execute it and return, so
 * insn_table entries can invoke each instruction via a function pointer.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();

/* Index of the insn_table entry currently under test. */
u32 cur_insn;
776 
/* One instruction-interception test case. */
struct insn_table {
	const char *name;	/* report name */
	u32 flag;		/* exec-control bit that enables the intercept */
	void (*insn_func)();	/* stub that executes the instruction */
	u32 type;		/* INSN_CPU0/CPU1/ALWAYS_TRAP/NEVER_TRAP */
	u32 reason;		/* expected VM-exit reason */
	ulong exit_qual;	/* expected exit qualification, if tested */
	u32 insn_info;		/* expected instruction info, if tested */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which fields need to be tested; reason is always tested
	u32 test_field;
};
789 
/*
 * Add more test cases of instruction intercept here. Elements in this
 * table is:
 *	name/control flag/insn function/type/exit reason/exit qualification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
816 
817 static void insn_intercept_init()
818 {
819 	u32 ctrl_cpu[2];
820 
821 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
822 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
823 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
824 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
825 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
826 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
827 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
828 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
829 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
830 }
831 
832 static void insn_intercept_main()
833 {
834 	cur_insn = 0;
835 	while(insn_table[cur_insn].name != NULL) {
836 		set_stage(cur_insn);
837 		if ((insn_table[cur_insn].type == INSN_CPU0
838 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
839 			|| (insn_table[cur_insn].type == INSN_CPU1
840 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
841 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
842 				insn_table[cur_insn].name);
843 			continue;
844 		}
845 		insn_table[cur_insn].insn_func();
846 		switch (insn_table[cur_insn].type) {
847 		case INSN_CPU0:
848 		case INSN_CPU1:
849 		case INSN_ALWAYS_TRAP:
850 			if (stage != cur_insn + 1)
851 				report(insn_table[cur_insn].name, 0);
852 			else
853 				report(insn_table[cur_insn].name, 1);
854 			break;
855 		case INSN_NEVER_TRAP:
856 			if (stage == cur_insn + 1)
857 				report(insn_table[cur_insn].name, 0);
858 			else
859 				report(insn_table[cur_insn].name, 1);
860 			break;
861 		}
862 		cur_insn ++;
863 	}
864 }
865 
/*
 * Host side of the instruction intercept test: compare the exit reason
 * (and, when requested by test_field, the exit qualification and
 * instruction info) against the current insn_table entry; on a full match
 * advance the stage so the guest sees the intercept happened, then skip
 * the trapped instruction.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
891 
892 
/*
 * Build the EPT paging structures: pick a supported paging-structure
 * memory type (UC preferred, else WB), require 4-level walks, allocate
 * the PML4 and identity-map guest physical memory up to max(RAM size,
 * 4GiB) with read/write/execute permissions.
 * Returns 0 on success, 1 if a required EPT capability is missing or the
 * mapping could not be built.
 */
static int setup_ept()
{
	int support_2m;
	unsigned long end_of_memory;

	if (!(ept_vpid.val & EPT_CAP_UC) &&
			!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory type "
				"UC&WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	/* Page-walk length field holds (length - 1); 3 selects 4 levels. */
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	vmcs_write(EPTP, eptp);
	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	if (setup_ept_range(pml4, 0, end_of_memory,
			0, support_2m, EPT_WA | EPT_RA | EPT_EA)) {
		printf("\tSet ept tables failed.\n");
		return 1;
	}
	return 0;
}
928 
/*
 * Enable EPT (secondary controls + CPU_EPT, masked by hardware support),
 * build the tables via setup_ept() (failure is recorded in init_fail and
 * checked by ept_main), then prepare two tagged data pages and remap the
 * guest address of data_page1 onto the host frame of data_page2.
 */
static void ept_init()
{
	u32 ctrl_cpu[2];

	init_fail = false;
	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		init_fail = true;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	/* Guest accesses to data_page1 hit data_page2's frame. */
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);
}
953 
954 static void ept_main()
955 {
956 	if (init_fail)
957 		return;
958 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)
959 		&& !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
960 		printf("\tEPT is not supported");
961 		return;
962 	}
963 	set_stage(0);
964 	if (*((u32 *)data_page2) != MAGIC_VAL_1 &&
965 			*((u32 *)data_page1) != MAGIC_VAL_1)
966 		report("EPT basic framework - read", 0);
967 	else {
968 		*((u32 *)data_page2) = MAGIC_VAL_3;
969 		vmcall();
970 		if (get_stage() == 1) {
971 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
972 					*((u32 *)data_page2) == MAGIC_VAL_2)
973 				report("EPT basic framework", 1);
974 			else
975 				report("EPT basic framework - remap", 1);
976 		}
977 	}
978 	// Test EPT Misconfigurations
979 	set_stage(1);
980 	vmcall();
981 	*((u32 *)data_page1) = MAGIC_VAL_1;
982 	if (get_stage() != 2) {
983 		report("EPT misconfigurations", 0);
984 		goto t1;
985 	}
986 	set_stage(2);
987 	vmcall();
988 	*((u32 *)data_page1) = MAGIC_VAL_1;
989 	if (get_stage() != 3) {
990 		report("EPT misconfigurations", 0);
991 		goto t1;
992 	}
993 	report("EPT misconfigurations", 1);
994 t1:
995 	// Test EPT violation
996 	set_stage(3);
997 	vmcall();
998 	*((u32 *)data_page1) = MAGIC_VAL_1;
999 	if (get_stage() == 4)
1000 		report("EPT violation - page permission", 1);
1001 	else
1002 		report("EPT violation - page permission", 0);
1003 	// Violation caused by EPT paging structure
1004 	set_stage(4);
1005 	vmcall();
1006 	*((u32 *)data_page1) = MAGIC_VAL_2;
1007 	if (get_stage() == 5)
1008 		report("EPT violation - paging structure", 1);
1009 	else
1010 		report("EPT violation - paging structure", 0);
1011 	return;
1012 }
1013 
/*
 * Host side of the EPT test.
 *  - VMCALL stages rearrange the mappings the guest will touch next:
 *    0: verify the remapped write, then identity-map data_page2;
 *    1: write-only mapping (misconfiguration on next guest write);
 *    2: reserved memory type 2 in the entry (misconfiguration);
 *    3: clear the leaf PTE's present bits (violation on access);
 *    4: clear present bits in the intermediate structure (violation).
 *  - VMX_EPT_MISCONFIG / VMX_EPT_VIOLATION exits check the stage (and,
 *    for violations, the exit qualification bits), restore a sane
 *    mapping and advance the stage.
 * INVEPT is issued after each mapping change to flush cached
 * translations.
 */
static int ept_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	/* Preserved across exits so the violation cases can restore them. */
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				set_stage(get_stage() + 1);
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write\n", 0);
			break;
		case 1:
			/* Write-only entry: misconfigured by definition. */
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1, EPT_WA);
			invept(INVEPT_SINGLE, eptp);
			break;
		case 2:
			/* Memory type 2 is reserved: misconfiguration. */
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1,
				EPT_RA | EPT_WA | EPT_EA |
				(2 << EPT_MEM_TYPE_SHIFT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 3:
			/* Make the leaf entry non-present. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Make the paging-structure entry non-present. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unknown stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (get_stage()) {
		case 1:
		case 2:
			/* Expected: restore a valid mapping and advance. */
			set_stage(get_stage() + 1);
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1,
				EPT_RA | EPT_WA | EPT_EA);
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unknown stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(get_stage()) {
		case 3:
			/* Guest write to a non-present leaf entry. */
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Walk hit a non-present paging-structure entry. */
			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unknown stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1122 
/* name/init/guest_main/exit_handler/syscall_handler/guest_regs
   basic_* just implement some basic functions.
   The NULL-name entry terminates the table. */
struct vmx_test vmx_tests[] = {
	{ "null", basic_init, basic_guest_main, basic_exit_handler,
		basic_syscall_handler, {0} },
	{ "vmenter", basic_init, vmenter_main, vmenter_exit_handler,
		basic_syscall_handler, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, basic_syscall_handler, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, basic_syscall_handler, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
	{ "CR shadowing", basic_init, cr_shadowing_main,
		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		basic_syscall_handler, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, basic_syscall_handler, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler,
		basic_syscall_handler, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1146