xref: /kvm-unit-tests/x86/vmx_tests.c (revision 7d9f863f71c790d7413c8b7c1b27b79612f3703f)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 
/* Host MSR values captured at init so exit handlers can check/restore them. */
u64 ia32_pat;
u64 ia32_efer;
/* Test sequencing counter shared between guest code and host exit handlers. */
volatile u32 stage;
/* Backing pages for the VMCS I/O bitmaps (I/O bitmap test). */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

/* EPT test state: guest-physical PML4, the EPT pointer, and two data pages. */
unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;
22 
/* Issue VMCALL to force a VM-exit into the host-side exit handler. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
27 
/*
 * Store the shared test stage. The compiler barriers keep the store from
 * being reordered against the surrounding test steps (stage is volatile,
 * but adjacent non-volatile accesses could otherwise move across it).
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
34 
/* Load the shared test stage with compiler barriers around the read. */
static inline u32 get_stage()
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
44 
/* Guest body for the "null" test: do nothing; any VM-exit is unexpected. */
void basic_guest_main()
{
}
48 
/*
 * Exit handler for the "null" test. The guest performs no exiting
 * operation, so reaching here at all is a failure: report it, dump the
 * exit information, and terminate the test.
 */
int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}
55 
56 void vmenter_main()
57 {
58 	u64 rax;
59 	u64 rsp, resume_rsp;
60 
61 	report("test vmlaunch", 1);
62 
63 	asm volatile(
64 		"mov %%rsp, %0\n\t"
65 		"mov %3, %%rax\n\t"
66 		"vmcall\n\t"
67 		"mov %%rax, %1\n\t"
68 		"mov %%rsp, %2\n\t"
69 		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
70 		: "g"(0xABCD));
71 	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
72 }
73 
74 int vmenter_exit_handler()
75 {
76 	u64 guest_rip;
77 	ulong reason;
78 
79 	guest_rip = vmcs_read(GUEST_RIP);
80 	reason = vmcs_read(EXI_REASON) & 0xff;
81 	switch (reason) {
82 	case VMX_VMCALL:
83 		if (regs.rax != 0xABCD) {
84 			report("test vmresume", 0);
85 			return VMX_TEST_VMEXIT;
86 		}
87 		regs.rax = 0xFFFF;
88 		vmcs_write(GUEST_RIP, guest_rip + 3);
89 		return VMX_TEST_RESUME;
90 	default:
91 		report("test vmresume", 0);
92 		print_vmexit_info();
93 	}
94 	return VMX_TEST_VMEXIT;
95 }
96 
/* Preemption-timer test state: TSC-to-timer shift, start TSC, programmed value. */
u32 preempt_scale;
volatile unsigned long long tsc_val;
volatile u32 preempt_val;
100 
101 int preemption_timer_init()
102 {
103 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
104 		printf("\tPreemption timer is not supported\n");
105 		return VMX_TEST_EXIT;
106 	}
107 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
108 	preempt_val = 10000000;
109 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
110 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
111 
112 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
113 		printf("\tSave preemption value is not supported\n");
114 
115 	return VMX_TEST_START;
116 }
117 
118 void preemption_timer_main()
119 {
120 	tsc_val = rdtsc();
121 	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
122 		set_stage(0);
123 		vmcall();
124 		if (get_stage() == 1)
125 			vmcall();
126 	}
127 	while (1) {
128 		if (((rdtsc() - tsc_val) >> preempt_scale)
129 				> 10 * preempt_val) {
130 			set_stage(2);
131 			vmcall();
132 		}
133 	}
134 }
135 
/*
 * Host-side handler for the preemption timer test.
 * VMX_PREEMPT: verify the timer did not fire before the programmed value.
 * VMX_VMCALL stage 0: check the saved timer value, then enable
 *   EXI_SAVE_PREEMPT; stage 1: check the value counted down; stage 2:
 *   the guest timed out waiting, i.e. the timer never fired.
 * The timer pin control is cleared before the test terminates.
 */
int preemption_timer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		/* Firing earlier than the programmed countdown is a bug. */
		if (((rdtsc() - tsc_val) >> preempt_scale) < preempt_val)
			report("Preemption timer", 0);
		else
			report("Preemption timer", 1);
		break;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			/* Without EXI_SAVE_PREEMPT the field must be unchanged. */
			if (vmcs_read(PREEMPT_TIMER_VALUE) != preempt_val)
				report("Save preemption value", 0);
			else {
				set_stage(get_stage() + 1);
				/* Enable saving the timer value on VM-exit. */
				ctrl_exit = (vmcs_read(EXI_CONTROLS) |
					EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
				vmcs_write(EXI_CONTROLS, ctrl_exit);
			}
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 1:
			/* With saving enabled the value must have decreased. */
			if (vmcs_read(PREEMPT_TIMER_VALUE) >= preempt_val)
				report("Save preemption value", 0);
			else
				report("Save preemption value", 1);
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 2:
			/* Guest gave up spinning: the timer never fired. */
			report("Preemption timer", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	/* Turn the preemption timer back off before terminating. */
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
190 
191 void msr_bmp_init()
192 {
193 	void *msr_bitmap;
194 	u32 ctrl_cpu0;
195 
196 	msr_bitmap = alloc_page();
197 	memset(msr_bitmap, 0x0, PAGE_SIZE);
198 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
199 	ctrl_cpu0 |= CPU_MSR_BITMAP;
200 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
201 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
202 }
203 
204 static int test_ctrl_pat_init()
205 {
206 	u64 ctrl_ent;
207 	u64 ctrl_exi;
208 
209 	msr_bmp_init();
210 	ctrl_ent = vmcs_read(ENT_CONTROLS);
211 	ctrl_exi = vmcs_read(EXI_CONTROLS);
212 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
213 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
214 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
215 	vmcs_write(GUEST_PAT, 0x0);
216 	vmcs_write(HOST_PAT, ia32_pat);
217 	return VMX_TEST_START;
218 }
219 
220 static void test_ctrl_pat_main()
221 {
222 	u64 guest_ia32_pat;
223 
224 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
225 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
226 		printf("\tENT_LOAD_PAT is not supported.\n");
227 	else {
228 		if (guest_ia32_pat != 0) {
229 			report("Entry load PAT", 0);
230 			return;
231 		}
232 	}
233 	wrmsr(MSR_IA32_CR_PAT, 0x6);
234 	vmcall();
235 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
236 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
237 		if (guest_ia32_pat != ia32_pat) {
238 			report("Entry load PAT", 0);
239 			return;
240 		}
241 		report("Entry load PAT", 1);
242 	}
243 }
244 
245 static int test_ctrl_pat_exit_handler()
246 {
247 	u64 guest_rip;
248 	ulong reason;
249 	u64 guest_pat;
250 
251 	guest_rip = vmcs_read(GUEST_RIP);
252 	reason = vmcs_read(EXI_REASON) & 0xff;
253 	switch (reason) {
254 	case VMX_VMCALL:
255 		guest_pat = vmcs_read(GUEST_PAT);
256 		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
257 			printf("\tEXI_SAVE_PAT is not supported\n");
258 			vmcs_write(GUEST_PAT, 0x6);
259 		} else {
260 			if (guest_pat == 0x6)
261 				report("Exit save PAT", 1);
262 			else
263 				report("Exit save PAT", 0);
264 		}
265 		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
266 			printf("\tEXI_LOAD_PAT is not supported\n");
267 		else {
268 			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
269 				report("Exit load PAT", 1);
270 			else
271 				report("Exit load PAT", 0);
272 		}
273 		vmcs_write(GUEST_PAT, ia32_pat);
274 		vmcs_write(GUEST_RIP, guest_rip + 3);
275 		return VMX_TEST_RESUME;
276 	default:
277 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
278 		break;
279 	}
280 	return VMX_TEST_VMEXIT;
281 }
282 
283 static int test_ctrl_efer_init()
284 {
285 	u64 ctrl_ent;
286 	u64 ctrl_exi;
287 
288 	msr_bmp_init();
289 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
290 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
291 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
292 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
293 	ia32_efer = rdmsr(MSR_EFER);
294 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
295 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
296 	return VMX_TEST_START;
297 }
298 
299 static void test_ctrl_efer_main()
300 {
301 	u64 guest_ia32_efer;
302 
303 	guest_ia32_efer = rdmsr(MSR_EFER);
304 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
305 		printf("\tENT_LOAD_EFER is not supported.\n");
306 	else {
307 		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
308 			report("Entry load EFER", 0);
309 			return;
310 		}
311 	}
312 	wrmsr(MSR_EFER, ia32_efer);
313 	vmcall();
314 	guest_ia32_efer = rdmsr(MSR_EFER);
315 	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
316 		if (guest_ia32_efer != ia32_efer) {
317 			report("Entry load EFER", 0);
318 			return;
319 		}
320 		report("Entry load EFER", 1);
321 	}
322 }
323 
324 static int test_ctrl_efer_exit_handler()
325 {
326 	u64 guest_rip;
327 	ulong reason;
328 	u64 guest_efer;
329 
330 	guest_rip = vmcs_read(GUEST_RIP);
331 	reason = vmcs_read(EXI_REASON) & 0xff;
332 	switch (reason) {
333 	case VMX_VMCALL:
334 		guest_efer = vmcs_read(GUEST_EFER);
335 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
336 			printf("\tEXI_SAVE_EFER is not supported\n");
337 			vmcs_write(GUEST_EFER, ia32_efer);
338 		} else {
339 			if (guest_efer == ia32_efer)
340 				report("Exit save EFER", 1);
341 			else
342 				report("Exit save EFER", 0);
343 		}
344 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
345 			printf("\tEXI_LOAD_EFER is not supported\n");
346 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
347 		} else {
348 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
349 				report("Exit load EFER", 1);
350 			else
351 				report("Exit load EFER", 0);
352 		}
353 		vmcs_write(GUEST_PAT, ia32_efer);
354 		vmcs_write(GUEST_RIP, guest_rip + 3);
355 		return VMX_TEST_RESUME;
356 	default:
357 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
358 		break;
359 	}
360 	return VMX_TEST_VMEXIT;
361 }
362 
/* CR0/CR4 values last read/written by the guest; checked by the exit handler. */
u32 guest_cr0, guest_cr4;
364 
365 static void cr_shadowing_main()
366 {
367 	u32 cr0, cr4, tmp;
368 
369 	// Test read through
370 	set_stage(0);
371 	guest_cr0 = read_cr0();
372 	if (stage == 1)
373 		report("Read through CR0", 0);
374 	else
375 		vmcall();
376 	set_stage(1);
377 	guest_cr4 = read_cr4();
378 	if (stage == 2)
379 		report("Read through CR4", 0);
380 	else
381 		vmcall();
382 	// Test write through
383 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
384 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
385 	set_stage(2);
386 	write_cr0(guest_cr0);
387 	if (stage == 3)
388 		report("Write throuth CR0", 0);
389 	else
390 		vmcall();
391 	set_stage(3);
392 	write_cr4(guest_cr4);
393 	if (stage == 4)
394 		report("Write through CR4", 0);
395 	else
396 		vmcall();
397 	// Test read shadow
398 	set_stage(4);
399 	vmcall();
400 	cr0 = read_cr0();
401 	if (stage != 5) {
402 		if (cr0 == guest_cr0)
403 			report("Read shadowing CR0", 1);
404 		else
405 			report("Read shadowing CR0", 0);
406 	}
407 	set_stage(5);
408 	cr4 = read_cr4();
409 	if (stage != 6) {
410 		if (cr4 == guest_cr4)
411 			report("Read shadowing CR4", 1);
412 		else
413 			report("Read shadowing CR4", 0);
414 	}
415 	// Test write shadow (same value with shadow)
416 	set_stage(6);
417 	write_cr0(guest_cr0);
418 	if (stage == 7)
419 		report("Write shadowing CR0 (same value with shadow)", 0);
420 	else
421 		vmcall();
422 	set_stage(7);
423 	write_cr4(guest_cr4);
424 	if (stage == 8)
425 		report("Write shadowing CR4 (same value with shadow)", 0);
426 	else
427 		vmcall();
428 	// Test write shadow (different value)
429 	set_stage(8);
430 	tmp = guest_cr0 ^ X86_CR0_TS;
431 	asm volatile("mov %0, %%rsi\n\t"
432 		"mov %%rsi, %%cr0\n\t"
433 		::"m"(tmp)
434 		:"rsi", "memory", "cc");
435 	if (stage != 9)
436 		report("Write shadowing different X86_CR0_TS", 0);
437 	else
438 		report("Write shadowing different X86_CR0_TS", 1);
439 	set_stage(9);
440 	tmp = guest_cr0 ^ X86_CR0_MP;
441 	asm volatile("mov %0, %%rsi\n\t"
442 		"mov %%rsi, %%cr0\n\t"
443 		::"m"(tmp)
444 		:"rsi", "memory", "cc");
445 	if (stage != 10)
446 		report("Write shadowing different X86_CR0_MP", 0);
447 	else
448 		report("Write shadowing different X86_CR0_MP", 1);
449 	set_stage(10);
450 	tmp = guest_cr4 ^ X86_CR4_TSD;
451 	asm volatile("mov %0, %%rsi\n\t"
452 		"mov %%rsi, %%cr4\n\t"
453 		::"m"(tmp)
454 		:"rsi", "memory", "cc");
455 	if (stage != 11)
456 		report("Write shadowing different X86_CR4_TSD", 0);
457 	else
458 		report("Write shadowing different X86_CR4_TSD", 1);
459 	set_stage(11);
460 	tmp = guest_cr4 ^ X86_CR4_DE;
461 	asm volatile("mov %0, %%rsi\n\t"
462 		"mov %%rsi, %%cr4\n\t"
463 		::"m"(tmp)
464 		:"rsi", "memory", "cc");
465 	if (stage != 12)
466 		report("Write shadowing different X86_CR4_DE", 0);
467 	else
468 		report("Write shadowing different X86_CR4_DE", 1);
469 }
470 
/*
 * Host-side handler for the CR shadowing test.
 * VMX_VMCALL stages 0-3 compare the values the guest saw/wrote against
 *   GUEST_CR0/GUEST_CR4 (read/write-through checks); stage 4 installs the
 *   CR0/CR4 guest/host masks and read shadows with flipped TS/MP and
 *   TSD/DE bits; stages 6-7 verify same-value shadow writes did not
 *   change the real guest CR values.
 * VMX_CR exits at stages 4-7 are failures (the access should not have
 *   exited); at stages 8-11 they are the expected result of writing a
 *   value that differs from the shadow, verified via the exit
 *   qualification's register/CR encoding.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Install masks + read shadows with flipped bits for
			 * the shadow-read and shadow-write sub-tests. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			/* Same-value write must not have touched real CR0. */
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			/* Same-value write must not have touched real CR4. */
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (get_stage()) {
		case 4:
			/* Masked CR0 read should have come from the shadow. */
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			/* Write matching the shadow should not exit. */
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
581 
582 static int iobmp_init()
583 {
584 	u32 ctrl_cpu0;
585 
586 	io_bitmap_a = alloc_page();
587 	io_bitmap_a = alloc_page();
588 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
589 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
590 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
591 	ctrl_cpu0 |= CPU_IO_BITMAP;
592 	ctrl_cpu0 &= (~CPU_IO);
593 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
594 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
595 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
596 	return VMX_TEST_START;
597 }
598 
/*
 * Guest body for the I/O bitmap test.  Each access either passes (bitmap
 * bit clear) or traps to the exit handler, which advances the stage; the
 * stage value afterwards tells us whether the expected trap occurred.
 * Bitmap A covers ports 0x0000-0x7FFF, bitmap B covers 0x8000-0xFFFF.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	/* Trap ports 0-7; stage jumps to 2 (handler treats 0/1 as pass-through). */
	((u8 *)io_bitmap_a)[0] = 0xFF;
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	/* NOTE(review): no pass-report in the else branch here, unlike the
	 * other width checks — presumably an omission; confirm intent. */
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	/* Port 0x9000 maps to offset 0x1000 in bitmap B (0x9000 - 0x8000). */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	/* 4-byte access at 0x4FFF straddles into the trapped 0x5000 range. */
	set_stage(7);
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	/* 4-byte access at 0xFFFF wraps past the end of the bitmaps. */
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);
}
659 
/*
 * Host-side handler for the I/O bitmap test.  On each VMX_IO exit it
 * validates the exit qualification (access size, direction, port number)
 * against what the guest stage expects, advances the stage on success,
 * and resumes the guest past the I/O instruction.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (get_stage()) {
		case 0:
		case 1:
			/* Stage 0/1 accesses should pass; an exit here still
			 * advances the stage so the guest detects it. */
			set_stage(stage + 1);
			break;
		case 2:
			/* inb: expect byte-sized IN. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			/* outw: expect word-sized OUT. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			/* inl: expect dword-sized access. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			/* Trapped port in bitmap A's low range. */
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			/* Trapped port in bitmap B's high range. */
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			/* Partial overlap: access starting at 0x4FFF. */
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			/* Wrap-around access at the top of the port space. */
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
737 
/* Interception category of an insn_table entry (compared with ==). */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/*
 * Bug fix: test_field is checked with bitwise AND
 * (test_field & FIELD_EXIT_QUAL), so these must be distinct bits.
 * FIELD_EXIT_QUAL was defined as 0, which made the exit-qualification
 * check unconditionally false and silently skipped (e.g. for INVLPG).
 */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
745 
/*
 * Guest-side instruction stubs: each label executes exactly one target
 * instruction then returns, so the exit handler can skip the instruction
 * by adding the VM-exit instruction length to RIP and the guest resumes
 * at the ret.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
/* C-callable entry points for the asm stubs above. */
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();
768 
/* Index of the instruction-intercept test case currently being run. */
u32 cur_insn;

/* One instruction-intercept test case. */
struct insn_table {
	const char *name;
	u32 flag;		// execution-control bit enabling interception
	void (*insn_func)();	// stub that executes the instruction
	u32 type;		// INSN_CPU0 / INSN_CPU1 / INSN_ALWAYS_TRAP / INSN_NEVER_TRAP
	u32 reason;		// expected VM-exit reason
	ulong exit_qual;	// expected exit qualification
	u32 insn_info;		// expected VM-exit instruction information
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which fields need to be tested; reason is always tested
	u32 test_field;
};
783 
/*
 * Add more test cases of instruction intercept here. Elements in this
 * table are:
 *	name/control flag/insn function/type/exit reason/exit qualification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
810 
811 static int insn_intercept_init()
812 {
813 	u32 ctrl_cpu[2];
814 
815 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
816 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
817 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
818 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
819 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
820 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
821 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
822 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
823 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
824 	return VMX_TEST_START;
825 }
826 
827 static void insn_intercept_main()
828 {
829 	cur_insn = 0;
830 	while(insn_table[cur_insn].name != NULL) {
831 		set_stage(cur_insn);
832 		if ((insn_table[cur_insn].type == INSN_CPU0
833 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
834 			|| (insn_table[cur_insn].type == INSN_CPU1
835 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
836 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
837 				insn_table[cur_insn].name);
838 			continue;
839 		}
840 		insn_table[cur_insn].insn_func();
841 		switch (insn_table[cur_insn].type) {
842 		case INSN_CPU0:
843 		case INSN_CPU1:
844 		case INSN_ALWAYS_TRAP:
845 			if (stage != cur_insn + 1)
846 				report(insn_table[cur_insn].name, 0);
847 			else
848 				report(insn_table[cur_insn].name, 1);
849 			break;
850 		case INSN_NEVER_TRAP:
851 			if (stage == cur_insn + 1)
852 				report(insn_table[cur_insn].name, 0);
853 			else
854 				report(insn_table[cur_insn].name, 1);
855 			break;
856 		}
857 		cur_insn ++;
858 	}
859 }
860 
861 static int insn_intercept_exit_handler()
862 {
863 	u64 guest_rip;
864 	u32 reason;
865 	ulong exit_qual;
866 	u32 insn_len;
867 	u32 insn_info;
868 	bool pass;
869 
870 	guest_rip = vmcs_read(GUEST_RIP);
871 	reason = vmcs_read(EXI_REASON) & 0xff;
872 	exit_qual = vmcs_read(EXI_QUALIFICATION);
873 	insn_len = vmcs_read(EXI_INST_LEN);
874 	insn_info = vmcs_read(EXI_INST_INFO);
875 	pass = (cur_insn == get_stage()) &&
876 			insn_table[cur_insn].reason == reason;
877 	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
878 		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
879 	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
880 		pass = pass && insn_table[cur_insn].insn_info == insn_info;
881 	if (pass)
882 		set_stage(stage + 1);
883 	vmcs_write(GUEST_RIP, guest_rip + insn_len);
884 	return VMX_TEST_RESUME;
885 }
886 
887 
888 static int setup_ept()
889 {
890 	int support_2m;
891 	unsigned long end_of_memory;
892 
893 	if (!(ept_vpid.val & EPT_CAP_UC) &&
894 			!(ept_vpid.val & EPT_CAP_WB)) {
895 		printf("\tEPT paging-structure memory type "
896 				"UC&WB are not supported\n");
897 		return 1;
898 	}
899 	if (ept_vpid.val & EPT_CAP_UC)
900 		eptp = EPT_MEM_TYPE_UC;
901 	else
902 		eptp = EPT_MEM_TYPE_WB;
903 	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
904 		printf("\tPWL4 is not supported\n");
905 		return 1;
906 	}
907 	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
908 	pml4 = alloc_page();
909 	memset(pml4, 0, PAGE_SIZE);
910 	eptp |= virt_to_phys(pml4);
911 	vmcs_write(EPTP, eptp);
912 	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
913 	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
914 	if (end_of_memory < (1ul << 32))
915 		end_of_memory = (1ul << 32);
916 	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
917 			EPT_WA | EPT_RA | EPT_EA);
918 	return 0;
919 }
920 
921 static int ept_init()
922 {
923 	unsigned long base_addr1, base_addr2;
924 	u32 ctrl_cpu[2];
925 
926 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
927 	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
928 		printf("\tEPT is not supported");
929 		return VMX_TEST_EXIT;
930 	}
931 
932 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
933 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
934 	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
935 		& ctrl_cpu_rev[0].clr;
936 	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
937 		& ctrl_cpu_rev[1].clr;
938 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
939 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
940 	if (setup_ept())
941 		return VMX_TEST_EXIT;
942 	data_page1 = alloc_page();
943 	data_page2 = alloc_page();
944 	memset(data_page1, 0x0, PAGE_SIZE);
945 	memset(data_page2, 0x0, PAGE_SIZE);
946 	*((u32 *)data_page1) = MAGIC_VAL_1;
947 	*((u32 *)data_page2) = MAGIC_VAL_2;
948 	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
949 	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
950 	setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
951 			EPT_WA | EPT_RA | EPT_EA);
952 	setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
953 			EPT_WA | EPT_RA | EPT_EA);
954 	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
955 			EPT_RA | EPT_WA | EPT_EA);
956 	return VMX_TEST_START;
957 }
958 
959 static void ept_main()
960 {
961 	set_stage(0);
962 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
963 			*((u32 *)data_page1) != MAGIC_VAL_1)
964 		report("EPT basic framework - read", 0);
965 	else {
966 		*((u32 *)data_page2) = MAGIC_VAL_3;
967 		vmcall();
968 		if (get_stage() == 1) {
969 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
970 					*((u32 *)data_page2) == MAGIC_VAL_2)
971 				report("EPT basic framework", 1);
972 			else
973 				report("EPT basic framework - remap", 1);
974 		}
975 	}
976 	// Test EPT Misconfigurations
977 	set_stage(1);
978 	vmcall();
979 	*((u32 *)data_page1) = MAGIC_VAL_1;
980 	if (get_stage() != 2) {
981 		report("EPT misconfigurations", 0);
982 		goto t1;
983 	}
984 	set_stage(2);
985 	vmcall();
986 	*((u32 *)data_page1) = MAGIC_VAL_1;
987 	if (get_stage() != 3) {
988 		report("EPT misconfigurations", 0);
989 		goto t1;
990 	}
991 	report("EPT misconfigurations", 1);
992 t1:
993 	// Test EPT violation
994 	set_stage(3);
995 	vmcall();
996 	*((u32 *)data_page1) = MAGIC_VAL_1;
997 	if (get_stage() == 4)
998 		report("EPT violation - page permission", 1);
999 	else
1000 		report("EPT violation - page permission", 0);
1001 	// Violation caused by EPT paging structure
1002 	set_stage(4);
1003 	vmcall();
1004 	*((u32 *)data_page1) = MAGIC_VAL_2;
1005 	if (get_stage() == 5)
1006 		report("EPT violation - paging structure", 1);
1007 	else
1008 		report("EPT violation - paging structure", 0);
1009 }
1010 
/*
 * Host-side handler for the EPT framework test.
 * VMX_VMCALL: stage 0 verifies the write went to the remapped page and
 *   restores data_page2's identity mapping; stage 1 installs a
 *   write-only mapping (misconfig: W without R); stage 2 installs an
 *   illegal memory type (misconfig); stage 3 clears the present bits of
 *   data_page1's PTE; stage 4 clears the present bits one level up (the
 *   paging-structure entry that maps the PTE page).
 * VMX_EPT_MISCONFIG (stages 1/2) and VMX_EPT_VIOLATION (stages 3/4) are
 *   the expected results; the handler validates the exit qualification,
 *   repairs the mapping, flushes with INVEPT and resumes.
 */
static int ept_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	/* Static: saved across the VMCALL exit and the later violation exit. */
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			/* Guest wrote MAGIC_VAL_3 via data_page2, which is
			 * remapped onto data_page1: check, then restore the
			 * identity mapping for data_page2. */
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				set_stage(get_stage() + 1);
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write\n", 0);
			break;
		case 1:
			/* Write-only EPT entry (W without R) => misconfig. */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1, EPT_WA);
			invept(INVEPT_SINGLE, eptp);
			break;
		case 2:
			/* Memory type 2 is reserved => misconfig. */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA |
 				(2 << EPT_MEM_TYPE_SHIFT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 3:
			/* Clear present bits of the leaf PTE => violation. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Clear present bits in the level-2 entry mapping the
			 * PTE page => violation during the page walk. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (get_stage()) {
		case 1:
		case 2:
			/* Expected: repair the mapping and advance. */
			set_stage(get_stage() + 1);
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA);
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(get_stage()) {
		case 3:
			/* Write to a non-present leaf: expect WR + valid
			 * linear and physical address bits. */
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Walk failure at the paging structure: expect RD +
			 * valid linear address only. */
			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1119 
/*
 * Registry of all nested-virtualization test cases, consumed by the VMX
 * test framework.  Fields per entry:
 *   name / init / guest_main / exit_handler / syscall_handler / guest_regs
 * A NULL init means no host-side setup is needed; the list is terminated
 * by an all-NULL sentinel entry.
 */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1139