xref: /kvm-unit-tests/x86/vmx_tests.c (revision c592c1510d46962896ce90ee9a19fb07efe1430a)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 
/* Test-global state shared between guest bodies and exit handlers. */
u64 ia32_pat;			/* host IA32_PAT saved by the PAT control test */
u64 ia32_efer;			/* host IA32_EFER saved by the EFER control test */
volatile u32 stage;		/* test stage, advanced by guest/handler in lockstep */
void *io_bitmap_a, *io_bitmap_b;	/* I/O bitmaps for the I/O bitmap test */
u16 ioport;

unsigned long *pml4;		/* root of the EPT paging structures */
u64 eptp;			/* EPT pointer programmed into the VMCS */
void *data_page1, *data_page2;	/* guest data pages used by the EPT test */
22 
/* Execute VMCALL to force a VM exit into the host-side exit handler. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
27 
/*
 * Set the current test stage.  The compiler barriers prevent the store
 * to the volatile stage variable from being reordered with surrounding
 * test code.
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
34 
/*
 * Read the current test stage, with compiler barriers so the load is
 * not reordered or cached across surrounding test code.
 */
static inline u32 get_stage()
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
44 
/*
 * Minimal guest body for the "null" test: just print a message.
 * NOTE(review): the message says "null_guest_main" while the function
 * is named basic_guest_main; left as-is since it is runtime output.
 */
void basic_guest_main()
{
	/* Here is a basic guest_main, print Hello World */
	printf("\tHello World, this is null_guest_main!\n");
}
50 
51 int basic_exit_handler()
52 {
53 	u64 guest_rip;
54 	ulong reason;
55 
56 	guest_rip = vmcs_read(GUEST_RIP);
57 	reason = vmcs_read(EXI_REASON) & 0xff;
58 
59 	switch (reason) {
60 	case VMX_VMCALL:
61 		print_vmexit_info();
62 		vmcs_write(GUEST_RIP, guest_rip + 3);
63 		return VMX_TEST_RESUME;
64 	default:
65 		break;
66 	}
67 	printf("ERROR : Unhandled vmx exit.\n");
68 	print_vmexit_info();
69 	return VMX_TEST_EXIT;
70 }
71 
/* Default syscall handler: tests that do not use syscalls ignore them. */
void basic_syscall_handler(u64 syscall_no)
{
}
75 
/*
 * Guest body for the vmenter test.  Reaching this point proves
 * vmlaunch worked.  The guest then VMCALLs out with 0xABCD in RAX; the
 * exit handler rewrites RAX to 0xFFFF and resumes, so checking RAX and
 * that RSP survived the round trip verifies vmresume.
 */
void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}
93 
94 int vmenter_exit_handler()
95 {
96 	u64 guest_rip;
97 	ulong reason;
98 
99 	guest_rip = vmcs_read(GUEST_RIP);
100 	reason = vmcs_read(EXI_REASON) & 0xff;
101 	switch (reason) {
102 	case VMX_VMCALL:
103 		if (regs.rax != 0xABCD) {
104 			report("test vmresume", 0);
105 			return VMX_TEST_VMEXIT;
106 		}
107 		regs.rax = 0xFFFF;
108 		vmcs_write(GUEST_RIP, guest_rip + 3);
109 		return VMX_TEST_RESUME;
110 	default:
111 		report("test vmresume", 0);
112 		print_vmexit_info();
113 	}
114 	return VMX_TEST_VMEXIT;
115 }
116 
u32 preempt_scale;			/* TSC-to-timer shift from IA32_VMX_MISC[4:0] */
volatile unsigned long long tsc_val;	/* TSC snapshot taken by the guest */
volatile u32 preempt_val;		/* programmed preemption timer value */
120 
121 int preemption_timer_init()
122 {
123 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
124 		printf("\tPreemption timer is not supported\n");
125 		return VMX_TEST_EXIT;
126 	}
127 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
128 	preempt_val = 10000000;
129 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
130 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
131 
132 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
133 		printf("\tSave preemption value is not supported\n");
134 
135 	return VMX_TEST_START;
136 }
137 
/*
 * Guest body for the preemption timer test.  If the "save preemption
 * value" exit control is available, stages 0/1 round-trip through the
 * handler to check that the remaining timer value is saved on exit.
 * The guest then spins until well past the programmed deadline; the
 * stage-2 VMCALL is only reached if the timer exit failed to fire.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		set_stage(0);
		vmcall();
		if (get_stage() == 1)
			vmcall();
	}
	/* Busy-wait 10x the timer period; the timer exit should break us out. */
	while (1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			set_stage(2);
			vmcall();
		}
	}
}
155 
/*
 * Host-side handler for the preemption timer test.
 *
 * VMX_PREEMPT: check at least one full timer period elapsed (scaled
 * TSC delta) before the exit fired.
 * VMX_VMCALL stages:
 *   0 - saved PREEMPT_TIMER_VALUE must equal the programmed value;
 *       then enable EXI_SAVE_PREEMPT for the next round trip;
 *   1 - saved value must have decreased across the guest run;
 *   2 - reached only if the timer never fired: report failure.
 * The timer is disabled again before the final test exit.
 */
int preemption_timer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		/* Fail if the exit came before the programmed deadline. */
		if (((rdtsc() - tsc_val) >> preempt_scale) < preempt_val)
			report("Preemption timer", 0);
		else
			report("Preemption timer", 1);
		break;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (vmcs_read(PREEMPT_TIMER_VALUE) != preempt_val)
				report("Save preemption value", 0);
			else {
				set_stage(get_stage() + 1);
				/* Turn on saving, masked to supported bits. */
				ctrl_exit = (vmcs_read(EXI_CONTROLS) |
					EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
				vmcs_write(EXI_CONTROLS, ctrl_exit);
			}
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 1:
			/* The saved value must be below the initial value. */
			if (vmcs_read(PREEMPT_TIMER_VALUE) >= preempt_val)
				report("Save preemption value", 0);
			else
				report("Save preemption value", 1);
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 2:
			/* Guest spun past the deadline without a timer exit. */
			report("Preemption timer", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	/* Disable the timer before finishing the test. */
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
210 
211 void msr_bmp_init()
212 {
213 	void *msr_bitmap;
214 	u32 ctrl_cpu0;
215 
216 	msr_bitmap = alloc_page();
217 	memset(msr_bitmap, 0x0, PAGE_SIZE);
218 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
219 	ctrl_cpu0 |= CPU_MSR_BITMAP;
220 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
221 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
222 }
223 
224 static int test_ctrl_pat_init()
225 {
226 	u64 ctrl_ent;
227 	u64 ctrl_exi;
228 
229 	msr_bmp_init();
230 	ctrl_ent = vmcs_read(ENT_CONTROLS);
231 	ctrl_exi = vmcs_read(EXI_CONTROLS);
232 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
233 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
234 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
235 	vmcs_write(GUEST_PAT, 0x0);
236 	vmcs_write(HOST_PAT, ia32_pat);
237 	return VMX_TEST_START;
238 }
239 
/*
 * Guest body for the PAT control test.  First checks VM entry loaded
 * the PAT programmed in the guest-state area (0), then writes a new
 * PAT value and VMCALLs out; the handler verifies exit save/load and
 * sets GUEST_PAT back to the host value, which the re-entry check here
 * validates.
 */
static void test_ctrl_pat_main()
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
		/* Handler wrote ia32_pat into GUEST_PAT before resuming. */
		if (guest_ia32_pat != ia32_pat) {
			report("Entry load PAT", 0);
			return;
		}
		report("Entry load PAT", 1);
	}
}
264 
/*
 * Handler for the PAT control test's VMCALL: check EXI_SAVE_PAT stored
 * the guest's PAT (0x6) and EXI_LOAD_PAT restored the host PAT, then
 * load the host PAT into the guest for the re-entry check.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			/* Emulate the save so the entry check still works. */
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		vmcs_write(GUEST_PAT, ia32_pat);
		/* Skip the 3-byte VMCALL instruction. */
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
302 
303 static int test_ctrl_efer_init()
304 {
305 	u64 ctrl_ent;
306 	u64 ctrl_exi;
307 
308 	msr_bmp_init();
309 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
310 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
311 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
312 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
313 	ia32_efer = rdmsr(MSR_EFER);
314 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
315 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
316 	return VMX_TEST_START;
317 }
318 
/*
 * Guest body for the EFER control test.  First checks VM entry loaded
 * the EFER programmed in the guest-state area (host EFER with NX
 * flipped), then writes the real host EFER and VMCALLs out; after the
 * handler restores GUEST_EFER, the re-entry value is checked again.
 */
static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
		if (guest_ia32_efer != ia32_efer) {
			report("Entry load EFER", 0);
			return;
		}
		report("Entry load EFER", 1);
	}
}
343 
344 static int test_ctrl_efer_exit_handler()
345 {
346 	u64 guest_rip;
347 	ulong reason;
348 	u64 guest_efer;
349 
350 	guest_rip = vmcs_read(GUEST_RIP);
351 	reason = vmcs_read(EXI_REASON) & 0xff;
352 	switch (reason) {
353 	case VMX_VMCALL:
354 		guest_efer = vmcs_read(GUEST_EFER);
355 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
356 			printf("\tEXI_SAVE_EFER is not supported\n");
357 			vmcs_write(GUEST_EFER, ia32_efer);
358 		} else {
359 			if (guest_efer == ia32_efer)
360 				report("Exit save EFER", 1);
361 			else
362 				report("Exit save EFER", 0);
363 		}
364 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
365 			printf("\tEXI_LOAD_EFER is not supported\n");
366 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
367 		} else {
368 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
369 				report("Exit load EFER", 1);
370 			else
371 				report("Exit load EFER", 0);
372 		}
373 		vmcs_write(GUEST_PAT, ia32_efer);
374 		vmcs_write(GUEST_RIP, guest_rip + 3);
375 		return VMX_TEST_RESUME;
376 	default:
377 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
378 		break;
379 	}
380 	return VMX_TEST_VMEXIT;
381 }
382 
/* CR0/CR4 values the guest last read or wrote (CR shadowing test). */
u32 guest_cr0, guest_cr4;
384 
385 static void cr_shadowing_main()
386 {
387 	u32 cr0, cr4, tmp;
388 
389 	// Test read through
390 	set_stage(0);
391 	guest_cr0 = read_cr0();
392 	if (stage == 1)
393 		report("Read through CR0", 0);
394 	else
395 		vmcall();
396 	set_stage(1);
397 	guest_cr4 = read_cr4();
398 	if (stage == 2)
399 		report("Read through CR4", 0);
400 	else
401 		vmcall();
402 	// Test write through
403 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
404 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
405 	set_stage(2);
406 	write_cr0(guest_cr0);
407 	if (stage == 3)
408 		report("Write throuth CR0", 0);
409 	else
410 		vmcall();
411 	set_stage(3);
412 	write_cr4(guest_cr4);
413 	if (stage == 4)
414 		report("Write through CR4", 0);
415 	else
416 		vmcall();
417 	// Test read shadow
418 	set_stage(4);
419 	vmcall();
420 	cr0 = read_cr0();
421 	if (stage != 5) {
422 		if (cr0 == guest_cr0)
423 			report("Read shadowing CR0", 1);
424 		else
425 			report("Read shadowing CR0", 0);
426 	}
427 	set_stage(5);
428 	cr4 = read_cr4();
429 	if (stage != 6) {
430 		if (cr4 == guest_cr4)
431 			report("Read shadowing CR4", 1);
432 		else
433 			report("Read shadowing CR4", 0);
434 	}
435 	// Test write shadow (same value with shadow)
436 	set_stage(6);
437 	write_cr0(guest_cr0);
438 	if (stage == 7)
439 		report("Write shadowing CR0 (same value with shadow)", 0);
440 	else
441 		vmcall();
442 	set_stage(7);
443 	write_cr4(guest_cr4);
444 	if (stage == 8)
445 		report("Write shadowing CR4 (same value with shadow)", 0);
446 	else
447 		vmcall();
448 	// Test write shadow (different value)
449 	set_stage(8);
450 	tmp = guest_cr0 ^ X86_CR0_TS;
451 	asm volatile("mov %0, %%rsi\n\t"
452 		"mov %%rsi, %%cr0\n\t"
453 		::"m"(tmp)
454 		:"rsi", "memory", "cc");
455 	if (stage != 9)
456 		report("Write shadowing different X86_CR0_TS", 0);
457 	else
458 		report("Write shadowing different X86_CR0_TS", 1);
459 	set_stage(9);
460 	tmp = guest_cr0 ^ X86_CR0_MP;
461 	asm volatile("mov %0, %%rsi\n\t"
462 		"mov %%rsi, %%cr0\n\t"
463 		::"m"(tmp)
464 		:"rsi", "memory", "cc");
465 	if (stage != 10)
466 		report("Write shadowing different X86_CR0_MP", 0);
467 	else
468 		report("Write shadowing different X86_CR0_MP", 1);
469 	set_stage(10);
470 	tmp = guest_cr4 ^ X86_CR4_TSD;
471 	asm volatile("mov %0, %%rsi\n\t"
472 		"mov %%rsi, %%cr4\n\t"
473 		::"m"(tmp)
474 		:"rsi", "memory", "cc");
475 	if (stage != 11)
476 		report("Write shadowing different X86_CR4_TSD", 0);
477 	else
478 		report("Write shadowing different X86_CR4_TSD", 1);
479 	set_stage(11);
480 	tmp = guest_cr4 ^ X86_CR4_DE;
481 	asm volatile("mov %0, %%rsi\n\t"
482 		"mov %%rsi, %%cr4\n\t"
483 		::"m"(tmp)
484 		:"rsi", "memory", "cc");
485 	if (stage != 12)
486 		report("Write shadowing different X86_CR4_DE", 0);
487 	else
488 		report("Write shadowing different X86_CR4_DE", 1);
489 }
490 
/*
 * Host-side handler for the CR shadowing test.
 *
 * VMX_VMCALL stages 0-3 verify the guest's read/write-through values
 * against the real GUEST_CR0/CR4 fields.  Stage 4 installs the CR0/CR4
 * guest/host masks and read shadows used by the later phases.  Stages
 * 6/7 verify "write same value as shadow" did not change the guest CRs
 * (the XOR undoes the earlier write-through flip).
 *
 * VMX_CR exits: stages 4-7 mean an exit fired where none was expected
 * (report failure and advance the stage so the guest check also sees
 * it); stages 8-11 are the expected exits for writes that differ from
 * the shadow, matched against the exit qualification encoding.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Install masks + shadows for the shadowing phases. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (get_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
601 
602 static int iobmp_init()
603 {
604 	u32 ctrl_cpu0;
605 
606 	io_bitmap_a = alloc_page();
607 	io_bitmap_a = alloc_page();
608 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
609 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
610 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
611 	ctrl_cpu0 |= CPU_IO_BITMAP;
612 	ctrl_cpu0 &= (~CPU_IO);
613 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
614 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
615 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
616 	return VMX_TEST_START;
617 }
618 
/*
 * Guest body for the I/O bitmap test.  Each phase programs the bitmaps
 * and performs port I/O; the exit handler advances the stage when the
 * expected VMX_IO exit (width, direction, port) is seen, and the guest
 * checks the stage afterwards.  Bitmap A covers ports 0x0000-0x7FFF,
 * bitmap B covers 0x8000-0xFFFF (one bit per port).
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;	/* trap ports 0-7 */
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	/* Port 0x9000 maps to bit 0x1000 in bitmap B (0x9000 - 0x8000). */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	/* 4-byte access at 0x4FFF straddles into the trapped 0x5000. */
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	/* 4-byte access at 0xFFFF wraps past the end of the port space. */
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);
}
679 
/*
 * Host-side handler for the I/O bitmap test.  On each VMX_IO exit it
 * validates the exit qualification (access size, direction, port)
 * against what the guest's current stage should produce, and bumps the
 * stage on match so the guest can detect that the trap occurred.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (get_stage()) {
		case 0:
		case 1:
			/* Unexpected trap during the pass test; the guest
			 * detects the failure via the changed stage. */
			set_stage(stage + 1);
			break;
		case 2:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			/* Direction bit clear means OUT. */
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
757 
/* Instruction-intercept test types: which control enables the trap. */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/*
 * test_field flags.  Bug fix: these must be distinct non-zero bits
 * because they are stored combined in insn_table[].test_field and
 * tested with '&'.  FIELD_EXIT_QUAL was previously 0, which made
 * "test_field & FIELD_EXIT_QUAL" always false and silently skipped
 * the exit-qualification check (e.g. for INVLPG).
 */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
765 
/*
 * Tiny callable stubs, one per intercepted instruction: each executes
 * the instruction under test and returns.  insn_intercept_main calls
 * them through insn_table[].insn_func.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
/* C-visible entry points for the asm stubs above. */
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();
788 
u32 cur_insn;	/* index of the insn_table entry currently under test */

/* One instruction-intercept test case. */
struct insn_table {
	const char *name;	/* name passed to report() */
	u32 flag;		/* execution-control bit that enables the trap */
	void (*insn_func)();	/* stub that executes the instruction */
	u32 type;		/* INSN_CPU0/CPU1/ALWAYS_TRAP/NEVER_TRAP */
	u32 reason;		/* expected VM-exit reason */
	ulong exit_qual;	/* expected exit qualification */
	u32 insn_info;		/* expected VM-exit instruction info */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which field need to be tested, reason is always tested
	u32 test_field;
};
803 
/*
 * Add more test cases of instruction intercept here. Elements in this
 * table is:
 *	name/control flag/insn function/type/exit reason/exit qualification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
830 
831 static int insn_intercept_init()
832 {
833 	u32 ctrl_cpu[2];
834 
835 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
836 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
837 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
838 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
839 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
840 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
841 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
842 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
843 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
844 	return VMX_TEST_START;
845 }
846 
847 static void insn_intercept_main()
848 {
849 	cur_insn = 0;
850 	while(insn_table[cur_insn].name != NULL) {
851 		set_stage(cur_insn);
852 		if ((insn_table[cur_insn].type == INSN_CPU0
853 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
854 			|| (insn_table[cur_insn].type == INSN_CPU1
855 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
856 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
857 				insn_table[cur_insn].name);
858 			continue;
859 		}
860 		insn_table[cur_insn].insn_func();
861 		switch (insn_table[cur_insn].type) {
862 		case INSN_CPU0:
863 		case INSN_CPU1:
864 		case INSN_ALWAYS_TRAP:
865 			if (stage != cur_insn + 1)
866 				report(insn_table[cur_insn].name, 0);
867 			else
868 				report(insn_table[cur_insn].name, 1);
869 			break;
870 		case INSN_NEVER_TRAP:
871 			if (stage == cur_insn + 1)
872 				report(insn_table[cur_insn].name, 0);
873 			else
874 				report(insn_table[cur_insn].name, 1);
875 			break;
876 		}
877 		cur_insn ++;
878 	}
879 }
880 
/*
 * Host-side handler for the instruction-intercept test.  Accepts any
 * exit, compares reason (and optionally exit qualification and
 * instruction info, per test_field) against the current insn_table
 * entry, and bumps the stage on a full match so the guest sees the
 * trap was correct.  Skips the trapped instruction before resuming.
 *
 * NOTE(review): with FIELD_EXIT_QUAL defined as 0, the
 * "test_field & FIELD_EXIT_QUAL" check below can never fire - verify
 * the FIELD_* definitions are distinct non-zero bits.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
906 
907 
908 static int setup_ept()
909 {
910 	int support_2m;
911 	unsigned long end_of_memory;
912 
913 	if (!(ept_vpid.val & EPT_CAP_UC) &&
914 			!(ept_vpid.val & EPT_CAP_WB)) {
915 		printf("\tEPT paging-structure memory type "
916 				"UC&WB are not supported\n");
917 		return 1;
918 	}
919 	if (ept_vpid.val & EPT_CAP_UC)
920 		eptp = EPT_MEM_TYPE_UC;
921 	else
922 		eptp = EPT_MEM_TYPE_WB;
923 	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
924 		printf("\tPWL4 is not supported\n");
925 		return 1;
926 	}
927 	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
928 	pml4 = alloc_page();
929 	memset(pml4, 0, PAGE_SIZE);
930 	eptp |= virt_to_phys(pml4);
931 	vmcs_write(EPTP, eptp);
932 	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
933 	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
934 	if (end_of_memory < (1ul << 32))
935 		end_of_memory = (1ul << 32);
936 	if (setup_ept_range(pml4, 0, end_of_memory,
937 			0, support_2m, EPT_WA | EPT_RA | EPT_EA)) {
938 		printf("\tSet ept tables failed.\n");
939 		return 1;
940 	}
941 	return 0;
942 }
943 
/*
 * Enable EPT via the secondary execution controls, build the EPT
 * tables with setup_ept(), then prepare two magic-tagged data pages
 * with 4KiB mappings and remap data_page1's guest-physical address
 * onto data_page2's page for the basic-framework read check.
 */
static int ept_init()
{
	unsigned long base_addr1, base_addr2;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	/* Re-map the enclosing 2M regions with 4K pages so individual
	 * PTEs for the data pages can be modified later. */
	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
	if (setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
			    EPT_WA | EPT_RA | EPT_EA) ||
	    setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
			    EPT_WA | EPT_RA | EPT_EA))
		return VMX_TEST_EXIT;
	/* GPA of data_page1 now points at data_page2's host page. */
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);
	return VMX_TEST_START;
}
982 
983 static void ept_main()
984 {
985 	set_stage(0);
986 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
987 			*((u32 *)data_page1) != MAGIC_VAL_1)
988 		report("EPT basic framework - read", 0);
989 	else {
990 		*((u32 *)data_page2) = MAGIC_VAL_3;
991 		vmcall();
992 		if (get_stage() == 1) {
993 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
994 					*((u32 *)data_page2) == MAGIC_VAL_2)
995 				report("EPT basic framework", 1);
996 			else
997 				report("EPT basic framework - remap", 1);
998 		}
999 	}
1000 	// Test EPT Misconfigurations
1001 	set_stage(1);
1002 	vmcall();
1003 	*((u32 *)data_page1) = MAGIC_VAL_1;
1004 	if (get_stage() != 2) {
1005 		report("EPT misconfigurations", 0);
1006 		goto t1;
1007 	}
1008 	set_stage(2);
1009 	vmcall();
1010 	*((u32 *)data_page1) = MAGIC_VAL_1;
1011 	if (get_stage() != 3) {
1012 		report("EPT misconfigurations", 0);
1013 		goto t1;
1014 	}
1015 	report("EPT misconfigurations", 1);
1016 t1:
1017 	// Test EPT violation
1018 	set_stage(3);
1019 	vmcall();
1020 	*((u32 *)data_page1) = MAGIC_VAL_1;
1021 	if (get_stage() == 4)
1022 		report("EPT violation - page permission", 1);
1023 	else
1024 		report("EPT violation - page permission", 0);
1025 	// Violation caused by EPT paging structure
1026 	set_stage(4);
1027 	vmcall();
1028 	*((u32 *)data_page1) = MAGIC_VAL_2;
1029 	if (get_stage() == 5)
1030 		report("EPT violation - paging structure", 1);
1031 	else
1032 		report("EPT violation - paging structure", 0);
1033 }
1034 
/*
 * Host-side handler for the EPT test.
 *
 * VMX_VMCALL stages set up the next guest phase:
 *   0 - verify the guest's write through the remap, then restore
 *       data_page2's identity mapping;
 *   1 - map data_page1 write-only (misconfiguration);
 *   2 - map data_page1 with a reserved memory type (misconfiguration);
 *   3 - clear the present bits in data_page1's PTE (violation);
 *   4 - clear the present bits in the paging-structure entry that maps
 *       data_page1's PTE (violation).
 * VMX_EPT_MISCONFIG / VMX_EPT_VIOLATION exits then check the stage
 * (and, for violations, the exit qualification) and restore a valid
 * mapping before resuming.  Every EPT change is flushed with INVEPT.
 */
static int ept_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	/* Static: preserved across exits so stages 3/4 can undo their edits. */
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				set_stage(get_stage() + 1);
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write\n", 0);
			break;
		case 1:
			/* Write-only mapping: an EPT misconfiguration. */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1, EPT_WA);
			invept(INVEPT_SINGLE, eptp);
			break;
		case 2:
			/* Memory type 2 is reserved: a misconfiguration. */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA |
 				(2 << EPT_MEM_TYPE_SHIFT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 3:
			/* Clear the leaf PTE's permissions: a violation. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Clear the level-2 entry mapping the PTE page. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (get_stage()) {
		case 1:
		case 2:
			set_stage(get_stage() + 1);
			/* Restore a valid mapping and retry the access. */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA);
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(get_stage()) {
		case 3:
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				set_stage(get_stage() + 1);
			/* Re-set the present bits and retry. */
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1143 
/* name/init/guest_main/exit_handler/syscall_handler/guest_regs
   basic_* just implement some basic functions.
   A NULL init means the test needs no extra VMCS setup; the list is
   terminated by an all-NULL entry. */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler,
		basic_syscall_handler, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler,
		basic_syscall_handler, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, basic_syscall_handler, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, basic_syscall_handler, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		basic_syscall_handler, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, basic_syscall_handler, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler,
		basic_syscall_handler, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1167