/* xref: /kvm-unit-tests/x86/vmx_tests.c (revision f0dfe8ec5cbaabd0c15f4dec6c2ae336d1da7d9c) */
1 #include "vmx.h"
2 #include "msr.h"
3 #include "processor.h"
4 #include "vm.h"
5 #include "io.h"
6 #include "fwcfg.h"
7 
/* Host MSR values captured at init by the PAT/EFER control-field tests. */
u64 ia32_pat;
u64 ia32_efer;
/* Stage counter shared between guest code and the host exit handlers. */
volatile u32 stage;
/* Backing pages for the I/O-bitmap test (bitmap A: ports 0x0000-0x7FFF,
 * bitmap B: ports 0x8000-0xFFFF). */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

/* Set when a test's init step fails; the guest main bails out early. */
bool init_fail;
/* EPT test state: top-level EPT table, the EPTP value loaded into the
 * VMCS, and two data pages used to probe translations. */
unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;
18 
/* Issue a VMCALL, forcing a VM exit into the current test's exit handler. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
23 
24 static inline void set_stage(u32 s)
25 {
26 	barrier();
27 	stage = s;
28 	barrier();
29 }
30 
31 static inline u32 get_stage()
32 {
33 	u32 s;
34 
35 	barrier();
36 	s = stage;
37 	barrier();
38 	return s;
39 }
40 
/* Default test init: nothing to set up. */
void basic_init()
{
}
44 
/* Default guest entry for the "null" test: just prove the guest runs. */
void basic_guest_main()
{
	/* Here is a basic guest_main, print Hello World */
	printf("\tHello World, this is null_guest_main!\n");
}
50 
51 int basic_exit_handler()
52 {
53 	u64 guest_rip;
54 	ulong reason;
55 
56 	guest_rip = vmcs_read(GUEST_RIP);
57 	reason = vmcs_read(EXI_REASON) & 0xff;
58 
59 	switch (reason) {
60 	case VMX_VMCALL:
61 		print_vmexit_info();
62 		vmcs_write(GUEST_RIP, guest_rip + 3);
63 		return VMX_TEST_RESUME;
64 	default:
65 		break;
66 	}
67 	printf("ERROR : Unhandled vmx exit.\n");
68 	print_vmexit_info();
69 	return VMX_TEST_EXIT;
70 }
71 
/* Default syscall handler: ignore guest syscalls. */
void basic_syscall_handler(u64 syscall_no)
{
}
75 
/* Guest side of the "vmenter" test.  Reaching this code proves vmlaunch
 * worked; the inline asm then loads 0xABCD into RAX and vmcalls out, and
 * the host (vmenter_exit_handler) rewrites RAX to 0xFFFF before
 * resuming.  The test passes when RAX changed and RSP is unchanged
 * across the exit/resume round trip. */
void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	/* NOTE(review): the asm writes %rax explicitly but does not list
	 * it as a clobber; it relies on the compiler binding operand %1
	 * to the value moved out of %rax — confirm allocation is safe. */
	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}
93 
94 int vmenter_exit_handler()
95 {
96 	u64 guest_rip;
97 	ulong reason;
98 
99 	guest_rip = vmcs_read(GUEST_RIP);
100 	reason = vmcs_read(EXI_REASON) & 0xff;
101 	switch (reason) {
102 	case VMX_VMCALL:
103 		if (regs.rax != 0xABCD) {
104 			report("test vmresume", 0);
105 			return VMX_TEST_VMEXIT;
106 		}
107 		regs.rax = 0xFFFF;
108 		vmcs_write(GUEST_RIP, guest_rip + 3);
109 		return VMX_TEST_RESUME;
110 	default:
111 		report("test vmresume", 0);
112 		print_vmexit_info();
113 	}
114 	return VMX_TEST_VMEXIT;
115 }
116 
/* Preemption-timer test state. */
u32 preempt_scale;	/* TSC-to-timer shift, IA32_VMX_MISC bits [4:0] */
volatile unsigned long long tsc_val;	/* TSC baseline taken in the guest */
volatile u32 preempt_val;	/* programmed preemption-timer value */
120 
121 void preemption_timer_init()
122 {
123 	u32 ctrl_pin;
124 
125 	ctrl_pin = vmcs_read(PIN_CONTROLS) | PIN_PREEMPT;
126 	ctrl_pin &= ctrl_pin_rev.clr;
127 	vmcs_write(PIN_CONTROLS, ctrl_pin);
128 	preempt_val = 10000000;
129 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
130 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
131 }
132 
/* Guest side of the preemption-timer test.  Takes a TSC baseline, runs
 * the save-preemption-value sub-test via two VMCALLs when
 * EXI_SAVE_PREEMPT is available, then spins waiting for the timer exit.
 * If roughly 10x the programmed interval elapses with no exit, the
 * timer is reported as failed. */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return;
	}
	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
		printf("\tSave preemption value is not supported\n");
	else {
		set_stage(0);
		vmcall();
		/* Handler advances to stage 1 only if stage-0 check passed. */
		if (get_stage() == 1)
			vmcall();
	}
	/* Wait for VMX_PREEMPT; give up after ~10x the programmed time. */
	while (1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			report("Preemption timer", 0);
			break;
		}
	}
}
156 
/* Host side of the preemption-timer test.
 * VMX_PREEMPT: check that at least preempt_val scaled TSC ticks elapsed
 * since the guest's baseline.
 * VMX_VMCALL stage 0: without EXI_SAVE_PREEMPT the timer field must be
 * untouched; then enable EXI_SAVE_PREEMPT.
 * VMX_VMCALL stage 1: with save enabled the field must have counted
 * down below the initial value. */
int preemption_timer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		/* Firing before the programmed interval is a failure. */
		if (((rdtsc() - tsc_val) >> preempt_scale) < preempt_val)
			report("Preemption timer", 0);
		else
			report("Preemption timer", 1);
		return VMX_TEST_VMEXIT;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (vmcs_read(PREEMPT_TIMER_VALUE) != preempt_val)
				report("Save preemption value", 0);
			else {
				set_stage(get_stage() + 1);
				ctrl_exit = (vmcs_read(EXI_CONTROLS) |
					EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
				vmcs_write(EXI_CONTROLS, ctrl_exit);
			}
			break;
		case 1:
			if (vmcs_read(PREEMPT_TIMER_VALUE) >= preempt_val)
				report("Save preemption value", 0);
			else
				report("Save preemption value", 1);
			break;
		default:
			printf("Invalid stage.\n");
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
205 
206 void msr_bmp_init()
207 {
208 	void *msr_bitmap;
209 	u32 ctrl_cpu0;
210 
211 	msr_bitmap = alloc_page();
212 	memset(msr_bitmap, 0x0, PAGE_SIZE);
213 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
214 	ctrl_cpu0 |= CPU_MSR_BITMAP;
215 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
216 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
217 }
218 
219 static void test_ctrl_pat_init()
220 {
221 	u64 ctrl_ent;
222 	u64 ctrl_exi;
223 
224 	msr_bmp_init();
225 	ctrl_ent = vmcs_read(ENT_CONTROLS);
226 	ctrl_exi = vmcs_read(EXI_CONTROLS);
227 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
228 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
229 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
230 	vmcs_write(GUEST_PAT, 0x0);
231 	vmcs_write(HOST_PAT, ia32_pat);
232 }
233 
/* Guest side of the PAT control-field test: verify ENT_LOAD_PAT loaded
 * the zero written at init, then set PAT to 0x6 and vmcall so the host
 * can check exit save/load, and finally verify the value the host put
 * in GUEST_PAT was loaded on re-entry. */
static void test_ctrl_pat_main()
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		/* GUEST_PAT was written as 0 at init. */
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
		/* The exit handler rewrote GUEST_PAT to the host value. */
		if (guest_ia32_pat != ia32_pat) {
			report("Entry load PAT", 0);
			return;
		}
		report("Entry load PAT", 1);
	}
}
258 
/* Host side of the PAT control-field test: on the guest's VMCALL, check
 * that EXI_SAVE_PAT captured the guest's 0x6 and EXI_LOAD_PAT restored
 * the host value, then hand the host PAT to the guest for the
 * entry-load check. */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			/* Emulate the save by hand so the test can go on. */
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		/* Guest checks this value after re-entry. */
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
296 
297 static void test_ctrl_efer_init()
298 {
299 	u64 ctrl_ent;
300 	u64 ctrl_exi;
301 
302 	msr_bmp_init();
303 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
304 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
305 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
306 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
307 	ia32_efer = rdmsr(MSR_EFER);
308 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
309 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
310 }
311 
/* Guest side of the EFER control-field test: verify ENT_LOAD_EFER loaded
 * the NX-flipped value from init, write the original EFER and vmcall so
 * the host can check exit save/load, then verify the value the host put
 * in GUEST_EFER was loaded on re-entry. */
static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		/* GUEST_EFER was set to ia32_efer ^ EFER_NX at init. */
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
		/* The exit handler set GUEST_EFER back to ia32_efer. */
		if (guest_ia32_efer != ia32_efer) {
			report("Entry load EFER", 0);
			return;
		}
		report("Entry load EFER", 1);
	}
}
336 
337 static int test_ctrl_efer_exit_handler()
338 {
339 	u64 guest_rip;
340 	ulong reason;
341 	u64 guest_efer;
342 
343 	guest_rip = vmcs_read(GUEST_RIP);
344 	reason = vmcs_read(EXI_REASON) & 0xff;
345 	switch (reason) {
346 	case VMX_VMCALL:
347 		guest_efer = vmcs_read(GUEST_EFER);
348 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
349 			printf("\tEXI_SAVE_EFER is not supported\n");
350 			vmcs_write(GUEST_EFER, ia32_efer);
351 		} else {
352 			if (guest_efer == ia32_efer)
353 				report("Exit save EFER", 1);
354 			else
355 				report("Exit save EFER", 0);
356 		}
357 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
358 			printf("\tEXI_LOAD_EFER is not supported\n");
359 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
360 		} else {
361 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
362 				report("Exit load EFER", 1);
363 			else
364 				report("Exit load EFER", 0);
365 		}
366 		vmcs_write(GUEST_PAT, ia32_efer);
367 		vmcs_write(GUEST_RIP, guest_rip + 3);
368 		return VMX_TEST_RESUME;
369 	default:
370 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
371 		break;
372 	}
373 	return VMX_TEST_VMEXIT;
374 }
375 
/* CR0/CR4 values the guest expects; rewritten by the exit handler at
 * stage 4 when the read shadows are armed. */
u32 guest_cr0, guest_cr4;
377 
378 static void cr_shadowing_main()
379 {
380 	u32 cr0, cr4, tmp;
381 
382 	// Test read through
383 	set_stage(0);
384 	guest_cr0 = read_cr0();
385 	if (stage == 1)
386 		report("Read through CR0", 0);
387 	else
388 		vmcall();
389 	set_stage(1);
390 	guest_cr4 = read_cr4();
391 	if (stage == 2)
392 		report("Read through CR4", 0);
393 	else
394 		vmcall();
395 	// Test write through
396 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
397 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
398 	set_stage(2);
399 	write_cr0(guest_cr0);
400 	if (stage == 3)
401 		report("Write throuth CR0", 0);
402 	else
403 		vmcall();
404 	set_stage(3);
405 	write_cr4(guest_cr4);
406 	if (stage == 4)
407 		report("Write through CR4", 0);
408 	else
409 		vmcall();
410 	// Test read shadow
411 	set_stage(4);
412 	vmcall();
413 	cr0 = read_cr0();
414 	if (stage != 5) {
415 		if (cr0 == guest_cr0)
416 			report("Read shadowing CR0", 1);
417 		else
418 			report("Read shadowing CR0", 0);
419 	}
420 	set_stage(5);
421 	cr4 = read_cr4();
422 	if (stage != 6) {
423 		if (cr4 == guest_cr4)
424 			report("Read shadowing CR4", 1);
425 		else
426 			report("Read shadowing CR4", 0);
427 	}
428 	// Test write shadow (same value with shadow)
429 	set_stage(6);
430 	write_cr0(guest_cr0);
431 	if (stage == 7)
432 		report("Write shadowing CR0 (same value with shadow)", 0);
433 	else
434 		vmcall();
435 	set_stage(7);
436 	write_cr4(guest_cr4);
437 	if (stage == 8)
438 		report("Write shadowing CR4 (same value with shadow)", 0);
439 	else
440 		vmcall();
441 	// Test write shadow (different value)
442 	set_stage(8);
443 	tmp = guest_cr0 ^ X86_CR0_TS;
444 	asm volatile("mov %0, %%rsi\n\t"
445 		"mov %%rsi, %%cr0\n\t"
446 		::"m"(tmp)
447 		:"rsi", "memory", "cc");
448 	if (stage != 9)
449 		report("Write shadowing different X86_CR0_TS", 0);
450 	else
451 		report("Write shadowing different X86_CR0_TS", 1);
452 	set_stage(9);
453 	tmp = guest_cr0 ^ X86_CR0_MP;
454 	asm volatile("mov %0, %%rsi\n\t"
455 		"mov %%rsi, %%cr0\n\t"
456 		::"m"(tmp)
457 		:"rsi", "memory", "cc");
458 	if (stage != 10)
459 		report("Write shadowing different X86_CR0_MP", 0);
460 	else
461 		report("Write shadowing different X86_CR0_MP", 1);
462 	set_stage(10);
463 	tmp = guest_cr4 ^ X86_CR4_TSD;
464 	asm volatile("mov %0, %%rsi\n\t"
465 		"mov %%rsi, %%cr4\n\t"
466 		::"m"(tmp)
467 		:"rsi", "memory", "cc");
468 	if (stage != 11)
469 		report("Write shadowing different X86_CR4_TSD", 0);
470 	else
471 		report("Write shadowing different X86_CR4_TSD", 1);
472 	set_stage(11);
473 	tmp = guest_cr4 ^ X86_CR4_DE;
474 	asm volatile("mov %0, %%rsi\n\t"
475 		"mov %%rsi, %%cr4\n\t"
476 		::"m"(tmp)
477 		:"rsi", "memory", "cc");
478 	if (stage != 12)
479 		report("Write shadowing different X86_CR4_DE", 0);
480 	else
481 		report("Write shadowing different X86_CR4_DE", 1);
482 }
483 
/* Host side of the CR shadowing test.
 * VMX_VMCALL: verify the read/write pass-through stages against the
 * VMCS guest CR fields; at stage 4 arm the CR0/CR4 masks and read
 * shadows for the shadowing stages.
 * VMX_CR: an exit here is a failure for stages 4-7 (shadowed reads and
 * same-value writes must not trap); for stages 8-11 it is the expected
 * trap, validated via the exit qualification. */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (stage) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Arm masks and shadows: shadow holds the flipped
			 * bits, so guest reads will see guest_cr0/cr4. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (stage) {
		case 4:
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
584 
585 static void iobmp_init()
586 {
587 	u32 ctrl_cpu0;
588 
589 	io_bitmap_a = alloc_page();
590 	io_bitmap_a = alloc_page();
591 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
592 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
593 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
594 	ctrl_cpu0 |= CPU_IO_BITMAP;
595 	ctrl_cpu0 &= (~CPU_IO);
596 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
597 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
598 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
599 }
600 
/* Guest side of the I/O-bitmap test.  Each sub-test programs the bitmap
 * bits, performs port I/O and checks (via the stage counter advanced by
 * iobmp_exit_handler) whether the access trapped or passed through.
 * Note: stage 1 is unused; the width tests start at stage 2. */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)		/* handler bumps stage on unexpected trap */
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;	/* trap ports 0-7 */
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	/* bitmap B covers the upper port range; 0x9000's bit lives at
	 * offset 0x1000 within it */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	inl(0x4FFF);	/* 4-byte access straddling into trapped 0x5000 */
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);	/* access wrapping past the last port */
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);

	return;
}
663 
/* Host side of the I/O-bitmap test: on each VMX_IO exit, validate the
 * exit qualification (access size, direction, port) against what the
 * current stage expects and advance the stage on success. */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (stage) {
		case 2:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		case 0:
		case 1:
			/* Unexpected trap during the pass-through stage:
			 * bump stage so the guest reports failure. */
			set_stage(stage + 1);
			/* fall through */
		default:
			// Should not reach here
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
738 
/* Instruction-intercept classes used by insn_table[].type. */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/* Bit flags for insn_table[].test_field, tested with '&' in
 * insn_intercept_exit_handler.  Fix: they must be distinct non-zero
 * bits; the original value 0 for FIELD_EXIT_QUAL made the
 * exit-qualification check a no-op for every table entry. */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
746 
/* One stub per intercepted instruction: execute it once and return. */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
/* C-visible entry points for the asm stubs above. */
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();

/* Index of the insn_table entry currently being exercised. */
u32 cur_insn;

/* Describes one intercepted instruction and the VM-exit it must cause. */
struct insn_table {
	const char *name;	/* report name */
	u32 flag;		/* execution-control bit enabling the trap */
	void (*insn_func)();	/* asm stub executing the instruction */
	u32 type;		/* INSN_CPU0/CPU1/ALWAYS_TRAP/NEVER_TRAP */
	u32 reason;		/* expected VM-exit reason */
	ulong exit_qual;	/* expected exit qualification */
	u32 insn_info;		/* expected VM-exit instruction info */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which fields need to be tested; reason is always tested
	u32 test_field;
};
784 
/* NULL-terminated table of intercepted instructions; exit-reason
 * numbers follow the VMX basic exit reason encoding. */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
803 
804 static void insn_intercept_init()
805 {
806 	u32 ctrl_cpu[2];
807 
808 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
809 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
810 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
811 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
812 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
813 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
814 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
815 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
816 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
817 }
818 
819 static void insn_intercept_main()
820 {
821 	cur_insn = 0;
822 	while(insn_table[cur_insn].name != NULL) {
823 		set_stage(cur_insn);
824 		if ((insn_table[cur_insn].type == INSN_CPU0
825 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
826 			|| (insn_table[cur_insn].type == INSN_CPU1
827 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
828 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
829 				insn_table[cur_insn].name);
830 			continue;
831 		}
832 		insn_table[cur_insn].insn_func();
833 		switch (insn_table[cur_insn].type) {
834 		case INSN_CPU0:
835 		case INSN_CPU1:
836 		case INSN_ALWAYS_TRAP:
837 			if (stage != cur_insn + 1)
838 				report(insn_table[cur_insn].name, 0);
839 			else
840 				report(insn_table[cur_insn].name, 1);
841 			break;
842 		case INSN_NEVER_TRAP:
843 			if (stage == cur_insn + 1)
844 				report(insn_table[cur_insn].name, 0);
845 			else
846 				report(insn_table[cur_insn].name, 1);
847 			break;
848 		}
849 		cur_insn ++;
850 	}
851 }
852 
/* Common exit handler for the instruction-intercept test: the trap is
 * correct when it happened for the current table entry with the
 * expected exit reason and, where test_field requests it, the expected
 * exit qualification / instruction info.  Success advances the stage;
 * the guest then skips the instruction and resumes. */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
878 
879 
/* Build an identity-mapped 4-level EPT hierarchy covering at least the
 * low 4 GiB and load EPTP into the VMCS.  Returns 0 on success, 1 when
 * a required EPT capability is missing or the tables cannot be built. */
static int setup_ept()
{
	int support_2m;
	unsigned long end_of_memory;

	if (!(ept_vpid.val & EPT_CAP_UC) &&
			!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory type "
				"UC&WB are not supported\n");
		return 1;
	}
	/* Prefer UC when available, otherwise WB, for the EPT structures. */
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	/* EPTP page-walk-length field holds (levels - 1) = 3. */
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	vmcs_write(EPTP, eptp);
	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	/* Always cover at least the low 4 GiB. */
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	if (setup_ept_range(pml4, 0, end_of_memory,
			0, support_2m, EPT_WA | EPT_RA | EPT_EA)) {
		printf("\tSet ept tables failed.\n");
		return 1;
	}
	return 0;
}
915 
916 static void ept_init()
917 {
918 	u32 ctrl_cpu[2];
919 
920 	init_fail = false;
921 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
922 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
923 	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
924 		& ctrl_cpu_rev[0].clr;
925 	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
926 		& ctrl_cpu_rev[1].clr;
927 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
928 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1] | CPU_EPT);
929 	if (setup_ept())
930 		init_fail = true;
931 	data_page1 = alloc_page();
932 	data_page2 = alloc_page();
933 	memset(data_page1, 0x0, PAGE_SIZE);
934 	memset(data_page2, 0x0, PAGE_SIZE);
935 	*((u32 *)data_page1) = MAGIC_VAL_1;
936 	*((u32 *)data_page2) = MAGIC_VAL_2;
937 	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
938 			EPT_RA | EPT_WA | EPT_EA);
939 }
940 
941 static void ept_main()
942 {
943 	if (init_fail)
944 		return;
945 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)
946 		&& !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
947 		printf("\tEPT is not supported");
948 		return;
949 	}
950 	set_stage(0);
951 	if (*((u32 *)data_page2) != MAGIC_VAL_1 &&
952 			*((u32 *)data_page1) != MAGIC_VAL_1)
953 		report("EPT basic framework - read", 0);
954 	else {
955 		*((u32 *)data_page2) = MAGIC_VAL_3;
956 		vmcall();
957 		if (get_stage() == 1) {
958 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
959 					*((u32 *)data_page2) == MAGIC_VAL_2)
960 				report("EPT basic framework", 1);
961 			else
962 				report("EPT basic framework - remap", 1);
963 		}
964 	}
965 	// Test EPT Misconfigurations
966 	set_stage(1);
967 	vmcall();
968 	*((u32 *)data_page1) = MAGIC_VAL_1;
969 	if (get_stage() != 2) {
970 		report("EPT misconfigurations", 0);
971 		goto t1;
972 	}
973 	set_stage(2);
974 	vmcall();
975 	*((u32 *)data_page1) = MAGIC_VAL_1;
976 	if (get_stage() != 3) {
977 		report("EPT misconfigurations", 0);
978 		goto t1;
979 	}
980 	report("EPT misconfigurations", 1);
981 t1:
982 	// Test EPT violation
983 	set_stage(3);
984 	vmcall();
985 	*((u32 *)data_page1) = MAGIC_VAL_1;
986 	if (get_stage() == 4)
987 		report("EPT violation - page permission", 1);
988 	else
989 		report("EPT violation - page permission", 0);
990 	// Violation caused by EPT paging structure
991 	set_stage(4);
992 	vmcall();
993 	*((u32 *)data_page1) = MAGIC_VAL_2;
994 	if (get_stage() == 5)
995 		report("EPT violation - paging structure", 1);
996 	else
997 		report("EPT violation - paging structure", 0);
998 	return;
999 }
1000 
/* Host side of the EPT test.
 * VMX_VMCALL stages: 0 verifies the guest's write landed via the remap
 * and restores an identity map of data_page2; 1 and 2 create EPT
 * misconfigurations (write-only entry; reserved memory type); 3 clears
 * the present bits on data_page1's leaf PTE; 4 clears present on a
 * higher-level paging-structure entry.
 * VMX_EPT_MISCONFIG / VMX_EPT_VIOLATION: validate the exit (and, for
 * violations, the qualification bits), repair the mapping, resume. */
static int ept_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				set_stage(get_stage() + 1);
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write\n", 0);
			break;
		case 1:
			/* Write-only entry: a misconfiguration. */
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1, EPT_WA);
			invept(INVEPT_SINGLE, eptp);
			break;
		case 2:
			/* Memory type 2 is reserved: a misconfiguration. */
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1,
				EPT_RA | EPT_WA | EPT_EA |
				(2 << EPT_MEM_TYPE_SHIFT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 3:
			/* Clear present bits on the level-1 (leaf) PTE. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Clear present bits on the level-2 entry that maps
			 * the page-table page itself. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unknown stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (get_stage()) {
		case 1:
		case 2:
			/* Expected: repair the mapping and let the guest
			 * retry the faulting write. */
			set_stage(get_stage() + 1);
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1,
				EPT_RA | EPT_WA | EPT_EA);
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unknown stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(get_stage()) {
		case 3:
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unknown stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1109 
/* Test registry: name / init / guest_main / exit_handler /
   syscall_handler / guest_regs.  basic_* are the no-op defaults;
   the table is NULL-terminated. */
struct vmx_test vmx_tests[] = {
	{ "null", basic_init, basic_guest_main, basic_exit_handler,
		basic_syscall_handler, {0} },
	{ "vmenter", basic_init, vmenter_main, vmenter_exit_handler,
		basic_syscall_handler, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, basic_syscall_handler, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, basic_syscall_handler, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
	{ "CR shadowing", basic_init, cr_shadowing_main,
		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		basic_syscall_handler, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, basic_syscall_handler, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler,
		basic_syscall_handler, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1133