xref: /kvm-unit-tests/x86/vmx_tests.c (revision b64aef07132e514b4190c660210e1e7cd1a455ae)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 
/* Shared scratch state used by guest mainlines and host exit handlers. */
u64 ia32_pat;			/* host IA32_PAT snapshot taken at test init */
u64 ia32_efer;			/* host IA32_EFER snapshot taken at test init */
volatile u32 stage;		/* test progress marker, written by both guest and host */
void *io_bitmap_a, *io_bitmap_b;	/* I/O bitmap pages: A = ports 0x0000-0x7FFF, B = 0x8000-0xFFFF */
u16 ioport;

/* EPT test state: guest PML4, EPT pointer, and two identity-mapped data pages. */
unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;
22 
/* Force a VM-exit with reason VMX_VMCALL so the host-side handler runs. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
27 
/*
 * Set the shared stage marker; the compiler barriers keep the store from
 * being reordered across surrounding test actions.
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
34 
/* Read the shared stage marker with barriers mirroring set_stage(). */
static inline u32 get_stage()
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
44 
/* Guest mainline for the "null" test: do nothing and return to exit. */
void basic_guest_main()
{
}
48 
/*
 * Exit handler for the "null" test: any exit reaching here is unexpected,
 * so report failure, dump exit info, and terminate the test.
 */
int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}
55 
56 void vmenter_main()
57 {
58 	u64 rax;
59 	u64 rsp, resume_rsp;
60 
61 	report("test vmlaunch", 1);
62 
63 	asm volatile(
64 		"mov %%rsp, %0\n\t"
65 		"mov %3, %%rax\n\t"
66 		"vmcall\n\t"
67 		"mov %%rax, %1\n\t"
68 		"mov %%rsp, %2\n\t"
69 		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
70 		: "g"(0xABCD));
71 	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
72 }
73 
/*
 * Host side of the vmlaunch/vmresume test: expects a vmcall with
 * RAX == 0xABCD, rewrites RAX to 0xFFFF, skips the vmcall and resumes.
 * Any other exit reason fails the test.
 */
int vmenter_exit_handler()
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		/* The guest loaded 0xABCD into RAX right before vmcall. */
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		regs.rax = 0xFFFF;
		/* vmcall is a 3-byte instruction. */
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
96 
/* Preemption-timer test state shared between guest and exit handler. */
u32 preempt_scale;			/* timer ticks at TSC >> preempt_scale (IA32_VMX_MISC[4:0]) */
volatile unsigned long long tsc_val;	/* TSC snapshot taken when the guest arms the test */
volatile u32 preempt_val;		/* value programmed into PREEMPT_TIMER_VALUE */
100 
101 int preemption_timer_init()
102 {
103 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
104 		printf("\tPreemption timer is not supported\n");
105 		return VMX_TEST_EXIT;
106 	}
107 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
108 	preempt_val = 10000000;
109 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
110 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
111 
112 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
113 		printf("\tSave preemption value is not supported\n");
114 
115 	return VMX_TEST_START;
116 }
117 
/*
 * Guest mainline for the preemption-timer test.  Stages 0/1 exercise
 * saving the remaining timer value on exit (when supported); then the
 * guest busy-waits until either the timer fires (expected) or ~10x the
 * programmed interval elapses, at which point stage 2 reports failure.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		set_stage(0);
		vmcall();
		if (get_stage() == 1)
			vmcall();
	}
	while (1) {
		/* Timer should have fired long before 10 * preempt_val ticks. */
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			set_stage(2);
			vmcall();
		}
	}
}
135 
/*
 * Host side of the preemption-timer test.
 * VMX_PREEMPT: pass if at least preempt_val scaled TSC ticks elapsed.
 * VMX_VMCALL stage 0: check saved timer value, then enable EXI_SAVE_PREEMPT.
 * VMX_VMCALL stage 1: saved value must have decreased below preempt_val.
 * VMX_VMCALL stage 2: guest timed out waiting for the timer — failure.
 * Always clears PIN_PREEMPT before terminating.
 */
int preemption_timer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		if (((rdtsc() - tsc_val) >> preempt_scale) < preempt_val)
			report("Preemption timer", 0);
		else
			report("Preemption timer", 1);
		break;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (vmcs_read(PREEMPT_TIMER_VALUE) != preempt_val)
				report("Save preemption value", 0);
			else {
				set_stage(get_stage() + 1);
				/* Turn on saving the timer value on exit. */
				ctrl_exit = (vmcs_read(EXI_CONTROLS) |
					EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
				vmcs_write(EXI_CONTROLS, ctrl_exit);
			}
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 1:
			if (vmcs_read(PREEMPT_TIMER_VALUE) >= preempt_val)
				report("Save preemption value", 0);
			else
				report("Save preemption value", 1);
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 2:
			report("Preemption timer", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		/* NOTE(review): reason is ulong printed with %d — confirm printf impl. */
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
190 
191 void msr_bmp_init()
192 {
193 	void *msr_bitmap;
194 	u32 ctrl_cpu0;
195 
196 	msr_bitmap = alloc_page();
197 	memset(msr_bitmap, 0x0, PAGE_SIZE);
198 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
199 	ctrl_cpu0 |= CPU_MSR_BITMAP;
200 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
201 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
202 }
203 
204 static int test_ctrl_pat_init()
205 {
206 	u64 ctrl_ent;
207 	u64 ctrl_exi;
208 
209 	msr_bmp_init();
210 	ctrl_ent = vmcs_read(ENT_CONTROLS);
211 	ctrl_exi = vmcs_read(EXI_CONTROLS);
212 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
213 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
214 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
215 	vmcs_write(GUEST_PAT, 0x0);
216 	vmcs_write(HOST_PAT, ia32_pat);
217 	return VMX_TEST_START;
218 }
219 
/*
 * Guest mainline for the PAT control test: verify that entry loaded the
 * zero GUEST_PAT, write 0x6 so the host can check exit-save, then verify
 * that the host-restored GUEST_PAT (ia32_pat) is loaded on re-entry.
 */
static void test_ctrl_pat_main()
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		/* init programmed GUEST_PAT = 0, so entry-load must see 0 */
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
		/* the exit handler rewrote GUEST_PAT to ia32_pat */
		if (guest_ia32_pat != ia32_pat) {
			report("Entry load PAT", 0);
			return;
		}
		report("Entry load PAT", 1);
	}
}
244 
/*
 * Host side of the PAT control test, run at the guest's vmcall: checks
 * that exit saved the guest's 0x6 PAT into GUEST_PAT and that exit
 * loaded the host PAT, then restores GUEST_PAT for the entry-load check.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			/* emulate the save so the guest-side check still works */
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		/* hand the guest the original PAT for the entry-load check */
		vmcs_write(GUEST_PAT, ia32_pat);
		/* vmcall is 3 bytes */
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		/* NOTE(review): reason is ulong printed with %d — confirm printf impl. */
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
282 
283 static int test_ctrl_efer_init()
284 {
285 	u64 ctrl_ent;
286 	u64 ctrl_exi;
287 
288 	msr_bmp_init();
289 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
290 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
291 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
292 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
293 	ia32_efer = rdmsr(MSR_EFER);
294 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
295 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
296 	return VMX_TEST_START;
297 }
298 
/*
 * Guest mainline for the EFER control test: verify entry loaded the
 * NX-flipped GUEST_EFER, write the original EFER so the host can check
 * exit-save, then verify the host-restored value is loaded on re-entry.
 */
static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		/* init programmed GUEST_EFER = ia32_efer ^ EFER_NX */
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
		/* the exit handler left GUEST_EFER == ia32_efer */
		if (guest_ia32_efer != ia32_efer) {
			report("Entry load EFER", 0);
			return;
		}
		report("Entry load EFER", 1);
	}
}
323 
324 static int test_ctrl_efer_exit_handler()
325 {
326 	u64 guest_rip;
327 	ulong reason;
328 	u64 guest_efer;
329 
330 	guest_rip = vmcs_read(GUEST_RIP);
331 	reason = vmcs_read(EXI_REASON) & 0xff;
332 	switch (reason) {
333 	case VMX_VMCALL:
334 		guest_efer = vmcs_read(GUEST_EFER);
335 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
336 			printf("\tEXI_SAVE_EFER is not supported\n");
337 			vmcs_write(GUEST_EFER, ia32_efer);
338 		} else {
339 			if (guest_efer == ia32_efer)
340 				report("Exit save EFER", 1);
341 			else
342 				report("Exit save EFER", 0);
343 		}
344 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
345 			printf("\tEXI_LOAD_EFER is not supported\n");
346 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
347 		} else {
348 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
349 				report("Exit load EFER", 1);
350 			else
351 				report("Exit load EFER", 0);
352 		}
353 		vmcs_write(GUEST_PAT, ia32_efer);
354 		vmcs_write(GUEST_RIP, guest_rip + 3);
355 		return VMX_TEST_RESUME;
356 	default:
357 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
358 		break;
359 	}
360 	return VMX_TEST_VMEXIT;
361 }
362 
/* Expected guest CR0/CR4 values shared with the CR-shadowing exit handler. */
u32 guest_cr0, guest_cr4;
364 
365 static void cr_shadowing_main()
366 {
367 	u32 cr0, cr4, tmp;
368 
369 	// Test read through
370 	set_stage(0);
371 	guest_cr0 = read_cr0();
372 	if (stage == 1)
373 		report("Read through CR0", 0);
374 	else
375 		vmcall();
376 	set_stage(1);
377 	guest_cr4 = read_cr4();
378 	if (stage == 2)
379 		report("Read through CR4", 0);
380 	else
381 		vmcall();
382 	// Test write through
383 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
384 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
385 	set_stage(2);
386 	write_cr0(guest_cr0);
387 	if (stage == 3)
388 		report("Write throuth CR0", 0);
389 	else
390 		vmcall();
391 	set_stage(3);
392 	write_cr4(guest_cr4);
393 	if (stage == 4)
394 		report("Write through CR4", 0);
395 	else
396 		vmcall();
397 	// Test read shadow
398 	set_stage(4);
399 	vmcall();
400 	cr0 = read_cr0();
401 	if (stage != 5) {
402 		if (cr0 == guest_cr0)
403 			report("Read shadowing CR0", 1);
404 		else
405 			report("Read shadowing CR0", 0);
406 	}
407 	set_stage(5);
408 	cr4 = read_cr4();
409 	if (stage != 6) {
410 		if (cr4 == guest_cr4)
411 			report("Read shadowing CR4", 1);
412 		else
413 			report("Read shadowing CR4", 0);
414 	}
415 	// Test write shadow (same value with shadow)
416 	set_stage(6);
417 	write_cr0(guest_cr0);
418 	if (stage == 7)
419 		report("Write shadowing CR0 (same value with shadow)", 0);
420 	else
421 		vmcall();
422 	set_stage(7);
423 	write_cr4(guest_cr4);
424 	if (stage == 8)
425 		report("Write shadowing CR4 (same value with shadow)", 0);
426 	else
427 		vmcall();
428 	// Test write shadow (different value)
429 	set_stage(8);
430 	tmp = guest_cr0 ^ X86_CR0_TS;
431 	asm volatile("mov %0, %%rsi\n\t"
432 		"mov %%rsi, %%cr0\n\t"
433 		::"m"(tmp)
434 		:"rsi", "memory", "cc");
435 	if (stage != 9)
436 		report("Write shadowing different X86_CR0_TS", 0);
437 	else
438 		report("Write shadowing different X86_CR0_TS", 1);
439 	set_stage(9);
440 	tmp = guest_cr0 ^ X86_CR0_MP;
441 	asm volatile("mov %0, %%rsi\n\t"
442 		"mov %%rsi, %%cr0\n\t"
443 		::"m"(tmp)
444 		:"rsi", "memory", "cc");
445 	if (stage != 10)
446 		report("Write shadowing different X86_CR0_MP", 0);
447 	else
448 		report("Write shadowing different X86_CR0_MP", 1);
449 	set_stage(10);
450 	tmp = guest_cr4 ^ X86_CR4_TSD;
451 	asm volatile("mov %0, %%rsi\n\t"
452 		"mov %%rsi, %%cr4\n\t"
453 		::"m"(tmp)
454 		:"rsi", "memory", "cc");
455 	if (stage != 11)
456 		report("Write shadowing different X86_CR4_TSD", 0);
457 	else
458 		report("Write shadowing different X86_CR4_TSD", 1);
459 	set_stage(11);
460 	tmp = guest_cr4 ^ X86_CR4_DE;
461 	asm volatile("mov %0, %%rsi\n\t"
462 		"mov %%rsi, %%cr4\n\t"
463 		::"m"(tmp)
464 		:"rsi", "memory", "cc");
465 	if (stage != 12)
466 		report("Write shadowing different X86_CR4_DE", 0);
467 	else
468 		report("Write shadowing different X86_CR4_DE", 1);
469 }
470 
/*
 * Host side of the CR shadowing test.  On vmcall exits it verifies the
 * guest-visible CR values against the VMCS (stages 0-3, 6-7) and arms
 * the CR0/CR4 masks and read shadows at stage 4.  On CR-access exits it
 * distinguishes expected traps (stages 8-11, checked via the exit
 * qualification encoding of "mov %esi, %crN") from unexpected ones
 * (stages 4-7, which should never trap and report failure).
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Arm masks + read shadows; shadow differs from real CR bits. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (get_stage()) {
		case 4:
			/* shadowed read should NOT exit */
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			/* write of the shadowed value should NOT exit */
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
581 
582 static int iobmp_init()
583 {
584 	u32 ctrl_cpu0;
585 
586 	io_bitmap_a = alloc_page();
587 	io_bitmap_a = alloc_page();
588 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
589 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
590 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
591 	ctrl_cpu0 |= CPU_IO_BITMAP;
592 	ctrl_cpu0 &= (~CPU_IO);
593 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
594 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
595 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
596 	return VMX_TEST_START;
597 }
598 
/*
 * Guest mainline for the I/O-bitmap test.  Each stage performs an I/O
 * access and then checks whether the exit handler advanced the stage,
 * i.e. whether the access trapped: pass-through with clear bitmaps,
 * trapping of in/out at various widths, low (bitmap A) and high
 * (bitmap B) port ranges, a partially-covered access, the 0xFFFF
 * wrap-around case, and finally interaction with unconditional exiting.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	/* port 0x9000 = 0x8000 + 0x1000, i.e. bit 0x1000 of bitmap B */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	/* 4-byte access at 0x4FFF touches the trapped port 0x5000 */
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	/* access wrapping past 0xFFFF must trap regardless of the bitmaps */
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);
	set_stage(9);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting", stage == 9);
	set_stage(10);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting", stage == 11);
}
667 
/*
 * Host side of the I/O-bitmap test.  On VMX_IO exits it validates the
 * exit qualification (access size, direction, port number) expected for
 * the current stage and advances the stage.  On vmcall exits (stages
 * 9/10) it toggles CPU_IO / CPU_IO_BITMAP to test that unconditional
 * exiting overrides and replaces the bitmaps.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (get_stage()) {
		case 0:
		case 1:
			/* unexpected trap during the pass-through test */
			set_stage(stage + 1);
			break;
		case 2:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		case 9:
		case 10:
			/* drop unconditional exiting again and note the trap */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 9:
			/* bitmaps active: CPU_IO must be ignored */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			/* bitmaps off, CPU_IO on: every access must trap */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
771 
772 #define INSN_CPU0		0
773 #define INSN_CPU1		1
774 #define INSN_ALWAYS_TRAP	2
775 #define INSN_NEVER_TRAP		3
776 
777 #define FIELD_EXIT_QUAL		0
778 #define FIELD_INSN_INFO		1
779 
780 asm(
781 	"insn_hlt: hlt;ret\n\t"
782 	"insn_invlpg: invlpg 0x12345678;ret\n\t"
783 	"insn_mwait: mwait;ret\n\t"
784 	"insn_rdpmc: rdpmc;ret\n\t"
785 	"insn_rdtsc: rdtsc;ret\n\t"
786 	"insn_monitor: monitor;ret\n\t"
787 	"insn_pause: pause;ret\n\t"
788 	"insn_wbinvd: wbinvd;ret\n\t"
789 	"insn_cpuid: cpuid;ret\n\t"
790 	"insn_invd: invd;ret\n\t"
791 );
792 extern void insn_hlt();
793 extern void insn_invlpg();
794 extern void insn_mwait();
795 extern void insn_rdpmc();
796 extern void insn_rdtsc();
797 extern void insn_monitor();
798 extern void insn_pause();
799 extern void insn_wbinvd();
800 extern void insn_cpuid();
801 extern void insn_invd();
802 
803 u32 cur_insn;
804 
/* Descriptor for one instruction-intercept test case. */
struct insn_table {
	const char *name;	/* report name */
	u32 flag;		/* execution-control bit enabling the intercept */
	void (*insn_func)();	/* stub that executes the instruction */
	u32 type;		/* INSN_CPU0/CPU1/ALWAYS_TRAP/NEVER_TRAP */
	u32 reason;		/* expected basic exit reason */
	ulong exit_qual;	/* expected exit qualification (optional) */
	u32 insn_info;		/* expected instruction info (optional) */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which fields need to be tested; reason is always tested
	u32 test_field;
};
817 
/*
 * Add more test cases of instruction intercept here. Elements in this
 * table are:
 *	name/control flag/insn function/type/exit reason/exit qualification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in the exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
844 
845 static int insn_intercept_init()
846 {
847 	u32 ctrl_cpu[2];
848 
849 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
850 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
851 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
852 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
853 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
854 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
855 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
856 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
857 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
858 	return VMX_TEST_START;
859 }
860 
861 static void insn_intercept_main()
862 {
863 	cur_insn = 0;
864 	while(insn_table[cur_insn].name != NULL) {
865 		set_stage(cur_insn);
866 		if ((insn_table[cur_insn].type == INSN_CPU0
867 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
868 			|| (insn_table[cur_insn].type == INSN_CPU1
869 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
870 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
871 				insn_table[cur_insn].name);
872 			continue;
873 		}
874 		insn_table[cur_insn].insn_func();
875 		switch (insn_table[cur_insn].type) {
876 		case INSN_CPU0:
877 		case INSN_CPU1:
878 		case INSN_ALWAYS_TRAP:
879 			if (stage != cur_insn + 1)
880 				report(insn_table[cur_insn].name, 0);
881 			else
882 				report(insn_table[cur_insn].name, 1);
883 			break;
884 		case INSN_NEVER_TRAP:
885 			if (stage == cur_insn + 1)
886 				report(insn_table[cur_insn].name, 0);
887 			else
888 				report(insn_table[cur_insn].name, 1);
889 			break;
890 		}
891 		cur_insn ++;
892 	}
893 }
894 
/*
 * Host side of the instruction-intercept test: compare the exit reason
 * (and, when requested via test_field, the exit qualification and
 * instruction info) against the current insn_table entry, and advance
 * the stage on a match so the guest can report pass/fail.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	/* skip the trapped instruction and resume */
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
920 
921 
/*
 * Build the EPT structures: pick a supported paging-structure memory
 * type (UC preferred), require a 4-level page walk, allocate the PML4,
 * and identity-map all of guest memory (at least 4 GiB) with RWX.
 * Returns 0 on success, 1 if a required EPT capability is missing.
 */
static int setup_ept()
{
	int support_2m;
	unsigned long end_of_memory;

	if (!(ept_vpid.val & EPT_CAP_UC) &&
			!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory type "
				"UC&WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	/* EPTP walk-length field encodes (levels - 1) = 3 for 4-level */
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	vmcs_write(EPTP, eptp);
	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
			EPT_WA | EPT_RA | EPT_EA);
	return 0;
}
954 
/*
 * Set up the EPT framework test: enable secondary controls + EPT, build
 * the EPT tables, then prepare two data pages with distinct magic values
 * and remap data_page1's guest address onto data_page2's physical page
 * (after forcing 4K mappings for both 2M regions).
 */
static int ept_init()
{
	unsigned long base_addr1, base_addr2;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	/* split the surrounding 2M regions into 4K EPT mappings */
	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
	setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	/* alias: guest data_page1 now maps to physical data_page2 */
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);
	return VMX_TEST_START;
}
992 
993 static void ept_main()
994 {
995 	set_stage(0);
996 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
997 			*((u32 *)data_page1) != MAGIC_VAL_1)
998 		report("EPT basic framework - read", 0);
999 	else {
1000 		*((u32 *)data_page2) = MAGIC_VAL_3;
1001 		vmcall();
1002 		if (get_stage() == 1) {
1003 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1004 					*((u32 *)data_page2) == MAGIC_VAL_2)
1005 				report("EPT basic framework", 1);
1006 			else
1007 				report("EPT basic framework - remap", 1);
1008 		}
1009 	}
1010 	// Test EPT Misconfigurations
1011 	set_stage(1);
1012 	vmcall();
1013 	*((u32 *)data_page1) = MAGIC_VAL_1;
1014 	if (get_stage() != 2) {
1015 		report("EPT misconfigurations", 0);
1016 		goto t1;
1017 	}
1018 	set_stage(2);
1019 	vmcall();
1020 	*((u32 *)data_page1) = MAGIC_VAL_1;
1021 	if (get_stage() != 3) {
1022 		report("EPT misconfigurations", 0);
1023 		goto t1;
1024 	}
1025 	report("EPT misconfigurations", 1);
1026 t1:
1027 	// Test EPT violation
1028 	set_stage(3);
1029 	vmcall();
1030 	*((u32 *)data_page1) = MAGIC_VAL_1;
1031 	if (get_stage() == 4)
1032 		report("EPT violation - page permission", 1);
1033 	else
1034 		report("EPT violation - page permission", 0);
1035 	// Violation caused by EPT paging structure
1036 	set_stage(4);
1037 	vmcall();
1038 	*((u32 *)data_page1) = MAGIC_VAL_2;
1039 	if (get_stage() == 5)
1040 		report("EPT violation - paging structure", 1);
1041 	else
1042 		report("EPT violation - paging structure", 0);
1043 }
1044 
/*
 * Host side of the EPT framework test.  On vmcall it reprograms the EPT
 * entries for the next stage (restore alias, create a write-only or
 * reserved-memory-type misconfiguration, clear the present bits of a
 * PTE or of its parent paging-structure entry).  On EPT misconfig /
 * violation exits it validates the stage (and, for violations, the exit
 * qualification bits) and repairs the mapping before resuming.
 */
static int ept_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	/* static: preserved across exits so violations can undo stage-3/4 edits */
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				set_stage(get_stage() + 1);
				/* restore the identity map for data_page2 */
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write\n", 0);
			break;
		case 1:
			/* misconfig: writable but not readable */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1, EPT_WA);
			invept(INVEPT_SINGLE, eptp);
			break;
		case 2:
			/* misconfig: reserved memory type (2) in a leaf entry */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA |
 				(2 << EPT_MEM_TYPE_SHIFT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 3:
			/* violation: clear the present bits of the level-1 PTE */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* violation: clear present bits in the paging structure */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (get_stage()) {
		case 1:
		case 2:
			set_stage(get_stage() + 1);
			/* repair the mapping so the guest's retry succeeds */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA);
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(get_stage()) {
		case 3:
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1153 
/*
 * Registry of all nested-virtualization tests, consumed by the VMX test
 * framework; terminated by an all-NULL sentinel entry.
 * Fields: name/init/guest_main/exit_handler/syscall_handler/guest_regs.
 */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1173