xref: /kvm-unit-tests/x86/vmx_tests.c (revision f0dc549a38f9cc816929773dfa1f9a49dd7e0f2f)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 #include "isr.h"
13 #include "apic.h"
14 
/* Host PAT/EFER values saved by the entry/exit control tests. */
u64 ia32_pat;
u64 ia32_efer;
/* Test stage counter, shared between guest code and L1 exit handlers. */
volatile u32 stage;
/* Pages backing VMCS IO_BITMAP_A/B for the I/O bitmap test. */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

/* EPT root (PML4), EPT pointer value, and data pages for the EPT tests. */
unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;
24 
/* Execute VMCALL, forcing a VM-exit into the L1 exit handler. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
29 
/*
 * Update the global test stage.  Compiler barriers keep the store from
 * being reordered with the surrounding test steps; stage itself is
 * volatile because guest code and exit handlers both touch it.
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
36 
37 static inline u32 get_stage()
38 {
39 	u32 s;
40 
41 	barrier();
42 	s = stage;
43 	barrier();
44 	return s;
45 }
46 
/* Guest entry point for the basic VMX test: intentionally does nothing. */
void basic_guest_main()
{
}
50 
/*
 * Exit handler for the basic test: unconditionally reports failure,
 * dumps the exit info and terminates the test.
 */
int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}
57 
/*
 * Guest side of the vmlaunch/vmresume test.  Reaching this code at all
 * means vmlaunch worked; the VMCALL below hands 0xABCD to the exit
 * handler, which replaces RAX with 0xFFFF and resumes.  RSP is sampled
 * before and after the VMCALL to check it survives the round trip.
 */
void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}
75 
76 int vmenter_exit_handler()
77 {
78 	u64 guest_rip;
79 	ulong reason;
80 
81 	guest_rip = vmcs_read(GUEST_RIP);
82 	reason = vmcs_read(EXI_REASON) & 0xff;
83 	switch (reason) {
84 	case VMX_VMCALL:
85 		if (regs.rax != 0xABCD) {
86 			report("test vmresume", 0);
87 			return VMX_TEST_VMEXIT;
88 		}
89 		regs.rax = 0xFFFF;
90 		vmcs_write(GUEST_RIP, guest_rip + 3);
91 		return VMX_TEST_RESUME;
92 	default:
93 		report("test vmresume", 0);
94 		print_vmexit_info();
95 	}
96 	return VMX_TEST_VMEXIT;
97 }
98 
/* Preemption timer test state: TSC-to-timer-tick shift (from
 * MSR_IA32_VMX_MISC), programmed timer value, and the RIP at which a
 * zero-valued timer is expected to fire. */
u32 preempt_scale;
volatile unsigned long long tsc_val;
volatile u32 preempt_val;
u64 saved_rip;
103 
104 int preemption_timer_init()
105 {
106 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
107 		printf("\tPreemption timer is not supported\n");
108 		return VMX_TEST_EXIT;
109 	}
110 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
111 	preempt_val = 10000000;
112 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
113 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
114 
115 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
116 		printf("\tSave preemption value is not supported\n");
117 
118 	return VMX_TEST_START;
119 }
120 
/*
 * Guest side of the preemption timer test, driving the stage machine:
 * stages 0/1 check the timer value is kept/saved across exits (only when
 * EXI_SAVE_PREEMPT exists), stage 1 busy-waits for a timer-fired exit,
 * then hlt checks the timer fires in the halted state, and finally a
 * zero-valued timer is exercised (stages 4/5).
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		set_stage(0);
		vmcall();
		/* The exit handler advances the stage to 1 on success. */
		if (get_stage() == 1)
			vmcall();
	}
	set_stage(1);
	/* The handler moves us past stage 1 when VMX_PREEMPT arrives. */
	while (get_stage() == 1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			/* Waited 10x the deadline: report failure via vmcall. */
			set_stage(2);
			vmcall();
		}
	}
	tsc_val = rdtsc();
	asm volatile ("hlt");
	/* Only reached if the timer failed to fire during hlt. */
	vmcall();
	set_stage(5);
	vmcall();
}
144 
/*
 * Host-side stage machine for the preemption timer test.
 *
 * VMX_PREEMPT exits verify the timer fired no earlier than the
 * programmed deadline (stages 1-3), that it fires while the guest is
 * halted (stage 3), and that a zero-valued timer exits at the saved RIP
 * before the guest makes progress (stage 4).  VMX_VMCALL exits are used
 * by the guest to (re)program the timer between sub-tests; reaching the
 * VMCALL in stage 2, 3 or 5 means the expected timer exit never came.
 */
int preemption_timer_exit_handler()
{
	bool guest_halted;
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		switch (get_stage()) {
		case 1:
		case 2:
			/* The timer must not fire before preempt_val ticks. */
			report("busy-wait for preemption timer",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val);
			set_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			guest_halted =
				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
			report("preemption timer during hlt",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val && guest_halted);
			set_stage(4);
			/* Disable the timer and wake the guest from hlt. */
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
			return VMX_TEST_RESUME;
		case 4:
			/* Zero-valued timer: no guest instruction may run. */
			report("preemption timer with 0 value",
			       saved_rip == guest_rip);
			break;
		default:
			printf("Invalid stage.\n");
			print_vmexit_info();
			break;
		}
		break;
	case VMX_VMCALL:
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		switch (get_stage()) {
		case 0:
			report("Keep preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
			set_stage(1);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			/* Enable saving the timer value on exit, if allowed. */
			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
				EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
			vmcs_write(EXI_CONTROLS, ctrl_exit);
			return VMX_TEST_RESUME;
		case 1:
			report("Save preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
			return VMX_TEST_RESUME;
		case 2:
			/* Guest gave up busy-waiting: timer never fired. */
			report("busy-wait for preemption timer", 0);
			set_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			/* Timer did not fire during hlt. */
			report("preemption timer during hlt", 0);
			set_stage(4);
			/* fall through */
		case 4:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
			vmcs_write(PREEMPT_TIMER_VALUE, 0);
			/* The zero-valued timer must fire at exactly this RIP. */
			saved_rip = guest_rip + insn_len;
			return VMX_TEST_RESUME;
		case 5:
			report("preemption timer with 0 value (vmcall stage 5)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	/* Make sure the timer is off before ending the test. */
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
236 
237 void msr_bmp_init()
238 {
239 	void *msr_bitmap;
240 	u32 ctrl_cpu0;
241 
242 	msr_bitmap = alloc_page();
243 	memset(msr_bitmap, 0x0, PAGE_SIZE);
244 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
245 	ctrl_cpu0 |= CPU_MSR_BITMAP;
246 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
247 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
248 }
249 
250 static int test_ctrl_pat_init()
251 {
252 	u64 ctrl_ent;
253 	u64 ctrl_exi;
254 
255 	msr_bmp_init();
256 	ctrl_ent = vmcs_read(ENT_CONTROLS);
257 	ctrl_exi = vmcs_read(EXI_CONTROLS);
258 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
259 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
260 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
261 	vmcs_write(GUEST_PAT, 0x0);
262 	vmcs_write(HOST_PAT, ia32_pat);
263 	return VMX_TEST_START;
264 }
265 
266 static void test_ctrl_pat_main()
267 {
268 	u64 guest_ia32_pat;
269 
270 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
271 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
272 		printf("\tENT_LOAD_PAT is not supported.\n");
273 	else {
274 		if (guest_ia32_pat != 0) {
275 			report("Entry load PAT", 0);
276 			return;
277 		}
278 	}
279 	wrmsr(MSR_IA32_CR_PAT, 0x6);
280 	vmcall();
281 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
282 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
283 		if (guest_ia32_pat != ia32_pat) {
284 			report("Entry load PAT", 0);
285 			return;
286 		}
287 		report("Entry load PAT", 1);
288 	}
289 }
290 
/*
 * Host side of the PAT control test: on the guest's VMCALL, check that
 * the guest PAT value (0x6) was saved into the VMCS on exit and that the
 * host PAT was reloaded, then hand the guest the host PAT value for the
 * entry-load check.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		/* Let the guest observe the host PAT after resume. */
		vmcs_write(GUEST_PAT, ia32_pat);
		/* Skip the 3-byte vmcall instruction. */
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
328 
329 static int test_ctrl_efer_init()
330 {
331 	u64 ctrl_ent;
332 	u64 ctrl_exi;
333 
334 	msr_bmp_init();
335 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
336 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
337 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
338 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
339 	ia32_efer = rdmsr(MSR_EFER);
340 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
341 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
342 	return VMX_TEST_START;
343 }
344 
345 static void test_ctrl_efer_main()
346 {
347 	u64 guest_ia32_efer;
348 
349 	guest_ia32_efer = rdmsr(MSR_EFER);
350 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
351 		printf("\tENT_LOAD_EFER is not supported.\n");
352 	else {
353 		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
354 			report("Entry load EFER", 0);
355 			return;
356 		}
357 	}
358 	wrmsr(MSR_EFER, ia32_efer);
359 	vmcall();
360 	guest_ia32_efer = rdmsr(MSR_EFER);
361 	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
362 		if (guest_ia32_efer != ia32_efer) {
363 			report("Entry load EFER", 0);
364 			return;
365 		}
366 		report("Entry load EFER", 1);
367 	}
368 }
369 
370 static int test_ctrl_efer_exit_handler()
371 {
372 	u64 guest_rip;
373 	ulong reason;
374 	u64 guest_efer;
375 
376 	guest_rip = vmcs_read(GUEST_RIP);
377 	reason = vmcs_read(EXI_REASON) & 0xff;
378 	switch (reason) {
379 	case VMX_VMCALL:
380 		guest_efer = vmcs_read(GUEST_EFER);
381 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
382 			printf("\tEXI_SAVE_EFER is not supported\n");
383 			vmcs_write(GUEST_EFER, ia32_efer);
384 		} else {
385 			if (guest_efer == ia32_efer)
386 				report("Exit save EFER", 1);
387 			else
388 				report("Exit save EFER", 0);
389 		}
390 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
391 			printf("\tEXI_LOAD_EFER is not supported\n");
392 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
393 		} else {
394 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
395 				report("Exit load EFER", 1);
396 			else
397 				report("Exit load EFER", 0);
398 		}
399 		vmcs_write(GUEST_PAT, ia32_efer);
400 		vmcs_write(GUEST_RIP, guest_rip + 3);
401 		return VMX_TEST_RESUME;
402 	default:
403 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
404 		break;
405 	}
406 	return VMX_TEST_VMEXIT;
407 }
408 
/* CR0/CR4 values exchanged between guest and the CR shadowing handler. */
u32 guest_cr0, guest_cr4;
410 
411 static void cr_shadowing_main()
412 {
413 	u32 cr0, cr4, tmp;
414 
415 	// Test read through
416 	set_stage(0);
417 	guest_cr0 = read_cr0();
418 	if (stage == 1)
419 		report("Read through CR0", 0);
420 	else
421 		vmcall();
422 	set_stage(1);
423 	guest_cr4 = read_cr4();
424 	if (stage == 2)
425 		report("Read through CR4", 0);
426 	else
427 		vmcall();
428 	// Test write through
429 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
430 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
431 	set_stage(2);
432 	write_cr0(guest_cr0);
433 	if (stage == 3)
434 		report("Write throuth CR0", 0);
435 	else
436 		vmcall();
437 	set_stage(3);
438 	write_cr4(guest_cr4);
439 	if (stage == 4)
440 		report("Write through CR4", 0);
441 	else
442 		vmcall();
443 	// Test read shadow
444 	set_stage(4);
445 	vmcall();
446 	cr0 = read_cr0();
447 	if (stage != 5) {
448 		if (cr0 == guest_cr0)
449 			report("Read shadowing CR0", 1);
450 		else
451 			report("Read shadowing CR0", 0);
452 	}
453 	set_stage(5);
454 	cr4 = read_cr4();
455 	if (stage != 6) {
456 		if (cr4 == guest_cr4)
457 			report("Read shadowing CR4", 1);
458 		else
459 			report("Read shadowing CR4", 0);
460 	}
461 	// Test write shadow (same value with shadow)
462 	set_stage(6);
463 	write_cr0(guest_cr0);
464 	if (stage == 7)
465 		report("Write shadowing CR0 (same value with shadow)", 0);
466 	else
467 		vmcall();
468 	set_stage(7);
469 	write_cr4(guest_cr4);
470 	if (stage == 8)
471 		report("Write shadowing CR4 (same value with shadow)", 0);
472 	else
473 		vmcall();
474 	// Test write shadow (different value)
475 	set_stage(8);
476 	tmp = guest_cr0 ^ X86_CR0_TS;
477 	asm volatile("mov %0, %%rsi\n\t"
478 		"mov %%rsi, %%cr0\n\t"
479 		::"m"(tmp)
480 		:"rsi", "memory", "cc");
481 	if (stage != 9)
482 		report("Write shadowing different X86_CR0_TS", 0);
483 	else
484 		report("Write shadowing different X86_CR0_TS", 1);
485 	set_stage(9);
486 	tmp = guest_cr0 ^ X86_CR0_MP;
487 	asm volatile("mov %0, %%rsi\n\t"
488 		"mov %%rsi, %%cr0\n\t"
489 		::"m"(tmp)
490 		:"rsi", "memory", "cc");
491 	if (stage != 10)
492 		report("Write shadowing different X86_CR0_MP", 0);
493 	else
494 		report("Write shadowing different X86_CR0_MP", 1);
495 	set_stage(10);
496 	tmp = guest_cr4 ^ X86_CR4_TSD;
497 	asm volatile("mov %0, %%rsi\n\t"
498 		"mov %%rsi, %%cr4\n\t"
499 		::"m"(tmp)
500 		:"rsi", "memory", "cc");
501 	if (stage != 11)
502 		report("Write shadowing different X86_CR4_TSD", 0);
503 	else
504 		report("Write shadowing different X86_CR4_TSD", 1);
505 	set_stage(11);
506 	tmp = guest_cr4 ^ X86_CR4_DE;
507 	asm volatile("mov %0, %%rsi\n\t"
508 		"mov %%rsi, %%cr4\n\t"
509 		::"m"(tmp)
510 		:"rsi", "memory", "cc");
511 	if (stage != 12)
512 		report("Write shadowing different X86_CR4_DE", 0);
513 	else
514 		report("Write shadowing different X86_CR4_DE", 1);
515 }
516 
/*
 * Host-side stage machine for the CR shadowing test.
 *
 * VMCALL exits (stages 0-3) verify that the guest's CR reads/writes went
 * straight through to GUEST_CR0/GUEST_CR4 while no masks were set; the
 * stage-4 VMCALL installs the CR0/CR4 masks and read shadows.  VMX_CR
 * exits are failures in stages 4-7 (reads/same-value writes must not
 * exit) and are expected in stages 8-11, where the exit qualification
 * identifies the control register and source register of the mov.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Install masks and shadows holding the inverse of
			 * the current guest bits, so guest reads must now
			 * come from the shadow, not the real registers. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (get_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
627 
628 static int iobmp_init()
629 {
630 	u32 ctrl_cpu0;
631 
632 	io_bitmap_a = alloc_page();
633 	io_bitmap_a = alloc_page();
634 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
635 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
636 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
637 	ctrl_cpu0 |= CPU_IO_BITMAP;
638 	ctrl_cpu0 &= (~CPU_IO);
639 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
640 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
641 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
642 	return VMX_TEST_START;
643 }
644 
/*
 * Guest side of the I/O bitmap test.  The exit handler advances the
 * stage on each expected VMX_IO exit, so "stage moved" == "exit
 * happened".  Covers: no exit when the bitmap is clear, exits for
 * byte/word/long accesses, low (bitmap A) and high (bitmap B) port
 * ranges, an access straddling the A/B boundary, wrap-around at 0xFFFF,
 * and the interaction with the unconditional CPU_IO control.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;	/* trap ports 0-7 */
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	/* Bitmap B covers ports 0x8000-0xFFFF; 0x9000 - 0x8000 = 0x1000. */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	/* 4-byte access at 0x4FFF touches 0x5000, which is still trapped. */
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	/* Access wrapping past 0xFFFF must exit even with clear bitmaps. */
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);
	set_stage(9);
	vmcall();	/* handler turns CPU_IO on together with the bitmap */
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting", stage == 9);
	set_stage(10);
	vmcall();	/* handler drops the bitmap, keeps CPU_IO */
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting", stage == 11);
}
713 
/*
 * Host-side stage machine for the I/O bitmap test.  On VMX_IO exits it
 * checks the exit qualification (access size, direction, port) against
 * what the guest stage expects and advances the stage on a match; on the
 * two VMCALL stages it flips the CPU_IO/CPU_IO_BITMAP controls for the
 * unconditional-exiting sub-tests.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (get_stage()) {
		case 0:
		case 1:
			set_stage(stage + 1);
			break;
		case 2:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		case 9:
		case 10:
			/* Unconditional-exit sub-tests: turn CPU_IO back off
			 * so the guest can make progress, and advance. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 9:
			/* Bitmap active: CPU_IO must be ignored. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			/* No bitmap: CPU_IO must cause exits. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
817 
/* insn_table.type values: which VM-execution control (if any) gates the
 * intercept of this instruction. */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/* insn_table.test_field bits: which exit fields the handler verifies. */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
825 
/*
 * One tiny stub per intercepted instruction; each executes the
 * instruction and returns, so it can be driven through the insn_func
 * pointer in insn_table.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_cr3_load: mov %rax,%cr3;ret\n\t"
	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
#ifdef __x86_64__
	"insn_cr8_load: mov %rax,%cr8;ret\n\t"
	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
#endif
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
/* C-visible entry points for the assembly stubs above. */
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_cr3_load();
extern void insn_cr3_store();
#ifdef __x86_64__
extern void insn_cr8_load();
extern void insn_cr8_store();
#endif
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();
860 
/* Index of the insn_table entry currently being exercised. */
u32 cur_insn;

/* Describes one instruction-intercept test case. */
struct insn_table {
	const char *name;	/* report name */
	u32 flag;		/* CPU_xxx control bit enabling the intercept */
	void (*insn_func)();	/* stub executing the instruction */
	u32 type;		/* INSN_CPU0/CPU1/ALWAYS_TRAP/NEVER_TRAP */
	u32 reason;		/* expected basic exit reason */
	ulong exit_qual;	/* expected exit qualification */
	u32 insn_info;		/* expected VM-exit instruction info */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which field need to be tested, reason is always tested
	u32 test_field;
};
875 
/*
 * Add more test cases of instruction intercept here. Elements in this
 * table is:
 *	name/control flag/insn function/type/exit reason/exit qulification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in exit handler. If set to 0, only "reason" is checked.
 * The numeric exit reasons are the basic exit-reason values from the
 * Intel SDM (e.g. 12 = HLT, 10 = CPUID, 28 = CR access).
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
		FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
		FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
		FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
		FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
912 
913 static int insn_intercept_init()
914 {
915 	u32 ctrl_cpu[2];
916 
917 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
918 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
919 		CPU_CR3_LOAD | CPU_CR3_STORE |
920 #ifdef __x86_64__
921 		CPU_CR8_LOAD | CPU_CR8_STORE |
922 #endif
923 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
924 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
925 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
926 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
927 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
928 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
929 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
930 	return VMX_TEST_START;
931 }
932 
933 static void insn_intercept_main()
934 {
935 	cur_insn = 0;
936 	while(insn_table[cur_insn].name != NULL) {
937 		set_stage(cur_insn);
938 		if ((insn_table[cur_insn].type == INSN_CPU0
939 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
940 			|| (insn_table[cur_insn].type == INSN_CPU1
941 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
942 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
943 				insn_table[cur_insn].name);
944 			continue;
945 		}
946 		insn_table[cur_insn].insn_func();
947 		switch (insn_table[cur_insn].type) {
948 		case INSN_CPU0:
949 		case INSN_CPU1:
950 		case INSN_ALWAYS_TRAP:
951 			if (stage != cur_insn + 1)
952 				report(insn_table[cur_insn].name, 0);
953 			else
954 				report(insn_table[cur_insn].name, 1);
955 			break;
956 		case INSN_NEVER_TRAP:
957 			if (stage == cur_insn + 1)
958 				report(insn_table[cur_insn].name, 0);
959 			else
960 				report(insn_table[cur_insn].name, 1);
961 			break;
962 		}
963 		cur_insn ++;
964 	}
965 }
966 
/*
 * Verify that the exit matches the insn_table entry under test: the exit
 * reason always, plus exit qualification and/or instruction info when
 * the entry's test_field asks for them.  On a match the stage is
 * advanced so the guest can report success; either way the trapped
 * instruction is skipped and the guest resumed.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
992 
993 
994 static int setup_ept()
995 {
996 	int support_2m;
997 	unsigned long end_of_memory;
998 
999 	if (!(ept_vpid.val & EPT_CAP_UC) &&
1000 			!(ept_vpid.val & EPT_CAP_WB)) {
1001 		printf("\tEPT paging-structure memory type "
1002 				"UC&WB are not supported\n");
1003 		return 1;
1004 	}
1005 	if (ept_vpid.val & EPT_CAP_UC)
1006 		eptp = EPT_MEM_TYPE_UC;
1007 	else
1008 		eptp = EPT_MEM_TYPE_WB;
1009 	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
1010 		printf("\tPWL4 is not supported\n");
1011 		return 1;
1012 	}
1013 	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
1014 	pml4 = alloc_page();
1015 	memset(pml4, 0, PAGE_SIZE);
1016 	eptp |= virt_to_phys(pml4);
1017 	vmcs_write(EPTP, eptp);
1018 	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
1019 	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
1020 	if (end_of_memory < (1ul << 32))
1021 		end_of_memory = (1ul << 32);
1022 	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
1023 			EPT_WA | EPT_RA | EPT_EA);
1024 	return 0;
1025 }
1026 
/*
 * Initialize the EPT test: enable the secondary controls and CPU_EPT,
 * build the identity map, then prepare two data pages.  The 2M regions
 * around both pages are re-mapped with 4K pages so individual PTEs can
 * be manipulated, and data_page1 is initially mapped onto data_page2.
 */
static int ept_init()
{
	unsigned long base_addr1, base_addr2;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
	/* Remap both surrounding 2M regions with 4K granularity. */
	setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	/* Alias data_page1 onto data_page2 for the remap test. */
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);
	return VMX_TEST_START;
}
1064 
/*
 * Guest side of the EPT test.  Stage 0 checks the data_page1 ->
 * data_page2 aliasing set up by ept_init and the remap done by the exit
 * handler; stages 1-2 trigger EPT misconfigurations (write-only mapping,
 * reserved memory type); stages 3-4 trigger EPT violations (non-present
 * leaf PTE, then a non-present paging-structure entry).  The exit
 * handler advances the stage when the expected exit arrives.
 */
static void ept_main()
{
	set_stage(0);
	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
			*((u32 *)data_page1) != MAGIC_VAL_1)
		report("EPT basic framework - read", 0);
	else {
		*((u32 *)data_page2) = MAGIC_VAL_3;
		vmcall();
		if (get_stage() == 1) {
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2)
				report("EPT basic framework", 1);
			else
				/* NOTE(review): this is the mismatch branch,
				 * yet it reports success (1) -- looks like it
				 * should be 0; confirm intended behaviour. */
				report("EPT basic framework - remap", 1);
		}
	}
	// Test EPT Misconfigurations
	set_stage(1);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	if (get_stage() != 2) {
		report("EPT misconfigurations", 0);
		goto t1;
	}
	set_stage(2);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	if (get_stage() != 3) {
		report("EPT misconfigurations", 0);
		goto t1;
	}
	report("EPT misconfigurations", 1);
t1:
	// Test EPT violation
	set_stage(3);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	if (get_stage() == 4)
		report("EPT violation - page permission", 1);
	else
		report("EPT violation - page permission", 0);
	// Violation caused by EPT paging structure
	set_stage(4);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_2;
	if (get_stage() == 5)
		report("EPT violation - paging structure", 1);
	else
		report("EPT violation - paging structure", 0);
}
1116 
1117 static int ept_exit_handler()
1118 {
1119 	u64 guest_rip;
1120 	ulong reason;
1121 	u32 insn_len;
1122 	u32 exit_qual;
1123 	static unsigned long data_page1_pte, data_page1_pte_pte;
1124 
1125 	guest_rip = vmcs_read(GUEST_RIP);
1126 	reason = vmcs_read(EXI_REASON) & 0xff;
1127 	insn_len = vmcs_read(EXI_INST_LEN);
1128 	exit_qual = vmcs_read(EXI_QUALIFICATION);
1129 	switch (reason) {
1130 	case VMX_VMCALL:
1131 		switch (get_stage()) {
1132 		case 0:
1133 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1134 					*((u32 *)data_page2) == MAGIC_VAL_2) {
1135 				set_stage(get_stage() + 1);
1136 				install_ept(pml4, (unsigned long)data_page2,
1137 						(unsigned long)data_page2,
1138 						EPT_RA | EPT_WA | EPT_EA);
1139 			} else
1140 				report("EPT basic framework - write\n", 0);
1141 			break;
1142 		case 1:
1143 			install_ept(pml4, (unsigned long)data_page1,
1144  				(unsigned long)data_page1, EPT_WA);
1145 			ept_sync(INVEPT_SINGLE, eptp);
1146 			break;
1147 		case 2:
1148 			install_ept(pml4, (unsigned long)data_page1,
1149  				(unsigned long)data_page1,
1150  				EPT_RA | EPT_WA | EPT_EA |
1151  				(2 << EPT_MEM_TYPE_SHIFT));
1152 			ept_sync(INVEPT_SINGLE, eptp);
1153 			break;
1154 		case 3:
1155 			data_page1_pte = get_ept_pte(pml4,
1156 				(unsigned long)data_page1, 1);
1157 			set_ept_pte(pml4, (unsigned long)data_page1,
1158 				1, data_page1_pte & (~EPT_PRESENT));
1159 			ept_sync(INVEPT_SINGLE, eptp);
1160 			break;
1161 		case 4:
1162 			data_page1_pte = get_ept_pte(pml4,
1163 				(unsigned long)data_page1, 2);
1164 			data_page1_pte &= PAGE_MASK;
1165 			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
1166 			set_ept_pte(pml4, data_page1_pte, 2,
1167 				data_page1_pte_pte & (~EPT_PRESENT));
1168 			ept_sync(INVEPT_SINGLE, eptp);
1169 			break;
1170 		// Should not reach here
1171 		default:
1172 			printf("ERROR - unexpected stage, %d.\n", get_stage());
1173 			print_vmexit_info();
1174 			return VMX_TEST_VMEXIT;
1175 		}
1176 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1177 		return VMX_TEST_RESUME;
1178 	case VMX_EPT_MISCONFIG:
1179 		switch (get_stage()) {
1180 		case 1:
1181 		case 2:
1182 			set_stage(get_stage() + 1);
1183 			install_ept(pml4, (unsigned long)data_page1,
1184  				(unsigned long)data_page1,
1185  				EPT_RA | EPT_WA | EPT_EA);
1186 			ept_sync(INVEPT_SINGLE, eptp);
1187 			break;
1188 		// Should not reach here
1189 		default:
1190 			printf("ERROR - unexpected stage, %d.\n", get_stage());
1191 			print_vmexit_info();
1192 			return VMX_TEST_VMEXIT;
1193 		}
1194 		return VMX_TEST_RESUME;
1195 	case VMX_EPT_VIOLATION:
1196 		switch(get_stage()) {
1197 		case 3:
1198 			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
1199 					EPT_VLT_PADDR))
1200 				set_stage(get_stage() + 1);
1201 			set_ept_pte(pml4, (unsigned long)data_page1,
1202 				1, data_page1_pte | (EPT_PRESENT));
1203 			ept_sync(INVEPT_SINGLE, eptp);
1204 			break;
1205 		case 4:
1206 			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
1207 				set_stage(get_stage() + 1);
1208 			set_ept_pte(pml4, data_page1_pte, 2,
1209 				data_page1_pte_pte | (EPT_PRESENT));
1210 			ept_sync(INVEPT_SINGLE, eptp);
1211 			break;
1212 		default:
1213 			// Should not reach here
1214 			printf("ERROR : unexpected stage, %d\n", get_stage());
1215 			print_vmexit_info();
1216 			return VMX_TEST_VMEXIT;
1217 		}
1218 		return VMX_TEST_RESUME;
1219 	default:
1220 		printf("Unknown exit reason, %d\n", reason);
1221 		print_vmexit_info();
1222 	}
1223 	return VMX_TEST_VMEXIT;
1224 }
1225 
1226 #define TIMER_VECTOR	222
1227 
1228 static volatile bool timer_fired;
1229 
1230 static void timer_isr(isr_regs_t *regs)
1231 {
1232 	timer_fired = true;
1233 	apic_write(APIC_EOI, 0);
1234 }
1235 
1236 static int interrupt_init(struct vmcs *vmcs)
1237 {
1238 	msr_bmp_init();
1239 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1240 	handle_irq(TIMER_VECTOR, timer_isr);
1241 	return VMX_TEST_START;
1242 }
1243 
/*
 * Guest side of the interrupt test.  Each phase arms the LAPIC timer and
 * checks that the interrupt is delivered; the vmcall between phases lets
 * interrupt_exit_handler reconfigure PIN_EXTINT / EXI_INTA / the guest
 * activity state and advance the stage.  Phases: direct interrupt,
 * intercepted interrupt, direct + hlt, intercepted + hlt, direct +
 * activity-state hlt, intercepted + activity-state hlt, and interrupt
 * acknowledgement on exit.
 */
static void interrupt_main(void)
{
	long long start, loops;

	set_stage(0);

	/* Program the LAPIC timer LVT entry to deliver TIMER_VECTOR. */
	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	/* Phase 1: interrupt taken directly while the guest runs. */
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();	/* stage 0 -> 1: handler enables PIN_EXTINT */
	/* Phase 2: interrupt now causes a VM exit (intercepted). */
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();	/* stage 1 -> 2: handler clears PIN_EXTINT */
	/* Phase 3: a direct interrupt must wake the guest from hlt. */
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();	/* stage 2 -> 3: handler enables PIN_EXTINT */
	/* Phase 4: an intercepted interrupt must wake the guest from hlt
	 * (the exit handler resumes the guest past the hlt). */
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();	/* stage 3 -> 4: handler clears PIN_EXTINT */
	/* Phase 5: on the vmcall below the handler re-enters the guest in
	 * the hlt activity state; a direct interrupt must return it to
	 * the active state. */
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();	/* stage 4 -> 5: handler sets ACTV_HLT */

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();	/* stage 5 -> 6: handler enables PIN_EXTINT */
	/* Phase 6: same as phase 5, but with the interrupt intercepted. */
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();	/* stage 6 -> 7: handler sets ACTV_HLT */

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	set_stage(7);
	vmcall();	/* stage 7 -> 8: handler sets EXI_INTA + PIN_EXTINT */
	/* Phase 7: host acknowledges the interrupt on VM exit. */
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("running a guest with interrupt acknowledgement set", timer_fired);
}
1330 
1331 static int interrupt_exit_handler(void)
1332 {
1333 	u64 guest_rip = vmcs_read(GUEST_RIP);
1334 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
1335 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1336 
1337 	switch (reason) {
1338 	case VMX_VMCALL:
1339 		switch (get_stage()) {
1340 		case 0:
1341 		case 2:
1342 		case 5:
1343 			vmcs_write(PIN_CONTROLS,
1344 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1345 			break;
1346 		case 7:
1347 			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
1348 			vmcs_write(PIN_CONTROLS,
1349 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1350 			break;
1351 		case 1:
1352 		case 3:
1353 			vmcs_write(PIN_CONTROLS,
1354 				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1355 			break;
1356 		case 4:
1357 		case 6:
1358 			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
1359 			break;
1360 		}
1361 		set_stage(get_stage() + 1);
1362 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1363 		return VMX_TEST_RESUME;
1364 	case VMX_EXTINT:
1365 		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
1366 			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
1367 			handle_external_interrupt(vector);
1368 		} else {
1369 			irq_enable();
1370 			asm volatile ("nop");
1371 			irq_disable();
1372 		}
1373 		if (get_stage() >= 2) {
1374 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
1375 			vmcs_write(GUEST_RIP, guest_rip + insn_len);
1376 		}
1377 		return VMX_TEST_RESUME;
1378 	default:
1379 		printf("Unknown exit reason, %d\n", reason);
1380 		print_vmexit_info();
1381 	}
1382 
1383 	return VMX_TEST_VMEXIT;
1384 }
1385 
/*
 * Registry of all nested-virtualization tests in this file.
 * Columns: name / init / guest_main / exit_handler / syscall_handler /
 * guest_regs.  An all-NULL row terminates the table.
 */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ "interrupt", interrupt_init, interrupt_main,
		interrupt_exit_handler, NULL, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1407