xref: /kvm-unit-tests/x86/vmx_tests.c (revision 17ba0dd05e933669a6f2a99d465e817837b1a579)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 #include "isr.h"
13 #include "apic.h"
14 
/* Host MSR values saved by the PAT/EFER tests for later comparison. */
u64 ia32_pat;
u64 ia32_efer;
/* Test progress counter shared between guest code and host exit handlers. */
volatile u32 stage;
/* Backing pages for VMCS I/O bitmap A and bitmap B. */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

/* EPT test state: EPT PML4 table, the EPT pointer written to the VMCS,
   and two data pages that get remapped during the EPT tests. */
unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;
24 
/* Issue VMCALL, forcing a VM exit into the current test's exit handler. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
29 
/*
 * Publish a new test stage. The compiler barriers keep the store from being
 * reordered with the surrounding test actions; 'stage' itself is volatile.
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
36 
/* Read the current test stage with compiler barriers around the load. */
static inline u32 get_stage()
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
46 
/* Guest entry for the basic VMX test; intentionally empty. */
void basic_guest_main()
{
}
50 
/*
 * Exit handler for the basic test.
 * NOTE(review): unconditionally reports failure — presumably the empty guest
 * should never reach this exit path; confirm against the test framework.
 */
int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}
57 
/*
 * Guest side of the vmlaunch/vmresume test: load 0xABCD into rax, VMCALL
 * out, and verify that the exit handler rewrote rax to 0xFFFF and that rsp
 * is preserved across the resume.
 * NOTE(review): the asm writes %rax but does not list it as a clobber; it
 * relies on the register allocator's choice for the "=r" outputs — confirm.
 */
void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}
75 
/*
 * Host side of the vmlaunch/vmresume test: on the guest's VMCALL, check
 * the marker value in rax, replace it with 0xFFFF, skip the 3-byte VMCALL
 * instruction, and resume the guest. Any other exit reason is a failure.
 */
int vmenter_exit_handler()
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		regs.rax = 0xFFFF;
		// VMCALL is 3 bytes; advance RIP past it before resuming.
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
98 
/* TSC-to-preemption-timer shift, from IA32_VMX_MISC bits 4:0. */
u32 preempt_scale;
/* TSC snapshot taken by the guest when the timer test starts. */
volatile unsigned long long tsc_val;
/* Value programmed into the VMX-preemption timer. */
volatile u32 preempt_val;
102 
/*
 * Set up the VMX-preemption timer test: enable the pin-based control,
 * program the timer value, and read the TSC shift from IA32_VMX_MISC.
 * Skips the test if the preemption timer is unsupported.
 */
int preemption_timer_init()
{
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return VMX_TEST_EXIT;
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
	preempt_val = 10000000;
	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
	// Bits 4:0 of IA32_VMX_MISC give the TSC-to-timer shift.
	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
		printf("\tSave preemption value is not supported\n");

	return VMX_TEST_START;
}
119 
/*
 * Guest side of the preemption-timer test. Stages 0/1 exercise the
 * "save preemption timer value on exit" control via VMCALLs; then the
 * guest spins waiting for the timer-triggered VM exit. If roughly 10x
 * the programmed interval elapses with no exit, stage 2 signals failure.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		set_stage(0);
		vmcall();
		if (get_stage() == 1)
			vmcall();
	}
	while (1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			// Timer never fired: tell the host we timed out.
			set_stage(2);
			vmcall();
		}
	}
}
137 
/*
 * Host side of the preemption-timer test.
 * VMX_PREEMPT: verify at least the programmed interval elapsed.
 * VMX_VMCALL stage 0: check the saved timer value, then enable
 *   EXI_SAVE_PREEMPT and advance to stage 1.
 * VMX_VMCALL stage 1: the saved value must now have decreased.
 * VMX_VMCALL stage 2: guest timed out waiting for the timer — failure.
 */
int preemption_timer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		if (((rdtsc() - tsc_val) >> preempt_scale) < preempt_val)
			report("Preemption timer", 0);
		else
			report("Preemption timer", 1);
		break;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			// Without EXI_SAVE_PREEMPT the value must be unchanged.
			if (vmcs_read(PREEMPT_TIMER_VALUE) != preempt_val)
				report("Save preemption value", 0);
			else {
				set_stage(get_stage() + 1);
				ctrl_exit = (vmcs_read(EXI_CONTROLS) |
					EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
				vmcs_write(EXI_CONTROLS, ctrl_exit);
			}
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 1:
			// With EXI_SAVE_PREEMPT the saved value must have ticked down.
			if (vmcs_read(PREEMPT_TIMER_VALUE) >= preempt_val)
				report("Save preemption value", 0);
			else
				report("Save preemption value", 1);
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 2:
			report("Preemption timer", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	// Tear down: disable the preemption timer before finishing the test.
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
192 
193 void msr_bmp_init()
194 {
195 	void *msr_bitmap;
196 	u32 ctrl_cpu0;
197 
198 	msr_bitmap = alloc_page();
199 	memset(msr_bitmap, 0x0, PAGE_SIZE);
200 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
201 	ctrl_cpu0 |= CPU_MSR_BITMAP;
202 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
203 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
204 }
205 
206 static int test_ctrl_pat_init()
207 {
208 	u64 ctrl_ent;
209 	u64 ctrl_exi;
210 
211 	msr_bmp_init();
212 	ctrl_ent = vmcs_read(ENT_CONTROLS);
213 	ctrl_exi = vmcs_read(EXI_CONTROLS);
214 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
215 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
216 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
217 	vmcs_write(GUEST_PAT, 0x0);
218 	vmcs_write(HOST_PAT, ia32_pat);
219 	return VMX_TEST_START;
220 }
221 
/*
 * Guest side of the PAT control test: verify entry loaded PAT as 0, write
 * 0x6, VMCALL so the host can check exit-save/exit-load behavior, then
 * verify the value the host placed in GUEST_PAT was loaded on re-entry.
 */
static void test_ctrl_pat_main()
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
		// Exit handler set GUEST_PAT = ia32_pat before resuming.
		if (guest_ia32_pat != ia32_pat) {
			report("Entry load PAT", 0);
			return;
		}
		report("Entry load PAT", 1);
	}
}
246 
/*
 * Host side of the PAT control test: on VMCALL, check that the guest's
 * PAT write (0x6) was saved to GUEST_PAT, that the host PAT was reloaded
 * on exit, then seed GUEST_PAT with the host value to test entry-load.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		// Prime GUEST_PAT so the guest can verify entry-load on resume.
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
284 
/*
 * Set up the EFER load/save control test. The requested controls are
 * masked with the allowed-1 bits so unsupported ones are never set.
 * Guest and host EFER both start with the NX bit flipped relative to the
 * saved host value, so loads on entry/exit are observable.
 */
static int test_ctrl_efer_init()
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
	ia32_efer = rdmsr(MSR_EFER);
	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
	return VMX_TEST_START;
}
300 
/*
 * Guest side of the EFER control test: verify entry loaded the NX-flipped
 * EFER, restore the original value, VMCALL for the host-side checks, then
 * verify the value the host left in GUEST_EFER was loaded on re-entry.
 */
static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
		if (guest_ia32_efer != ia32_efer) {
			report("Entry load EFER", 0);
			return;
		}
		report("Entry load EFER", 1);
	}
}
325 
326 static int test_ctrl_efer_exit_handler()
327 {
328 	u64 guest_rip;
329 	ulong reason;
330 	u64 guest_efer;
331 
332 	guest_rip = vmcs_read(GUEST_RIP);
333 	reason = vmcs_read(EXI_REASON) & 0xff;
334 	switch (reason) {
335 	case VMX_VMCALL:
336 		guest_efer = vmcs_read(GUEST_EFER);
337 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
338 			printf("\tEXI_SAVE_EFER is not supported\n");
339 			vmcs_write(GUEST_EFER, ia32_efer);
340 		} else {
341 			if (guest_efer == ia32_efer)
342 				report("Exit save EFER", 1);
343 			else
344 				report("Exit save EFER", 0);
345 		}
346 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
347 			printf("\tEXI_LOAD_EFER is not supported\n");
348 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
349 		} else {
350 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
351 				report("Exit load EFER", 1);
352 			else
353 				report("Exit load EFER", 0);
354 		}
355 		vmcs_write(GUEST_PAT, ia32_efer);
356 		vmcs_write(GUEST_RIP, guest_rip + 3);
357 		return VMX_TEST_RESUME;
358 	default:
359 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
360 		break;
361 	}
362 	return VMX_TEST_VMEXIT;
363 }
364 
/* CR0/CR4 values the guest last read or wrote, shared with the exit handler. */
u32 guest_cr0, guest_cr4;
366 
367 static void cr_shadowing_main()
368 {
369 	u32 cr0, cr4, tmp;
370 
371 	// Test read through
372 	set_stage(0);
373 	guest_cr0 = read_cr0();
374 	if (stage == 1)
375 		report("Read through CR0", 0);
376 	else
377 		vmcall();
378 	set_stage(1);
379 	guest_cr4 = read_cr4();
380 	if (stage == 2)
381 		report("Read through CR4", 0);
382 	else
383 		vmcall();
384 	// Test write through
385 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
386 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
387 	set_stage(2);
388 	write_cr0(guest_cr0);
389 	if (stage == 3)
390 		report("Write throuth CR0", 0);
391 	else
392 		vmcall();
393 	set_stage(3);
394 	write_cr4(guest_cr4);
395 	if (stage == 4)
396 		report("Write through CR4", 0);
397 	else
398 		vmcall();
399 	// Test read shadow
400 	set_stage(4);
401 	vmcall();
402 	cr0 = read_cr0();
403 	if (stage != 5) {
404 		if (cr0 == guest_cr0)
405 			report("Read shadowing CR0", 1);
406 		else
407 			report("Read shadowing CR0", 0);
408 	}
409 	set_stage(5);
410 	cr4 = read_cr4();
411 	if (stage != 6) {
412 		if (cr4 == guest_cr4)
413 			report("Read shadowing CR4", 1);
414 		else
415 			report("Read shadowing CR4", 0);
416 	}
417 	// Test write shadow (same value with shadow)
418 	set_stage(6);
419 	write_cr0(guest_cr0);
420 	if (stage == 7)
421 		report("Write shadowing CR0 (same value with shadow)", 0);
422 	else
423 		vmcall();
424 	set_stage(7);
425 	write_cr4(guest_cr4);
426 	if (stage == 8)
427 		report("Write shadowing CR4 (same value with shadow)", 0);
428 	else
429 		vmcall();
430 	// Test write shadow (different value)
431 	set_stage(8);
432 	tmp = guest_cr0 ^ X86_CR0_TS;
433 	asm volatile("mov %0, %%rsi\n\t"
434 		"mov %%rsi, %%cr0\n\t"
435 		::"m"(tmp)
436 		:"rsi", "memory", "cc");
437 	if (stage != 9)
438 		report("Write shadowing different X86_CR0_TS", 0);
439 	else
440 		report("Write shadowing different X86_CR0_TS", 1);
441 	set_stage(9);
442 	tmp = guest_cr0 ^ X86_CR0_MP;
443 	asm volatile("mov %0, %%rsi\n\t"
444 		"mov %%rsi, %%cr0\n\t"
445 		::"m"(tmp)
446 		:"rsi", "memory", "cc");
447 	if (stage != 10)
448 		report("Write shadowing different X86_CR0_MP", 0);
449 	else
450 		report("Write shadowing different X86_CR0_MP", 1);
451 	set_stage(10);
452 	tmp = guest_cr4 ^ X86_CR4_TSD;
453 	asm volatile("mov %0, %%rsi\n\t"
454 		"mov %%rsi, %%cr4\n\t"
455 		::"m"(tmp)
456 		:"rsi", "memory", "cc");
457 	if (stage != 11)
458 		report("Write shadowing different X86_CR4_TSD", 0);
459 	else
460 		report("Write shadowing different X86_CR4_TSD", 1);
461 	set_stage(11);
462 	tmp = guest_cr4 ^ X86_CR4_DE;
463 	asm volatile("mov %0, %%rsi\n\t"
464 		"mov %%rsi, %%cr4\n\t"
465 		::"m"(tmp)
466 		:"rsi", "memory", "cc");
467 	if (stage != 12)
468 		report("Write shadowing different X86_CR4_DE", 0);
469 	else
470 		report("Write shadowing different X86_CR4_DE", 1);
471 }
472 
/*
 * Host side of the CR shadowing test.
 * VMX_VMCALL stages 0-3: compare the guest-observed CR values against
 *   GUEST_CR0/GUEST_CR4 (pass-through reads/writes).
 * VMX_VMCALL stage 4: install CR0/CR4 guest/host masks and read shadows
 *   whose masked bits differ from the real guest CR values.
 * VMX_VMCALL stages 6-7: writes equal to the shadow must not have exited.
 * VMX_CR stages 4-7: a CR-access exit here means shadowing failed.
 * VMX_CR stages 8-11: expected exits from mov-to-CR with differing masked
 *   bits; the exit qualification encodes register and CR number.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			// Shadow values differ from the real CRs in the masked bits.
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (get_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
583 
584 static int iobmp_init()
585 {
586 	u32 ctrl_cpu0;
587 
588 	io_bitmap_a = alloc_page();
589 	io_bitmap_a = alloc_page();
590 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
591 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
592 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
593 	ctrl_cpu0 |= CPU_IO_BITMAP;
594 	ctrl_cpu0 &= (~CPU_IO);
595 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
596 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
597 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
598 	return VMX_TEST_START;
599 }
600 
/*
 * Guest side of the I/O-bitmap test. The exit handler advances 'stage' on
 * each expected VMX_IO exit, so the guest can tell whether an access
 * trapped. Covers: pass-through when the bitmap is clear, byte/word/long
 * widths, in vs out, a port covered by bitmap A (0x5000), one covered by
 * bitmap B (0x9000), a partially-covered wide access (0x4FFF), the
 * wrap-around access at 0xFFFF, and interaction with the unconditional
 * I/O-exiting control (toggled by the host on the VMCALLs).
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	// Bitmap B covers ports 0x8000-0xFFFF, so bit 0x1000 maps to 0x9000.
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);
	set_stage(9);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting", stage == 9);
	set_stage(10);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting", stage == 11);
}
669 
/*
 * Host side of the I/O-bitmap test. On each VMX_IO exit it validates the
 * exit qualification (access size, direction, port number) for the current
 * stage and advances the stage. The VMCALL cases reconfigure the I/O
 * controls for the final two sub-tests: stage 9 enables unconditional
 * exiting alongside the bitmaps (bitmaps take precedence is NOT assumed —
 * the guest checks whether the access trapped), stage 10 drops the bitmaps
 * and keeps unconditional exiting.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (get_stage()) {
		case 0:
		case 1:
			set_stage(stage + 1);
			break;
		case 2:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		case 9:
		case 10:
			// Unexpected trap while testing unconditional exiting:
			// turn CPU_IO back off so the guest can make progress.
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 9:
			// Enable unconditional I/O exiting with bitmaps still on.
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			// Unconditional exiting only, bitmaps disabled.
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
773 
/* Interception class of an insn_table entry (compared with ==). */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/*
 * Bit flags for insn_table.test_field, tested with '&' in the exit
 * handler. They must be distinct non-zero bits: the previous values
 * (0 and 1) made the FIELD_EXIT_QUAL check unreachable, since
 * (test_field & 0) is always false.
 */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
781 
/*
 * One-instruction stubs, each executing a potentially-intercepted
 * instruction followed by ret, so the test table can call them uniformly
 * through function pointers.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();

/* Index of the insn_table entry currently being tested. */
u32 cur_insn;
806 
/* One instruction-intercept test case. */
struct insn_table {
	const char *name;
	u32 flag;		// execution-control bit enabling the intercept
	void (*insn_func)();	// stub executing the instruction
	u32 type;		// INSN_CPU0/INSN_CPU1/INSN_ALWAYS_TRAP/INSN_NEVER_TRAP
	u32 reason;		// expected VM-exit reason
	ulong exit_qual;	// expected exit qualification (if tested)
	u32 insn_info;		// expected instruction info (if tested)
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which field need to be tested, reason is always tested
	u32 test_field;
};
819 
820 /*
821  * Add more test cases of instruction intercept here. Elements in this
822  * table is:
823  *	name/control flag/insn function/type/exit reason/exit qulification/
824  *	instruction info/field to test
825  * The last field defines which fields (exit_qual and insn_info) need to be
826  * tested in exit handler. If set to 0, only "reason" is checked.
827  */
828 static struct insn_table insn_table[] = {
829 	// Flags for Primary Processor-Based VM-Execution Controls
830 	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
831 	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
832 		0x12345678, 0, FIELD_EXIT_QUAL},
833 	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
834 	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
835 	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
836 	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
837 	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
838 	// Flags for Secondary Processor-Based VM-Execution Controls
839 	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
840 	// Instructions always trap
841 	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
842 	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
843 	// Instructions never trap
844 	{NULL},
845 };
846 
/*
 * Enable every instruction-intercept execution control used by the table,
 * masked against the allowed-1 bits so unsupported controls stay clear.
 */
static int insn_intercept_init()
{
	u32 ctrl_cpu[2];

	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	return VMX_TEST_START;
}
862 
863 static void insn_intercept_main()
864 {
865 	cur_insn = 0;
866 	while(insn_table[cur_insn].name != NULL) {
867 		set_stage(cur_insn);
868 		if ((insn_table[cur_insn].type == INSN_CPU0
869 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
870 			|| (insn_table[cur_insn].type == INSN_CPU1
871 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
872 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
873 				insn_table[cur_insn].name);
874 			continue;
875 		}
876 		insn_table[cur_insn].insn_func();
877 		switch (insn_table[cur_insn].type) {
878 		case INSN_CPU0:
879 		case INSN_CPU1:
880 		case INSN_ALWAYS_TRAP:
881 			if (stage != cur_insn + 1)
882 				report(insn_table[cur_insn].name, 0);
883 			else
884 				report(insn_table[cur_insn].name, 1);
885 			break;
886 		case INSN_NEVER_TRAP:
887 			if (stage == cur_insn + 1)
888 				report(insn_table[cur_insn].name, 0);
889 			else
890 				report(insn_table[cur_insn].name, 1);
891 			break;
892 		}
893 		cur_insn ++;
894 	}
895 }
896 
/*
 * Host side of the instruction-intercept test: verify the exit reason
 * (and, when requested by test_field, the exit qualification and/or
 * instruction info) matches the current table entry; on success advance
 * the stage so the guest sees the instruction trapped, then skip the
 * trapping instruction and resume.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
922 
923 
/*
 * Build the EPT structures: pick a supported paging-structure memory type
 * (UC preferred, else WB), require 4-level page walks, allocate the EPT
 * PML4, and identity-map all of guest memory (at least 4 GiB) with RWX
 * permissions. Returns 0 on success, 1 if a capability is missing.
 */
static int setup_ept()
{
	int support_2m;
	unsigned long end_of_memory;

	if (!(ept_vpid.val & EPT_CAP_UC) &&
			!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory type "
				"UC&WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	// EPTP walk-length field holds (levels - 1), i.e. 3 for 4 levels.
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	vmcs_write(EPTP, eptp);
	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
			EPT_WA | EPT_RA | EPT_EA);
	return 0;
}
956 
/*
 * Set up the EPT test: enable secondary controls and EPT, build the EPT
 * tables, and allocate two magic-tagged data pages. data_page1's and
 * data_page2's 2M regions are remapped with 4K granularity so individual
 * PTEs can be manipulated, and data_page1 is initially aliased onto
 * data_page2's frame for the remap test.
 */
static int ept_init()
{
	unsigned long base_addr1, base_addr2;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
	// Re-map both 2M regions with 4K pages so single PTEs can be edited.
	setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	// Alias data_page1 onto data_page2's frame for the remap test.
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);
	return VMX_TEST_START;
}
994 
995 static void ept_main()
996 {
997 	set_stage(0);
998 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
999 			*((u32 *)data_page1) != MAGIC_VAL_1)
1000 		report("EPT basic framework - read", 0);
1001 	else {
1002 		*((u32 *)data_page2) = MAGIC_VAL_3;
1003 		vmcall();
1004 		if (get_stage() == 1) {
1005 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1006 					*((u32 *)data_page2) == MAGIC_VAL_2)
1007 				report("EPT basic framework", 1);
1008 			else
1009 				report("EPT basic framework - remap", 1);
1010 		}
1011 	}
1012 	// Test EPT Misconfigurations
1013 	set_stage(1);
1014 	vmcall();
1015 	*((u32 *)data_page1) = MAGIC_VAL_1;
1016 	if (get_stage() != 2) {
1017 		report("EPT misconfigurations", 0);
1018 		goto t1;
1019 	}
1020 	set_stage(2);
1021 	vmcall();
1022 	*((u32 *)data_page1) = MAGIC_VAL_1;
1023 	if (get_stage() != 3) {
1024 		report("EPT misconfigurations", 0);
1025 		goto t1;
1026 	}
1027 	report("EPT misconfigurations", 1);
1028 t1:
1029 	// Test EPT violation
1030 	set_stage(3);
1031 	vmcall();
1032 	*((u32 *)data_page1) = MAGIC_VAL_1;
1033 	if (get_stage() == 4)
1034 		report("EPT violation - page permission", 1);
1035 	else
1036 		report("EPT violation - page permission", 0);
1037 	// Violation caused by EPT paging structure
1038 	set_stage(4);
1039 	vmcall();
1040 	*((u32 *)data_page1) = MAGIC_VAL_2;
1041 	if (get_stage() == 5)
1042 		report("EPT violation - paging structure", 1);
1043 	else
1044 		report("EPT violation - paging structure", 0);
1045 }
1046 
/*
 * Host side of the EPT test.
 * VMX_VMCALL: stage 0 verifies the aliased write and restores data_page2's
 *   identity mapping; stages 1-2 install misconfigured mappings for
 *   data_page1 (write-only permissions, then memory type 2 which is
 *   reserved); stages 3-4 clear the present bits of a leaf PTE and of a
 *   paging-structure entry. Every change is flushed with INVEPT.
 * VMX_EPT_MISCONFIG: repair the mapping and advance the stage.
 * VMX_EPT_VIOLATION: check the expected exit qualification bits, restore
 *   the present bit, and advance the stage.
 */
static int ept_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				set_stage(get_stage() + 1);
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write\n", 0);
			break;
		case 1:
			// Write-only mapping: misconfigured (no read permission).
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1, EPT_WA);
			invept(INVEPT_SINGLE, eptp);
			break;
		case 2:
			// Memory type 2 is reserved: misconfigured mapping.
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA |
 				(2 << EPT_MEM_TYPE_SHIFT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 3:
			// Clear the present bits of the level-1 (leaf) PTE.
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			// Clear the present bits of a paging-structure entry.
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (get_stage()) {
		case 1:
		case 2:
			set_stage(get_stage() + 1);
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA);
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(get_stage()) {
		case 3:
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1155 
1156 #define TIMER_VECTOR	222
1157 
1158 static volatile bool timer_fired;
1159 
1160 static void timer_isr(isr_regs_t *regs)
1161 {
1162 	timer_fired = true;
1163 	apic_write(APIC_EOI, 0);
1164 }
1165 
1166 static int interrupt_init(struct vmcs *vmcs)
1167 {
1168 	msr_bmp_init();
1169 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1170 	handle_irq(TIMER_VECTOR, timer_isr);
1171 	return VMX_TEST_START;
1172 }
1173 
/*
 * Guest side of the "interrupt" test.
 *
 * Runs six phases, separated by VMCALLs.  Each VMCALL traps to
 * interrupt_exit_handler(), which advances the stage counter and
 * reconfigures the VMCS for the next phase (toggling PIN_EXTINT or
 * forcing the HLT activity state).  The statement order here is part
 * of the guest/host protocol -- do not reorder the vmcall()s relative
 * to the APIC writes and irq_enable()/irq_disable() calls.
 */
static void interrupt_main(void)
{
	long long start, loops;

	set_stage(0);

	/* Program the LAPIC timer to deliver TIMER_VECTOR on expiry. */
	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	/* Phase 1: PIN_EXTINT clear -- the timer interrupt should be
	 * delivered directly to the (guest) ISR while we busy-wait. */
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	/* Phase 2: stage 0 vmcall sets PIN_EXTINT, so the interrupt now
	 * causes a VM exit; the host opens an interrupt window and the
	 * ISR still runs, so timer_fired should still become true. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	/* Drain any pending interrupt before reconfiguring. */
	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	/* Phase 3: stage 1 vmcall clears PIN_EXTINT again; a direct
	 * interrupt must wake the guest out of hlt. */
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	/* The elapsed-TSC check confirms we actually slept in hlt until
	 * the timer expired rather than falling straight through. */
	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	/* Phase 4: stage 2 vmcall sets PIN_EXTINT; the interrupt exits
	 * to the host, which wakes the halted guest and resumes it. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Phase 5: stage 3 vmcall clears PIN_EXTINT; the following
	 * stage 4 vmcall puts the guest into the HLT *activity state*
	 * via the VMCS (no hlt instruction executed here).  A direct
	 * interrupt should wake it. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Phase 6: stage 5 vmcall sets PIN_EXTINT; stage 6 vmcall sets
	 * the HLT activity state again.  This time the wakeup goes
	 * through a VM exit and the host must restore ACTV_ACTIVE. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);
}
1250 
1251 static int interrupt_exit_handler(void)
1252 {
1253 	u64 guest_rip = vmcs_read(GUEST_RIP);
1254 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
1255 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1256 
1257 	switch (reason) {
1258 	case VMX_VMCALL:
1259 		switch (get_stage()) {
1260 		case 0:
1261 		case 2:
1262 		case 5:
1263 			vmcs_write(PIN_CONTROLS,
1264 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1265 			break;
1266 		case 1:
1267 		case 3:
1268 			vmcs_write(PIN_CONTROLS,
1269 				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1270 			break;
1271 		case 4:
1272 		case 6:
1273 			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
1274 			break;
1275 		}
1276 		set_stage(get_stage() + 1);
1277 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1278 		return VMX_TEST_RESUME;
1279 	case VMX_EXTINT:
1280 		irq_enable();
1281 		asm volatile ("nop");
1282 		irq_disable();
1283 		if (get_stage() >= 2) {
1284 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
1285 			vmcs_write(GUEST_RIP, guest_rip + insn_len);
1286 		}
1287 		return VMX_TEST_RESUME;
1288 	default:
1289 		printf("Unknown exit reason, %d\n", reason);
1290 		print_vmexit_info();
1291 	}
1292 
1293 	return VMX_TEST_VMEXIT;
1294 }
1295 
1296 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
/* Registry of all nested-virtualization test cases, consumed by the
 * vmx framework.  Fields per entry:
 * name / init / guest_main / exit_handler / syscall_handler / guest_regs.
 * A NULL init or syscall_handler means the framework default is used;
 * the all-NULL entry terminates the table. */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ "interrupt", interrupt_init, interrupt_main,
		interrupt_exit_handler, NULL, {0} },
	/* Terminator: must remain the last entry. */
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1317