xref: /kvm-unit-tests/x86/vmx_tests.c (revision 2f888fccaecd2db84a195e88d4b9d5858a68d10d)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 #include "isr.h"
13 #include "apic.h"
14 
15 u64 ia32_pat;
16 u64 ia32_efer;
17 volatile u32 stage;
18 void *io_bitmap_a, *io_bitmap_b;
19 u16 ioport;
20 
21 unsigned long *pml4;
22 u64 eptp;
23 void *data_page1, *data_page2;
24 
25 static inline void vmcall()
26 {
27 	asm volatile("vmcall");
28 }
29 
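/*
 * Guest code and the host-side exit handlers synchronize through the global
 * "stage" counter.  The barriers below are compiler barriers; they keep the
 * value from being cached in a register across a vmcall()/VM-exit boundary.
 */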
30 static inline void set_stage(u32 s)
31 {
32 	barrier();
33 	stage = s;
34 	barrier();
35 }
36 
37 static inline u32 get_stage()
38 {
39 	u32 s;
40 
41 	barrier();
42 	s = stage;
43 	barrier();
44 	return s;
45 }
46 
47 void basic_guest_main()
48 {
49 }
50 
51 int basic_exit_handler()
52 {
53 	report("Basic VMX test", 0);
54 	print_vmexit_info();
55 	return VMX_TEST_EXIT;
56 }
57 
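/*
 * "vmenter" test: the guest loads 0xABCD into RAX and executes VMCALL.  The
 * exit handler checks RAX, rewrites it to 0xFFFF, advances RIP past the
 * 3-byte VMCALL and resumes; the guest then verifies the new RAX value and
 * that RSP survived the VM-exit/VM-resume round trip.
 */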
58 void vmenter_main()
59 {
60 	u64 rax;
61 	u64 rsp, resume_rsp;
62 
63 	report("test vmlaunch", 1);
64 
65 	asm volatile(
66 		"mov %%rsp, %0\n\t"
67 		"mov %3, %%rax\n\t"
68 		"vmcall\n\t"
69 		"mov %%rax, %1\n\t"
70 		"mov %%rsp, %2\n\t"
71 		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
72 		: "g"(0xABCD));
73 	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
74 }
75 
76 int vmenter_exit_handler()
77 {
78 	u64 guest_rip;
79 	ulong reason;
80 
81 	guest_rip = vmcs_read(GUEST_RIP);
82 	reason = vmcs_read(EXI_REASON) & 0xff;
83 	switch (reason) {
84 	case VMX_VMCALL:
85 		if (regs.rax != 0xABCD) {
86 			report("test vmresume", 0);
87 			return VMX_TEST_VMEXIT;
88 		}
89 		regs.rax = 0xFFFF;
90 		vmcs_write(GUEST_RIP, guest_rip + 3);
91 		return VMX_TEST_RESUME;
92 	default:
93 		report("test vmresume", 0);
94 		print_vmexit_info();
95 	}
96 	return VMX_TEST_VMEXIT;
97 }
98 
99 u32 preempt_scale;
100 volatile unsigned long long tsc_val;
101 volatile u32 preempt_val;
102 u64 saved_rip;
103 
104 int preemption_timer_init()
105 {
106 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
107 		printf("\tPreemption timer is not supported\n");
108 		return VMX_TEST_EXIT;
109 	}
110 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
111 	preempt_val = 10000000;
112 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
113 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
114 
115 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
116 		printf("\tSave preemption value is not supported\n");
117 
118 	return VMX_TEST_START;
119 }
120 
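/*
 * Preemption timer test flow (the stages are checked in the exit handler):
 *   stage 0/1 - the timer value is kept across a VMCALL exit and, when
 *               EXI_SAVE_PREEMPT is available, saved (counted down) on exit,
 *   stage 1-3 - busy-wait and then HLT until the timer expires and causes
 *               VMX_PREEMPT exits,
 *   stage 4   - re-arm the timer with a value of 0 and expect an immediate
 *               exit at the saved RIP (reaching the stage-5 VMCALL means the
 *               zero-value timer did not fire).
 */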
121 void preemption_timer_main()
122 {
123 	tsc_val = rdtsc();
124 	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
125 		set_stage(0);
126 		vmcall();
127 		if (get_stage() == 1)
128 			vmcall();
129 	}
130 	set_stage(1);
131 	while (get_stage() == 1) {
132 		if (((rdtsc() - tsc_val) >> preempt_scale)
133 				> 10 * preempt_val) {
134 			set_stage(2);
135 			vmcall();
136 		}
137 	}
138 	tsc_val = rdtsc();
139 	asm volatile ("hlt");
140 	vmcall();
141 	set_stage(5);
142 	vmcall();
143 }
144 
145 int preemption_timer_exit_handler()
146 {
147 	bool guest_halted;
148 	u64 guest_rip;
149 	ulong reason;
150 	u32 insn_len;
151 	u32 ctrl_exit;
152 
153 	guest_rip = vmcs_read(GUEST_RIP);
154 	reason = vmcs_read(EXI_REASON) & 0xff;
155 	insn_len = vmcs_read(EXI_INST_LEN);
156 	switch (reason) {
157 	case VMX_PREEMPT:
158 		switch (get_stage()) {
159 		case 1:
160 		case 2:
161 			report("busy-wait for preemption timer",
162 			       ((rdtsc() - tsc_val) >> preempt_scale) >=
163 			       preempt_val);
164 			set_stage(3);
165 			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
166 			return VMX_TEST_RESUME;
167 		case 3:
168 			guest_halted =
169 				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
170 			report("preemption timer during hlt",
171 			       ((rdtsc() - tsc_val) >> preempt_scale) >=
172 			       preempt_val && guest_halted);
173 			set_stage(4);
174 			vmcs_write(PIN_CONTROLS,
175 				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
176 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
177 			return VMX_TEST_RESUME;
178 		case 4:
179 			report("preemption timer with 0 value",
180 			       saved_rip == guest_rip);
181 			break;
182 		default:
183 			printf("Invalid stage.\n");
184 			print_vmexit_info();
185 			break;
186 		}
187 		break;
188 	case VMX_VMCALL:
189 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
190 		switch (get_stage()) {
191 		case 0:
192 			report("Keep preemption value",
193 			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
194 			set_stage(1);
195 			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
196 			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
197 				EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
198 			vmcs_write(EXI_CONTROLS, ctrl_exit);
199 			return VMX_TEST_RESUME;
200 		case 1:
201 			report("Save preemption value",
202 			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
203 			return VMX_TEST_RESUME;
204 		case 2:
205 			report("busy-wait for preemption timer", 0);
206 			set_stage(3);
207 			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
208 			return VMX_TEST_RESUME;
209 		case 3:
210 			report("preemption timer during hlt", 0);
211 			set_stage(4);
212 			/* fall through */
213 		case 4:
214 			vmcs_write(PIN_CONTROLS,
215 				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
216 			vmcs_write(PREEMPT_TIMER_VALUE, 0);
217 			saved_rip = guest_rip + insn_len;
218 			return VMX_TEST_RESUME;
219 		case 5:
220 			report("preemption timer with 0 value (vmcall stage 5)", 0);
221 			break;
222 		default:
223 			// Should not reach here
224 			printf("ERROR : unexpected stage, %d\n", get_stage());
225 			print_vmexit_info();
226 			return VMX_TEST_VMEXIT;
227 		}
228 		break;
229 	default:
230 		printf("Unknown exit reason, %d\n", reason);
231 		print_vmexit_info();
232 	}
233 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
234 	return VMX_TEST_VMEXIT;
235 }
236 
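/*
 * Enable the MSR bitmap with an all-zero bitmap so that RDMSR/WRMSR in the
 * guest do not cause VM exits.  The PAT and EFER tests below rely on this to
 * access the MSRs directly from guest context.
 */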
237 void msr_bmp_init()
238 {
239 	void *msr_bitmap;
240 	u32 ctrl_cpu0;
241 
242 	msr_bitmap = alloc_page();
243 	memset(msr_bitmap, 0x0, PAGE_SIZE);
244 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
245 	ctrl_cpu0 |= CPU_MSR_BITMAP;
246 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
247 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
248 }
249 
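/*
 * "control field PAT" test: VM entry is configured to load GUEST_PAT (0) and
 * VM exit to save and load IA32_PAT.  The guest checks that it sees PAT == 0
 * after entry, writes PAT = 0x6 and VMCALLs; the exit handler verifies that
 * 0x6 was saved into GUEST_PAT and that the host PAT was restored, then sets
 * GUEST_PAT to the host value so the guest can re-check entry load.
 */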
250 static int test_ctrl_pat_init()
251 {
252 	u64 ctrl_ent;
253 	u64 ctrl_exi;
254 
255 	msr_bmp_init();
256 	ctrl_ent = vmcs_read(ENT_CONTROLS);
257 	ctrl_exi = vmcs_read(EXI_CONTROLS);
258 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
259 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
260 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
261 	vmcs_write(GUEST_PAT, 0x0);
262 	vmcs_write(HOST_PAT, ia32_pat);
263 	return VMX_TEST_START;
264 }
265 
266 static void test_ctrl_pat_main()
267 {
268 	u64 guest_ia32_pat;
269 
270 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
271 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
272 		printf("\tENT_LOAD_PAT is not supported.\n");
273 	else {
274 		if (guest_ia32_pat != 0) {
275 			report("Entry load PAT", 0);
276 			return;
277 		}
278 	}
279 	wrmsr(MSR_IA32_CR_PAT, 0x6);
280 	vmcall();
281 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
282 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
283 		if (guest_ia32_pat != ia32_pat) {
284 			report("Entry load PAT", 0);
285 			return;
286 		}
287 		report("Entry load PAT", 1);
288 	}
289 }
290 
291 static int test_ctrl_pat_exit_handler()
292 {
293 	u64 guest_rip;
294 	ulong reason;
295 	u64 guest_pat;
296 
297 	guest_rip = vmcs_read(GUEST_RIP);
298 	reason = vmcs_read(EXI_REASON) & 0xff;
299 	switch (reason) {
300 	case VMX_VMCALL:
301 		guest_pat = vmcs_read(GUEST_PAT);
302 		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
303 			printf("\tEXI_SAVE_PAT is not supported\n");
304 			vmcs_write(GUEST_PAT, 0x6);
305 		} else {
306 			if (guest_pat == 0x6)
307 				report("Exit save PAT", 1);
308 			else
309 				report("Exit save PAT", 0);
310 		}
311 		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
312 			printf("\tEXI_LOAD_PAT is not supported\n");
313 		else {
314 			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
315 				report("Exit load PAT", 1);
316 			else
317 				report("Exit load PAT", 0);
318 		}
319 		vmcs_write(GUEST_PAT, ia32_pat);
320 		vmcs_write(GUEST_RIP, guest_rip + 3);
321 		return VMX_TEST_RESUME;
322 	default:
323 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
324 		break;
325 	}
326 	return VMX_TEST_VMEXIT;
327 }
328 
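/*
 * "control field EFER" test: same structure as the PAT test above, but
 * GUEST_EFER and HOST_EFER are set to the host value with EFER.NX flipped,
 * so that entry-load, exit-save and exit-load of IA32_EFER can all be
 * observed.
 */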
329 static int test_ctrl_efer_init()
330 {
331 	u64 ctrl_ent;
332 	u64 ctrl_exi;
333 
334 	msr_bmp_init();
335 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
336 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
337 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
338 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
339 	ia32_efer = rdmsr(MSR_EFER);
340 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
341 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
342 	return VMX_TEST_START;
343 }
344 
345 static void test_ctrl_efer_main()
346 {
347 	u64 guest_ia32_efer;
348 
349 	guest_ia32_efer = rdmsr(MSR_EFER);
350 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
351 		printf("\tENT_LOAD_EFER is not supported.\n");
352 	else {
353 		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
354 			report("Entry load EFER", 0);
355 			return;
356 		}
357 	}
358 	wrmsr(MSR_EFER, ia32_efer);
359 	vmcall();
360 	guest_ia32_efer = rdmsr(MSR_EFER);
361 	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
362 		if (guest_ia32_efer != ia32_efer) {
363 			report("Entry load EFER", 0);
364 			return;
365 		}
366 		report("Entry load EFER", 1);
367 	}
368 }
369 
370 static int test_ctrl_efer_exit_handler()
371 {
372 	u64 guest_rip;
373 	ulong reason;
374 	u64 guest_efer;
375 
376 	guest_rip = vmcs_read(GUEST_RIP);
377 	reason = vmcs_read(EXI_REASON) & 0xff;
378 	switch (reason) {
379 	case VMX_VMCALL:
380 		guest_efer = vmcs_read(GUEST_EFER);
381 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
382 			printf("\tEXI_SAVE_EFER is not supported\n");
383 			vmcs_write(GUEST_EFER, ia32_efer);
384 		} else {
385 			if (guest_efer == ia32_efer)
386 				report("Exit save EFER", 1);
387 			else
388 				report("Exit save EFER", 0);
389 		}
390 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
391 			printf("\tEXI_LOAD_EFER is not supported\n");
392 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
393 		} else {
394 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
395 				report("Exit load EFER", 1);
396 			else
397 				report("Exit load EFER", 0);
398 		}
399 		vmcs_write(GUEST_EFER, ia32_efer);
400 		vmcs_write(GUEST_RIP, guest_rip + 3);
401 		return VMX_TEST_RESUME;
402 	default:
403 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
404 		break;
405 	}
406 	return VMX_TEST_VMEXIT;
407 }
408 
409 u32 guest_cr0, guest_cr4;
410 
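/*
 * CR0/CR4 shadowing test.  Stages 0-3 run with no guest/host masks, so guest
 * reads and writes of CR0/CR4 go straight through to GUEST_CR0/GUEST_CR4.
 * At the stage-4 VMCALL the exit handler programs CR0/CR4 guest/host masks
 * and read shadows; from then on reads of masked bits must return the
 * shadow, writes of the shadow value must not exit, and writes that differ
 * from the shadow in a masked bit must cause a VMX_CR exit.
 */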
411 static void cr_shadowing_main()
412 {
413 	u32 cr0, cr4, tmp;
414 
415 	// Test read through
416 	set_stage(0);
417 	guest_cr0 = read_cr0();
418 	if (stage == 1)
419 		report("Read through CR0", 0);
420 	else
421 		vmcall();
422 	set_stage(1);
423 	guest_cr4 = read_cr4();
424 	if (stage == 2)
425 		report("Read through CR4", 0);
426 	else
427 		vmcall();
428 	// Test write through
429 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
430 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
431 	set_stage(2);
432 	write_cr0(guest_cr0);
433 	if (stage == 3)
434 		report("Write through CR0", 0);
435 	else
436 		vmcall();
437 	set_stage(3);
438 	write_cr4(guest_cr4);
439 	if (stage == 4)
440 		report("Write through CR4", 0);
441 	else
442 		vmcall();
443 	// Test read shadow
444 	set_stage(4);
445 	vmcall();
446 	cr0 = read_cr0();
447 	if (stage != 5) {
448 		if (cr0 == guest_cr0)
449 			report("Read shadowing CR0", 1);
450 		else
451 			report("Read shadowing CR0", 0);
452 	}
453 	set_stage(5);
454 	cr4 = read_cr4();
455 	if (stage != 6) {
456 		if (cr4 == guest_cr4)
457 			report("Read shadowing CR4", 1);
458 		else
459 			report("Read shadowing CR4", 0);
460 	}
461 	// Test write shadow (same value as the shadow)
462 	set_stage(6);
463 	write_cr0(guest_cr0);
464 	if (stage == 7)
465 		report("Write shadowing CR0 (same value with shadow)", 0);
466 	else
467 		vmcall();
468 	set_stage(7);
469 	write_cr4(guest_cr4);
470 	if (stage == 8)
471 		report("Write shadowing CR4 (same value with shadow)", 0);
472 	else
473 		vmcall();
474 	// Test write shadow (different value)
475 	set_stage(8);
476 	tmp = guest_cr0 ^ X86_CR0_TS;
477 	asm volatile("mov %0, %%rsi\n\t"
478 		"mov %%rsi, %%cr0\n\t"
479 		::"m"(tmp)
480 		:"rsi", "memory", "cc");
481 	if (stage != 9)
482 		report("Write shadowing different X86_CR0_TS", 0);
483 	else
484 		report("Write shadowing different X86_CR0_TS", 1);
485 	set_stage(9);
486 	tmp = guest_cr0 ^ X86_CR0_MP;
487 	asm volatile("mov %0, %%rsi\n\t"
488 		"mov %%rsi, %%cr0\n\t"
489 		::"m"(tmp)
490 		:"rsi", "memory", "cc");
491 	if (stage != 10)
492 		report("Write shadowing different X86_CR0_MP", 0);
493 	else
494 		report("Write shadowing different X86_CR0_MP", 1);
495 	set_stage(10);
496 	tmp = guest_cr4 ^ X86_CR4_TSD;
497 	asm volatile("mov %0, %%rsi\n\t"
498 		"mov %%rsi, %%cr4\n\t"
499 		::"m"(tmp)
500 		:"rsi", "memory", "cc");
501 	if (stage != 11)
502 		report("Write shadowing different X86_CR4_TSD", 0);
503 	else
504 		report("Write shadowing different X86_CR4_TSD", 1);
505 	set_stage(11);
506 	tmp = guest_cr4 ^ X86_CR4_DE;
507 	asm volatile("mov %0, %%rsi\n\t"
508 		"mov %%rsi, %%cr4\n\t"
509 		::"m"(tmp)
510 		:"rsi", "memory", "cc");
511 	if (stage != 12)
512 		report("Write shadowing different X86_CR4_DE", 0);
513 	else
514 		report("Write shadowing different X86_CR4_DE", 1);
515 }
516 
517 static int cr_shadowing_exit_handler()
518 {
519 	u64 guest_rip;
520 	ulong reason;
521 	u32 insn_len;
522 	u32 exit_qual;
523 
524 	guest_rip = vmcs_read(GUEST_RIP);
525 	reason = vmcs_read(EXI_REASON) & 0xff;
526 	insn_len = vmcs_read(EXI_INST_LEN);
527 	exit_qual = vmcs_read(EXI_QUALIFICATION);
528 	switch (reason) {
529 	case VMX_VMCALL:
530 		switch (get_stage()) {
531 		case 0:
532 			if (guest_cr0 == vmcs_read(GUEST_CR0))
533 				report("Read through CR0", 1);
534 			else
535 				report("Read through CR0", 0);
536 			break;
537 		case 1:
538 			if (guest_cr4 == vmcs_read(GUEST_CR4))
539 				report("Read through CR4", 1);
540 			else
541 				report("Read through CR4", 0);
542 			break;
543 		case 2:
544 			if (guest_cr0 == vmcs_read(GUEST_CR0))
545 				report("Write through CR0", 1);
546 			else
547 				report("Write through CR0", 0);
548 			break;
549 		case 3:
550 			if (guest_cr4 == vmcs_read(GUEST_CR4))
551 				report("Write through CR4", 1);
552 			else
553 				report("Write through CR4", 0);
554 			break;
555 		case 4:
556 			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
557 			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
558 			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
559 			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
560 			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
561 			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
562 			break;
563 		case 6:
564 			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
565 				report("Write shadowing CR0 (same value)", 1);
566 			else
567 				report("Write shadowing CR0 (same value)", 0);
568 			break;
569 		case 7:
570 			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
571 				report("Write shadowing CR4 (same value)", 1);
572 			else
573 				report("Write shadowing CR4 (same value)", 0);
574 			break;
575 		default:
576 			// Should not reach here
577 			printf("ERROR : unexpected stage, %d\n", get_stage());
578 			print_vmexit_info();
579 			return VMX_TEST_VMEXIT;
580 		}
581 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
582 		return VMX_TEST_RESUME;
583 	case VMX_CR:
584 		switch (get_stage()) {
585 		case 4:
586 			report("Read shadowing CR0", 0);
587 			set_stage(stage + 1);
588 			break;
589 		case 5:
590 			report("Read shadowing CR4", 0);
591 			set_stage(stage + 1);
592 			break;
593 		case 6:
594 			report("Write shadowing CR0 (same value)", 0);
595 			set_stage(stage + 1);
596 			break;
597 		case 7:
598 			report("Write shadowing CR4 (same value)", 0);
599 			set_stage(stage + 1);
600 			break;
601 		case 8:
602 		case 9:
603 			// 0x600 encodes "mov %esi, %cr0"
604 			if (exit_qual == 0x600)
605 				set_stage(stage + 1);
606 			break;
607 		case 10:
608 		case 11:
609 			// 0x604 encodes "mov %esi, %cr4"
610 			if (exit_qual == 0x604)
611 				set_stage(stage + 1);
612 			break;
613 		default:
614 			// Should not reach here
615 			printf("ERROR : unexpected stage, %d\n", get_stage());
616 			print_vmexit_info();
617 			return VMX_TEST_VMEXIT;
618 		}
619 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
620 		return VMX_TEST_RESUME;
621 	default:
622 		printf("Unknown exit reason, %d\n", reason);
623 		print_vmexit_info();
624 	}
625 	return VMX_TEST_VMEXIT;
626 }
627 
628 static int iobmp_init()
629 {
630 	u32 ctrl_cpu0;
631 
632 	io_bitmap_a = alloc_page();
633 	io_bitmap_b = alloc_page();
634 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
635 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
636 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
637 	ctrl_cpu0 |= CPU_IO_BITMAP;
638 	ctrl_cpu0 &= (~CPU_IO);
639 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
640 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
641 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
642 	return VMX_TEST_START;
643 }
644 
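/*
 * I/O bitmap test.  Bitmap A covers ports 0x0000-0x7FFF and bitmap B covers
 * ports 0x8000-0xFFFF, one bit per port.  The stages below check: no exit
 * with all-zero bitmaps, exit-qualification width/direction for byte, word
 * and long accesses, trapping through the low and high bitmaps, an access
 * that only partially overlaps a trapped port, wrap-around at port 0xFFFF,
 * and finally the interaction between CPU_IO_BITMAP and unconditional
 * CPU_IO exiting.
 */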
645 static void iobmp_main()
646 {
647 	// stage 0, test IO pass
648 	set_stage(0);
649 	inb(0x5000);
650 	outb(0x0, 0x5000);
651 	if (stage != 0)
652 		report("I/O bitmap - I/O pass", 0);
653 	else
654 		report("I/O bitmap - I/O pass", 1);
655 	// test IO width, in/out
656 	((u8 *)io_bitmap_a)[0] = 0xFF;
657 	set_stage(2);
658 	inb(0x0);
659 	if (stage != 3)
660 		report("I/O bitmap - trap in", 0);
661 	else
662 		report("I/O bitmap - trap in", 1);
663 	set_stage(3);
664 	outw(0x0, 0x0);
665 	if (stage != 4)
666 		report("I/O bitmap - trap out", 0);
667 	else
668 		report("I/O bitmap - trap out", 1);
669 	set_stage(4);
670 	inl(0x0);
671 	if (stage != 5)
672 		report("I/O bitmap - I/O width, long", 0);
673 	// test low/high IO port
674 	set_stage(5);
675 	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
676 	inb(0x5000);
677 	if (stage == 6)
678 		report("I/O bitmap - I/O port, low part", 1);
679 	else
680 		report("I/O bitmap - I/O port, low part", 0);
681 	set_stage(6);
682 	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
683 	inb(0x9000);
684 	if (stage == 7)
685 		report("I/O bitmap - I/O port, high part", 1);
686 	else
687 		report("I/O bitmap - I/O port, high part", 0);
688 	// test partial pass
689 	set_stage(7);
690 	inl(0x4FFF);
691 	if (stage == 8)
692 		report("I/O bitmap - partial pass", 1);
693 	else
694 		report("I/O bitmap - partial pass", 0);
695 	// test overrun
696 	set_stage(8);
697 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
698 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
699 	inl(0xFFFF);
700 	if (stage == 9)
701 		report("I/O bitmap - overrun", 1);
702 	else
703 		report("I/O bitmap - overrun", 0);
704 	set_stage(9);
705 	vmcall();
706 	outb(0x0, 0x0);
707 	report("I/O bitmap - ignore unconditional exiting", stage == 9);
708 	set_stage(10);
709 	vmcall();
710 	outb(0x0, 0x0);
711 	report("I/O bitmap - unconditional exiting", stage == 11);
712 }
713 
714 static int iobmp_exit_handler()
715 {
716 	u64 guest_rip;
717 	ulong reason, exit_qual;
718 	u32 insn_len, ctrl_cpu0;
719 
720 	guest_rip = vmcs_read(GUEST_RIP);
721 	reason = vmcs_read(EXI_REASON) & 0xff;
722 	exit_qual = vmcs_read(EXI_QUALIFICATION);
723 	insn_len = vmcs_read(EXI_INST_LEN);
724 	switch (reason) {
725 	case VMX_IO:
726 		switch (get_stage()) {
727 		case 0:
728 		case 1:
729 			set_stage(stage + 1);
730 			break;
731 		case 2:
732 			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
733 				report("I/O bitmap - I/O width, byte", 0);
734 			else
735 				report("I/O bitmap - I/O width, byte", 1);
736 			if (!(exit_qual & VMX_IO_IN))
737 				report("I/O bitmap - I/O direction, in", 0);
738 			else
739 				report("I/O bitmap - I/O direction, in", 1);
740 			set_stage(stage + 1);
741 			break;
742 		case 3:
743 			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
744 				report("I/O bitmap - I/O width, word", 0);
745 			else
746 				report("I/O bitmap - I/O width, word", 1);
747 			if (!(exit_qual & VMX_IO_IN))
748 				report("I/O bitmap - I/O direction, out", 1);
749 			else
750 				report("I/O bitmap - I/O direction, out", 0);
751 			set_stage(stage + 1);
752 			break;
753 		case 4:
754 			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
755 				report("I/O bitmap - I/O width, long", 0);
756 			else
757 				report("I/O bitmap - I/O width, long", 1);
758 			set_stage(stage + 1);
759 			break;
760 		case 5:
761 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
762 				set_stage(stage + 1);
763 			break;
764 		case 6:
765 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
766 				set_stage(stage + 1);
767 			break;
768 		case 7:
769 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
770 				set_stage(stage + 1);
771 			break;
772 		case 8:
773 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
774 				set_stage(stage + 1);
775 			break;
776 		case 9:
777 		case 10:
778 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
779 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
780 			set_stage(stage + 1);
781 			break;
782 		default:
783 			// Should not reach here
784 			printf("ERROR : unexpected stage, %d\n", get_stage());
785 			print_vmexit_info();
786 			return VMX_TEST_VMEXIT;
787 		}
788 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
789 		return VMX_TEST_RESUME;
790 	case VMX_VMCALL:
791 		switch (get_stage()) {
792 		case 9:
793 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
794 			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
795 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
796 			break;
797 		case 10:
798 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
799 			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
800 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
801 			break;
802 		default:
803 			// Should not reach here
804 			printf("ERROR : unexpected stage, %d\n", get_stage());
805 			print_vmexit_info();
806 			return VMX_TEST_VMEXIT;
807 		}
808 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
809 		return VMX_TEST_RESUME;
810 	default:
811 		printf("guest_rip = 0x%llx\n", guest_rip);
812 		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
813 		break;
814 	}
815 	return VMX_TEST_VMEXIT;
816 }
817 
818 #define INSN_CPU0		0
819 #define INSN_CPU1		1
820 #define INSN_ALWAYS_TRAP	2
821 #define INSN_NEVER_TRAP		3
822 
823 #define FIELD_EXIT_QUAL		0
824 #define FIELD_INSN_INFO		1
825 
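/*
 * One tiny stub per tested instruction: each stub executes exactly one
 * instruction and returns, so the exit handler can simply advance GUEST_RIP
 * by the VM-exit instruction length and resume.
 */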
826 asm(
827 	"insn_hlt: hlt;ret\n\t"
828 	"insn_invlpg: invlpg 0x12345678;ret\n\t"
829 	"insn_mwait: mwait;ret\n\t"
830 	"insn_rdpmc: rdpmc;ret\n\t"
831 	"insn_rdtsc: rdtsc;ret\n\t"
832 	"insn_monitor: monitor;ret\n\t"
833 	"insn_pause: pause;ret\n\t"
834 	"insn_wbinvd: wbinvd;ret\n\t"
835 	"insn_cpuid: cpuid;ret\n\t"
836 	"insn_invd: invd;ret\n\t"
837 );
838 extern void insn_hlt();
839 extern void insn_invlpg();
840 extern void insn_mwait();
841 extern void insn_rdpmc();
842 extern void insn_rdtsc();
843 extern void insn_monitor();
844 extern void insn_pause();
845 extern void insn_wbinvd();
846 extern void insn_cpuid();
847 extern void insn_invd();
848 
849 u32 cur_insn;
850 
851 struct insn_table {
852 	const char *name;
853 	u32 flag;
854 	void (*insn_func)();
855 	u32 type;
856 	u32 reason;
857 	ulong exit_qual;
858 	u32 insn_info;
859 	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
860 	// which fields need to be tested; the exit reason is always tested
861 	u32 test_field;
862 };
863 
864 /*
865  * Add more test cases of instruction interception here. Each element of
866  * this table is:
867  *	name/control flag/insn function/type/exit reason/exit qualification/
868  *	instruction info/field to test
869  * The last field defines which fields (exit_qual and insn_info) need to be
870  * tested in exit handler. If set to 0, only "reason" is checked.
871  */
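/*
 * For illustration only: a hypothetical INSN_NEVER_TRAP entry (neither a
 * "NOP" stub nor insn_nop exists in this file) would be added above the
 * {NULL} terminator as:
 *	{"NOP", 0, insn_nop, INSN_NEVER_TRAP, 0, 0, 0, 0},
 */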
872 static struct insn_table insn_table[] = {
873 	// Flags for Primary Processor-Based VM-Execution Controls
874 	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
875 	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
876 		0x12345678, 0, FIELD_EXIT_QUAL},
877 	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
878 	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
879 	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
880 	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
881 	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
882 	// Flags for Secondary Processor-Based VM-Execution Controls
883 	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
884 	// Instructions always trap
885 	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
886 	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
887 	// Instructions never trap
888 	{NULL},
889 };
890 
891 static int insn_intercept_init()
892 {
893 	u32 ctrl_cpu[2];
894 
895 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
896 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
897 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
898 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
899 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
900 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
901 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
902 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
903 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
904 	return VMX_TEST_START;
905 }
906 
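/*
 * The guest walks insn_table[], sets the stage to the current index and
 * executes the stub.  For INSN_CPU0/INSN_CPU1/INSN_ALWAYS_TRAP entries the
 * exit handler is expected to bump the stage by one after matching the exit
 * reason (and, if requested, the exit qualification / instruction info); for
 * INSN_NEVER_TRAP entries the stage must stay unchanged.
 */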
907 static void insn_intercept_main()
908 {
909 	cur_insn = 0;
910 	while(insn_table[cur_insn].name != NULL) {
911 		set_stage(cur_insn);
912 		if ((insn_table[cur_insn].type == INSN_CPU0
913 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
914 			|| (insn_table[cur_insn].type == INSN_CPU1
915 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
916 			printf("\tCPU_%s is not supported.\n",
917 				insn_table[cur_insn].name);
			/* skip the unsupported instruction instead of looping forever */
			cur_insn++;
918 			continue;
919 		}
920 		insn_table[cur_insn].insn_func();
921 		switch (insn_table[cur_insn].type) {
922 		case INSN_CPU0:
923 		case INSN_CPU1:
924 		case INSN_ALWAYS_TRAP:
925 			if (stage != cur_insn + 1)
926 				report(insn_table[cur_insn].name, 0);
927 			else
928 				report(insn_table[cur_insn].name, 1);
929 			break;
930 		case INSN_NEVER_TRAP:
931 			if (stage == cur_insn + 1)
932 				report(insn_table[cur_insn].name, 0);
933 			else
934 				report(insn_table[cur_insn].name, 1);
935 			break;
936 		}
937 		cur_insn ++;
938 	}
939 }
940 
941 static int insn_intercept_exit_handler()
942 {
943 	u64 guest_rip;
944 	u32 reason;
945 	ulong exit_qual;
946 	u32 insn_len;
947 	u32 insn_info;
948 	bool pass;
949 
950 	guest_rip = vmcs_read(GUEST_RIP);
951 	reason = vmcs_read(EXI_REASON) & 0xff;
952 	exit_qual = vmcs_read(EXI_QUALIFICATION);
953 	insn_len = vmcs_read(EXI_INST_LEN);
954 	insn_info = vmcs_read(EXI_INST_INFO);
955 	pass = (cur_insn == get_stage()) &&
956 			insn_table[cur_insn].reason == reason;
957 	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
958 		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
959 	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
960 		pass = pass && insn_table[cur_insn].insn_info == insn_info;
961 	if (pass)
962 		set_stage(stage + 1);
963 	vmcs_write(GUEST_RIP, guest_rip + insn_len);
964 	return VMX_TEST_RESUME;
965 }
966 
967 
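/*
 * Build the EPT structures: a 4-level table (page-walk length 4) whose
 * paging-structure memory type is picked from the UC/WB capability bits,
 * identity-mapping guest-physical memory up to max(RAM size, 4 GiB) and
 * using 2M mappings when EPT_CAP_2M_PAGE is available.  Returns non-zero if
 * a required EPT capability is missing.
 */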
968 static int setup_ept()
969 {
970 	int support_2m;
971 	unsigned long end_of_memory;
972 
973 	if (!(ept_vpid.val & EPT_CAP_UC) &&
974 			!(ept_vpid.val & EPT_CAP_WB)) {
975 		printf("\tEPT paging-structure memory type "
976 				"UC&WB are not supported\n");
977 		return 1;
978 	}
979 	if (ept_vpid.val & EPT_CAP_UC)
980 		eptp = EPT_MEM_TYPE_UC;
981 	else
982 		eptp = EPT_MEM_TYPE_WB;
983 	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
984 		printf("\tPWL4 is not supported\n");
985 		return 1;
986 	}
987 	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
988 	pml4 = alloc_page();
989 	memset(pml4, 0, PAGE_SIZE);
990 	eptp |= virt_to_phys(pml4);
991 	vmcs_write(EPTP, eptp);
992 	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
993 	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
994 	if (end_of_memory < (1ul << 32))
995 		end_of_memory = (1ul << 32);
996 	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
997 			EPT_WA | EPT_RA | EPT_EA);
998 	return 0;
999 }
1000 
1001 static int ept_init()
1002 {
1003 	unsigned long base_addr1, base_addr2;
1004 	u32 ctrl_cpu[2];
1005 
1006 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
1007 	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
1008 		printf("\tEPT is not supported\n");
1009 		return VMX_TEST_EXIT;
1010 	}
1011 
1012 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
1013 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
1014 	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
1015 		& ctrl_cpu_rev[0].clr;
1016 	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
1017 		& ctrl_cpu_rev[1].clr;
1018 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
1019 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
1020 	if (setup_ept())
1021 		return VMX_TEST_EXIT;
1022 	data_page1 = alloc_page();
1023 	data_page2 = alloc_page();
1024 	memset(data_page1, 0x0, PAGE_SIZE);
1025 	memset(data_page2, 0x0, PAGE_SIZE);
1026 	*((u32 *)data_page1) = MAGIC_VAL_1;
1027 	*((u32 *)data_page2) = MAGIC_VAL_2;
1028 	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
1029 	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
1030 	setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
1031 			EPT_WA | EPT_RA | EPT_EA);
1032 	setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
1033 			EPT_WA | EPT_RA | EPT_EA);
1034 	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
1035 			EPT_RA | EPT_WA | EPT_EA);
1036 	return VMX_TEST_START;
1037 }
1038 
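/*
 * EPT test flow.  ept_init() maps guest-physical data_page2 onto host
 * data_page1, so the basic read/write/remap checks observe the remapping.
 * The later VMCALL stages rewrite data_page1's mapping to provoke EPT
 * misconfigurations (a write-only entry, then a reserved memory type) and
 * EPT violations (clearing the present bits of the final PTE and then of a
 * paging-structure entry); the corresponding exit cases verify the exit
 * reason and qualification.
 */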
1039 static void ept_main()
1040 {
1041 	set_stage(0);
1042 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
1043 			*((u32 *)data_page1) != MAGIC_VAL_1)
1044 		report("EPT basic framework - read", 0);
1045 	else {
1046 		*((u32 *)data_page2) = MAGIC_VAL_3;
1047 		vmcall();
1048 		if (get_stage() == 1) {
1049 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1050 					*((u32 *)data_page2) == MAGIC_VAL_2)
1051 				report("EPT basic framework", 1);
1052 			else
1053 				report("EPT basic framework - remap", 0);
1054 		}
1055 	}
1056 	// Test EPT Misconfigurations
1057 	set_stage(1);
1058 	vmcall();
1059 	*((u32 *)data_page1) = MAGIC_VAL_1;
1060 	if (get_stage() != 2) {
1061 		report("EPT misconfigurations", 0);
1062 		goto t1;
1063 	}
1064 	set_stage(2);
1065 	vmcall();
1066 	*((u32 *)data_page1) = MAGIC_VAL_1;
1067 	if (get_stage() != 3) {
1068 		report("EPT misconfigurations", 0);
1069 		goto t1;
1070 	}
1071 	report("EPT misconfigurations", 1);
1072 t1:
1073 	// Test EPT violation
1074 	set_stage(3);
1075 	vmcall();
1076 	*((u32 *)data_page1) = MAGIC_VAL_1;
1077 	if (get_stage() == 4)
1078 		report("EPT violation - page permission", 1);
1079 	else
1080 		report("EPT violation - page permission", 0);
1081 	// Violation caused by EPT paging structure
1082 	set_stage(4);
1083 	vmcall();
1084 	*((u32 *)data_page1) = MAGIC_VAL_2;
1085 	if (get_stage() == 5)
1086 		report("EPT violation - paging structure", 1);
1087 	else
1088 		report("EPT violation - paging structure", 0);
1089 }
1090 
1091 static int ept_exit_handler()
1092 {
1093 	u64 guest_rip;
1094 	ulong reason;
1095 	u32 insn_len;
1096 	u32 exit_qual;
1097 	static unsigned long data_page1_pte, data_page1_pte_pte;
1098 
1099 	guest_rip = vmcs_read(GUEST_RIP);
1100 	reason = vmcs_read(EXI_REASON) & 0xff;
1101 	insn_len = vmcs_read(EXI_INST_LEN);
1102 	exit_qual = vmcs_read(EXI_QUALIFICATION);
1103 	switch (reason) {
1104 	case VMX_VMCALL:
1105 		switch (get_stage()) {
1106 		case 0:
1107 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1108 					*((u32 *)data_page2) == MAGIC_VAL_2) {
1109 				set_stage(get_stage() + 1);
1110 				install_ept(pml4, (unsigned long)data_page2,
1111 						(unsigned long)data_page2,
1112 						EPT_RA | EPT_WA | EPT_EA);
1113 			} else
1114 				report("EPT basic framework - write", 0);
1115 			break;
1116 		case 1:
1117 			install_ept(pml4, (unsigned long)data_page1,
1118  				(unsigned long)data_page1, EPT_WA);
1119 			ept_sync(INVEPT_SINGLE, eptp);
1120 			break;
1121 		case 2:
1122 			install_ept(pml4, (unsigned long)data_page1,
1123  				(unsigned long)data_page1,
1124  				EPT_RA | EPT_WA | EPT_EA |
1125  				(2 << EPT_MEM_TYPE_SHIFT));
1126 			ept_sync(INVEPT_SINGLE, eptp);
1127 			break;
1128 		case 3:
1129 			data_page1_pte = get_ept_pte(pml4,
1130 				(unsigned long)data_page1, 1);
1131 			set_ept_pte(pml4, (unsigned long)data_page1,
1132 				1, data_page1_pte & (~EPT_PRESENT));
1133 			ept_sync(INVEPT_SINGLE, eptp);
1134 			break;
1135 		case 4:
1136 			data_page1_pte = get_ept_pte(pml4,
1137 				(unsigned long)data_page1, 2);
1138 			data_page1_pte &= PAGE_MASK;
1139 			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
1140 			set_ept_pte(pml4, data_page1_pte, 2,
1141 				data_page1_pte_pte & (~EPT_PRESENT));
1142 			ept_sync(INVEPT_SINGLE, eptp);
1143 			break;
1144 		// Should not reach here
1145 		default:
1146 			printf("ERROR - unexpected stage, %d.\n", get_stage());
1147 			print_vmexit_info();
1148 			return VMX_TEST_VMEXIT;
1149 		}
1150 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1151 		return VMX_TEST_RESUME;
1152 	case VMX_EPT_MISCONFIG:
1153 		switch (get_stage()) {
1154 		case 1:
1155 		case 2:
1156 			set_stage(get_stage() + 1);
1157 			install_ept(pml4, (unsigned long)data_page1,
1158  				(unsigned long)data_page1,
1159  				EPT_RA | EPT_WA | EPT_EA);
1160 			ept_sync(INVEPT_SINGLE, eptp);
1161 			break;
1162 		// Should not reach here
1163 		default:
1164 			printf("ERROR - unexpected stage, %d.\n", get_stage());
1165 			print_vmexit_info();
1166 			return VMX_TEST_VMEXIT;
1167 		}
1168 		return VMX_TEST_RESUME;
1169 	case VMX_EPT_VIOLATION:
1170 		switch(get_stage()) {
1171 		case 3:
1172 			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
1173 					EPT_VLT_PADDR))
1174 				set_stage(get_stage() + 1);
1175 			set_ept_pte(pml4, (unsigned long)data_page1,
1176 				1, data_page1_pte | (EPT_PRESENT));
1177 			ept_sync(INVEPT_SINGLE, eptp);
1178 			break;
1179 		case 4:
1180 			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
1181 				set_stage(get_stage() + 1);
1182 			set_ept_pte(pml4, data_page1_pte, 2,
1183 				data_page1_pte_pte | (EPT_PRESENT));
1184 			ept_sync(INVEPT_SINGLE, eptp);
1185 			break;
1186 		default:
1187 			// Should not reach here
1188 			printf("ERROR : unexpected stage, %d\n", get_stage());
1189 			print_vmexit_info();
1190 			return VMX_TEST_VMEXIT;
1191 		}
1192 		return VMX_TEST_RESUME;
1193 	default:
1194 		printf("Unknown exit reason, %d\n", reason);
1195 		print_vmexit_info();
1196 	}
1197 	return VMX_TEST_VMEXIT;
1198 }
1199 
1200 #define TIMER_VECTOR	222
1201 
1202 static volatile bool timer_fired;
1203 
1204 static void timer_isr(isr_regs_t *regs)
1205 {
1206 	timer_fired = true;
1207 	apic_write(APIC_EOI, 0);
1208 }
1209 
1210 static int interrupt_init(struct vmcs *vmcs)
1211 {
1212 	msr_bmp_init();
1213 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1214 	handle_irq(TIMER_VECTOR, timer_isr);
1215 	return VMX_TEST_START;
1216 }
1217 
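/*
 * External-interrupt test: the LAPIC timer is programmed to fire on
 * TIMER_VECTOR, and each VMCALL stage toggles PIN_EXTINT (the last two
 * stages also put the guest into the HLT activity state).  The test covers
 * direct and intercepted interrupts while the guest is running, while it is
 * halted with "sti; hlt", and while it is in the HLT activity state.
 */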
1218 static void interrupt_main(void)
1219 {
1220 	long long start, loops;
1221 
1222 	set_stage(0);
1223 
1224 	apic_write(APIC_LVTT, TIMER_VECTOR);
1225 	irq_enable();
1226 
1227 	apic_write(APIC_TMICT, 1);
1228 	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1229 		asm volatile ("nop");
1230 	report("direct interrupt while running guest", timer_fired);
1231 
1232 	apic_write(APIC_TMICT, 0);
1233 	irq_disable();
1234 	vmcall();
1235 	timer_fired = false;
1236 	apic_write(APIC_TMICT, 1);
1237 	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1238 		asm volatile ("nop");
1239 	report("intercepted interrupt while running guest", timer_fired);
1240 
1241 	irq_enable();
1242 	apic_write(APIC_TMICT, 0);
1243 	irq_disable();
1244 	vmcall();
1245 	timer_fired = false;
1246 	start = rdtsc();
1247 	apic_write(APIC_TMICT, 1000000);
1248 
1249 	asm volatile ("sti; hlt");
1250 
1251 	report("direct interrupt + hlt",
1252 	       rdtsc() - start > 1000000 && timer_fired);
1253 
1254 	apic_write(APIC_TMICT, 0);
1255 	irq_disable();
1256 	vmcall();
1257 	timer_fired = false;
1258 	start = rdtsc();
1259 	apic_write(APIC_TMICT, 1000000);
1260 
1261 	asm volatile ("sti; hlt");
1262 
1263 	report("intercepted interrupt + hlt",
1264 	       rdtsc() - start > 10000 && timer_fired);
1265 
1266 	apic_write(APIC_TMICT, 0);
1267 	irq_disable();
1268 	vmcall();
1269 	timer_fired = false;
1270 	start = rdtsc();
1271 	apic_write(APIC_TMICT, 1000000);
1272 
1273 	irq_enable();
1274 	asm volatile ("nop");
1275 	vmcall();
1276 
1277 	report("direct interrupt + activity state hlt",
1278 	       rdtsc() - start > 10000 && timer_fired);
1279 
1280 	apic_write(APIC_TMICT, 0);
1281 	irq_disable();
1282 	vmcall();
1283 	timer_fired = false;
1284 	start = rdtsc();
1285 	apic_write(APIC_TMICT, 1000000);
1286 
1287 	irq_enable();
1288 	asm volatile ("nop");
1289 	vmcall();
1290 
1291 	report("intercepted interrupt + activity state hlt",
1292 	       rdtsc() - start > 10000 && timer_fired);
1293 }
1294 
1295 static int interrupt_exit_handler(void)
1296 {
1297 	u64 guest_rip = vmcs_read(GUEST_RIP);
1298 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
1299 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1300 
1301 	switch (reason) {
1302 	case VMX_VMCALL:
1303 		switch (get_stage()) {
1304 		case 0:
1305 		case 2:
1306 		case 5:
1307 			vmcs_write(PIN_CONTROLS,
1308 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1309 			break;
1310 		case 1:
1311 		case 3:
1312 			vmcs_write(PIN_CONTROLS,
1313 				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1314 			break;
1315 		case 4:
1316 		case 6:
1317 			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
1318 			break;
1319 		}
1320 		set_stage(get_stage() + 1);
1321 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1322 		return VMX_TEST_RESUME;
1323 	case VMX_EXTINT:
1324 		irq_enable();
1325 		asm volatile ("nop");
1326 		irq_disable();
1327 		if (get_stage() >= 2) {
1328 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
1329 			vmcs_write(GUEST_RIP, guest_rip + insn_len);
1330 		}
1331 		return VMX_TEST_RESUME;
1332 	default:
1333 		printf("Unknown exit reason, %d\n", reason);
1334 		print_vmexit_info();
1335 	}
1336 
1337 	return VMX_TEST_VMEXIT;
1338 }
1339 
1340 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
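/*
 * A new test is registered by adding an entry before the terminating NULL
 * entry, e.g. (hypothetical names):
 *	{ "my feature", my_feature_init, my_feature_main,
 *		my_feature_exit_handler, NULL, {0} },
 */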
1341 struct vmx_test vmx_tests[] = {
1342 	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
1343 	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
1344 	{ "preemption timer", preemption_timer_init, preemption_timer_main,
1345 		preemption_timer_exit_handler, NULL, {0} },
1346 	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
1347 		test_ctrl_pat_exit_handler, NULL, {0} },
1348 	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
1349 		test_ctrl_efer_exit_handler, NULL, {0} },
1350 	{ "CR shadowing", NULL, cr_shadowing_main,
1351 		cr_shadowing_exit_handler, NULL, {0} },
1352 	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
1353 		NULL, {0} },
1354 	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
1355 		insn_intercept_exit_handler, NULL, {0} },
1356 	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
1357 	{ "interrupt", interrupt_init, interrupt_main,
1358 		interrupt_exit_handler, NULL, {0} },
1359 	{ NULL, NULL, NULL, NULL, NULL, {0} },
1360 };
1361