xref: /kvm-unit-tests/x86/vmx_tests.c (revision b6e5015f73fc2c5c200db70303551a5f8a74cb41)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 #include "isr.h"
13 #include "apic.h"
14 
/* Host MSR values saved by the PAT/EFER tests so exit handlers can compare. */
u64 ia32_pat;
u64 ia32_efer;
/* I/O bitmap pages and test port used by the I/O bitmap test. */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

/* EPT paging structures shared between ept_init/ept_main and the handler. */
unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;
23 
24 static inline void vmcall()
25 {
26 	asm volatile("vmcall");
27 }
28 
/* Guest entry point for the basic test: does nothing, so the guest simply
 * runs to its normal exit path. */
void basic_guest_main()
{
}
32 
/* Any VM exit during the basic test is unexpected: report failure, dump
 * the exit info, and abort the test. */
int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}
39 
40 void vmenter_main()
41 {
42 	u64 rax;
43 	u64 rsp, resume_rsp;
44 
45 	report("test vmlaunch", 1);
46 
47 	asm volatile(
48 		"mov %%rsp, %0\n\t"
49 		"mov %3, %%rax\n\t"
50 		"vmcall\n\t"
51 		"mov %%rax, %1\n\t"
52 		"mov %%rsp, %2\n\t"
53 		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
54 		: "g"(0xABCD));
55 	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
56 }
57 
/*
 * Host side of the vmlaunch/vmresume test.  On the guest's VMCALL, check
 * that RAX carried 0xABCD, replace it with 0xFFFF, skip the VMCALL
 * instruction, and resume; any other exit reason is a failure.
 */
int vmenter_exit_handler()
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	/* Low byte of the exit reason field is the basic exit reason. */
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		regs.rax = 0xFFFF;
		/* VMCALL is 3 bytes (0f 01 c1). */
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
80 
/* State shared between the preemption-timer guest code and exit handler. */
u32 preempt_scale;			/* TSC-to-timer-tick shift (IA32_VMX_MISC[4:0]) */
volatile unsigned long long tsc_val;	/* TSC snapshot taken by the guest */
volatile u32 preempt_val;		/* value programmed into PREEMPT_TIMER_VALUE */
u64 saved_rip;				/* guest RIP expected at the stage-4 timer exit */
85 
/*
 * Enable the VMX-preemption timer (if supported), program its initial
 * value, and record the TSC-to-timer-tick shift from IA32_VMX_MISC.
 */
int preemption_timer_init()
{
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return VMX_TEST_EXIT;
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
	preempt_val = 10000000;
	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
	/* IA32_VMX_MISC[4:0]: right-shift converting TSC deltas to ticks. */
	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
		printf("\tSave preemption value is not supported\n");

	return VMX_TEST_START;
}
102 
/*
 * Guest side of the preemption-timer test.  Stage protocol (shared with
 * preemption_timer_exit_handler):
 *   0: VMCALL to check the timer value is kept across a non-saving exit;
 *   1: VMCALL to check EXI_SAVE_PREEMPT stores the counted-down value;
 *   1->2: busy-wait for a timer exit, escalating via VMCALL if ~10 timer
 *      periods pass with none;
 *   3: HLT so the timer must fire from the halted activity state;
 *   4->5: final VMCALLs driving the zero-value timer check.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		vmx_set_test_stage(0);
		vmcall();
		if (vmx_get_test_stage() == 1)
			vmcall();
	}
	vmx_set_test_stage(1);
	/* Spin until the timer kicks us out (handler advances the stage);
	 * report the hang via VMCALL if it takes far too long. */
	while (vmx_get_test_stage() == 1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			vmx_set_test_stage(2);
			vmcall();
		}
	}
	tsc_val = rdtsc();
	/* The timer must also fire while the guest is halted. */
	asm volatile ("hlt");
	vmcall();
	vmx_set_test_stage(5);
	vmcall();
}
126 
/*
 * Host side of the preemption-timer test.  VMX_PREEMPT exits validate
 * that the timer fired at the right time for the current stage; VMCALL
 * exits either advance the scenario or record that an expected timer
 * exit never happened.  See preemption_timer_main for the stage map.
 */
int preemption_timer_exit_handler()
{
	bool guest_halted;
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			/* Timer fired during the busy-wait; at least one
			 * full timer period must have elapsed. */
			report("busy-wait for preemption timer",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			/* Timer fired during HLT: the guest must still be
			 * in the halted activity state. */
			guest_halted =
				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
			report("preemption timer during hlt",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val && guest_halted);
			vmx_set_test_stage(4);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
			/* Un-halt the guest before resuming it. */
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
			return VMX_TEST_RESUME;
		case 4:
			/* Zero-valued timer: the exit must occur before the
			 * instruction after VMCALL retires. */
			report("preemption timer with 0 value",
			       saved_rip == guest_rip);
			break;
		default:
			printf("Invalid stage.\n");
			print_vmexit_info();
			break;
		}
		break;
	case VMX_VMCALL:
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		switch (vmx_get_test_stage()) {
		case 0:
			/* The exit did not save the timer, so the original
			 * programmed value must still be in the VMCS. */
			report("Keep preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
			vmx_set_test_stage(1);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			/* Turn on EXI_SAVE_PREEMPT for the next exit. */
			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
				EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
			vmcs_write(EXI_CONTROLS, ctrl_exit);
			return VMX_TEST_RESUME;
		case 1:
			/* With save enabled, the stored value must have
			 * counted down from what was programmed. */
			report("Save preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
			return VMX_TEST_RESUME;
		case 2:
			/* Guest gave up busy-waiting: the timer never fired. */
			report("busy-wait for preemption timer", 0);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			/* VMCALL after HLT means no timer exit occurred. */
			report("preemption timer during hlt", 0);
			vmx_set_test_stage(4);
			/* fall through */
		case 4:
			/* Arm a zero-valued timer; the next entry should
			 * exit immediately with GUEST_RIP == saved_rip. */
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
			vmcs_write(PREEMPT_TIMER_VALUE, 0);
			saved_rip = guest_rip + insn_len;
			return VMX_TEST_RESUME;
		case 5:
			/* Guest reached its final VMCALL: the zero-value
			 * timer never fired. */
			report("preemption timer with 0 value (vmcall stage 5)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	/* Leave the preemption timer disabled for subsequent tests. */
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
219 
220 void msr_bmp_init()
221 {
222 	void *msr_bitmap;
223 	u32 ctrl_cpu0;
224 
225 	msr_bitmap = alloc_page();
226 	memset(msr_bitmap, 0x0, PAGE_SIZE);
227 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
228 	ctrl_cpu0 |= CPU_MSR_BITMAP;
229 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
230 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
231 }
232 
/*
 * Enable whichever of the entry-load / exit-save / exit-load PAT
 * controls the hardware supports, give the guest a zero PAT, and save
 * the host PAT for later comparison.  Returns 1 (skip) when no PAT
 * control is available at all.
 */
static int test_ctrl_pat_init()
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) &&
	    !(ctrl_exit_rev.clr & EXI_LOAD_PAT) &&
	    !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
		printf("\tSave/load PAT is not supported\n");
		return 1;
	}

	ctrl_ent = vmcs_read(ENT_CONTROLS);
	ctrl_exi = vmcs_read(EXI_CONTROLS);
	/* Only set the control bits the hardware allows to be set. */
	ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
	ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT);
	vmcs_write(ENT_CONTROLS, ctrl_ent);
	vmcs_write(EXI_CONTROLS, ctrl_exi);
	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	vmcs_write(GUEST_PAT, 0x0);
	vmcs_write(HOST_PAT, ia32_pat);
	return VMX_TEST_START;
}
257 
/*
 * Guest side of the PAT control test: verify ENT_LOAD_PAT installed the
 * zero GUEST_PAT on entry, write a distinctive PAT value, VMCALL so the
 * host checks exit-save/exit-load, then verify the host-restored value
 * was loaded on re-entry.
 */
static void test_ctrl_pat_main()
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	/* Leave a recognizable value for the exit handler to find. */
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
		report("Entry load PAT", guest_ia32_pat == ia32_pat);
}
277 
/*
 * Host side of the PAT control test: on the guest's VMCALL, check that
 * EXI_SAVE_PAT captured the guest's 0x6 and EXI_LOAD_PAT restored the
 * host PAT, then arm GUEST_PAT with the host value for the entry-load
 * check and resume past the VMCALL.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			/* Emulate the save so the rest of the test works. */
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			report("Exit save PAT", guest_pat == 0x6);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else
			report("Exit load PAT", rdmsr(MSR_IA32_CR_PAT) == ia32_pat);
		vmcs_write(GUEST_PAT, ia32_pat);
		/* VMCALL is 3 bytes long. */
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
308 
/*
 * Enable entry-load / exit-save / exit-load EFER controls (masked to
 * what the hardware allows) and set both GUEST_EFER and HOST_EFER to
 * the current EFER with the NX bit flipped.
 */
static int test_ctrl_efer_init()
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
	ia32_efer = rdmsr(MSR_EFER);
	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
	return VMX_TEST_START;
}
324 
/*
 * Guest side of the EFER control test: verify ENT_LOAD_EFER installed
 * the NX-flipped EFER on entry, restore the original EFER, VMCALL so
 * the host checks exit-save/exit-load, then verify the value loaded on
 * re-entry.
 */
static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	/* Leave the unmodified EFER for the exit handler to find. */
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
		report("Entry load EFER", guest_ia32_efer == ia32_efer);
}
344 
345 static int test_ctrl_efer_exit_handler()
346 {
347 	u64 guest_rip;
348 	ulong reason;
349 	u64 guest_efer;
350 
351 	guest_rip = vmcs_read(GUEST_RIP);
352 	reason = vmcs_read(EXI_REASON) & 0xff;
353 	switch (reason) {
354 	case VMX_VMCALL:
355 		guest_efer = vmcs_read(GUEST_EFER);
356 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
357 			printf("\tEXI_SAVE_EFER is not supported\n");
358 			vmcs_write(GUEST_EFER, ia32_efer);
359 		} else {
360 			report("Exit save EFER", guest_efer == ia32_efer);
361 		}
362 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
363 			printf("\tEXI_LOAD_EFER is not supported\n");
364 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
365 		} else {
366 			report("Exit load EFER", rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX));
367 		}
368 		vmcs_write(GUEST_PAT, ia32_efer);
369 		vmcs_write(GUEST_RIP, guest_rip + 3);
370 		return VMX_TEST_RESUME;
371 	default:
372 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
373 		break;
374 	}
375 	return VMX_TEST_VMEXIT;
376 }
377 
/* CR0/CR4 values compared between cr_shadowing_main and its exit handler. */
u32 guest_cr0, guest_cr4;
379 
380 static void cr_shadowing_main()
381 {
382 	u32 cr0, cr4, tmp;
383 
384 	// Test read through
385 	vmx_set_test_stage(0);
386 	guest_cr0 = read_cr0();
387 	if (vmx_get_test_stage() == 1)
388 		report("Read through CR0", 0);
389 	else
390 		vmcall();
391 	vmx_set_test_stage(1);
392 	guest_cr4 = read_cr4();
393 	if (vmx_get_test_stage() == 2)
394 		report("Read through CR4", 0);
395 	else
396 		vmcall();
397 	// Test write through
398 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
399 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
400 	vmx_set_test_stage(2);
401 	write_cr0(guest_cr0);
402 	if (vmx_get_test_stage() == 3)
403 		report("Write throuth CR0", 0);
404 	else
405 		vmcall();
406 	vmx_set_test_stage(3);
407 	write_cr4(guest_cr4);
408 	if (vmx_get_test_stage() == 4)
409 		report("Write through CR4", 0);
410 	else
411 		vmcall();
412 	// Test read shadow
413 	vmx_set_test_stage(4);
414 	vmcall();
415 	cr0 = read_cr0();
416 	if (vmx_get_test_stage() != 5)
417 		report("Read shadowing CR0", cr0 == guest_cr0);
418 	vmx_set_test_stage(5);
419 	cr4 = read_cr4();
420 	if (vmx_get_test_stage() != 6)
421 		report("Read shadowing CR4", cr4 == guest_cr4);
422 	// Test write shadow (same value with shadow)
423 	vmx_set_test_stage(6);
424 	write_cr0(guest_cr0);
425 	if (vmx_get_test_stage() == 7)
426 		report("Write shadowing CR0 (same value with shadow)", 0);
427 	else
428 		vmcall();
429 	vmx_set_test_stage(7);
430 	write_cr4(guest_cr4);
431 	if (vmx_get_test_stage() == 8)
432 		report("Write shadowing CR4 (same value with shadow)", 0);
433 	else
434 		vmcall();
435 	// Test write shadow (different value)
436 	vmx_set_test_stage(8);
437 	tmp = guest_cr0 ^ X86_CR0_TS;
438 	asm volatile("mov %0, %%rsi\n\t"
439 		"mov %%rsi, %%cr0\n\t"
440 		::"m"(tmp)
441 		:"rsi", "memory", "cc");
442 	report("Write shadowing different X86_CR0_TS", vmx_get_test_stage() == 9);
443 	vmx_set_test_stage(9);
444 	tmp = guest_cr0 ^ X86_CR0_MP;
445 	asm volatile("mov %0, %%rsi\n\t"
446 		"mov %%rsi, %%cr0\n\t"
447 		::"m"(tmp)
448 		:"rsi", "memory", "cc");
449 	report("Write shadowing different X86_CR0_MP", vmx_get_test_stage() == 10);
450 	vmx_set_test_stage(10);
451 	tmp = guest_cr4 ^ X86_CR4_TSD;
452 	asm volatile("mov %0, %%rsi\n\t"
453 		"mov %%rsi, %%cr4\n\t"
454 		::"m"(tmp)
455 		:"rsi", "memory", "cc");
456 	report("Write shadowing different X86_CR4_TSD", vmx_get_test_stage() == 11);
457 	vmx_set_test_stage(11);
458 	tmp = guest_cr4 ^ X86_CR4_DE;
459 	asm volatile("mov %0, %%rsi\n\t"
460 		"mov %%rsi, %%cr4\n\t"
461 		::"m"(tmp)
462 		:"rsi", "memory", "cc");
463 	report("Write shadowing different X86_CR4_DE", vmx_get_test_stage() == 12);
464 }
465 
/*
 * Host side of the CR shadowing test.  VMCALL exits verify the guest's
 * CR values landed in the VMCS (stages 0-3, 6-7) and install the
 * CR0/CR4 masks and read shadows at stage 4; VMX_CR exits either mark
 * an unexpected trap (stages 4-7) or validate the exit qualification of
 * an expected one (stages 8-11).
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("Read through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 1:
			report("Read through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 2:
			report("Write through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 3:
			report("Write through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 4:
			/* Install masks and shadows that differ from the
			 * real guest CR bits, so shadowed reads are
			 * distinguishable from read-through. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			report("Write shadowing CR0 (same value)",
					guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)));
			break;
		case 7:
			report("Write shadowing CR4 (same value)",
					guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)));
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (vmx_get_test_stage()) {
		case 4:
			/* A CR exit here means the shadowed read trapped,
			 * which it must not. */
			report("Read shadowing CR0", 0);
			vmx_inc_test_stage();
			break;
		case 5:
			report("Read shadowing CR4", 0);
			vmx_inc_test_stage();
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				vmx_inc_test_stage();
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
562 
563 static int iobmp_init()
564 {
565 	u32 ctrl_cpu0;
566 
567 	io_bitmap_a = alloc_page();
568 	io_bitmap_a = alloc_page();
569 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
570 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
571 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
572 	ctrl_cpu0 |= CPU_IO_BITMAP;
573 	ctrl_cpu0 &= (~CPU_IO);
574 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
575 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
576 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
577 	return VMX_TEST_START;
578 }
579 
/*
 * Guest side of the I/O bitmap test.  Bitmap A covers ports 0-0x7FFF,
 * bitmap B covers 0x8000-0xFFFF.  The exit handler advances the stage
 * on each expected VMX_IO exit, so comparing the stage after an access
 * tells whether it trapped.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	vmx_set_test_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	report("I/O bitmap - I/O pass", vmx_get_test_stage() == 0);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	vmx_set_test_stage(2);
	inb(0x0);
	report("I/O bitmap - trap in", vmx_get_test_stage() == 3);
	vmx_set_test_stage(3);
	outw(0x0, 0x0);
	report("I/O bitmap - trap out", vmx_get_test_stage() == 4);
	vmx_set_test_stage(4);
	inl(0x0);
	report("I/O bitmap - I/O width, long", vmx_get_test_stage() == 5);
	// test low/high IO port
	vmx_set_test_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	report("I/O bitmap - I/O port, low part", vmx_get_test_stage() == 6);
	vmx_set_test_stage(6);
	/* Port 0x9000 is bit 0x1000 of bitmap B (offset by 0x8000). */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	report("I/O bitmap - I/O port, high part", vmx_get_test_stage() == 7);
	// test partial pass
	vmx_set_test_stage(7);
	/* 4-byte access starting at 0x4FFF straddles into marked 0x5000. */
	inl(0x4FFF);
	report("I/O bitmap - partial pass", vmx_get_test_stage() == 8);
	// test overrun
	vmx_set_test_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	/* Access wrapping past 0xFFFF must trap even with clear bitmaps. */
	inl(0xFFFF);
	report("I/O bitmap - overrun", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting",
	       vmx_get_test_stage() == 9);
	vmx_set_test_stage(10);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting",
	       vmx_get_test_stage() == 11);
}
628 
/*
 * Host side of the I/O bitmap test.  VMX_IO exits validate the exit
 * qualification (access size, direction, port) for the current stage;
 * VMCALL exits flip between bitmap-based and unconditional I/O exiting
 * for the final two sub-tests.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (vmx_get_test_stage()) {
		case 0:
		case 1:
			/* No exit expected here; incrementing makes the
			 * guest's "I/O pass" stage check fail. */
			vmx_inc_test_stage();
			break;
		case 2:
			report("I/O bitmap - I/O width, byte",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_BYTE);
			report("I/O bitmap - I/O direction, in", exit_qual & VMX_IO_IN);
			vmx_inc_test_stage();
			break;
		case 3:
			report("I/O bitmap - I/O width, word",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_WORD);
			report("I/O bitmap - I/O direction, out",
					!(exit_qual & VMX_IO_IN));
			vmx_inc_test_stage();
			break;
		case 4:
			report("I/O bitmap - I/O width, long",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_LONG);
			vmx_inc_test_stage();
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				vmx_inc_test_stage();
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				vmx_inc_test_stage();
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				vmx_inc_test_stage();
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				vmx_inc_test_stage();
			break;
		case 9:
		case 10:
			/* Unexpected trap while CPU_IO should be ignored /
			 * off; drop CPU_IO and record via the stage. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 9:
			/* CPU_IO set alongside CPU_IO_BITMAP: bitmap wins,
			 * so the following outb must not exit. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			/* Only CPU_IO set: the following outb must exit. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
723 
/* insn_table .type values: which control register gates the intercept. */
#define INSN_CPU0		0	/* primary processor-based controls */
#define INSN_CPU1		1	/* secondary processor-based controls */
#define INSN_ALWAYS_TRAP	2	/* unconditionally intercepted */

/* insn_table .test_field bits: extra exit fields to validate. */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
730 
/*
 * One tiny callable stub per intercepted instruction.  Note: in
 * "insn_cr3_load" the source operand "cr3" has no '%' register prefix,
 * so the assembler resolves it as the global variable `cr3` (saved by
 * insn_intercept_init below) — the stub loads that saved value from
 * memory and writes it to the real %cr3.  Do not "fix" the prefix.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t"
	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
#ifdef __x86_64__
	"insn_cr8_load: mov %rax,%cr8;ret\n\t"
	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
#endif
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
/* C-callable prototypes for the asm stubs defined above. */
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_cr3_load();
extern void insn_cr3_store();
#ifdef __x86_64__
extern void insn_cr8_load();
extern void insn_cr8_store();
#endif
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();

/* Index of the insn_table entry currently being exercised. */
u32 cur_insn;
/* Saved CR3 value; read from memory by the insn_cr3_load asm stub. */
u64 cr3;
768 
/* Describes one instruction-intercept test case. */
struct insn_table {
	const char *name;	/* printable instruction name */
	u32 flag;		/* VM-execution control bit enabling the intercept */
	void (*insn_func)();	/* asm stub that executes the instruction */
	u32 type;		/* INSN_CPU0 / INSN_CPU1 / INSN_ALWAYS_TRAP */
	u32 reason;		/* expected basic exit reason */
	ulong exit_qual;	/* expected exit qualification */
	u32 insn_info;		/* expected VM-exit instruction info */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which field need to be tested, reason is always tested
	u32 test_field;
};
781 
/*
 * Add more test cases of instruction intercept here.  Each entry is:
 *	name / control flag / insn function / type / exit reason /
 *	exit qualification / instruction info / fields to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in the exit handler.  If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
		FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
		FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
		FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
		FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
818 
/*
 * Enable the secondary controls (plus any primary bits forced on by the
 * hardware) and stash CR3 in the global the insn_cr3_load stub reads.
 */
static int insn_intercept_init()
{
	u32 ctrl_cpu;

	ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY;
	ctrl_cpu &= ctrl_cpu_rev[0].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set);
	cr3 = read_cr3();
	return VMX_TEST_START;
}
830 
/*
 * Guest side of the instruction-intercept test.  For each table entry:
 * at even stage 2*i, execute the instruction with the intercept off and
 * check no exit bumped the stage; then VMCALL (handler turns the
 * intercept on), execute it again and expect the handler to have bumped
 * the stage; the final VMCALL turns the intercept back off.
 */
static void insn_intercept_main()
{
	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
		vmx_set_test_stage(cur_insn * 2);
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
			printf("\tCPU_CTRL%d.CPU_%s is not supported.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);
			continue;
		}

		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) {
			/* skip hlt, it stalls the guest and is tested below */
			if (insn_table[cur_insn].insn_func != insn_hlt)
				insn_table[cur_insn].insn_func();
			report("execute %s", vmx_get_test_stage() == cur_insn * 2,
					insn_table[cur_insn].name);
		} else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP)
			printf("\tCPU_CTRL%d.CPU_%s always traps.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);

		/* Ask the exit handler to enable the intercept. */
		vmcall();

		insn_table[cur_insn].insn_func();
		report("intercept %s", vmx_get_test_stage() == cur_insn * 2 + 1,
				insn_table[cur_insn].name);

		/* Odd stage: handler disables the intercept again. */
		vmx_set_test_stage(cur_insn * 2 + 1);
		vmcall();
	}
}
869 
/*
 * Host side of the instruction-intercept test.  VMCALL exits toggle the
 * current instruction's intercept bit (on at even stages, off at odd);
 * any other exit is checked against the table's expected reason /
 * qualification / instruction info, and on a match the stage is bumped
 * so the guest sees the intercept happened.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);

	if (reason == VMX_VMCALL) {
		u32 val = 0;

		if (insn_table[cur_insn].type == INSN_CPU0)
			val = vmcs_read(CPU_EXEC_CTRL0);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			val = vmcs_read(CPU_EXEC_CTRL1);

		/* Odd stage: clear the intercept; even stage: set it. */
		if (vmx_get_test_stage() & 1)
			val &= ~insn_table[cur_insn].flag;
		else
			val |= insn_table[cur_insn].flag;

		if (insn_table[cur_insn].type == INSN_CPU0)
			vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set);
	} else {
		pass = (cur_insn * 2 == vmx_get_test_stage()) &&
			insn_table[cur_insn].reason == reason;
		if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL &&
		    insn_table[cur_insn].exit_qual != exit_qual)
			pass = false;
		if (insn_table[cur_insn].test_field & FIELD_INSN_INFO &&
		    insn_table[cur_insn].insn_info != insn_info)
			pass = false;
		if (pass)
			vmx_inc_test_stage();
	}
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
917 
918 
/*
 * Build a 4-level EPT hierarchy in `pml4`, program EPTP, and identity-
 * map all guest memory (at least 4 GiB) with RWX permissions.
 * Returns 0 on success, 1 when a required EPT capability is missing.
 */
static int setup_ept()
{
	int support_2m;
	unsigned long end_of_memory;

	if (!(ept_vpid.val & EPT_CAP_UC) &&
			!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory type "
				"UC&WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	/* Page-walk length field is (levels - 1) = 3 for 4-level EPT. */
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	vmcs_write(EPTP, eptp);
	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	/* Map at least up to 4 GiB so MMIO (e.g. the APIC) is covered. */
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
			EPT_WA | EPT_RA | EPT_EA);
	return 0;
}
951 
/* APIC version register value read through L1's APIC page; compared by
 * ept_main's MMIO-access check. */
static int apic_version;
953 
/*
 * Enable EPT (via the secondary controls), build the identity map, then
 * prepare two data pages with distinct magic values and remap
 * data_page1's guest address onto data_page2's backing frame so the
 * guest initially reads MAGIC_VAL_1 through both pointers.
 */
static int ept_init()
{
	unsigned long base_addr1, base_addr2;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	/* Re-map the 2M regions with 4K granularity so individual pages
	 * can be remapped below. */
	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
	setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	/* Alias data_page1's guest address to data_page2's frame. */
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);

	apic_version = *((u32 *)0xfee00030UL);
	return VMX_TEST_START;
}
993 
994 static void ept_main()
995 {
996 	vmx_set_test_stage(0);
997 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
998 			*((u32 *)data_page1) != MAGIC_VAL_1)
999 		report("EPT basic framework - read", 0);
1000 	else {
1001 		*((u32 *)data_page2) = MAGIC_VAL_3;
1002 		vmcall();
1003 		if (vmx_get_test_stage() == 1) {
1004 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1005 					*((u32 *)data_page2) == MAGIC_VAL_2)
1006 				report("EPT basic framework", 1);
1007 			else
1008 				report("EPT basic framework - remap", 1);
1009 		}
1010 	}
1011 	// Test EPT Misconfigurations
1012 	vmx_set_test_stage(1);
1013 	vmcall();
1014 	*((u32 *)data_page1) = MAGIC_VAL_1;
1015 	if (vmx_get_test_stage() != 2) {
1016 		report("EPT misconfigurations", 0);
1017 		goto t1;
1018 	}
1019 	vmx_set_test_stage(2);
1020 	vmcall();
1021 	*((u32 *)data_page1) = MAGIC_VAL_1;
1022 	report("EPT misconfigurations", vmx_get_test_stage() == 3);
1023 t1:
1024 	// Test EPT violation
1025 	vmx_set_test_stage(3);
1026 	vmcall();
1027 	*((u32 *)data_page1) = MAGIC_VAL_1;
1028 	report("EPT violation - page permission", vmx_get_test_stage() == 4);
1029 	// Violation caused by EPT paging structure
1030 	vmx_set_test_stage(4);
1031 	vmcall();
1032 	*((u32 *)data_page1) = MAGIC_VAL_2;
1033 	report("EPT violation - paging structure", vmx_get_test_stage() == 5);
1034 
1035 	// Test EPT access to L1 MMIO
1036 	vmx_set_test_stage(6);
1037 	report("EPT - MMIO access", *((u32 *)0xfee00030UL) == apic_version);
1038 }
1039 
1040 static int ept_exit_handler()
1041 {
1042 	u64 guest_rip;
1043 	ulong reason;
1044 	u32 insn_len;
1045 	u32 exit_qual;
1046 	static unsigned long data_page1_pte, data_page1_pte_pte;
1047 
1048 	guest_rip = vmcs_read(GUEST_RIP);
1049 	reason = vmcs_read(EXI_REASON) & 0xff;
1050 	insn_len = vmcs_read(EXI_INST_LEN);
1051 	exit_qual = vmcs_read(EXI_QUALIFICATION);
1052 	switch (reason) {
1053 	case VMX_VMCALL:
1054 		switch (vmx_get_test_stage()) {
1055 		case 0:
1056 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1057 					*((u32 *)data_page2) == MAGIC_VAL_2) {
1058 				vmx_inc_test_stage();
1059 				install_ept(pml4, (unsigned long)data_page2,
1060 						(unsigned long)data_page2,
1061 						EPT_RA | EPT_WA | EPT_EA);
1062 			} else
1063 				report("EPT basic framework - write\n", 0);
1064 			break;
1065 		case 1:
1066 			install_ept(pml4, (unsigned long)data_page1,
1067  				(unsigned long)data_page1, EPT_WA);
1068 			ept_sync(INVEPT_SINGLE, eptp);
1069 			break;
1070 		case 2:
1071 			install_ept(pml4, (unsigned long)data_page1,
1072  				(unsigned long)data_page1,
1073  				EPT_RA | EPT_WA | EPT_EA |
1074  				(2 << EPT_MEM_TYPE_SHIFT));
1075 			ept_sync(INVEPT_SINGLE, eptp);
1076 			break;
1077 		case 3:
1078 			data_page1_pte = get_ept_pte(pml4,
1079 				(unsigned long)data_page1, 1);
1080 			set_ept_pte(pml4, (unsigned long)data_page1,
1081 				1, data_page1_pte & (~EPT_PRESENT));
1082 			ept_sync(INVEPT_SINGLE, eptp);
1083 			break;
1084 		case 4:
1085 			data_page1_pte = get_ept_pte(pml4,
1086 				(unsigned long)data_page1, 2);
1087 			data_page1_pte &= PAGE_MASK;
1088 			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
1089 			set_ept_pte(pml4, data_page1_pte, 2,
1090 				data_page1_pte_pte & (~EPT_PRESENT));
1091 			ept_sync(INVEPT_SINGLE, eptp);
1092 			break;
1093 		// Should not reach here
1094 		default:
1095 			printf("ERROR - unexpected stage, %d.\n",
1096 			       vmx_get_test_stage());
1097 			print_vmexit_info();
1098 			return VMX_TEST_VMEXIT;
1099 		}
1100 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1101 		return VMX_TEST_RESUME;
1102 	case VMX_EPT_MISCONFIG:
1103 		switch (vmx_get_test_stage()) {
1104 		case 1:
1105 		case 2:
1106 			vmx_inc_test_stage();
1107 			install_ept(pml4, (unsigned long)data_page1,
1108  				(unsigned long)data_page1,
1109  				EPT_RA | EPT_WA | EPT_EA);
1110 			ept_sync(INVEPT_SINGLE, eptp);
1111 			break;
1112 		// Should not reach here
1113 		default:
1114 			printf("ERROR - unexpected stage, %d.\n",
1115 			       vmx_get_test_stage());
1116 			print_vmexit_info();
1117 			return VMX_TEST_VMEXIT;
1118 		}
1119 		return VMX_TEST_RESUME;
1120 	case VMX_EPT_VIOLATION:
1121 		switch(vmx_get_test_stage()) {
1122 		case 3:
1123 			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
1124 					EPT_VLT_PADDR))
1125 				vmx_inc_test_stage();
1126 			set_ept_pte(pml4, (unsigned long)data_page1,
1127 				1, data_page1_pte | (EPT_PRESENT));
1128 			ept_sync(INVEPT_SINGLE, eptp);
1129 			break;
1130 		case 4:
1131 			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
1132 				vmx_inc_test_stage();
1133 			set_ept_pte(pml4, data_page1_pte, 2,
1134 				data_page1_pte_pte | (EPT_PRESENT));
1135 			ept_sync(INVEPT_SINGLE, eptp);
1136 			break;
1137 		default:
1138 			// Should not reach here
1139 			printf("ERROR : unexpected stage, %d\n",
1140 			       vmx_get_test_stage());
1141 			print_vmexit_info();
1142 			return VMX_TEST_VMEXIT;
1143 		}
1144 		return VMX_TEST_RESUME;
1145 	default:
1146 		printf("Unknown exit reason, %d\n", reason);
1147 		print_vmexit_info();
1148 	}
1149 	return VMX_TEST_VMEXIT;
1150 }
1151 
/* IDT vector used for the LAPIC timer interrupt in the tests below. */
#define TIMER_VECTOR	222

/* Set by timer_isr, polled/cleared by interrupt_main.  volatile: written
 * from interrupt context while the main loop spins on it. */
static volatile bool timer_fired;
1155 
1156 static void timer_isr(isr_regs_t *regs)
1157 {
1158 	timer_fired = true;
1159 	apic_write(APIC_EOI, 0);
1160 }
1161 
1162 static int interrupt_init(struct vmcs *vmcs)
1163 {
1164 	msr_bmp_init();
1165 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1166 	handle_irq(TIMER_VECTOR, timer_isr);
1167 	return VMX_TEST_START;
1168 }
1169 
static void interrupt_main(void)
{
	long long start, loops;

	/* Guest side of the interrupt test.  Each vmcall lets the exit
	 * handler reconfigure PIN_EXTINT / activity state for the next
	 * phase (see interrupt_exit_handler, keyed on the stage). */
	vmx_set_test_stage(0);

	/* Program the LAPIC timer to deliver TIMER_VECTOR. */
	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	/* Phase 0: PIN_EXTINT is clear (interrupt_init), so the timer
	 * interrupt goes directly to the guest's timer_isr. */
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	/* Handler sets PIN_EXTINT: interrupts now cause VM exits. */
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	/* Briefly enable interrupts so anything pending drains before
	 * disarming the timer for the next phase. */
	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	/* Direct interrupt must wake the guest from hlt. */
	asm volatile ("sti; hlt");

	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	/* Intercepted interrupt must still end the guest's hlt. */
	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Activity-state phases: the handler puts the guest into ACTV_HLT
	 * on this vmcall; the timer interrupt must bring it back. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	/* Handler advances RIP past this vmcall once the interrupt hits. */
	vmcall();

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Phase 7: handler also sets EXI_INTA (acknowledge interrupt on
	 * exit), so the L1 exit handler dispatches the vector itself. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmx_set_test_stage(7);
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("running a guest with interrupt acknowledgement set", timer_fired);
}
1256 
1257 static int interrupt_exit_handler(void)
1258 {
1259 	u64 guest_rip = vmcs_read(GUEST_RIP);
1260 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
1261 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1262 
1263 	switch (reason) {
1264 	case VMX_VMCALL:
1265 		switch (vmx_get_test_stage()) {
1266 		case 0:
1267 		case 2:
1268 		case 5:
1269 			vmcs_write(PIN_CONTROLS,
1270 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1271 			break;
1272 		case 7:
1273 			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
1274 			vmcs_write(PIN_CONTROLS,
1275 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1276 			break;
1277 		case 1:
1278 		case 3:
1279 			vmcs_write(PIN_CONTROLS,
1280 				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1281 			break;
1282 		case 4:
1283 		case 6:
1284 			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
1285 			break;
1286 		}
1287 		vmx_inc_test_stage();
1288 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1289 		return VMX_TEST_RESUME;
1290 	case VMX_EXTINT:
1291 		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
1292 			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
1293 			handle_external_interrupt(vector);
1294 		} else {
1295 			irq_enable();
1296 			asm volatile ("nop");
1297 			irq_disable();
1298 		}
1299 		if (vmx_get_test_stage() >= 2) {
1300 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
1301 			vmcs_write(GUEST_RIP, guest_rip + insn_len);
1302 		}
1303 		return VMX_TEST_RESUME;
1304 	default:
1305 		printf("Unknown exit reason, %d\n", reason);
1306 		print_vmexit_info();
1307 	}
1308 
1309 	return VMX_TEST_VMEXIT;
1310 }
1311 
1312 static int dbgctls_init(struct vmcs *vmcs)
1313 {
1314 	u64 dr7 = 0x402;
1315 	u64 zero = 0;
1316 
1317 	msr_bmp_init();
1318 	asm volatile(
1319 		"mov %0,%%dr0\n\t"
1320 		"mov %0,%%dr1\n\t"
1321 		"mov %0,%%dr2\n\t"
1322 		"mov %1,%%dr7\n\t"
1323 		: : "r" (zero), "r" (dr7));
1324 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
1325 	vmcs_write(GUEST_DR7, 0x404);
1326 	vmcs_write(GUEST_DEBUGCTL, 0x2);
1327 
1328 	vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
1329 	vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_SAVE_DBGCTLS);
1330 
1331 	return VMX_TEST_START;
1332 }
1333 
static void dbgctls_main(void)
{
	u64 dr7, debugctl;

	/* dbgctls_init set GUEST_DR7=0x404 / GUEST_DEBUGCTL=0x2 with
	 * ENT_LOAD_DBGCTLS on; verify they were loaded on VM entry. */
	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	report("Load debug controls", dr7 == 0x404 /* && debugctl == 0x2 */);

	/* Dirty the debug state so the exit handler (stage 0) can check that
	 * EXI_SAVE_DBGCTLS captured it back into the guest fields. */
	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(0);
	vmcall();
	report("Save debug controls", vmx_get_test_stage() == 1);

	if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS ||
	    ctrl_exit_rev.set & EXI_SAVE_DBGCTLS) {
		/* The controls can't be cleared on this CPU; the remaining
		 * sub-tests don't apply. */
		printf("\tDebug controls are always loaded/saved\n");
		return;
	}
	/* Stage 2: handler clears ENT_LOAD_DBGCTLS/EXI_SAVE_DBGCTLS and sets
	 * host DR7=0x402/DEBUGCTL=0x1; the guest should now see those. */
	vmx_set_test_stage(2);
	vmcall();

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	report("Guest=host debug controls", dr7 == 0x402 /* && debugctl == 0x1 */);

	/* Dirty the state again; with EXI_SAVE_DBGCTLS off it must NOT be
	 * written back to the guest fields on exit (checked at stage 3). */
	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(3);
	vmcall();
	report("Don't save debug controls", vmx_get_test_stage() == 4);
}
1372 
static int dbgctls_exit_handler(void)
{
	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
	u32 insn_len = vmcs_read(EXI_INST_LEN);
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u64 dr7, debugctl;

	/* Snapshot host debug state as observed right after the exit. */
	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			/* Exit must leave host defaults (DR7=0x400,
			 * DEBUGCTL=0) and, with EXI_SAVE_DBGCTLS set, have
			 * saved the guest's dirtied DR7 (0x408). */
			if (dr7 == 0x400 && debugctl == 0 &&
			    vmcs_read(GUEST_DR7) == 0x408 /* &&
			    Commented out: KVM does not support DEBUGCTL so far
			    vmcs_read(GUEST_DEBUGCTL) == 0x3 */)
				vmx_inc_test_stage();
			break;
		case 2:
			/* Install distinguishable host values, refresh the
			 * guest fields, then disable the load/save controls
			 * for the second half of the test. */
			dr7 = 0x402;
			asm volatile("mov %0,%%dr7" : : "r" (dr7));
			wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
			vmcs_write(GUEST_DR7, 0x404);
			vmcs_write(GUEST_DEBUGCTL, 0x2);

			vmcs_write(ENT_CONTROLS,
				vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS);
			vmcs_write(EXI_CONTROLS,
				vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS);
			break;
		case 3:
			/* With EXI_SAVE_DBGCTLS off, GUEST_DR7 must still be
			 * the 0x404 written above, not the guest's 0x408. */
			if (dr7 == 0x400 && debugctl == 0 &&
			    vmcs_read(GUEST_DR7) == 0x404 /* &&
			    Commented out: KVM does not support DEBUGCTL so far
			    vmcs_read(GUEST_DEBUGCTL) == 0x2 */)
				vmx_inc_test_stage();
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		/* NOTE(review): reason is unsigned; %u would be the matching
		 * specifier (value is < 256 here, so this is benign). */
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1421 
/* Entry format of the VM-entry/VM-exit MSR-load/store areas: a 32-bit MSR
 * index, 32 reserved bits, then the 64-bit MSR value.  packed: the hardware
 * consumes this layout directly. */
struct vmx_msr_entry {
	u32 index;
	u32 reserved;
	u64 value;
} __attribute__((packed));

/* Sentinel value shuffled through the MSR switch lists. */
#define MSR_MAGIC 0x31415926
/* One page each; allocated in msr_switch_init, filled in by the guest and
 * inspected by the exit handler. */
struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load;
1430 
1431 static int msr_switch_init(struct vmcs *vmcs)
1432 {
1433 	msr_bmp_init();
1434 	exit_msr_store = alloc_page();
1435 	exit_msr_load = alloc_page();
1436 	entry_msr_load = alloc_page();
1437 	memset(exit_msr_store, 0, PAGE_SIZE);
1438 	memset(exit_msr_load, 0, PAGE_SIZE);
1439 	memset(entry_msr_load, 0, PAGE_SIZE);
1440 	entry_msr_load[0].index = MSR_KERNEL_GS_BASE;
1441 	entry_msr_load[0].value = MSR_MAGIC;
1442 
1443 	vmx_set_test_stage(1);
1444 	vmcs_write(ENT_MSR_LD_CNT, 1);
1445 	vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load);
1446 	vmcs_write(EXI_MSR_ST_CNT, 1);
1447 	vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store);
1448 	vmcs_write(EXI_MSR_LD_CNT, 1);
1449 	vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load);
1450 	return VMX_TEST_START;
1451 }
1452 
1453 static void msr_switch_main()
1454 {
1455 	if (vmx_get_test_stage() == 1) {
1456 		report("VM entry MSR load",
1457 			rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC);
1458 		vmx_set_test_stage(2);
1459 		wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1);
1460 		exit_msr_store[0].index = MSR_KERNEL_GS_BASE;
1461 		exit_msr_load[0].index = MSR_KERNEL_GS_BASE;
1462 		exit_msr_load[0].value = MSR_MAGIC + 2;
1463 	}
1464 	vmcall();
1465 }
1466 
static int msr_switch_exit_handler()
{
	ulong reason;

	/* Note: full exit reason, unmasked — bit 31 (VM-entry failure) is
	 * significant for the FS_BASE sub-test below. */
	reason = vmcs_read(EXI_REASON);
	switch (reason) {
	case 0x80000000 | VMX_FAIL_MSR:
		/* Entry failed while loading the MSR list.  Stage 3 provoked
		 * this by putting MSR_FS_BASE (not allowed in the entry
		 * MSR-load area) into the list. */
		if (vmx_get_test_stage() == 3) {
			/* Exit qualification holds the 1-based index of the
			 * offending MSR entry. */
			report("VM entry MSR load: try to load FS_BASE",
				vmcs_read(EXI_QUALIFICATION) == 1);
			return VMX_TEST_VMEXIT;
		}
		break;
	case VMX_VMCALL:
		if (vmx_get_test_stage() == 2) {
			/* Guest wrote MAGIC+1 before the vmcall: the exit
			 * store list must have captured it, and the exit load
			 * list must have replaced it with MAGIC+2. */
			report("VM exit MSR store",
				exit_msr_store[0].value == MSR_MAGIC + 1);
			report("VM exit MSR load",
				rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC + 2);
			vmx_set_test_stage(3);
			/* Arrange for the next entry to fail (see above). */
			entry_msr_load[0].index = MSR_FS_BASE;
			return VMX_TEST_RESUME;
		}
	}
	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
		__func__, vmx_get_test_stage(), reason);
	return VMX_TEST_EXIT;
}
1495 
/* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ "interrupt", interrupt_init, interrupt_main,
		interrupt_exit_handler, NULL, {0} },
	{ "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler,
		NULL, {0} },
	{ "MSR switch", msr_switch_init, msr_switch_main,
		msr_switch_exit_handler, NULL, {0} },
	/* Table terminator: a NULL name ends the test list. */
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1521