/* xref: /kvm-unit-tests/x86/vmx_tests.c (revision f03b24f5d5885844812b7c32011d1d09a077b4c9) */
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 #include "isr.h"
13 #include "apic.h"
14 
15 u64 ia32_pat;
16 u64 ia32_efer;
17 void *io_bitmap_a, *io_bitmap_b;
18 u16 ioport;
19 
20 unsigned long *pml4;
21 u64 eptp;
22 void *data_page1, *data_page2;
23 
24 static inline void vmcall()
25 {
26 	asm volatile("vmcall");
27 }
28 
/* Guest entry for the basic test: do nothing and return immediately. */
void basic_guest_main()
{
}
32 
/*
 * Any VM-exit during the basic test is unexpected: report failure,
 * dump the exit information and abort the test.
 */
int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}
39 
40 void vmenter_main()
41 {
42 	u64 rax;
43 	u64 rsp, resume_rsp;
44 
45 	report("test vmlaunch", 1);
46 
47 	asm volatile(
48 		"mov %%rsp, %0\n\t"
49 		"mov %3, %%rax\n\t"
50 		"vmcall\n\t"
51 		"mov %%rax, %1\n\t"
52 		"mov %%rsp, %2\n\t"
53 		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
54 		: "g"(0xABCD));
55 	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
56 }
57 
58 int vmenter_exit_handler()
59 {
60 	u64 guest_rip;
61 	ulong reason;
62 
63 	guest_rip = vmcs_read(GUEST_RIP);
64 	reason = vmcs_read(EXI_REASON) & 0xff;
65 	switch (reason) {
66 	case VMX_VMCALL:
67 		if (regs.rax != 0xABCD) {
68 			report("test vmresume", 0);
69 			return VMX_TEST_VMEXIT;
70 		}
71 		regs.rax = 0xFFFF;
72 		vmcs_write(GUEST_RIP, guest_rip + 3);
73 		return VMX_TEST_RESUME;
74 	default:
75 		report("test vmresume", 0);
76 		print_vmexit_info();
77 	}
78 	return VMX_TEST_VMEXIT;
79 }
80 
81 u32 preempt_scale;
82 volatile unsigned long long tsc_val;
83 volatile u32 preempt_val;
84 u64 saved_rip;
85 
86 int preemption_timer_init()
87 {
88 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
89 		printf("\tPreemption timer is not supported\n");
90 		return VMX_TEST_EXIT;
91 	}
92 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
93 	preempt_val = 10000000;
94 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
95 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
96 
97 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
98 		printf("\tSave preemption value is not supported\n");
99 
100 	return VMX_TEST_START;
101 }
102 
/*
 * Guest side of the preemption-timer test.  Stages are advanced
 * jointly with preemption_timer_exit_handler():
 *   0/1: timer value kept/saved across VM-exits (if supported),
 *   1->2: busy-wait for the timer to fire,
 *   3: timer fires while halted,
 *   4: zero-value timer fires immediately after the VMCALL,
 *   5: reached only if the zero-value timer never fired.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		/* Stage 0: exit without EXI_SAVE_PREEMPT; the handler
		 * checks the timer value is untouched. */
		vmx_set_test_stage(0);
		vmcall();
		/* Stage 1: exit again with EXI_SAVE_PREEMPT set; the
		 * handler checks the saved value decreased. */
		if (vmx_get_test_stage() == 1)
			vmcall();
	}
	/* Busy-wait for the timer exit; give up via VMCALL (stage 2)
	 * after roughly 10x the programmed time. */
	vmx_set_test_stage(1);
	while (vmx_get_test_stage() == 1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			vmx_set_test_stage(2);
			vmcall();
		}
	}
	/* Stage 3: the timer must also fire while we are halted. */
	tsc_val = rdtsc();
	asm volatile ("hlt");
	/* The handler arms a zero-value timer here and records the RIP
	 * right after this VMCALL as the expected exit point. */
	vmcall();
	/* Stage 5: only reached if the zero-value timer did not fire. */
	vmx_set_test_stage(5);
	vmcall();
}
126 
/*
 * Host side of the preemption-timer test.
 *
 * VMX_PREEMPT exits:
 *   stage 1/2: timer fired during the guest's busy-wait,
 *   stage 3:   timer fired while the guest was halted,
 *   stage 4:   zero-value timer fired at the recorded RIP.
 * VMX_VMCALL exits:
 *   stage 0: verify the timer value was kept, enable EXI_SAVE_PREEMPT,
 *   stage 1: verify the saved value decreased,
 *   stage 2: busy-wait timed out -> failure,
 *   stage 3: guest left HLT without a timer exit -> failure,
 *   stage 3/4: arm a zero-value timer and record the expected RIP,
 *   stage 5: zero-value timer never fired -> failure.
 */
int preemption_timer_exit_handler()
{
	bool guest_halted;
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			/* Enough scaled TSC time must have elapsed for the
			 * programmed timer value. */
			report("busy-wait for preemption timer",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			guest_halted =
				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
			report("preemption timer during hlt",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val && guest_halted);
			vmx_set_test_stage(4);
			/* Disable the timer and take the guest out of HLT. */
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
			return VMX_TEST_RESUME;
		case 4:
			/* A zero-value timer must exit before the guest runs
			 * any instruction past the VMCALL. */
			report("preemption timer with 0 value",
			       saved_rip == guest_rip);
			break;
		default:
			printf("Invalid stage.\n");
			print_vmexit_info();
			break;
		}
		break;
	case VMX_VMCALL:
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		switch (vmx_get_test_stage()) {
		case 0:
			report("Keep preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
			vmx_set_test_stage(1);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			/* Turn on saving the timer value on VM-exit. */
			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
				EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
			vmcs_write(EXI_CONTROLS, ctrl_exit);
			return VMX_TEST_RESUME;
		case 1:
			report("Save preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
			return VMX_TEST_RESUME;
		case 2:
			/* Guest gave up busy-waiting: timer never fired. */
			report("busy-wait for preemption timer", 0);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			/* Guest resumed from HLT without a timer exit. */
			report("preemption timer during hlt", 0);
			vmx_set_test_stage(4);
			/* fall through */
		case 4:
			/* Arm a zero-value timer and remember where the
			 * resulting preemption exit must occur. */
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
			vmcs_write(PREEMPT_TIMER_VALUE, 0);
			saved_rip = guest_rip + insn_len;
			return VMX_TEST_RESUME;
		case 5:
			report("preemption timer with 0 value (vmcall stage 5)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	/* Always leave with the preemption timer disabled. */
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
219 
220 void msr_bmp_init()
221 {
222 	void *msr_bitmap;
223 	u32 ctrl_cpu0;
224 
225 	msr_bitmap = alloc_page();
226 	memset(msr_bitmap, 0x0, PAGE_SIZE);
227 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
228 	ctrl_cpu0 |= CPU_MSR_BITMAP;
229 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
230 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
231 }
232 
233 static int test_ctrl_pat_init()
234 {
235 	u64 ctrl_ent;
236 	u64 ctrl_exi;
237 
238 	msr_bmp_init();
239 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) &&
240 	    !(ctrl_exit_rev.clr & EXI_LOAD_PAT) &&
241 	    !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
242 		printf("\tSave/load PAT is not supported\n");
243 		return 1;
244 	}
245 
246 	ctrl_ent = vmcs_read(ENT_CONTROLS);
247 	ctrl_exi = vmcs_read(EXI_CONTROLS);
248 	ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
249 	ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT);
250 	vmcs_write(ENT_CONTROLS, ctrl_ent);
251 	vmcs_write(EXI_CONTROLS, ctrl_exi);
252 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
253 	vmcs_write(GUEST_PAT, 0x0);
254 	vmcs_write(HOST_PAT, ia32_pat);
255 	return VMX_TEST_START;
256 }
257 
/*
 * Guest side of the PAT control test.  On entry GUEST_PAT is 0, so if
 * ENT_LOAD_PAT works the guest must read PAT == 0.  It then writes
 * 0x6, VMCALLs (the host checks exit-save/load and restores ia32_pat
 * into GUEST_PAT), and verifies the entry-loaded value after resume.
 */
static void test_ctrl_pat_main()
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		/* GUEST_PAT was set to 0 in test_ctrl_pat_init(). */
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	/* Let the host observe this value via EXI_SAVE_PAT. */
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
		/* The handler wrote ia32_pat into GUEST_PAT before resume. */
		if (guest_ia32_pat != ia32_pat) {
			report("Entry load PAT", 0);
			return;
		}
		report("Entry load PAT", 1);
	}
}
282 
/*
 * Host side of the PAT control test.  On the guest's VMCALL: verify
 * EXI_SAVE_PAT captured the guest's 0x6, verify EXI_LOAD_PAT restored
 * the host's ia32_pat, then load ia32_pat into GUEST_PAT for the
 * entry-load check and skip the 3-byte VMCALL.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			/* Emulate the save so the rest of the test works. */
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		/* Entry-load check: the guest expects ia32_pat on resume. */
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
320 
321 static int test_ctrl_efer_init()
322 {
323 	u64 ctrl_ent;
324 	u64 ctrl_exi;
325 
326 	msr_bmp_init();
327 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
328 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
329 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
330 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
331 	ia32_efer = rdmsr(MSR_EFER);
332 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
333 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
334 	return VMX_TEST_START;
335 }
336 
/*
 * Guest side of the EFER control test.  GUEST_EFER was initialized to
 * ia32_efer ^ EFER_NX, so entry-load must make the guest see that
 * value.  The guest then writes the original ia32_efer, VMCALLs for
 * the host-side checks, and verifies the value entry-loaded on resume.
 */
static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	/* Let the host observe this value via EXI_SAVE_EFER. */
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
		/* The handler set GUEST_EFER to ia32_efer before resume. */
		if (guest_ia32_efer != ia32_efer) {
			report("Entry load EFER", 0);
			return;
		}
		report("Entry load EFER", 1);
	}
}
361 
362 static int test_ctrl_efer_exit_handler()
363 {
364 	u64 guest_rip;
365 	ulong reason;
366 	u64 guest_efer;
367 
368 	guest_rip = vmcs_read(GUEST_RIP);
369 	reason = vmcs_read(EXI_REASON) & 0xff;
370 	switch (reason) {
371 	case VMX_VMCALL:
372 		guest_efer = vmcs_read(GUEST_EFER);
373 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
374 			printf("\tEXI_SAVE_EFER is not supported\n");
375 			vmcs_write(GUEST_EFER, ia32_efer);
376 		} else {
377 			if (guest_efer == ia32_efer)
378 				report("Exit save EFER", 1);
379 			else
380 				report("Exit save EFER", 0);
381 		}
382 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
383 			printf("\tEXI_LOAD_EFER is not supported\n");
384 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
385 		} else {
386 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
387 				report("Exit load EFER", 1);
388 			else
389 				report("Exit load EFER", 0);
390 		}
391 		vmcs_write(GUEST_PAT, ia32_efer);
392 		vmcs_write(GUEST_RIP, guest_rip + 3);
393 		return VMX_TEST_RESUME;
394 	default:
395 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
396 		break;
397 	}
398 	return VMX_TEST_VMEXIT;
399 }
400 
401 u32 guest_cr0, guest_cr4;
402 
403 static void cr_shadowing_main()
404 {
405 	u32 cr0, cr4, tmp;
406 
407 	// Test read through
408 	vmx_set_test_stage(0);
409 	guest_cr0 = read_cr0();
410 	if (vmx_get_test_stage() == 1)
411 		report("Read through CR0", 0);
412 	else
413 		vmcall();
414 	vmx_set_test_stage(1);
415 	guest_cr4 = read_cr4();
416 	if (vmx_get_test_stage() == 2)
417 		report("Read through CR4", 0);
418 	else
419 		vmcall();
420 	// Test write through
421 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
422 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
423 	vmx_set_test_stage(2);
424 	write_cr0(guest_cr0);
425 	if (vmx_get_test_stage() == 3)
426 		report("Write throuth CR0", 0);
427 	else
428 		vmcall();
429 	vmx_set_test_stage(3);
430 	write_cr4(guest_cr4);
431 	if (vmx_get_test_stage() == 4)
432 		report("Write through CR4", 0);
433 	else
434 		vmcall();
435 	// Test read shadow
436 	vmx_set_test_stage(4);
437 	vmcall();
438 	cr0 = read_cr0();
439 	if (vmx_get_test_stage() != 5) {
440 		if (cr0 == guest_cr0)
441 			report("Read shadowing CR0", 1);
442 		else
443 			report("Read shadowing CR0", 0);
444 	}
445 	vmx_set_test_stage(5);
446 	cr4 = read_cr4();
447 	if (vmx_get_test_stage() != 6) {
448 		if (cr4 == guest_cr4)
449 			report("Read shadowing CR4", 1);
450 		else
451 			report("Read shadowing CR4", 0);
452 	}
453 	// Test write shadow (same value with shadow)
454 	vmx_set_test_stage(6);
455 	write_cr0(guest_cr0);
456 	if (vmx_get_test_stage() == 7)
457 		report("Write shadowing CR0 (same value with shadow)", 0);
458 	else
459 		vmcall();
460 	vmx_set_test_stage(7);
461 	write_cr4(guest_cr4);
462 	if (vmx_get_test_stage() == 8)
463 		report("Write shadowing CR4 (same value with shadow)", 0);
464 	else
465 		vmcall();
466 	// Test write shadow (different value)
467 	vmx_set_test_stage(8);
468 	tmp = guest_cr0 ^ X86_CR0_TS;
469 	asm volatile("mov %0, %%rsi\n\t"
470 		"mov %%rsi, %%cr0\n\t"
471 		::"m"(tmp)
472 		:"rsi", "memory", "cc");
473 	if (vmx_get_test_stage() != 9)
474 		report("Write shadowing different X86_CR0_TS", 0);
475 	else
476 		report("Write shadowing different X86_CR0_TS", 1);
477 	vmx_set_test_stage(9);
478 	tmp = guest_cr0 ^ X86_CR0_MP;
479 	asm volatile("mov %0, %%rsi\n\t"
480 		"mov %%rsi, %%cr0\n\t"
481 		::"m"(tmp)
482 		:"rsi", "memory", "cc");
483 	if (vmx_get_test_stage() != 10)
484 		report("Write shadowing different X86_CR0_MP", 0);
485 	else
486 		report("Write shadowing different X86_CR0_MP", 1);
487 	vmx_set_test_stage(10);
488 	tmp = guest_cr4 ^ X86_CR4_TSD;
489 	asm volatile("mov %0, %%rsi\n\t"
490 		"mov %%rsi, %%cr4\n\t"
491 		::"m"(tmp)
492 		:"rsi", "memory", "cc");
493 	if (vmx_get_test_stage() != 11)
494 		report("Write shadowing different X86_CR4_TSD", 0);
495 	else
496 		report("Write shadowing different X86_CR4_TSD", 1);
497 	vmx_set_test_stage(11);
498 	tmp = guest_cr4 ^ X86_CR4_DE;
499 	asm volatile("mov %0, %%rsi\n\t"
500 		"mov %%rsi, %%cr4\n\t"
501 		::"m"(tmp)
502 		:"rsi", "memory", "cc");
503 	if (vmx_get_test_stage() != 12)
504 		report("Write shadowing different X86_CR4_DE", 0);
505 	else
506 		report("Write shadowing different X86_CR4_DE", 1);
507 }
508 
/*
 * Host side of the CR0/CR4 shadowing test.
 *
 * VMCALL exits confirm the previous stage's access really passed
 * through (GUEST_CR0/CR4 match what the guest read or wrote).  At
 * stage 4 the handler installs masks for TS/MP (CR0) and TSD/DE (CR4)
 * and a shadow differing from the real register in those bits, so the
 * guest subsequently reads the shadow.  VMX_CR exits are expected only
 * at stages 8-11, where a write differing from the shadow in a masked
 * bit must trap; the exit qualification identifies the access
 * (0x600 = mov to CR0 from ESI, 0x604 = mov to CR4 from ESI).
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Install masks and shadows; guest_cr0/guest_cr4 now
			 * hold the values the guest should observe through
			 * the shadow (differing from the real registers in
			 * the masked bits). */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (vmx_get_test_stage()) {
		case 4:
			/* A read at stage 4/5 must hit the shadow, not exit. */
			report("Read shadowing CR0", 0);
			vmx_inc_test_stage();
			break;
		case 5:
			report("Read shadowing CR4", 0);
			vmx_inc_test_stage();
			break;
		case 6:
			/* A write equal to the shadow must not exit. */
			report("Write shadowing CR0 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				vmx_inc_test_stage();
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
621 
622 static int iobmp_init()
623 {
624 	u32 ctrl_cpu0;
625 
626 	io_bitmap_a = alloc_page();
627 	io_bitmap_a = alloc_page();
628 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
629 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
630 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
631 	ctrl_cpu0 |= CPU_IO_BITMAP;
632 	ctrl_cpu0 &= (~CPU_IO);
633 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
634 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
635 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
636 	return VMX_TEST_START;
637 }
638 
/*
 * Guest side of the I/O-bitmap test.  Bitmap A covers ports
 * 0x0000-0x7FFF, bitmap B covers 0x8000-0xFFFF.  Stages are advanced
 * by iobmp_exit_handler() on the expected VMX_IO exits.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	vmx_set_test_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	/* Both bitmaps are zero: neither access may exit. */
	if (vmx_get_test_stage() != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	vmx_set_test_stage(2);
	inb(0x0);
	if (vmx_get_test_stage() != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	vmx_set_test_stage(3);
	outw(0x0, 0x0);
	if (vmx_get_test_stage() != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	vmx_set_test_stage(4);
	inl(0x0);
	if (vmx_get_test_stage() != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	vmx_set_test_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (vmx_get_test_stage() == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	vmx_set_test_stage(6);
	/* Port 0x9000 maps to bit (0x9000 - 0x8000) = 0x1000 in bitmap B. */
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (vmx_get_test_stage() == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	vmx_set_test_stage(7);
	/* 4-byte access at 0x4FFF spans into the trapped 0x5000 bit. */
	inl(0x4FFF);
	if (vmx_get_test_stage() == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	vmx_set_test_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	/* 4-byte access at 0xFFFF wraps past the end of the port space. */
	inl(0xFFFF);
	if (vmx_get_test_stage() == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);
	vmx_set_test_stage(9);
	/* Handler enables both CPU_IO and CPU_IO_BITMAP: the bitmap (all
	 * zero) takes precedence, so the outb must not exit. */
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting",
	       vmx_get_test_stage() == 9);
	vmx_set_test_stage(10);
	/* Handler clears CPU_IO_BITMAP and sets CPU_IO: must exit now. */
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting",
	       vmx_get_test_stage() == 11);
}
709 
/*
 * Host side of the I/O-bitmap test.  VMX_IO exits validate the exit
 * qualification (access size, direction, port) for each stage and
 * advance the stage.  VMX_VMCALL exits at stages 9/10 flip the
 * CPU_IO / CPU_IO_BITMAP controls for the unconditional-exiting
 * checks.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (vmx_get_test_stage()) {
		case 0:
		case 1:
			vmx_inc_test_stage();
			break;
		case 2:
			/* Expect a 1-byte IN. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			vmx_inc_test_stage();
			break;
		case 3:
			/* Expect a 2-byte OUT. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			vmx_inc_test_stage();
			break;
		case 4:
			/* Expect a 4-byte access. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			vmx_inc_test_stage();
			break;
		case 5:
			/* Port checks for the low/high/partial/overrun cases. */
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				vmx_inc_test_stage();
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				vmx_inc_test_stage();
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				vmx_inc_test_stage();
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				vmx_inc_test_stage();
			break;
		case 9:
		case 10:
			/* Unconditional-exiting stages: drop CPU_IO so the
			 * guest can make progress, and record the exit. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 9:
			/* Bitmap should take precedence over CPU_IO. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			/* Only CPU_IO: the next access must exit. */
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
815 
/* Which execution-control register gates an instruction intercept. */
#define INSN_CPU0		0	/* primary processor-based controls */
#define INSN_CPU1		1	/* secondary processor-based controls */
#define INSN_ALWAYS_TRAP	2	/* unconditionally intercepted */

/* Bits for insn_table.test_field: which VM-exit fields to verify
 * beyond the exit reason. */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)

/*
 * One-instruction stubs executed by the intercept test.  Note that
 * "mov cr3,%rax" in insn_cr3_load (no '%' on cr3) is a memory load of
 * the file-scope variable 'cr3' (saved by insn_intercept_init), whose
 * value is then written to the real CR3.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t"
	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
#ifdef __x86_64__
	"insn_cr8_load: mov %rax,%cr8;ret\n\t"
	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
#endif
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_cr3_load();
extern void insn_cr3_store();
#ifdef __x86_64__
extern void insn_cr8_load();
extern void insn_cr8_store();
#endif
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();
857 
/* Index of the instruction currently under test (shared between the
 * guest main loop and the exit handler). */
u32 cur_insn;
/* CR3 value saved at init time; loaded back by the insn_cr3_load stub. */
u64 cr3;

/* One row per intercepted instruction under test. */
struct insn_table {
	const char *name;
	u32 flag;		/* execution-control bit enabling the intercept */
	void (*insn_func)();	/* stub that executes the instruction */
	u32 type;		/* INSN_CPU0 / INSN_CPU1 / INSN_ALWAYS_TRAP */
	u32 reason;		/* expected VM-exit reason */
	ulong exit_qual;	/* expected exit qualification (optional) */
	u32 insn_info;		/* expected instruction info (optional) */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which field need to be tested, reason is always tested
	u32 test_field;
};

/*
 * Add more test cases of instruction intercept here. Elements in this
 * table is:
 *	name/control flag/insn function/type/exit reason/exit qulification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
		FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
		FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
		FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
		FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
910 
911 static int insn_intercept_init()
912 {
913 	u32 ctrl_cpu;
914 
915 	ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY;
916 	ctrl_cpu &= ctrl_cpu_rev[0].clr;
917 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu);
918 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set);
919 	cr3 = read_cr3();
920 	return VMX_TEST_START;
921 }
922 
/*
 * Guest side of the instruction-intercept test.  For each table row:
 * at even stage (cur_insn * 2) the intercept is disabled, so executing
 * the instruction must NOT change the stage; the first VMCALL makes
 * the handler enable the intercept, after which executing the
 * instruction must exit and the handler advances the stage to
 * cur_insn * 2 + 1; the second VMCALL (at the odd stage) disables the
 * intercept again.
 */
static void insn_intercept_main()
{
	char msg[80];

	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
		vmx_set_test_stage(cur_insn * 2);
		/* Skip instructions whose intercept cannot be enabled. */
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
			printf("\tCPU_CTRL%d.CPU_%s is not supported.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);
			continue;
		}

		/* If the control is not forced on, the instruction must
		 * execute without a VM-exit while the intercept is off. */
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) {
			/* skip hlt, it stalls the guest and is tested below */
			if (insn_table[cur_insn].insn_func != insn_hlt)
				insn_table[cur_insn].insn_func();
			snprintf(msg, sizeof(msg), "execute %s",
				 insn_table[cur_insn].name);
			report(msg, vmx_get_test_stage() == cur_insn * 2);
		} else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP)
			printf("\tCPU_CTRL%d.CPU_%s always traps.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);

		/* Even stage: handler turns the intercept on. */
		vmcall();

		insn_table[cur_insn].insn_func();
		snprintf(msg, sizeof(msg), "intercept %s",
			 insn_table[cur_insn].name);
		report(msg, vmx_get_test_stage() == cur_insn * 2 + 1);

		/* Odd stage: handler turns the intercept back off. */
		vmx_set_test_stage(cur_insn * 2 + 1);
		vmcall();
	}
}
965 
/*
 * Host side of the instruction-intercept test.  VMCALL exits toggle
 * the current instruction's intercept flag (on at even stages, off at
 * odd stages).  Every other exit is matched against the table row:
 * reason always, plus exit qualification / instruction info when the
 * row's test_field requests it; a full match advances the stage.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);

	if (reason == VMX_VMCALL) {
		u32 val = 0;

		if (insn_table[cur_insn].type == INSN_CPU0)
			val = vmcs_read(CPU_EXEC_CTRL0);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			val = vmcs_read(CPU_EXEC_CTRL1);

		/* Odd stage -> disable the intercept; even -> enable it. */
		if (vmx_get_test_stage() & 1)
			val &= ~insn_table[cur_insn].flag;
		else
			val |= insn_table[cur_insn].flag;

		if (insn_table[cur_insn].type == INSN_CPU0)
			vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set);
	} else {
		/* Stage must still be even (no spurious earlier exit). */
		pass = (cur_insn * 2 == vmx_get_test_stage()) &&
			insn_table[cur_insn].reason == reason;
		if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL &&
		    insn_table[cur_insn].exit_qual != exit_qual)
			pass = false;
		if (insn_table[cur_insn].test_field & FIELD_INSN_INFO &&
		    insn_table[cur_insn].insn_info != insn_info)
			pass = false;
		if (pass)
			vmx_inc_test_stage();
	}
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
1013 
1014 
/*
 * Build an identity-mapped 4-level EPT hierarchy covering all RAM (at
 * least the first 4 GiB) with read/write/execute access, and load it
 * into the VMCS EPTP.  Returns non-zero when a required EPT
 * capability is missing.
 */
static int setup_ept()
{
	int support_2m;
	unsigned long end_of_memory;

	/* Need at least one supported paging-structure memory type. */
	if (!(ept_vpid.val & EPT_CAP_UC) &&
			!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory type "
				"UC&WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	/* Page-walk length field encodes (levels - 1): 3 => 4 levels. */
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	vmcs_write(EPTP, eptp);
	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	/* Identity map [0, end_of_memory) with RWX permissions. */
	setup_ept_range(pml4, 0, end_of_memory, 0, support_2m,
			EPT_WA | EPT_RA | EPT_EA);
	return 0;
}
1047 
1048 static int apic_version;
1049 
/*
 * Enable EPT (via the secondary execution controls), build the
 * identity map, then prepare two data pages: both are filled with
 * magic values and data_page1's guest-physical address is remapped to
 * data_page2's backing page, so guest reads of page1 actually see
 * page2.  Also snapshots the APIC version register for later use.
 */
static int ept_init()
{
	unsigned long base_addr1, base_addr2;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	/* Re-map the two 2M regions with 4K granularity so a single
	 * page's mapping can be redirected below. */
	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
	setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
			EPT_WA | EPT_RA | EPT_EA);
	/* Point data_page1's EPT entry at data_page2's backing frame. */
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);

	/* Local APIC version register (guest-physical 0xfee00030). */
	apic_version = *((u32 *)0xfee00030UL);
	return VMX_TEST_START;
}
1089 
1090 static void ept_main()
1091 {
1092 	vmx_set_test_stage(0);
1093 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
1094 			*((u32 *)data_page1) != MAGIC_VAL_1)
1095 		report("EPT basic framework - read", 0);
1096 	else {
1097 		*((u32 *)data_page2) = MAGIC_VAL_3;
1098 		vmcall();
1099 		if (vmx_get_test_stage() == 1) {
1100 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1101 					*((u32 *)data_page2) == MAGIC_VAL_2)
1102 				report("EPT basic framework", 1);
1103 			else
1104 				report("EPT basic framework - remap", 1);
1105 		}
1106 	}
1107 	// Test EPT Misconfigurations
1108 	vmx_set_test_stage(1);
1109 	vmcall();
1110 	*((u32 *)data_page1) = MAGIC_VAL_1;
1111 	if (vmx_get_test_stage() != 2) {
1112 		report("EPT misconfigurations", 0);
1113 		goto t1;
1114 	}
1115 	vmx_set_test_stage(2);
1116 	vmcall();
1117 	*((u32 *)data_page1) = MAGIC_VAL_1;
1118 	if (vmx_get_test_stage() != 3) {
1119 		report("EPT misconfigurations", 0);
1120 		goto t1;
1121 	}
1122 	report("EPT misconfigurations", 1);
1123 t1:
1124 	// Test EPT violation
1125 	vmx_set_test_stage(3);
1126 	vmcall();
1127 	*((u32 *)data_page1) = MAGIC_VAL_1;
1128 	if (vmx_get_test_stage() == 4)
1129 		report("EPT violation - page permission", 1);
1130 	else
1131 		report("EPT violation - page permission", 0);
1132 	// Violation caused by EPT paging structure
1133 	vmx_set_test_stage(4);
1134 	vmcall();
1135 	*((u32 *)data_page1) = MAGIC_VAL_2;
1136 	if (vmx_get_test_stage() == 5)
1137 		report("EPT violation - paging structure", 1);
1138 	else
1139 		report("EPT violation - paging structure", 0);
1140 
1141 	// Test EPT access to L1 MMIO
1142 	vmx_set_test_stage(6);
1143 	report("EPT - MMIO access", *((u32 *)0xfee00030UL) == apic_version);
1144 }
1145 
1146 static int ept_exit_handler()
1147 {
1148 	u64 guest_rip;
1149 	ulong reason;
1150 	u32 insn_len;
1151 	u32 exit_qual;
1152 	static unsigned long data_page1_pte, data_page1_pte_pte;
1153 
1154 	guest_rip = vmcs_read(GUEST_RIP);
1155 	reason = vmcs_read(EXI_REASON) & 0xff;
1156 	insn_len = vmcs_read(EXI_INST_LEN);
1157 	exit_qual = vmcs_read(EXI_QUALIFICATION);
1158 	switch (reason) {
1159 	case VMX_VMCALL:
1160 		switch (vmx_get_test_stage()) {
1161 		case 0:
1162 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1163 					*((u32 *)data_page2) == MAGIC_VAL_2) {
1164 				vmx_inc_test_stage();
1165 				install_ept(pml4, (unsigned long)data_page2,
1166 						(unsigned long)data_page2,
1167 						EPT_RA | EPT_WA | EPT_EA);
1168 			} else
1169 				report("EPT basic framework - write\n", 0);
1170 			break;
1171 		case 1:
1172 			install_ept(pml4, (unsigned long)data_page1,
1173  				(unsigned long)data_page1, EPT_WA);
1174 			ept_sync(INVEPT_SINGLE, eptp);
1175 			break;
1176 		case 2:
1177 			install_ept(pml4, (unsigned long)data_page1,
1178  				(unsigned long)data_page1,
1179  				EPT_RA | EPT_WA | EPT_EA |
1180  				(2 << EPT_MEM_TYPE_SHIFT));
1181 			ept_sync(INVEPT_SINGLE, eptp);
1182 			break;
1183 		case 3:
1184 			data_page1_pte = get_ept_pte(pml4,
1185 				(unsigned long)data_page1, 1);
1186 			set_ept_pte(pml4, (unsigned long)data_page1,
1187 				1, data_page1_pte & (~EPT_PRESENT));
1188 			ept_sync(INVEPT_SINGLE, eptp);
1189 			break;
1190 		case 4:
1191 			data_page1_pte = get_ept_pte(pml4,
1192 				(unsigned long)data_page1, 2);
1193 			data_page1_pte &= PAGE_MASK;
1194 			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
1195 			set_ept_pte(pml4, data_page1_pte, 2,
1196 				data_page1_pte_pte & (~EPT_PRESENT));
1197 			ept_sync(INVEPT_SINGLE, eptp);
1198 			break;
1199 		// Should not reach here
1200 		default:
1201 			printf("ERROR - unexpected stage, %d.\n",
1202 			       vmx_get_test_stage());
1203 			print_vmexit_info();
1204 			return VMX_TEST_VMEXIT;
1205 		}
1206 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1207 		return VMX_TEST_RESUME;
1208 	case VMX_EPT_MISCONFIG:
1209 		switch (vmx_get_test_stage()) {
1210 		case 1:
1211 		case 2:
1212 			vmx_inc_test_stage();
1213 			install_ept(pml4, (unsigned long)data_page1,
1214  				(unsigned long)data_page1,
1215  				EPT_RA | EPT_WA | EPT_EA);
1216 			ept_sync(INVEPT_SINGLE, eptp);
1217 			break;
1218 		// Should not reach here
1219 		default:
1220 			printf("ERROR - unexpected stage, %d.\n",
1221 			       vmx_get_test_stage());
1222 			print_vmexit_info();
1223 			return VMX_TEST_VMEXIT;
1224 		}
1225 		return VMX_TEST_RESUME;
1226 	case VMX_EPT_VIOLATION:
1227 		switch(vmx_get_test_stage()) {
1228 		case 3:
1229 			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
1230 					EPT_VLT_PADDR))
1231 				vmx_inc_test_stage();
1232 			set_ept_pte(pml4, (unsigned long)data_page1,
1233 				1, data_page1_pte | (EPT_PRESENT));
1234 			ept_sync(INVEPT_SINGLE, eptp);
1235 			break;
1236 		case 4:
1237 			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
1238 				vmx_inc_test_stage();
1239 			set_ept_pte(pml4, data_page1_pte, 2,
1240 				data_page1_pte_pte | (EPT_PRESENT));
1241 			ept_sync(INVEPT_SINGLE, eptp);
1242 			break;
1243 		default:
1244 			// Should not reach here
1245 			printf("ERROR : unexpected stage, %d\n",
1246 			       vmx_get_test_stage());
1247 			print_vmexit_info();
1248 			return VMX_TEST_VMEXIT;
1249 		}
1250 		return VMX_TEST_RESUME;
1251 	default:
1252 		printf("Unknown exit reason, %d\n", reason);
1253 		print_vmexit_info();
1254 	}
1255 	return VMX_TEST_VMEXIT;
1256 }
1257 
/* IDT vector the LAPIC timer is programmed to raise in the interrupt test. */
#define TIMER_VECTOR	222

/* Set by timer_isr() on delivery; polled and cleared by interrupt_main(). */
static volatile bool timer_fired;
1261 
1262 static void timer_isr(isr_regs_t *regs)
1263 {
1264 	timer_fired = true;
1265 	apic_write(APIC_EOI, 0);
1266 }
1267 
1268 static int interrupt_init(struct vmcs *vmcs)
1269 {
1270 	msr_bmp_init();
1271 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1272 	handle_irq(TIMER_VECTOR, timer_isr);
1273 	return VMX_TEST_START;
1274 }
1275 
/*
 * Guest (L2) body for the "interrupt" test.  Drives the LAPIC one-shot
 * timer through several delivery scenarios and reports whether the
 * interrupt was observed.  Each vmcall lands in interrupt_exit_handler(),
 * which advances the test stage and reprograms the pin-based/exit controls
 * (PIN_EXTINT, EXI_INTA, GUEST_ACTV_STATE) for the next scenario.
 */
static void interrupt_main(void)
{
	long long start, loops;

	vmx_set_test_stage(0);

	/* One-shot timer on TIMER_VECTOR; interrupts enabled in the guest. */
	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	/* Interrupt delivered directly to the running guest. */
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	/* vmcall (stage 0): handler turns on PIN_EXTINT — interrupts now
	 * cause vmexits and are injected from L1. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	/* Briefly open an interrupt window to drain any pending vector,
	 * then vmcall (stage 1): handler turns PIN_EXTINT back off. */
	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	/* Direct delivery must wake the halted guest. */
	asm volatile ("sti; hlt");

	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	/* vmcall (stage 2): PIN_EXTINT on again — intercepted wake-up. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* vmcall (stage 3): PIN_EXTINT off.  The second vmcall below
	 * (stage 4) makes the handler park the guest in the HLT activity
	 * state; the timer interrupt must wake it directly. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* vmcall (stage 5): PIN_EXTINT on; vmcall (stage 6) parks the guest
	 * in HLT activity state — the interrupt now causes a vmexit and the
	 * handler reactivates the guest. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	/* Stage 7: handler sets EXI_INTA so the CPU acknowledges the vector
	 * on exit and L1 dispatches it by hand. */
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmx_set_test_stage(7);
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("running a guest with interrupt acknowledgement set", timer_fired);
}
1362 
1363 static int interrupt_exit_handler(void)
1364 {
1365 	u64 guest_rip = vmcs_read(GUEST_RIP);
1366 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
1367 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1368 
1369 	switch (reason) {
1370 	case VMX_VMCALL:
1371 		switch (vmx_get_test_stage()) {
1372 		case 0:
1373 		case 2:
1374 		case 5:
1375 			vmcs_write(PIN_CONTROLS,
1376 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1377 			break;
1378 		case 7:
1379 			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
1380 			vmcs_write(PIN_CONTROLS,
1381 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1382 			break;
1383 		case 1:
1384 		case 3:
1385 			vmcs_write(PIN_CONTROLS,
1386 				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1387 			break;
1388 		case 4:
1389 		case 6:
1390 			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
1391 			break;
1392 		}
1393 		vmx_inc_test_stage();
1394 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1395 		return VMX_TEST_RESUME;
1396 	case VMX_EXTINT:
1397 		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
1398 			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
1399 			handle_external_interrupt(vector);
1400 		} else {
1401 			irq_enable();
1402 			asm volatile ("nop");
1403 			irq_disable();
1404 		}
1405 		if (vmx_get_test_stage() >= 2) {
1406 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
1407 			vmcs_write(GUEST_RIP, guest_rip + insn_len);
1408 		}
1409 		return VMX_TEST_RESUME;
1410 	default:
1411 		printf("Unknown exit reason, %d\n", reason);
1412 		print_vmexit_info();
1413 	}
1414 
1415 	return VMX_TEST_VMEXIT;
1416 }
1417 
1418 static int dbgctls_init(struct vmcs *vmcs)
1419 {
1420 	u64 dr7 = 0x402;
1421 	u64 zero = 0;
1422 
1423 	msr_bmp_init();
1424 	asm volatile(
1425 		"mov %0,%%dr0\n\t"
1426 		"mov %0,%%dr1\n\t"
1427 		"mov %0,%%dr2\n\t"
1428 		"mov %1,%%dr7\n\t"
1429 		: : "r" (zero), "r" (dr7));
1430 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
1431 	vmcs_write(GUEST_DR7, 0x404);
1432 	vmcs_write(GUEST_DEBUGCTL, 0x2);
1433 
1434 	vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
1435 	vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_SAVE_DBGCTLS);
1436 
1437 	return VMX_TEST_START;
1438 }
1439 
/*
 * Guest (L2) body for the "debug controls" test.
 *  - Stages 0/1: verify ENT_LOAD_DBGCTLS loaded DR7 on entry, then that
 *    EXI_SAVE_DBGCTLS saved the guest's modified value on exit.
 *  - Stages 2/3: after the handler clears both controls, verify the guest
 *    inherits the host debug state and exits no longer save it.
 * DEBUGCTL checks are commented out until KVM supports the MSR.
 */
static void dbgctls_main(void)
{
	u64 dr7, debugctl;

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	report("Load debug controls", dr7 == 0x404 /* && debugctl == 0x2 */);

	/* Dirty the debug state so the next exit has something to save. */
	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(0);
	vmcall();
	report("Save debug controls", vmx_get_test_stage() == 1);

	if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS ||
	    ctrl_exit_rev.set & EXI_SAVE_DBGCTLS) {
		/* The second half needs the controls to be clearable. */
		printf("\tDebug controls are always loaded/saved\n");
		return;
	}
	/* Stage 2: handler re-arms state with both controls cleared. */
	vmx_set_test_stage(2);
	vmcall();

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	report("Guest=host debug controls", dr7 == 0x402 /* && debugctl == 0x1 */);

	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(3);
	vmcall();
	report("Don't save debug controls", vmx_get_test_stage() == 4);
}
1478 
1479 static int dbgctls_exit_handler(void)
1480 {
1481 	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
1482 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1483 	u64 guest_rip = vmcs_read(GUEST_RIP);
1484 	u64 dr7, debugctl;
1485 
1486 	asm volatile("mov %%dr7,%0" : "=r" (dr7));
1487 	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1488 
1489 	switch (reason) {
1490 	case VMX_VMCALL:
1491 		switch (vmx_get_test_stage()) {
1492 		case 0:
1493 			if (dr7 == 0x400 && debugctl == 0 &&
1494 			    vmcs_read(GUEST_DR7) == 0x408 /* &&
1495 			    Commented out: KVM does not support DEBUGCTL so far
1496 			    vmcs_read(GUEST_DEBUGCTL) == 0x3 */)
1497 				vmx_inc_test_stage();
1498 			break;
1499 		case 2:
1500 			dr7 = 0x402;
1501 			asm volatile("mov %0,%%dr7" : : "r" (dr7));
1502 			wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
1503 			vmcs_write(GUEST_DR7, 0x404);
1504 			vmcs_write(GUEST_DEBUGCTL, 0x2);
1505 
1506 			vmcs_write(ENT_CONTROLS,
1507 				vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS);
1508 			vmcs_write(EXI_CONTROLS,
1509 				vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS);
1510 			break;
1511 		case 3:
1512 			if (dr7 == 0x400 && debugctl == 0 &&
1513 			    vmcs_read(GUEST_DR7) == 0x404 /* &&
1514 			    Commented out: KVM does not support DEBUGCTL so far
1515 			    vmcs_read(GUEST_DEBUGCTL) == 0x2 */)
1516 				vmx_inc_test_stage();
1517 			break;
1518 		}
1519 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1520 		return VMX_TEST_RESUME;
1521 	default:
1522 		printf("Unknown exit reason, %d\n", reason);
1523 		print_vmexit_info();
1524 	}
1525 	return VMX_TEST_VMEXIT;
1526 }
1527 
/* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
/*
 * NULL-terminated registry of all nested-virtualization test cases,
 * iterated by the VMX test framework.  A NULL init or syscall_handler
 * means the test has no such hook.
 */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ "interrupt", interrupt_init, interrupt_main,
		interrupt_exit_handler, NULL, {0} },
	{ "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler,
		NULL, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1551