xref: /kvm-unit-tests/x86/vmx_tests.c (revision dc8dabd3f52f5cc561eb18af18e919cbf953e122)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 #include "vmx.h"
7 #include "msr.h"
8 #include "processor.h"
9 #include "vm.h"
10 #include "io.h"
11 #include "fwcfg.h"
12 
/* Host IA32_PAT / IA32_EFER values saved by the control-field tests. */
u64 ia32_pat;
u64 ia32_efer;
/* Shared guest/host test-stage counter (see set_stage()/get_stage()). */
volatile u32 stage;
/* I/O bitmap pages and scratch port for the I/O-bitmap test. */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

/* EPT test state: init failure flag, PML4 root, EPTP value, data pages. */
bool init_fail;
unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;
23 
/* Execute VMCALL to force a VM exit into the current test's exit handler. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
28 
/*
 * Store the shared guest/host test stage.  The barriers keep the
 * compiler from reordering the store relative to the surrounding
 * test operations.
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
35 
/* Read the shared test stage; barriers force a fresh, ordered load. */
static inline u32 get_stage()
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
45 
/*
 * Minimal guest payload used by the "null" test: just prints a banner.
 * NOTE(review): the banner says "null_guest_main" while the function is
 * basic_guest_main — presumably named for the "null" test entry; confirm.
 */
void basic_guest_main()
{
	/* Here is a basic guest_main, print Hello World */
	printf("\tHello World, this is null_guest_main!\n");
}
51 
52 int basic_exit_handler()
53 {
54 	u64 guest_rip;
55 	ulong reason;
56 
57 	guest_rip = vmcs_read(GUEST_RIP);
58 	reason = vmcs_read(EXI_REASON) & 0xff;
59 
60 	switch (reason) {
61 	case VMX_VMCALL:
62 		print_vmexit_info();
63 		vmcs_write(GUEST_RIP, guest_rip + 3);
64 		return VMX_TEST_RESUME;
65 	default:
66 		break;
67 	}
68 	printf("ERROR : Unhandled vmx exit.\n");
69 	print_vmexit_info();
70 	return VMX_TEST_EXIT;
71 }
72 
/* Default syscall handler for tests that make no guest syscalls: no-op. */
void basic_syscall_handler(u64 syscall_no)
{
}
76 
/*
 * Guest entry of the "vmenter" test.  Reports vmlaunch success, then
 * performs a VMCALL with 0xABCD in RAX; the host handler
 * (vmenter_exit_handler) verifies RAX and rewrites it to 0xFFFF before
 * resuming.  The guest then checks RAX was changed and RSP survived the
 * resume unchanged.
 */
void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	/* NOTE(review): this asm writes %%rax explicitly but neither lists
	 * it as a clobber nor ties an operand to it; the compiler could
	 * allocate another operand to rax — verify the constraints. */
	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}
94 
95 int vmenter_exit_handler()
96 {
97 	u64 guest_rip;
98 	ulong reason;
99 
100 	guest_rip = vmcs_read(GUEST_RIP);
101 	reason = vmcs_read(EXI_REASON) & 0xff;
102 	switch (reason) {
103 	case VMX_VMCALL:
104 		if (regs.rax != 0xABCD) {
105 			report("test vmresume", 0);
106 			return VMX_TEST_VMEXIT;
107 		}
108 		regs.rax = 0xFFFF;
109 		vmcs_write(GUEST_RIP, guest_rip + 3);
110 		return VMX_TEST_RESUME;
111 	default:
112 		report("test vmresume", 0);
113 		print_vmexit_info();
114 	}
115 	return VMX_TEST_VMEXIT;
116 }
117 
/* Preemption-timer test state: TSC-to-timer scale (IA32_VMX_MISC[4:0]),
 * TSC at guest entry, and the programmed timer value. */
u32 preempt_scale;
volatile unsigned long long tsc_val;
volatile u32 preempt_val;
121 
122 void preemption_timer_init()
123 {
124 	u32 ctrl_pin;
125 
126 	ctrl_pin = vmcs_read(PIN_CONTROLS) | PIN_PREEMPT;
127 	ctrl_pin &= ctrl_pin_rev.clr;
128 	vmcs_write(PIN_CONTROLS, ctrl_pin);
129 	preempt_val = 10000000;
130 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
131 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
132 
133 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
134 		printf("\tSave preemption value is not supported\n");
135 }
136 
/*
 * Guest side of the preemption-timer test.  Records the TSC on entry,
 * optionally exercises the "save preemption timer value" exit control
 * through two VMCALL stages, then spins until well past the programmed
 * timeout; if the timer exit never occurs, the stage-2 VMCALL tells the
 * host the test failed.
 */
void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return;
	}
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		set_stage(0);
		vmcall();
		/* Host advances to stage 1 only if the saved value passed. */
		if (get_stage() == 1)
			vmcall();
	}
	/* Spin until 10x the timeout has elapsed in scaled TSC ticks;
	 * reaching the stage-2 VMCALL means the timer never fired. */
	while (1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			set_stage(2);
			vmcall();
		}
	}
}
158 
/*
 * Host side of the preemption-timer test.
 * VMX_PREEMPT: verify the timer did not fire before the programmed
 *   deadline (scaled TSC delta vs. preempt_val).
 * VMX_VMCALL stages:
 *   0 - PREEMPT_TIMER_VALUE must still hold the programmed value; then
 *       enable EXI_SAVE_PREEMPT and advance to stage 1;
 *   1 - with saving enabled, the field must now hold a counted-down
 *       (smaller) value;
 *   2 - the guest spun past 10x the timeout: the timer never fired.
 * Clears PIN_PREEMPT before ending the test.
 */
int preemption_timer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		/* Fail if the timer fired before the deadline. */
		if (((rdtsc() - tsc_val) >> preempt_scale) < preempt_val)
			report("Preemption timer", 0);
		else
			report("Preemption timer", 1);
		break;
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (vmcs_read(PREEMPT_TIMER_VALUE) != preempt_val)
				report("Save preemption value", 0);
			else {
				set_stage(get_stage() + 1);
				ctrl_exit = (vmcs_read(EXI_CONTROLS) |
					EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
				vmcs_write(EXI_CONTROLS, ctrl_exit);
			}
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 1:
			/* With EXI_SAVE_PREEMPT set, the saved value must
			 * have counted down below the initial value. */
			if (vmcs_read(PREEMPT_TIMER_VALUE) >= preempt_val)
				report("Save preemption value", 0);
			else
				report("Save preemption value", 1);
			vmcs_write(GUEST_RIP, guest_rip + insn_len);
			return VMX_TEST_RESUME;
		case 2:
			/* Guest timed out waiting for the preemption exit. */
			report("Preemption timer", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	/* Disable the preemption timer before ending the test. */
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
213 
214 void msr_bmp_init()
215 {
216 	void *msr_bitmap;
217 	u32 ctrl_cpu0;
218 
219 	msr_bitmap = alloc_page();
220 	memset(msr_bitmap, 0x0, PAGE_SIZE);
221 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
222 	ctrl_cpu0 |= CPU_MSR_BITMAP;
223 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
224 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
225 }
226 
227 static void test_ctrl_pat_init()
228 {
229 	u64 ctrl_ent;
230 	u64 ctrl_exi;
231 
232 	msr_bmp_init();
233 	ctrl_ent = vmcs_read(ENT_CONTROLS);
234 	ctrl_exi = vmcs_read(EXI_CONTROLS);
235 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
236 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
237 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
238 	vmcs_write(GUEST_PAT, 0x0);
239 	vmcs_write(HOST_PAT, ia32_pat);
240 }
241 
242 static void test_ctrl_pat_main()
243 {
244 	u64 guest_ia32_pat;
245 
246 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
247 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
248 		printf("\tENT_LOAD_PAT is not supported.\n");
249 	else {
250 		if (guest_ia32_pat != 0) {
251 			report("Entry load PAT", 0);
252 			return;
253 		}
254 	}
255 	wrmsr(MSR_IA32_CR_PAT, 0x6);
256 	vmcall();
257 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
258 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
259 		if (guest_ia32_pat != ia32_pat) {
260 			report("Entry load PAT", 0);
261 			return;
262 		}
263 		report("Entry load PAT", 1);
264 	}
265 }
266 
/*
 * Host side of the PAT control test (runs on the guest's VMCALL):
 * verify EXI_SAVE_PAT captured the guest's PAT value (0x6) and that
 * EXI_LOAD_PAT restored the host PAT, then hand the saved host PAT to
 * the guest and skip the 3-byte VMCALL.
 */
static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			/* Saving unsupported: emulate it for the guest check. */
			printf("\tEXI_SAVE_PAT is not supported\n");
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			if (guest_pat == 0x6)
				report("Exit save PAT", 1);
			else
				report("Exit save PAT", 0);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else {
			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
				report("Exit load PAT", 1);
			else
				report("Exit load PAT", 0);
		}
		/* Give the guest the original PAT for the re-entry check. */
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
304 
305 static void test_ctrl_efer_init()
306 {
307 	u64 ctrl_ent;
308 	u64 ctrl_exi;
309 
310 	msr_bmp_init();
311 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
312 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
313 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
314 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
315 	ia32_efer = rdmsr(MSR_EFER);
316 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
317 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
318 }
319 
/*
 * Guest side of the EFER control test: verify ENT_LOAD_EFER loaded the
 * NX-flipped EFER on entry, restore the original EFER, VMCALL so the
 * host can test EXI_SAVE_EFER/EXI_LOAD_EFER, then verify the EFER set
 * by the host was loaded on re-entry.
 */
static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
		if (guest_ia32_efer != ia32_efer) {
			report("Entry load EFER", 0);
			return;
		}
		report("Entry load EFER", 1);
	}
}
344 
345 static int test_ctrl_efer_exit_handler()
346 {
347 	u64 guest_rip;
348 	ulong reason;
349 	u64 guest_efer;
350 
351 	guest_rip = vmcs_read(GUEST_RIP);
352 	reason = vmcs_read(EXI_REASON) & 0xff;
353 	switch (reason) {
354 	case VMX_VMCALL:
355 		guest_efer = vmcs_read(GUEST_EFER);
356 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
357 			printf("\tEXI_SAVE_EFER is not supported\n");
358 			vmcs_write(GUEST_EFER, ia32_efer);
359 		} else {
360 			if (guest_efer == ia32_efer)
361 				report("Exit save EFER", 1);
362 			else
363 				report("Exit save EFER", 0);
364 		}
365 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
366 			printf("\tEXI_LOAD_EFER is not supported\n");
367 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
368 		} else {
369 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
370 				report("Exit load EFER", 1);
371 			else
372 				report("Exit load EFER", 0);
373 		}
374 		vmcs_write(GUEST_PAT, ia32_efer);
375 		vmcs_write(GUEST_RIP, guest_rip + 3);
376 		return VMX_TEST_RESUME;
377 	default:
378 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
379 		break;
380 	}
381 	return VMX_TEST_VMEXIT;
382 }
383 
u32 guest_cr0, guest_cr4;	/* CR values observed/written by the CR-shadowing guest */
385 
386 static void cr_shadowing_main()
387 {
388 	u32 cr0, cr4, tmp;
389 
390 	// Test read through
391 	set_stage(0);
392 	guest_cr0 = read_cr0();
393 	if (stage == 1)
394 		report("Read through CR0", 0);
395 	else
396 		vmcall();
397 	set_stage(1);
398 	guest_cr4 = read_cr4();
399 	if (stage == 2)
400 		report("Read through CR4", 0);
401 	else
402 		vmcall();
403 	// Test write through
404 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
405 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
406 	set_stage(2);
407 	write_cr0(guest_cr0);
408 	if (stage == 3)
409 		report("Write throuth CR0", 0);
410 	else
411 		vmcall();
412 	set_stage(3);
413 	write_cr4(guest_cr4);
414 	if (stage == 4)
415 		report("Write through CR4", 0);
416 	else
417 		vmcall();
418 	// Test read shadow
419 	set_stage(4);
420 	vmcall();
421 	cr0 = read_cr0();
422 	if (stage != 5) {
423 		if (cr0 == guest_cr0)
424 			report("Read shadowing CR0", 1);
425 		else
426 			report("Read shadowing CR0", 0);
427 	}
428 	set_stage(5);
429 	cr4 = read_cr4();
430 	if (stage != 6) {
431 		if (cr4 == guest_cr4)
432 			report("Read shadowing CR4", 1);
433 		else
434 			report("Read shadowing CR4", 0);
435 	}
436 	// Test write shadow (same value with shadow)
437 	set_stage(6);
438 	write_cr0(guest_cr0);
439 	if (stage == 7)
440 		report("Write shadowing CR0 (same value with shadow)", 0);
441 	else
442 		vmcall();
443 	set_stage(7);
444 	write_cr4(guest_cr4);
445 	if (stage == 8)
446 		report("Write shadowing CR4 (same value with shadow)", 0);
447 	else
448 		vmcall();
449 	// Test write shadow (different value)
450 	set_stage(8);
451 	tmp = guest_cr0 ^ X86_CR0_TS;
452 	asm volatile("mov %0, %%rsi\n\t"
453 		"mov %%rsi, %%cr0\n\t"
454 		::"m"(tmp)
455 		:"rsi", "memory", "cc");
456 	if (stage != 9)
457 		report("Write shadowing different X86_CR0_TS", 0);
458 	else
459 		report("Write shadowing different X86_CR0_TS", 1);
460 	set_stage(9);
461 	tmp = guest_cr0 ^ X86_CR0_MP;
462 	asm volatile("mov %0, %%rsi\n\t"
463 		"mov %%rsi, %%cr0\n\t"
464 		::"m"(tmp)
465 		:"rsi", "memory", "cc");
466 	if (stage != 10)
467 		report("Write shadowing different X86_CR0_MP", 0);
468 	else
469 		report("Write shadowing different X86_CR0_MP", 1);
470 	set_stage(10);
471 	tmp = guest_cr4 ^ X86_CR4_TSD;
472 	asm volatile("mov %0, %%rsi\n\t"
473 		"mov %%rsi, %%cr4\n\t"
474 		::"m"(tmp)
475 		:"rsi", "memory", "cc");
476 	if (stage != 11)
477 		report("Write shadowing different X86_CR4_TSD", 0);
478 	else
479 		report("Write shadowing different X86_CR4_TSD", 1);
480 	set_stage(11);
481 	tmp = guest_cr4 ^ X86_CR4_DE;
482 	asm volatile("mov %0, %%rsi\n\t"
483 		"mov %%rsi, %%cr4\n\t"
484 		::"m"(tmp)
485 		:"rsi", "memory", "cc");
486 	if (stage != 12)
487 		report("Write shadowing different X86_CR4_DE", 0);
488 	else
489 		report("Write shadowing different X86_CR4_DE", 1);
490 }
491 
/*
 * Host side of the CR-shadowing test.
 * VMX_VMCALL: stages 0-3 verify the guest's read/write-through values
 *   against GUEST_CR0/CR4; stage 4 arms the CR masks and shadows for the
 *   shadowing tests; stages 6-7 verify the guest's same-value writes.
 * VMX_CR: a CR access trapped.  For stages 4-7 that is a failure (the
 *   access should have been satisfied by the shadow); for stages 8-11 it
 *   is expected, and the exit qualification must identify the access
 *   (0x600 = mov to CR0 from rsi, 0x604 = mov to CR4 from rsi).
 * The stage is advanced to tell the guest which path was taken.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Arm the CR masks/shadows: the shadow holds bits
			 * flipped relative to the real guest CR values. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (get_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
602 
603 static void iobmp_init()
604 {
605 	u32 ctrl_cpu0;
606 
607 	io_bitmap_a = alloc_page();
608 	io_bitmap_a = alloc_page();
609 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
610 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
611 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
612 	ctrl_cpu0 |= CPU_IO_BITMAP;
613 	ctrl_cpu0 &= (~CPU_IO);
614 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
615 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
616 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
617 }
618 
/*
 * Guest side of the I/O-bitmap test.  Each stage programs the bitmaps,
 * performs one I/O access, and checks whether the host handler advanced
 * the stage (i.e. whether the access trapped as expected):
 *   0   pass-through: bitmaps clear, nothing should trap;
 *   2-4 trap on port 0: byte in, word out, long in (width/direction);
 *   5   bitmap A bit for port 0x5000 (low half of port space);
 *   6   bitmap B bit for port 0x9000 (high half, offset 0x1000 into B);
 *   7   long access at 0x4FFF straddling into a covered byte;
 *   8   long access at 0xFFFF running past the end of the bitmaps.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;	/* trap ports 0-7 */
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	/* Success for the long-width case is reported by the exit handler. */
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);	/* bitmap B covers the upper half of the port space */
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);
}
679 
/*
 * Host side of the I/O-bitmap test: on each VMX_IO exit, validate the
 * exit qualification (access size, direction, port number) expected for
 * the current stage, advance the stage so the guest sees the trap, and
 * resume past the I/O instruction.  Stages 0/1 catch unexpected traps
 * during the pass-through phase.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (get_stage()) {
		case 0:
		case 1:
			/* Pass-through phase: any trap just bumps the stage
			 * so the guest can detect the failure. */
			set_stage(stage + 1);
			break;
		case 2:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
757 
/* Instruction-intercept test types: which control set (if any) arms the trap. */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/*
 * Bit flags for insn_table.test_field; the exit handler tests them with
 * '&', so they must be distinct nonzero bits.  Fix: FIELD_EXIT_QUAL was
 * previously defined as 0, making "test_field & FIELD_EXIT_QUAL" always
 * false, so exit-qualification checks (e.g. for INVLPG) never ran.
 */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
765 
/*
 * One tiny callable stub per intercepted instruction: each executes the
 * instruction once and returns, so insn_intercept_main() can invoke it
 * through the insn_table function pointer.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
/* C-visible entry points for the asm stubs above. */
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();
788 
u32 cur_insn;	/* index of the insn_table entry currently being exercised */
790 
/* Descriptor for one instruction-intercept test case. */
struct insn_table {
	const char *name;	// name used in report() output
	u32 flag;		// VM-execution control bit that arms the intercept
	void (*insn_func)();	// stub that executes the instruction once
	u32 type;		// INSN_CPU0/INSN_CPU1/INSN_ALWAYS_TRAP/INSN_NEVER_TRAP
	u32 reason;		// expected VM-exit reason
	ulong exit_qual;	// expected exit qualification (if tested)
	u32 insn_info;		// expected VM-exit instruction info (if tested)
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which fields need to be tested; reason is always tested
	u32 test_field;
};
803 
804 /*
805  * Add more test cases of instruction intercept here. Elements in this
806  * table is:
807  *	name/control flag/insn function/type/exit reason/exit qulification/
808  *	instruction info/field to test
809  * The last field defines which fields (exit_qual and insn_info) need to be
810  * tested in exit handler. If set to 0, only "reason" is checked.
811  */
812 static struct insn_table insn_table[] = {
813 	// Flags for Primary Processor-Based VM-Execution Controls
814 	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
815 	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
816 		0x12345678, 0, FIELD_EXIT_QUAL},
817 	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
818 	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
819 	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
820 	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
821 	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
822 	// Flags for Secondary Processor-Based VM-Execution Controls
823 	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
824 	// Instructions always trap
825 	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
826 	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
827 	// Instructions never trap
828 	{NULL},
829 };
830 
831 static void insn_intercept_init()
832 {
833 	u32 ctrl_cpu[2];
834 
835 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
836 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
837 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
838 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
839 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
840 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
841 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
842 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
843 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
844 }
845 
846 static void insn_intercept_main()
847 {
848 	cur_insn = 0;
849 	while(insn_table[cur_insn].name != NULL) {
850 		set_stage(cur_insn);
851 		if ((insn_table[cur_insn].type == INSN_CPU0
852 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
853 			|| (insn_table[cur_insn].type == INSN_CPU1
854 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
855 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
856 				insn_table[cur_insn].name);
857 			continue;
858 		}
859 		insn_table[cur_insn].insn_func();
860 		switch (insn_table[cur_insn].type) {
861 		case INSN_CPU0:
862 		case INSN_CPU1:
863 		case INSN_ALWAYS_TRAP:
864 			if (stage != cur_insn + 1)
865 				report(insn_table[cur_insn].name, 0);
866 			else
867 				report(insn_table[cur_insn].name, 1);
868 			break;
869 		case INSN_NEVER_TRAP:
870 			if (stage == cur_insn + 1)
871 				report(insn_table[cur_insn].name, 0);
872 			else
873 				report(insn_table[cur_insn].name, 1);
874 			break;
875 		}
876 		cur_insn ++;
877 	}
878 }
879 
/*
 * Common exit handler for the instruction-intercept tests: the exit is a
 * pass when it occurred at the expected stage with the expected reason,
 * plus — when requested via test_field — the expected exit qualification
 * and/or instruction info.  On pass the stage is advanced so the guest
 * can see the intercept happened; either way the guest resumes past the
 * trapped instruction.
 *
 * NOTE(review): the '&' tests below require FIELD_EXIT_QUAL and
 * FIELD_INSN_INFO to be distinct nonzero bits; with FIELD_EXIT_QUAL
 * defined as 0 the exit-qualification check can never run — verify the
 * FIELD_* definitions.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
905 
906 
/*
 * Build a 4-level EPT identity map covering at least the first 4 GiB of
 * guest-physical memory.  Selects UC (preferred) or WB for the EPT
 * paging-structure memory type, requires support for 4-level page
 * walks, and uses 2M mappings when the CPU supports them.
 * Returns 0 on success, 1 when a required EPT capability is missing or
 * the range could not be mapped.
 */
static int setup_ept()
{
	int support_2m;
	unsigned long end_of_memory;

	if (!(ept_vpid.val & EPT_CAP_UC) &&
			!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory type "
				"UC&WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	/* EPTP walk length field holds (levels - 1) = 3 for 4-level. */
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	vmcs_write(EPTP, eptp);
	support_2m = !!(ept_vpid.val & EPT_CAP_2M_PAGE);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	if (setup_ept_range(pml4, 0, end_of_memory,
			0, support_2m, EPT_WA | EPT_RA | EPT_EA)) {
		printf("\tSet ept tables failed.\n");
		return 1;
	}
	return 0;
}
942 
/*
 * Enable secondary controls + EPT, build the identity map, then prepare
 * two magic-tagged data pages.  Their 2M regions are remapped with 4K
 * granularity so single-page EPT entries can be manipulated, and
 * data_page1's guest-physical address is pointed at data_page2's frame
 * for the stage-0 read/remap test.  Failures set init_fail rather than
 * aborting, so ept_main() can bail out early.
 */
static void ept_init()
{
	unsigned long base_addr1, base_addr2;
	u32 ctrl_cpu[2];

	init_fail = false;
	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	if (setup_ept())
		init_fail = true;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	/* Remap both pages' 2M regions with 4K pages (support_2m = 0). */
	base_addr1 = (unsigned long)data_page1 & PAGE_MASK_2M;
	base_addr2 = (unsigned long)data_page2 & PAGE_MASK_2M;
	if (setup_ept_range(pml4, base_addr1, base_addr1 + PAGE_SIZE_2M, 0, 0,
			    EPT_WA | EPT_RA | EPT_EA) ||
	    setup_ept_range(pml4, base_addr2, base_addr2 + PAGE_SIZE_2M, 0, 0,
			    EPT_WA | EPT_RA | EPT_EA))
		init_fail = true;
	/* Alias data_page1's guest-physical address to data_page2's frame. */
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);
}
975 
976 static void ept_main()
977 {
978 	if (init_fail)
979 		return;
980 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
981 	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
982 		printf("\tEPT is not supported");
983 		return;
984 	}
985 	set_stage(0);
986 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
987 			*((u32 *)data_page1) != MAGIC_VAL_1)
988 		report("EPT basic framework - read", 0);
989 	else {
990 		*((u32 *)data_page2) = MAGIC_VAL_3;
991 		vmcall();
992 		if (get_stage() == 1) {
993 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
994 					*((u32 *)data_page2) == MAGIC_VAL_2)
995 				report("EPT basic framework", 1);
996 			else
997 				report("EPT basic framework - remap", 1);
998 		}
999 	}
1000 	// Test EPT Misconfigurations
1001 	set_stage(1);
1002 	vmcall();
1003 	*((u32 *)data_page1) = MAGIC_VAL_1;
1004 	if (get_stage() != 2) {
1005 		report("EPT misconfigurations", 0);
1006 		goto t1;
1007 	}
1008 	set_stage(2);
1009 	vmcall();
1010 	*((u32 *)data_page1) = MAGIC_VAL_1;
1011 	if (get_stage() != 3) {
1012 		report("EPT misconfigurations", 0);
1013 		goto t1;
1014 	}
1015 	report("EPT misconfigurations", 1);
1016 t1:
1017 	// Test EPT violation
1018 	set_stage(3);
1019 	vmcall();
1020 	*((u32 *)data_page1) = MAGIC_VAL_1;
1021 	if (get_stage() == 4)
1022 		report("EPT violation - page permission", 1);
1023 	else
1024 		report("EPT violation - page permission", 0);
1025 	// Violation caused by EPT paging structure
1026 	set_stage(4);
1027 	vmcall();
1028 	*((u32 *)data_page1) = MAGIC_VAL_2;
1029 	if (get_stage() == 5)
1030 		report("EPT violation - paging structure", 1);
1031 	else
1032 		report("EPT violation - paging structure", 0);
1033 }
1034 
/*
 * Host side of the EPT framework test.
 * VMX_VMCALL stages:
 *   0 - verify the guest's write through the aliased mapping landed in
 *       data_page2, then restore data_page2's identity mapping;
 *   1 - install a write-only leaf (misconfiguration: readable=0);
 *   2 - install an invalid EPT memory type (2) in the leaf;
 *   3 - clear EPT_PRESENT bits in data_page1's leaf entry;
 *   4 - clear EPT_PRESENT bits in the covering paging-structure entry.
 * VMX_EPT_MISCONFIG (stages 1-2) and VMX_EPT_VIOLATION (stages 3-4)
 * check the exit (and for violations the exit qualification), repair
 * the entry, and resume.  Single-context INVEPT flushes follow every
 * EPT edit.
 */
static int ept_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (get_stage()) {
		case 0:
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				set_stage(get_stage() + 1);
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write\n", 0);
			break;
		case 1:
			/* Write-only leaf: readable bit clear => misconfig. */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1, EPT_WA);
			invept(INVEPT_SINGLE, eptp);
			break;
		case 2:
			/* Memory type 2 is reserved => misconfig. */
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA |
 				(2 << EPT_MEM_TYPE_SHIFT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 3:
			/* Clear present bits in the level-1 (leaf) entry. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Clear present bits in the level-2 entry mapping
			 * the page-table page itself. */
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & (~EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (get_stage()) {
		case 1:
		case 2:
			/* Expected misconfig: repair the leaf and resume. */
			set_stage(get_stage() + 1);
			install_ept(pml4, (unsigned long)data_page1,
 				(unsigned long)data_page1,
 				EPT_RA | EPT_WA | EPT_EA);
			invept(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			printf("ERROR - unexpected stage, %d.\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(get_stage()) {
		case 3:
			/* Guest write to a non-present leaf: qualification
			 * must show a write with valid linear+physical. */
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		case 4:
			/* Walk hit a non-present paging-structure entry. */
			if (exit_qual == (EPT_VLT_RD | EPT_VLT_LADDR_VLD))
				set_stage(get_stage() + 1);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			invept(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n", get_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
1143 
/*
 * Test registry consumed by the VMX test framework.  Each entry:
 * name/init/guest_main/exit_handler/syscall_handler/guest_regs.
 * The basic_* handlers implement default behavior for tests that do not
 * need their own; the list is NULL-terminated.
 */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler,
		basic_syscall_handler, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler,
		basic_syscall_handler, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, basic_syscall_handler, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, basic_syscall_handler, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		basic_syscall_handler, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, basic_syscall_handler, {0} },
	{ "EPT framework", ept_init, ept_main, ept_exit_handler,
		basic_syscall_handler, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
1167