xref: /kvm-unit-tests/x86/vmx_tests.c (revision 6eb44827fd97b04f78cbe7003ac462905a36382c)
1 #include "vmx.h"
2 #include "msr.h"
3 #include "processor.h"
4 #include "vm.h"
5 #include "io.h"
6 
/* Host PAT/EFER MSR values sampled during test init; exit handlers use
 * them to verify entry/exit MSR load/save behavior and to restore state. */
u64 ia32_pat;
u64 ia32_efer;
/* Test sequencing counter shared between guest code and host exit handlers;
 * volatile because it is modified across VM entries/exits. */
volatile u32 stage;
/* VMCS I/O bitmap pages; per the code below, bitmap A covers the low port
 * range and bitmap B the high range (e.g. port 0x9000 maps into B). */
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;
12 
/* Trigger a VM exit from the guest with exit reason VMX_VMCALL. */
static inline void vmcall()
{
	asm volatile("vmcall");
}
17 
/*
 * Advance the shared test stage counter. The compiler barriers keep the
 * store from being reordered relative to the surrounding test operations
 * (the exit handler observes `stage` across a VM exit).
 */
static inline void set_stage(u32 s)
{
	barrier();
	stage = s;
	barrier();
}
24 
/*
 * Read the shared test stage counter with compiler barriers so the load
 * is not cached/reordered around guest/host transitions.
 */
static inline u32 get_stage()
{
	u32 s;

	barrier();
	s = stage;
	barrier();
	return s;
}
34 
/* Default per-test init hook: the "null"/basic tests need no extra VMCS
 * configuration beyond the common setup. */
void basic_init()
{
}
38 
/* Guest entry point for the "null" test: just prove the guest runs by
 * printing a message, then fall back to the framework. */
void basic_guest_main()
{
	/* Here is a basic guest_main, print Hello World */
	printf("\tHello World, this is null_guest_main!\n");
}
44 
45 int basic_exit_handler()
46 {
47 	u64 guest_rip;
48 	ulong reason;
49 
50 	guest_rip = vmcs_read(GUEST_RIP);
51 	reason = vmcs_read(EXI_REASON) & 0xff;
52 
53 	switch (reason) {
54 	case VMX_VMCALL:
55 		print_vmexit_info();
56 		vmcs_write(GUEST_RIP, guest_rip + 3);
57 		return VMX_TEST_RESUME;
58 	default:
59 		break;
60 	}
61 	printf("ERROR : Unhandled vmx exit.\n");
62 	print_vmexit_info();
63 	return VMX_TEST_EXIT;
64 }
65 
/* Default syscall hook: the basic tests issue no guest syscalls, so this
 * intentionally does nothing. */
void basic_syscall_handler(u64 syscall_no)
{
}
69 
/*
 * Guest side of the vmenter test: vmlaunch succeeded if we are running at
 * all; then verify vmresume by round-tripping a magic value through RAX
 * across a VMCALL (the exit handler swaps 0xABCD for 0xFFFF) and checking
 * that RSP is preserved across the exit/resume.
 */
void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	/* Reaching guest code at all proves vmlaunch worked. */
	report("test vmlaunch", 1);

	/* NOTE(review): %rax is used explicitly but not listed as a clobber;
	 * this assumes the compiler does not pick rax for the "=r" outputs —
	 * TODO confirm or rewrite with an "a" constraint. */
	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}
87 
88 int vmenter_exit_handler()
89 {
90 	u64 guest_rip;
91 	ulong reason;
92 
93 	guest_rip = vmcs_read(GUEST_RIP);
94 	reason = vmcs_read(EXI_REASON) & 0xff;
95 	switch (reason) {
96 	case VMX_VMCALL:
97 		if (regs.rax != 0xABCD) {
98 			report("test vmresume", 0);
99 			return VMX_TEST_VMEXIT;
100 		}
101 		regs.rax = 0xFFFF;
102 		vmcs_write(GUEST_RIP, guest_rip + 3);
103 		return VMX_TEST_RESUME;
104 	default:
105 		report("test vmresume", 0);
106 		print_vmexit_info();
107 	}
108 	return VMX_TEST_VMEXIT;
109 }
110 
111 void msr_bmp_init()
112 {
113 	void *msr_bitmap;
114 	u32 ctrl_cpu0;
115 
116 	msr_bitmap = alloc_page();
117 	memset(msr_bitmap, 0x0, PAGE_SIZE);
118 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
119 	ctrl_cpu0 |= CPU_MSR_BITMAP;
120 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
121 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
122 }
123 
124 static void test_ctrl_pat_init()
125 {
126 	u64 ctrl_ent;
127 	u64 ctrl_exi;
128 
129 	msr_bmp_init();
130 	ctrl_ent = vmcs_read(ENT_CONTROLS);
131 	ctrl_exi = vmcs_read(EXI_CONTROLS);
132 	vmcs_write(ENT_CONTROLS, ctrl_ent | ENT_LOAD_PAT);
133 	vmcs_write(EXI_CONTROLS, ctrl_exi | (EXI_SAVE_PAT | EXI_LOAD_PAT));
134 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
135 	vmcs_write(GUEST_PAT, 0x0);
136 	vmcs_write(HOST_PAT, ia32_pat);
137 }
138 
139 static void test_ctrl_pat_main()
140 {
141 	u64 guest_ia32_pat;
142 
143 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
144 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
145 		printf("\tENT_LOAD_PAT is not supported.\n");
146 	else {
147 		if (guest_ia32_pat != 0) {
148 			report("Entry load PAT", 0);
149 			return;
150 		}
151 	}
152 	wrmsr(MSR_IA32_CR_PAT, 0x6);
153 	vmcall();
154 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
155 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT) {
156 		if (guest_ia32_pat != ia32_pat) {
157 			report("Entry load PAT", 0);
158 			return;
159 		}
160 		report("Entry load PAT", 1);
161 	}
162 }
163 
164 static int test_ctrl_pat_exit_handler()
165 {
166 	u64 guest_rip;
167 	ulong reason;
168 	u64 guest_pat;
169 
170 	guest_rip = vmcs_read(GUEST_RIP);
171 	reason = vmcs_read(EXI_REASON) & 0xff;
172 	switch (reason) {
173 	case VMX_VMCALL:
174 		guest_pat = vmcs_read(GUEST_PAT);
175 		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
176 			printf("\tEXI_SAVE_PAT is not supported\n");
177 			vmcs_write(GUEST_PAT, 0x6);
178 		} else {
179 			if (guest_pat == 0x6)
180 				report("Exit save PAT", 1);
181 			else
182 				report("Exit save PAT", 0);
183 		}
184 		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
185 			printf("\tEXI_LOAD_PAT is not supported\n");
186 		else {
187 			if (rdmsr(MSR_IA32_CR_PAT) == ia32_pat)
188 				report("Exit load PAT", 1);
189 			else
190 				report("Exit load PAT", 0);
191 		}
192 		vmcs_write(GUEST_PAT, ia32_pat);
193 		vmcs_write(GUEST_RIP, guest_rip + 3);
194 		return VMX_TEST_RESUME;
195 	default:
196 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
197 		break;
198 	}
199 	return VMX_TEST_VMEXIT;
200 }
201 
202 static void test_ctrl_efer_init()
203 {
204 	u64 ctrl_ent;
205 	u64 ctrl_exi;
206 
207 	msr_bmp_init();
208 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
209 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
210 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
211 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
212 	ia32_efer = rdmsr(MSR_EFER);
213 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
214 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
215 }
216 
217 static void test_ctrl_efer_main()
218 {
219 	u64 guest_ia32_efer;
220 
221 	guest_ia32_efer = rdmsr(MSR_EFER);
222 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
223 		printf("\tENT_LOAD_EFER is not supported.\n");
224 	else {
225 		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
226 			report("Entry load EFER", 0);
227 			return;
228 		}
229 	}
230 	wrmsr(MSR_EFER, ia32_efer);
231 	vmcall();
232 	guest_ia32_efer = rdmsr(MSR_EFER);
233 	if (ctrl_enter_rev.clr & ENT_LOAD_EFER) {
234 		if (guest_ia32_efer != ia32_efer) {
235 			report("Entry load EFER", 0);
236 			return;
237 		}
238 		report("Entry load EFER", 1);
239 	}
240 }
241 
242 static int test_ctrl_efer_exit_handler()
243 {
244 	u64 guest_rip;
245 	ulong reason;
246 	u64 guest_efer;
247 
248 	guest_rip = vmcs_read(GUEST_RIP);
249 	reason = vmcs_read(EXI_REASON) & 0xff;
250 	switch (reason) {
251 	case VMX_VMCALL:
252 		guest_efer = vmcs_read(GUEST_EFER);
253 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
254 			printf("\tEXI_SAVE_EFER is not supported\n");
255 			vmcs_write(GUEST_EFER, ia32_efer);
256 		} else {
257 			if (guest_efer == ia32_efer)
258 				report("Exit save EFER", 1);
259 			else
260 				report("Exit save EFER", 0);
261 		}
262 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
263 			printf("\tEXI_LOAD_EFER is not supported\n");
264 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
265 		} else {
266 			if (rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX))
267 				report("Exit load EFER", 1);
268 			else
269 				report("Exit load EFER", 0);
270 		}
271 		vmcs_write(GUEST_PAT, ia32_efer);
272 		vmcs_write(GUEST_RIP, guest_rip + 3);
273 		return VMX_TEST_RESUME;
274 	default:
275 		printf("ERROR : Undefined exit reason, reason = %d.\n", reason);
276 		break;
277 	}
278 	return VMX_TEST_VMEXIT;
279 }
280 
/* CR0/CR4 values the guest last read or wrote; the exit handler compares
 * them against the VMCS guest fields.
 * NOTE(review): declared u32 although control registers are 64-bit in
 * long mode — the tested bits (TS/MP/TSD/DE) all fit in 32 bits. */
u32 guest_cr0, guest_cr4;
282 
283 static void cr_shadowing_main()
284 {
285 	u32 cr0, cr4, tmp;
286 
287 	// Test read through
288 	set_stage(0);
289 	guest_cr0 = read_cr0();
290 	if (stage == 1)
291 		report("Read through CR0", 0);
292 	else
293 		vmcall();
294 	set_stage(1);
295 	guest_cr4 = read_cr4();
296 	if (stage == 2)
297 		report("Read through CR4", 0);
298 	else
299 		vmcall();
300 	// Test write through
301 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
302 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
303 	set_stage(2);
304 	write_cr0(guest_cr0);
305 	if (stage == 3)
306 		report("Write throuth CR0", 0);
307 	else
308 		vmcall();
309 	set_stage(3);
310 	write_cr4(guest_cr4);
311 	if (stage == 4)
312 		report("Write through CR4", 0);
313 	else
314 		vmcall();
315 	// Test read shadow
316 	set_stage(4);
317 	vmcall();
318 	cr0 = read_cr0();
319 	if (stage != 5) {
320 		if (cr0 == guest_cr0)
321 			report("Read shadowing CR0", 1);
322 		else
323 			report("Read shadowing CR0", 0);
324 	}
325 	set_stage(5);
326 	cr4 = read_cr4();
327 	if (stage != 6) {
328 		if (cr4 == guest_cr4)
329 			report("Read shadowing CR4", 1);
330 		else
331 			report("Read shadowing CR4", 0);
332 	}
333 	// Test write shadow (same value with shadow)
334 	set_stage(6);
335 	write_cr0(guest_cr0);
336 	if (stage == 7)
337 		report("Write shadowing CR0 (same value with shadow)", 0);
338 	else
339 		vmcall();
340 	set_stage(7);
341 	write_cr4(guest_cr4);
342 	if (stage == 8)
343 		report("Write shadowing CR4 (same value with shadow)", 0);
344 	else
345 		vmcall();
346 	// Test write shadow (different value)
347 	set_stage(8);
348 	tmp = guest_cr0 ^ X86_CR0_TS;
349 	asm volatile("mov %0, %%rsi\n\t"
350 		"mov %%rsi, %%cr0\n\t"
351 		::"m"(tmp)
352 		:"rsi", "memory", "cc");
353 	if (stage != 9)
354 		report("Write shadowing different X86_CR0_TS", 0);
355 	else
356 		report("Write shadowing different X86_CR0_TS", 1);
357 	set_stage(9);
358 	tmp = guest_cr0 ^ X86_CR0_MP;
359 	asm volatile("mov %0, %%rsi\n\t"
360 		"mov %%rsi, %%cr0\n\t"
361 		::"m"(tmp)
362 		:"rsi", "memory", "cc");
363 	if (stage != 10)
364 		report("Write shadowing different X86_CR0_MP", 0);
365 	else
366 		report("Write shadowing different X86_CR0_MP", 1);
367 	set_stage(10);
368 	tmp = guest_cr4 ^ X86_CR4_TSD;
369 	asm volatile("mov %0, %%rsi\n\t"
370 		"mov %%rsi, %%cr4\n\t"
371 		::"m"(tmp)
372 		:"rsi", "memory", "cc");
373 	if (stage != 11)
374 		report("Write shadowing different X86_CR4_TSD", 0);
375 	else
376 		report("Write shadowing different X86_CR4_TSD", 1);
377 	set_stage(11);
378 	tmp = guest_cr4 ^ X86_CR4_DE;
379 	asm volatile("mov %0, %%rsi\n\t"
380 		"mov %%rsi, %%cr4\n\t"
381 		::"m"(tmp)
382 		:"rsi", "memory", "cc");
383 	if (stage != 12)
384 		report("Write shadowing different X86_CR4_DE", 0);
385 	else
386 		report("Write shadowing different X86_CR4_DE", 1);
387 }
388 
/*
 * Host side of the CR0/CR4 shadowing test. VMCALL exits verify the guest's
 * read/write-through values against the VMCS guest fields (stages 0-3, 6-7)
 * and, at stage 4, install the CR0/CR4 guest/host masks and read shadows
 * that drive the remaining stages. VMX_CR exits either fail a stage that
 * was not supposed to trap (4-7) or, for stages 8-11, acknowledge the
 * expected mov-to-CR trap by matching the exit qualification and advancing
 * the stage.
 */
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (stage) {
		case 0:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Read through CR0", 1);
			else
				report("Read through CR0", 0);
			break;
		case 1:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Read through CR4", 1);
			else
				report("Read through CR4", 0);
			break;
		case 2:
			if (guest_cr0 == vmcs_read(GUEST_CR0))
				report("Write through CR0", 1);
			else
				report("Write through CR0", 0);
			break;
		case 3:
			if (guest_cr4 == vmcs_read(GUEST_CR4))
				report("Write through CR4", 1);
			else
				report("Write through CR4", 0);
			break;
		case 4:
			/* Flip the tested bits and install masks + shadows so
			 * reads return the shadow and differing writes trap. */
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			/* Masked bits are unchanged by a same-as-shadow write. */
			if (guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)))
				report("Write shadowing CR0 (same value)", 1);
			else
				report("Write shadowing CR0 (same value)", 0);
			break;
		case 7:
			if (guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)))
				report("Write shadowing CR4 (same value)", 1);
			else
				report("Write shadowing CR4 (same value)", 0);
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (stage) {
		case 4:
			/* A CR access trapped where the shadow should have
			 * satisfied it — fail and advance so the guest knows. */
			report("Read shadowing CR0", 0);
			set_stage(stage + 1);
			break;
		case 5:
			report("Read shadowing CR4", 0);
			set_stage(stage + 1);
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			set_stage(stage + 1);
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				set_stage(stage + 1);
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				set_stage(stage + 1);
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
489 
490 static void iobmp_init()
491 {
492 	u32 ctrl_cpu0;
493 
494 	io_bitmap_a = alloc_page();
495 	io_bitmap_a = alloc_page();
496 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
497 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
498 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
499 	ctrl_cpu0 |= CPU_IO_BITMAP;
500 	ctrl_cpu0 &= (~CPU_IO);
501 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
502 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
503 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
504 }
505 
/*
 * Guest side of the I/O bitmap test. Each set_stage(n) is followed by an
 * I/O instruction; when the access should trap, the exit handler verifies
 * the exit qualification and advances the stage, so the guest checks
 * `stage` afterwards to see whether the expected exit occurred.
 * NOTE(review): the flow jumps from stage 0 to set_stage(2) — stage 1
 * appears unused by the guest (the handler treats 0/1 alike); presumably
 * intentional slack for hypervisors that exit anyway — confirm.
 */
static void iobmp_main()
{
	// stage 0, test IO pass
	set_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	if (stage != 0)
		report("I/O bitmap - I/O pass", 0);
	else
		report("I/O bitmap - I/O pass", 1);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	set_stage(2);
	inb(0x0);
	if (stage != 3)
		report("I/O bitmap - trap in", 0);
	else
		report("I/O bitmap - trap in", 1);
	set_stage(3);
	outw(0x0, 0x0);
	if (stage != 4)
		report("I/O bitmap - trap out", 0);
	else
		report("I/O bitmap - trap out", 1);
	set_stage(4);
	inl(0x0);
	if (stage != 5)
		report("I/O bitmap - I/O width, long", 0);
	// test low/high IO port
	set_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (stage == 6)
		report("I/O bitmap - I/O port, low part", 1);
	else
		report("I/O bitmap - I/O port, low part", 0);
	set_stage(6);
	// port 0x9000 is in the high half, covered by bitmap B (offset 0x1000)
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	if (stage == 7)
		report("I/O bitmap - I/O port, high part", 1);
	else
		report("I/O bitmap - I/O port, high part", 0);
	// test partial pass
	set_stage(7);
	// 4-byte access at 0x4FFF straddles into the 0x5000 bitmap bit
	inl(0x4FFF);
	if (stage == 8)
		report("I/O bitmap - partial pass", 1);
	else
		report("I/O bitmap - partial pass", 0);
	// test overrun
	set_stage(8);
	// clear both bitmaps; a wide access at 0xFFFF wraps past the end
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	if (stage == 9)
		report("I/O bitmap - overrun", 1);
	else
		report("I/O bitmap - overrun", 0);

	return;
}
568 
/*
 * Host side of the I/O bitmap test: on each VMX_IO exit, validate the
 * exit qualification (access size, direction, port number) expected for
 * the current stage and advance the stage, then skip the I/O instruction
 * and resume. Any non-I/O exit aborts the test.
 */
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (stage) {
		case 2:
			/* inb(0x0): expect byte-size, IN direction. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_BYTE)
				report("I/O bitmap - I/O width, byte", 0);
			else
				report("I/O bitmap - I/O width, byte", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, in", 0);
			else
				report("I/O bitmap - I/O direction, in", 1);
			set_stage(stage + 1);
			break;
		case 3:
			/* outw(0x0): expect word-size, OUT direction. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_WORD)
				report("I/O bitmap - I/O width, word", 0);
			else
				report("I/O bitmap - I/O width, word", 1);
			if (!(exit_qual & VMX_IO_IN))
				report("I/O bitmap - I/O direction, out", 1);
			else
				report("I/O bitmap - I/O direction, out", 0);
			set_stage(stage + 1);
			break;
		case 4:
			/* inl(0x0): expect long-size access. */
			if ((exit_qual & VMX_IO_SIZE_MASK) != _VMX_IO_LONG)
				report("I/O bitmap - I/O width, long", 0);
			else
				report("I/O bitmap - I/O width, long", 1);
			set_stage(stage + 1);
			break;
		case 5:
			/* Port checks: only advance if the trapped port matches. */
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				set_stage(stage + 1);
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				set_stage(stage + 1);
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				set_stage(stage + 1);
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				set_stage(stage + 1);
			break;
		case 0:
		case 1:
			set_stage(stage + 1);
			/* fallthrough */
		default:
			// Should not reach here
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%llx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %d.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
643 
/* Instruction-intercept categories for insn_table[].type. */
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2
#define INSN_NEVER_TRAP		3

/*
 * Bit flags for insn_table[].test_field, tested with `&` in
 * insn_intercept_exit_handler.
 * Fix: these were plain 0 and 1 — `test_field & FIELD_EXIT_QUAL` with
 * FIELD_EXIT_QUAL == 0 could never be true, so the INVLPG exit
 * qualification check was dead code. They must be distinct bit masks.
 */
#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)
651 
/*
 * One tiny assembly stub per intercepted instruction; each executes the
 * instruction once and returns, so insn_table[].insn_func can invoke it
 * from C. NOTE(review): mwait/monitor are executed without setting up
 * ECX/EDX/EAX operands — presumably fine here because they trap before
 * using them; confirm against the intercept configuration.
 */
asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: mwait;ret\n\t"
	"insn_rdpmc: rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_monitor: monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
);
/* C-visible entry points for the assembly stubs above. */
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_cpuid();
extern void insn_invd();

/* Index of the insn_table entry currently being exercised; shared between
 * insn_intercept_main and its exit handler. */
u32 cur_insn;
676 
/* One row per instruction exercised by the instruction-intercept test. */
struct insn_table {
	const char *name;	/* human-readable name used in report() */
	u32 flag;		/* CPU_* execution-control bit enabling the intercept */
	void (*insn_func)();	/* stub that executes the instruction once */
	u32 type;		/* INSN_CPU0 / INSN_CPU1 / INSN_ALWAYS_TRAP / INSN_NEVER_TRAP */
	u32 reason;		/* expected VM-exit reason number */
	ulong exit_qual;	/* expected exit qualification (if tested) */
	u32 insn_info;		/* expected VM-exit instruction info (if tested) */
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which field needs to be tested; reason is always tested
	u32 test_field;
};
689 
/* Intercepted-instruction table; the numeric `reason` values are basic
 * VM-exit reason numbers (e.g. 12 = HLT, 14 = INVLPG, 10 = CPUID) —
 * presumably matching the Intel SDM exit-reason table; confirm there. */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};
708 
709 static void insn_intercept_init()
710 {
711 	u32 ctrl_cpu[2];
712 
713 	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
714 	ctrl_cpu[0] |= CPU_HLT | CPU_INVLPG | CPU_MWAIT | CPU_RDPMC | CPU_RDTSC |
715 		CPU_MONITOR | CPU_PAUSE | CPU_SECONDARY;
716 	ctrl_cpu[0] &= ctrl_cpu_rev[0].clr;
717 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
718 	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
719 	ctrl_cpu[1] |= CPU_WBINVD | CPU_RDRAND;
720 	ctrl_cpu[1] &= ctrl_cpu_rev[1].clr;
721 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
722 }
723 
724 static void insn_intercept_main()
725 {
726 	cur_insn = 0;
727 	while(insn_table[cur_insn].name != NULL) {
728 		set_stage(cur_insn);
729 		if ((insn_table[cur_insn].type == INSN_CPU0
730 			&& !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag))
731 			|| (insn_table[cur_insn].type == INSN_CPU1
732 			&& !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
733 			printf("\tCPU_CTRL1.CPU_%s is not supported.\n",
734 				insn_table[cur_insn].name);
735 			continue;
736 		}
737 		insn_table[cur_insn].insn_func();
738 		switch (insn_table[cur_insn].type) {
739 		case INSN_CPU0:
740 		case INSN_CPU1:
741 		case INSN_ALWAYS_TRAP:
742 			if (stage != cur_insn + 1)
743 				report(insn_table[cur_insn].name, 0);
744 			else
745 				report(insn_table[cur_insn].name, 1);
746 			break;
747 		case INSN_NEVER_TRAP:
748 			if (stage == cur_insn + 1)
749 				report(insn_table[cur_insn].name, 0);
750 			else
751 				report(insn_table[cur_insn].name, 1);
752 			break;
753 		}
754 		cur_insn ++;
755 	}
756 }
757 
/*
 * Host side of the instruction-intercept test: verify the exit reason
 * (and, when requested via test_field, the exit qualification and
 * instruction info) matches the current insn_table entry, and advance
 * the stage only when everything matches. The guest detects success by
 * seeing the stage advance.
 */
static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);
	/* Stage must still equal cur_insn — i.e. no earlier spurious exit. */
	pass = (cur_insn == get_stage()) &&
			insn_table[cur_insn].reason == reason;
	if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL)
		pass = pass && insn_table[cur_insn].exit_qual == exit_qual;
	if (insn_table[cur_insn].test_field & FIELD_INSN_INFO)
		pass = pass && insn_table[cur_insn].insn_info == insn_info;
	if (pass)
		set_stage(stage + 1);
	/* Skip the trapped instruction and resume the guest. */
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}
783 
/* name/init/guest_main/exit_handler/syscall_handler/guest_regs
   basic_* just implement some basic functions.
   NULL-terminated registry consumed by the VMX test framework; each row
   wires one named test to its setup, guest entry, and exit handler. */
struct vmx_test vmx_tests[] = {
	{ "null", basic_init, basic_guest_main, basic_exit_handler,
		basic_syscall_handler, {0} },
	{ "vmenter", basic_init, vmenter_main, vmenter_exit_handler,
		basic_syscall_handler, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, basic_syscall_handler, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, basic_syscall_handler, {0} },
	{ "CR shadowing", basic_init, cr_shadowing_main,
		cr_shadowing_exit_handler, basic_syscall_handler, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		basic_syscall_handler, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, basic_syscall_handler, {0} },
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};
803