/*
 * All test cases of nested virtualization should be in this file
 *
 * Author : Arthur Chunqi Li <yzt356@gmail.com>
 */

#include <asm/debugreg.h>

#include "vmx.h"
#include "msr.h"
#include "processor.h"
#include "vm.h"
#include "pci.h"
#include "fwcfg.h"
#include "isr.h"
#include "desc.h"
#include "apic.h"
#include "types.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "smp.h"
#include "delay.h"

#define NONCANONICAL            0xaaaaaaaaaaaaaaaaull

#define VPID_CAP_INVVPID_TYPES_SHIFT 40

u64 ia32_pat;
u64 ia32_efer;
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;

phys_addr_t pci_physaddr;

void *pml_log;
#define PML_INDEX 512

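/*
 * Find-first-set: returns the 1-based index of the lowest set bit in @x,
 * or 0 if @x is zero.  BSF writes the found bit index to EAX and sets ZF
 * when the source is zero; CMOVNZ copies the result into @pos only when a
 * bit was found, so @pos stays -1 (and the function returns 0) for x == 0.
 */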
static inline unsigned ffs(unsigned x)
{
	int pos = -1;

	__asm__ __volatile__("bsf %1, %%eax; cmovnz %%eax, %0"
			     : "+r"(pos) : "rm"(x) : "eax");
	return pos + 1;
}

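/*
 * VMCALL in the L2 guest unconditionally exits to the L1 exit handler
 * with reason VMX_VMCALL.  The handler must advance GUEST_RIP past the
 * 3-byte instruction itself, which is why the handlers below write either
 * guest_rip + 3 or guest_rip + insn_len before resuming.
 */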
static inline void vmcall(void)
{
	asm volatile("vmcall");
}

static void basic_guest_main(void)
{
	report("Basic VMX test", 1);
}

static int basic_exit_handler(void)
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}

static void vmenter_main(void)
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}

static int vmenter_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		regs.rax = 0xFFFF;
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

u32 preempt_scale;
volatile unsigned long long tsc_val;
volatile u32 preempt_val;
u64 saved_rip;

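/*
 * The VMX-preemption timer counts down roughly at a rate of one tick per
 * 2^preempt_scale TSC cycles, where the scale comes from bits 4:0 of
 * IA32_VMX_MISC (per the Intel SDM).  Approximately:
 *
 *	elapsed_timer_ticks = (rdtsc() - tsc_at_entry) >> preempt_scale;
 *
 * so busy-waiting for far more than preempt_val scaled TSC ticks without
 * seeing a VMX_PREEMPT exit indicates a broken timer.
 */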
static int preemption_timer_init(struct vmcs *vmcs)
{
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return VMX_TEST_EXIT;
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
	preempt_val = 10000000;
	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
		printf("\tSave preemption value is not supported\n");

	return VMX_TEST_START;
}

static void preemption_timer_main(void)
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		vmx_set_test_stage(0);
		vmcall();
		if (vmx_get_test_stage() == 1)
			vmcall();
	}
	vmx_set_test_stage(1);
	while (vmx_get_test_stage() == 1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			vmx_set_test_stage(2);
			vmcall();
		}
	}
	tsc_val = rdtsc();
	asm volatile ("hlt");
	vmcall();
	vmx_set_test_stage(5);
	vmcall();
}

static int preemption_timer_exit_handler(void)
{
	bool guest_halted;
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			report("busy-wait for preemption timer",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			guest_halted =
				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
			report("preemption timer during hlt",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val && guest_halted);
			vmx_set_test_stage(4);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
			vmcs_write(EXI_CONTROLS,
				   vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_PREEMPT);
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
			return VMX_TEST_RESUME;
		case 4:
			report("preemption timer with 0 value",
			       saved_rip == guest_rip);
			break;
		default:
			report("Invalid stage.", false);
			print_vmexit_info();
			break;
		}
		break;
	case VMX_VMCALL:
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		switch (vmx_get_test_stage()) {
		case 0:
			report("Keep preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
			vmx_set_test_stage(1);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
				EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
			vmcs_write(EXI_CONTROLS, ctrl_exit);
			return VMX_TEST_RESUME;
		case 1:
			report("Save preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
			return VMX_TEST_RESUME;
		case 2:
			report("busy-wait for preemption timer", 0);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			report("preemption timer during hlt", 0);
			vmx_set_test_stage(4);
			/* fall through */
		case 4:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
			vmcs_write(PREEMPT_TIMER_VALUE, 0);
			saved_rip = guest_rip + insn_len;
			return VMX_TEST_RESUME;
		case 5:
			report("preemption timer with 0 value (vmcall stage 5)", 0);
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}

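/*
 * Enable the MSR bitmap with a freshly allocated page.  Assuming the page
 * comes back zeroed, no guest RDMSR/WRMSR is intercepted; a set bit in the
 * bitmap would trap the corresponding MSR access instead.
 */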
static void msr_bmp_init(void)
{
	void *msr_bitmap;
	u32 ctrl_cpu0;

	msr_bitmap = alloc_page();
	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu0 |= CPU_MSR_BITMAP;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
}

static void *get_msr_bitmap(void)
{
	void *msr_bitmap;

	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_MSR_BITMAP) {
		msr_bitmap = (void *)vmcs_read(MSR_BITMAP);
	} else {
		msr_bitmap = alloc_page();
		memset(msr_bitmap, 0xff, PAGE_SIZE);
		vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_MSR_BITMAP);
	}

	return msr_bitmap;
}

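/*
 * MSR-bitmap layout (per the Intel SDM): bytes 0x000-0x3ff cover reads of
 * MSRs 0x00000000-0x00001fff and bytes 0x800-0xbff cover writes of the
 * same range, one bit per MSR.  Clearing the word for an x2APIC MSR in
 * both halves (offset 0 and offset 0x800) therefore disables both read
 * and write intercepts for it.
 */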
static void disable_intercept_for_x2apic_msrs(void)
{
	unsigned long *msr_bitmap = (unsigned long *)get_msr_bitmap();
	u32 msr;

	for (msr = APIC_BASE_MSR;
		 msr < (APIC_BASE_MSR+0xff);
		 msr += BITS_PER_LONG) {
		unsigned int word = msr / BITS_PER_LONG;

		msr_bitmap[word] = 0;
		msr_bitmap[word + (0x800 / sizeof(long))] = 0;
	}
}

static int test_ctrl_pat_init(struct vmcs *vmcs)
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) &&
	    !(ctrl_exit_rev.clr & EXI_LOAD_PAT) &&
	    !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
		printf("\tSave/load PAT is not supported\n");
		return 1;
	}

	ctrl_ent = vmcs_read(ENT_CONTROLS);
	ctrl_exi = vmcs_read(EXI_CONTROLS);
	ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
	ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT);
	vmcs_write(ENT_CONTROLS, ctrl_ent);
	vmcs_write(EXI_CONTROLS, ctrl_exi);
	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	vmcs_write(GUEST_PAT, 0x0);
	vmcs_write(HOST_PAT, ia32_pat);
	return VMX_TEST_START;
}

static void test_ctrl_pat_main(void)
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
		report("Entry load PAT", guest_ia32_pat == ia32_pat);
}

static int test_ctrl_pat_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			report("Exit save PAT", guest_pat == 0x6);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else
			report("Exit load PAT", rdmsr(MSR_IA32_CR_PAT) == ia32_pat);
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

static int test_ctrl_efer_init(struct vmcs *vmcs)
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
	ia32_efer = rdmsr(MSR_EFER);
	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
	return VMX_TEST_START;
}

static void test_ctrl_efer_main(void)
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
		report("Entry load EFER", guest_ia32_efer == ia32_efer);
}

static int test_ctrl_efer_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u64 guest_efer;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_efer = vmcs_read(GUEST_EFER);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
			printf("\tEXI_SAVE_EFER is not supported\n");
			vmcs_write(GUEST_EFER, ia32_efer);
		} else {
			report("Exit save EFER", guest_efer == ia32_efer);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
			printf("\tEXI_LOAD_EFER is not supported\n");
			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
		} else {
			report("Exit load EFER", rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX));
		}
		vmcs_write(GUEST_EFER, ia32_efer);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

u32 guest_cr0, guest_cr4;

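/*
 * CR0/CR4 guest/host masks and read shadows: for bits set in the mask, a
 * guest read returns the shadow value, and a guest write that would change
 * a masked bit causes a VMX_CR exit.  The exit qualification encodes the
 * CR number and the source register, e.g. 0x600 is "mov %esi, %cr0" and
 * 0x604 is "mov %esi, %cr4".
 */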
static void cr_shadowing_main(void)
{
	u32 cr0, cr4, tmp;

	// Test read through
	vmx_set_test_stage(0);
	guest_cr0 = read_cr0();
	if (vmx_get_test_stage() == 1)
		report("Read through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(1);
	guest_cr4 = read_cr4();
	if (vmx_get_test_stage() == 2)
		report("Read through CR4", 0);
	else
		vmcall();
	// Test write through
	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
	vmx_set_test_stage(2);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 3)
		report("Write through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(3);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 4)
		report("Write through CR4", 0);
	else
		vmcall();
	// Test read shadow
	vmx_set_test_stage(4);
	vmcall();
	cr0 = read_cr0();
	if (vmx_get_test_stage() != 5)
		report("Read shadowing CR0", cr0 == guest_cr0);
	vmx_set_test_stage(5);
	cr4 = read_cr4();
	if (vmx_get_test_stage() != 6)
		report("Read shadowing CR4", cr4 == guest_cr4);
	// Test write shadow (same value with shadow)
	vmx_set_test_stage(6);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 7)
		report("Write shadowing CR0 (same value with shadow)", 0);
	else
		vmcall();
	vmx_set_test_stage(7);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 8)
		report("Write shadowing CR4 (same value with shadow)", 0);
	else
		vmcall();
	// Test write shadow (different value)
	vmx_set_test_stage(8);
	tmp = guest_cr0 ^ X86_CR0_TS;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr0\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_TS", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	tmp = guest_cr0 ^ X86_CR0_MP;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr0\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_MP", vmx_get_test_stage() == 10);
	vmx_set_test_stage(10);
	tmp = guest_cr4 ^ X86_CR4_TSD;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr4\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_TSD", vmx_get_test_stage() == 11);
	vmx_set_test_stage(11);
	tmp = guest_cr4 ^ X86_CR4_DE;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr4\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_DE", vmx_get_test_stage() == 12);
}

static int cr_shadowing_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("Read through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 1:
			report("Read through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 2:
			report("Write through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 3:
			report("Write through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 4:
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			report("Write shadowing CR0 (same value)",
					guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)));
			break;
		case 7:
			report("Write shadowing CR4 (same value)",
					guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)));
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (vmx_get_test_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			vmx_inc_test_stage();
			break;
		case 5:
			report("Read shadowing CR4", 0);
			vmx_inc_test_stage();
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				vmx_inc_test_stage();
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

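/*
 * I/O bitmap A covers ports 0x0000-0x7fff and bitmap B covers ports
 * 0x8000-0xffff, one bit per port.  An access is intercepted if the bit
 * for any byte it touches is set, so a 4-byte inl at 0x4FFF can trap even
 * when only port 0x5000 is marked ("partial pass"); per the SDM, accesses
 * that wrap around the top of the 16-bit port space are always
 * intercepted ("overrun").
 */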
static int iobmp_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu0;

	io_bitmap_a = alloc_page();
	io_bitmap_b = alloc_page();
	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu0 |= CPU_IO_BITMAP;
	ctrl_cpu0 &= (~CPU_IO);
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
	return VMX_TEST_START;
}

static void iobmp_main(void)
{
	// stage 0, test IO pass
	vmx_set_test_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	report("I/O bitmap - I/O pass", vmx_get_test_stage() == 0);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	vmx_set_test_stage(2);
	inb(0x0);
	report("I/O bitmap - trap in", vmx_get_test_stage() == 3);
	vmx_set_test_stage(3);
	outw(0x0, 0x0);
	report("I/O bitmap - trap out", vmx_get_test_stage() == 4);
	vmx_set_test_stage(4);
	inl(0x0);
	report("I/O bitmap - I/O width, long", vmx_get_test_stage() == 5);
	// test low/high IO port
	vmx_set_test_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	report("I/O bitmap - I/O port, low part", vmx_get_test_stage() == 6);
	vmx_set_test_stage(6);
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	report("I/O bitmap - I/O port, high part", vmx_get_test_stage() == 7);
	// test partial pass
	vmx_set_test_stage(7);
	inl(0x4FFF);
	report("I/O bitmap - partial pass", vmx_get_test_stage() == 8);
	// test overrun
	vmx_set_test_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	report("I/O bitmap - overrun", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting",
	       vmx_get_test_stage() == 9);
	vmx_set_test_stage(10);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting",
	       vmx_get_test_stage() == 11);
}

static int iobmp_exit_handler(void)
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (vmx_get_test_stage()) {
		case 0:
		case 1:
			vmx_inc_test_stage();
			break;
		case 2:
			report("I/O bitmap - I/O width, byte",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_BYTE);
			report("I/O bitmap - I/O direction, in", exit_qual & VMX_IO_IN);
			vmx_inc_test_stage();
			break;
		case 3:
			report("I/O bitmap - I/O width, word",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_WORD);
			report("I/O bitmap - I/O direction, out",
					!(exit_qual & VMX_IO_IN));
			vmx_inc_test_stage();
			break;
		case 4:
			report("I/O bitmap - I/O width, long",
					(exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_LONG);
			vmx_inc_test_stage();
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				vmx_inc_test_stage();
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				vmx_inc_test_stage();
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				vmx_inc_test_stage();
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				vmx_inc_test_stage();
			break;
		case 9:
		case 10:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 9:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = %#lx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2

#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)

asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: xor %eax, %eax; xor %ecx, %ecx; mwait;ret\n\t"
	"insn_rdpmc: xor %ecx, %ecx; rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t"
	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
#ifdef __x86_64__
	"insn_cr8_load: xor %eax, %eax; mov %rax,%cr8;ret\n\t"
	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
#endif
	"insn_monitor: xor %eax, %eax; xor %ecx, %ecx; xor %edx, %edx; monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: mov $10, %eax; cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
	"insn_sgdt: sgdt gdt64_desc;ret\n\t"
	"insn_lgdt: lgdt gdt64_desc;ret\n\t"
	"insn_sidt: sidt idt_descr;ret\n\t"
	"insn_lidt: lidt idt_descr;ret\n\t"
	"insn_sldt: sldt %ax;ret\n\t"
	"insn_lldt: xor %eax, %eax; lldt %ax;ret\n\t"
	"insn_str: str %ax;ret\n\t"
	"insn_rdrand: rdrand %rax;ret\n\t"
	"insn_rdseed: rdseed %rax;ret\n\t"
);
extern void insn_hlt(void);
extern void insn_invlpg(void);
extern void insn_mwait(void);
extern void insn_rdpmc(void);
extern void insn_rdtsc(void);
extern void insn_cr3_load(void);
extern void insn_cr3_store(void);
#ifdef __x86_64__
extern void insn_cr8_load(void);
extern void insn_cr8_store(void);
#endif
extern void insn_monitor(void);
extern void insn_pause(void);
extern void insn_wbinvd(void);
extern void insn_sgdt(void);
extern void insn_lgdt(void);
extern void insn_sidt(void);
extern void insn_lidt(void);
extern void insn_sldt(void);
extern void insn_lldt(void);
extern void insn_str(void);
extern void insn_cpuid(void);
extern void insn_invd(void);
extern void insn_rdrand(void);
extern void insn_rdseed(void);

u32 cur_insn;
u64 cr3;

struct insn_table {
	const char *name;
	u32 flag;
	void (*insn_func)(void);
	u32 type;
	u32 reason;
	ulong exit_qual;
	u32 insn_info;
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define which fields
	// need to be tested; the exit reason is always tested.
	u32 test_field;
};

/*
 * Add more test cases of instruction intercept here. Each element in this
 * table is:
 *	name/control flag/insn function/type/exit reason/exit qualification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in the exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
		FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
		FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
		FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
		FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	{"DESC_TABLE (SGDT)", CPU_DESC_TABLE, insn_sgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LGDT)", CPU_DESC_TABLE, insn_lgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SIDT)", CPU_DESC_TABLE, insn_sidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LIDT)", CPU_DESC_TABLE, insn_lidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SLDT)", CPU_DESC_TABLE, insn_sldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (LLDT)", CPU_DESC_TABLE, insn_lldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (STR)", CPU_DESC_TABLE, insn_str, INSN_CPU1, 47, 0, 0, 0},
	/* LTR causes a #GP if done with a busy selector, so it is not tested.  */
	{"RDRAND", CPU_RDRAND, insn_rdrand, INSN_CPU1, VMX_RDRAND, 0, 0, 0},
	{"RDSEED", CPU_RDSEED, insn_rdseed, INSN_CPU1, VMX_RDSEED, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};

static int insn_intercept_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu;

	ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY;
	ctrl_cpu &= ctrl_cpu_rev[0].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set);
	cr3 = read_cr3();
	return VMX_TEST_START;
}

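/*
 * Each instruction is tested in two passes keyed off the test stage: with
 * the intercept disabled (stage == cur_insn * 2, the instruction must
 * execute normally) and, after a vmcall that lets the exit handler toggle
 * the control, with the intercept enabled (the resulting exit must bump
 * the stage to cur_insn * 2 + 1).
 */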
static void insn_intercept_main(void)
{
	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
		vmx_set_test_stage(cur_insn * 2);
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
			printf("\tCPU_CTRL%d.CPU_%s is not supported.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);
			continue;
		}

		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) {
			/* skip hlt, it stalls the guest and is tested below */
			if (insn_table[cur_insn].insn_func != insn_hlt)
				insn_table[cur_insn].insn_func();
			report("execute %s", vmx_get_test_stage() == cur_insn * 2,
					insn_table[cur_insn].name);
		} else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP)
			printf("\tCPU_CTRL%d.CPU_%s always traps.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);

		vmcall();

		insn_table[cur_insn].insn_func();
		report("intercept %s", vmx_get_test_stage() == cur_insn * 2 + 1,
				insn_table[cur_insn].name);

		vmx_set_test_stage(cur_insn * 2 + 1);
		vmcall();
	}
}

static int insn_intercept_exit_handler(void)
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);

	if (reason == VMX_VMCALL) {
		u32 val = 0;

		if (insn_table[cur_insn].type == INSN_CPU0)
			val = vmcs_read(CPU_EXEC_CTRL0);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			val = vmcs_read(CPU_EXEC_CTRL1);

		if (vmx_get_test_stage() & 1)
			val &= ~insn_table[cur_insn].flag;
		else
			val |= insn_table[cur_insn].flag;

		if (insn_table[cur_insn].type == INSN_CPU0)
			vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set);
	} else {
		pass = (cur_insn * 2 == vmx_get_test_stage()) &&
			insn_table[cur_insn].reason == reason;
		if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL &&
		    insn_table[cur_insn].exit_qual != exit_qual)
			pass = false;
		if (insn_table[cur_insn].test_field & FIELD_INSN_INFO &&
		    insn_table[cur_insn].insn_info != insn_info)
			pass = false;
		if (pass)
			vmx_inc_test_stage();
	}
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}

/**
 * __setup_ept - Setup the VMCS fields to enable Extended Page Tables (EPT)
 * @hpa:	Host physical address of the top-level, a.k.a. root, EPT table
 * @enable_ad:	Whether or not to enable Access/Dirty bits for EPT entries
 *
 * Returns 0 on success, 1 on failure.
 *
 * Note that @hpa doesn't need to point at actual memory if VM-Launch is
 * expected to fail, e.g. setup_dummy_ept() arbitrarily passes '0' to satisfy
 * the various EPTP consistency checks, but doesn't ensure backing for HPA '0'.
 */
static int __setup_ept(u64 hpa, bool enable_ad)
{
	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported\n");
		return 1;
	}
	if (!(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tWB memtype for EPT walks not supported\n");
		return 1;
	}
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}

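	/*
	 * EPTP layout: bits 2:0 hold the EPT paging-structure memory type,
	 * bits 5:3 hold the page-walk length minus one (3 == 4-level walk),
	 * bit 6 enables accessed/dirty flags, and bits 51:12 hold the
	 * physical address of the root (PML4) table.
	 */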
	eptp = EPT_MEM_TYPE_WB;
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	eptp |= hpa;
	if (enable_ad)
		eptp |= EPTP_AD_FLAG;

	vmcs_write(EPTP, eptp);
	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | CPU_EPT);

	return 0;
}

/**
 * setup_ept - Enable Extended Page Tables (EPT) and setup an identity map
 * @enable_ad:	Whether or not to enable Access/Dirty bits for EPT entries
 *
 * Returns 0 on success, 1 on failure.
 *
 * This is the "real" function for setting up EPT tables, i.e. use this for
 * tests that need to run code in the guest with EPT enabled.
 */
static int setup_ept(bool enable_ad)
{
	unsigned long end_of_memory;

	pml4 = alloc_page();

	if (__setup_ept(virt_to_phys(pml4), enable_ad))
		return 1;

	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	/* Cannot use large EPT pages if we need to track EPT
	 * accessed/dirty bits at 4K granularity.
	 */
	setup_ept_range(pml4, 0, end_of_memory, 0,
			!enable_ad && ept_2m_supported(),
			EPT_WA | EPT_RA | EPT_EA);
	return 0;
}

/**
 * setup_dummy_ept - Enable Extended Page Tables (EPT) with a dummy root HPA
 *
 * Setup EPT using a semi-arbitrary dummy root HPA.  This function is intended
 * for use by tests that need EPT enabled to verify dependent VMCS controls
 * but never expect to fully enter the guest, i.e. don't need to set up the
 * actual EPT tables.
 */
static void setup_dummy_ept(void)
{
	if (__setup_ept(0, false))
		report_abort("EPT setup unexpectedly failed");
}

static int enable_unrestricted_guest(void)
{
	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_URG) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT))
		return 1;

	setup_dummy_ept();

	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | CPU_URG);

	return 0;
}

static void ept_enable_ad_bits(void)
{
	eptp |= EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
}

static void ept_disable_ad_bits(void)
{
	eptp &= ~EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
}

static void ept_enable_ad_bits_or_skip_test(void)
{
	if (!ept_ad_bits_supported())
		test_skip("EPT AD bits not supported.");
	ept_enable_ad_bits();
}

static int apic_version;

static int ept_init_common(bool have_ad)
{
	int ret;
	struct pci_dev pcidev;

	if (setup_ept(have_ad))
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
			EPT_RA | EPT_WA | EPT_EA);

	apic_version = apic_read(APIC_LVR);

	ret = pci_find_dev(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_TEST);
	if (ret != PCIDEVADDR_INVALID) {
		pci_dev_init(&pcidev, ret);
		pci_physaddr = pcidev.resource[PCI_TESTDEV_BAR_MEM];
	}

	return VMX_TEST_START;
}

static int ept_init(struct vmcs *vmcs)
{
	return ept_init_common(false);
}

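/*
 * ept_init_common() points GPA data_page2 at the host page backing
 * data_page1 (judging from the exit handler, install_ept() takes the
 * host-physical address first, then the guest-physical one), so both
 * guest pointers initially alias the page holding MAGIC_VAL_1.  The
 * stages below then exercise remapping, misconfigurations (e.g. a
 * reserved EPT memory type), violations from leaf permissions and from
 * non-present paging structures, and finally MMIO behind EPT.
 */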
static void ept_common(void)
{
	vmx_set_test_stage(0);
	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
			*((u32 *)data_page1) != MAGIC_VAL_1)
		report("EPT basic framework - read", 0);
	else {
		*((u32 *)data_page2) = MAGIC_VAL_3;
		vmcall();
		if (vmx_get_test_stage() == 1) {
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2)
				report("EPT basic framework", 1);
			else
				report("EPT basic framework - remap", 1);
		}
	}
	// Test EPT Misconfigurations
	vmx_set_test_stage(1);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	if (vmx_get_test_stage() != 2) {
		report("EPT misconfigurations", 0);
		goto t1;
	}
	vmx_set_test_stage(2);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT misconfigurations", vmx_get_test_stage() == 3);
t1:
	// Test EPT violation
	vmx_set_test_stage(3);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT violation - page permission", vmx_get_test_stage() == 4);
	// Violation caused by EPT paging structure
	vmx_set_test_stage(4);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_2;
	report("EPT violation - paging structure", vmx_get_test_stage() == 5);

	// MMIO Read/Write
	vmx_set_test_stage(5);
	vmcall();

	*(u32 volatile *)pci_physaddr;
	report("MMIO EPT violation - read", vmx_get_test_stage() == 6);

	*(u32 volatile *)pci_physaddr = MAGIC_VAL_1;
	report("MMIO EPT violation - write", vmx_get_test_stage() == 7);
}

static void ept_main(void)
{
	ept_common();

	// Test EPT access to L1 MMIO
	vmx_set_test_stage(7);
	report("EPT - MMIO access", *((u32 *)0xfee00030UL) == apic_version);

	// Test invalid operand for INVEPT
	vmcall();
	report("EPT - unsupported INVEPT", vmx_get_test_stage() == 8);
}

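/*
 * Capability-check trick: the INVEPT type-support bits in
 * IA32_VMX_EPT_VPID_CAP sit next to each other, so shifting the
 * single-context capability bit by (type - INVEPT_SINGLE) selects the
 * bit for an arbitrary @type.  An INVEPT that succeeds despite being
 * unsupported, or fails despite being supported, gets a warning.
 */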
static bool invept_test(int type, u64 eptp)
{
	bool ret, supported;

	supported = ept_vpid.val & (EPT_CAP_INVEPT_SINGLE >> INVEPT_SINGLE << type);
	ret = invept(type, eptp);

	if (ret == !supported)
		return false;

	if (!supported)
		printf("WARNING: unsupported invept passed!\n");
	else
		printf("WARNING: invept failed!\n");

	return true;
}

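/*
 * With Page-Modification Logging enabled the CPU appends the GPA of each
 * page it marks dirty to a 512-entry log page, decrementing
 * GUEST_PML_INDEX from 511; once the index underflows, the next dirtying
 * write triggers a VMX_PML_FULL exit and the handler resets the index.
 */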
static int pml_exit_handler(void)
{
	u16 index, count;
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u64 *pmlbuf = pml_log;
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u64 guest_cr3 = vmcs_read(GUEST_CR3);
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			index = vmcs_read(GUEST_PML_INDEX);
			for (count = index + 1; count < PML_INDEX; count++) {
				if (pmlbuf[count] == (u64)data_page2) {
					vmx_inc_test_stage();
					clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
					break;
				}
			}
			break;
		case 1:
			index = vmcs_read(GUEST_PML_INDEX);
			/* Keep clearing the dirty bit until the log overflows */
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			break;
		default:
			report("unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_PML_FULL:
		vmx_inc_test_stage();
		vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

static int ept_exit_handler_common(bool have_ad)
{
	u64 guest_rip;
	u64 guest_cr3;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	static unsigned long data_page1_pte, data_page1_pte_pte, memaddr_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	guest_cr3 = vmcs_read(GUEST_CR3);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page1,
				     have_ad ? EPT_ACCESS_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page2,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			if (have_ad)
				ept_sync(INVEPT_SINGLE, eptp);
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
					*((u32 *)data_page2) == MAGIC_VAL_2) {
				vmx_inc_test_stage();
				install_ept(pml4, (unsigned long)data_page2,
						(unsigned long)data_page2,
						EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write", 0);
			break;
		case 1:
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1, EPT_WA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 2:
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1,
				EPT_RA | EPT_WA | EPT_EA |
				(2 << EPT_MEM_TYPE_SHIFT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 3:
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
						1, &data_page1_pte));
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
						2, &data_page1_pte));
			data_page1_pte &= PAGE_MASK;
			TEST_ASSERT(get_ept_pte(pml4, data_page1_pte,
						2, &data_page1_pte_pte));
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 5:
			install_ept(pml4, (unsigned long)pci_physaddr,
				(unsigned long)pci_physaddr, 0);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 7:
			if (!invept_test(0, eptp))
				vmx_inc_test_stage();
			break;
		// Should not reach here
		default:
			report("ERROR - unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			vmx_inc_test_stage();
			install_ept(pml4, (unsigned long)data_page1,
				(unsigned long)data_page1,
				EPT_RA | EPT_WA | EPT_EA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			report("ERROR - unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch(vmx_get_test_stage()) {
		case 3:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					EPT_VLT_PADDR))
				vmx_inc_test_stage();
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_RD |
					  (have_ad ? EPT_VLT_WR : 0) |
					  EPT_VLT_LADDR_VLD))
				vmx_inc_test_stage();
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 5:
			if (exit_qual & EPT_VLT_RD)
				vmx_inc_test_stage();
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)pci_physaddr,
						1, &memaddr_pte));
			set_ept_pte(pml4, memaddr_pte, 1, memaddr_pte | EPT_RA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 6:
			if (exit_qual & EPT_VLT_WR)
				vmx_inc_test_stage();
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)pci_physaddr,
						1, &memaddr_pte));
			set_ept_pte(pml4, memaddr_pte, 1, memaddr_pte | EPT_RA | EPT_WA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			report("ERROR : unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

static int ept_exit_handler(void)
{
	return ept_exit_handler_common(false);
}

static int eptad_init(struct vmcs *vmcs)
{
	int r = ept_init_common(true);

	if (r == VMX_TEST_EXIT)
		return r;

	if ((rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & EPT_CAP_AD_FLAG) == 0) {
		printf("\tEPT A/D bits are not supported\n");
		return VMX_TEST_EXIT;
	}

	return r;
}

static int pml_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu;
	int r = eptad_init(vmcs);

	if (r == VMX_TEST_EXIT)
		return r;

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
		!(ctrl_cpu_rev[1].clr & CPU_PML)) {
		printf("\tPML is not supported\n");
		return VMX_TEST_EXIT;
	}

	pml_log = alloc_page();
	vmcs_write(PMLADDR, (u64)pml_log);
	vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);

	ctrl_cpu = vmcs_read(CPU_EXEC_CTRL1) | CPU_PML;
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu);

	return VMX_TEST_START;
}

static void pml_main(void)
{
	int count = 0;

	vmx_set_test_stage(0);
	*((u32 *)data_page2) = 0x1;
	vmcall();
	report("PML - Dirty GPA Logging", vmx_get_test_stage() == 1);

	while (vmx_get_test_stage() == 1) {
		vmcall();
		*((u32 *)data_page2) = 0x1;
		if (count++ > PML_INDEX)
			break;
	}
	report("PML Full Event", vmx_get_test_stage() == 2);
}

static void eptad_main(void)
{
	ept_common();
}

static int eptad_exit_handler(void)
{
	return ept_exit_handler_common(true);
}

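/*
 * Same capability-shift trick as invept_test(), here anchored on the
 * individual-address INVVPID capability bit (VPID_CAP_INVVPID_ADDR).
 */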
static bool invvpid_test(int type, u16 vpid)
{
	bool ret, supported;

	supported = ept_vpid.val &
		(VPID_CAP_INVVPID_ADDR >> INVVPID_ADDR << type);
	ret = invvpid(type, vpid, 0);

	if (ret == !supported)
		return false;

	if (!supported)
		printf("WARNING: unsupported invvpid passed!\n");
	else
		printf("WARNING: invvpid failed!\n");

	return true;
}

static int vpid_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu1;

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
		!(ctrl_cpu_rev[1].clr & CPU_VPID)) {
		printf("\tVPID is not supported\n");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu1 |= CPU_VPID;
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
	return VMX_TEST_START;
}

static void vpid_main(void)
{
	vmx_set_test_stage(0);
	vmcall();
	report("INVVPID SINGLE ADDRESS", vmx_get_test_stage() == 1);
	vmx_set_test_stage(2);
	vmcall();
	report("INVVPID SINGLE", vmx_get_test_stage() == 3);
	vmx_set_test_stage(4);
	vmcall();
	report("INVVPID ALL", vmx_get_test_stage() == 5);
}

static int vpid_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch(vmx_get_test_stage()) {
		case 0:
			if (!invvpid_test(INVVPID_ADDR, 1))
				vmx_inc_test_stage();
			break;
		case 2:
			if (!invvpid_test(INVVPID_CONTEXT_GLOBAL, 1))
				vmx_inc_test_stage();
			break;
		case 4:
			if (!invvpid_test(INVVPID_ALL, 1))
				vmx_inc_test_stage();
			break;
		default:
			report("ERROR: unexpected stage, %d", false,
					vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

#define TIMER_VECTOR	222

static volatile bool timer_fired;

static void timer_isr(isr_regs_t *regs)
{
	timer_fired = true;
	apic_write(APIC_EOI, 0);
}

static int interrupt_init(struct vmcs *vmcs)
{
	msr_bmp_init();
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
	handle_irq(TIMER_VECTOR, timer_isr);
	return VMX_TEST_START;
}

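/*
 * Walks combinations of PIN_EXTINT (external-interrupt exiting) and the
 * guest activity state: interrupts delivered directly to the guest vs.
 * intercepted by L1, while busy-looping, in hlt, and in the HLT activity
 * state, plus acknowledge-interrupt-on-exit (EXI_INTA) and direct event
 * injection via ENT_INTR_INFO.
 */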
static void interrupt_main(void)
{
	long long start, loops;

	vmx_set_test_stage(0);

	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmx_set_test_stage(7);
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("running a guest with interrupt acknowledgement set", timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_enable();
	timer_fired = false;
	vmcall();
	report("Inject an event to a halted guest", timer_fired);
}

static int interrupt_exit_handler(void)
{
	u64 guest_rip = vmcs_read(GUEST_RIP);
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
		case 2:
		case 5:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 7:
			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 1:
		case 3:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
			break;
		case 4:
		case 6:
			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
			break;

		case 8:
			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
			vmcs_write(ENT_INTR_INFO,
				   TIMER_VECTOR |
				   (VMX_INTR_TYPE_EXT_INTR << INTR_INFO_INTR_TYPE_SHIFT) |
				   INTR_INFO_VALID_MASK);
			break;
		}
		vmx_inc_test_stage();
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EXTINT:
		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
			handle_external_interrupt(vector);
		} else {
			irq_enable();
			asm volatile ("nop");
			irq_disable();
		}
		if (vmx_get_test_stage() >= 2)
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}

	return VMX_TEST_VMEXIT;
}

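/*
 * ENT_LOAD_DBGCTLS loads DR7 and IA32_DEBUGCTL from the guest-state area
 * on VM entry; EXI_SAVE_DBGCTLS saves them on VM exit.  The DEBUGCTL
 * checks are commented out below because KVM does not virtualize that
 * MSR here.
 */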
1776 static int dbgctls_init(struct vmcs *vmcs)
1777 {
1778 	u64 dr7 = 0x402;
1779 	u64 zero = 0;
1780 
1781 	msr_bmp_init();
1782 	asm volatile(
1783 		"mov %0,%%dr0\n\t"
1784 		"mov %0,%%dr1\n\t"
1785 		"mov %0,%%dr2\n\t"
1786 		"mov %1,%%dr7\n\t"
1787 		: : "r" (zero), "r" (dr7));
1788 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
1789 	vmcs_write(GUEST_DR7, 0x404);
1790 	vmcs_write(GUEST_DEBUGCTL, 0x2);
1791 
1792 	vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
1793 	vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_SAVE_DBGCTLS);
1794 
1795 	return VMX_TEST_START;
1796 }
1797 
1798 static void dbgctls_main(void)
1799 {
1800 	u64 dr7, debugctl;
1801 
1802 	asm volatile("mov %%dr7,%0" : "=r" (dr7));
1803 	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1804 	/* Commented out: KVM does not support DEBUGCTL so far */
1805 	(void)debugctl;
1806 	report("Load debug controls", dr7 == 0x404 /* && debugctl == 0x2 */);
1807 
1808 	dr7 = 0x408;
1809 	asm volatile("mov %0,%%dr7" : : "r" (dr7));
1810 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);
1811 
1812 	vmx_set_test_stage(0);
1813 	vmcall();
1814 	report("Save debug controls", vmx_get_test_stage() == 1);
1815 
1816 	if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS ||
1817 	    ctrl_exit_rev.set & EXI_SAVE_DBGCTLS) {
1818 		printf("\tDebug controls are always loaded/saved\n");
1819 		return;
1820 	}
1821 	vmx_set_test_stage(2);
1822 	vmcall();
1823 
1824 	asm volatile("mov %%dr7,%0" : "=r" (dr7));
1825 	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1826 	/* Commented out: KVM does not support DEBUGCTL so far */
1827 	(void)debugctl;
1828 	report("Guest=host debug controls", dr7 == 0x402 /* && debugctl == 0x1 */);
1829 
1830 	dr7 = 0x408;
1831 	asm volatile("mov %0,%%dr7" : : "r" (dr7));
1832 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);
1833 
1834 	vmx_set_test_stage(3);
1835 	vmcall();
1836 	report("Don't save debug controls", vmx_get_test_stage() == 4);
1837 }
1838 
1839 static int dbgctls_exit_handler(void)
1840 {
1841 	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
1842 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1843 	u64 guest_rip = vmcs_read(GUEST_RIP);
1844 	u64 dr7, debugctl;
1845 
1846 	asm volatile("mov %%dr7,%0" : "=r" (dr7));
1847 	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1848 
1849 	switch (reason) {
1850 	case VMX_VMCALL:
1851 		switch (vmx_get_test_stage()) {
1852 		case 0:
1853 			if (dr7 == 0x400 && debugctl == 0 &&
1854 			    vmcs_read(GUEST_DR7) == 0x408 /* &&
1855 			    Commented out: KVM does not support DEBUGCTL so far
1856 			    vmcs_read(GUEST_DEBUGCTL) == 0x3 */)
1857 				vmx_inc_test_stage();
1858 			break;
1859 		case 2:
1860 			dr7 = 0x402;
1861 			asm volatile("mov %0,%%dr7" : : "r" (dr7));
1862 			wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
1863 			vmcs_write(GUEST_DR7, 0x404);
1864 			vmcs_write(GUEST_DEBUGCTL, 0x2);
1865 
1866 			vmcs_write(ENT_CONTROLS,
1867 				vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS);
1868 			vmcs_write(EXI_CONTROLS,
1869 				vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS);
1870 			break;
1871 		case 3:
1872 			if (dr7 == 0x400 && debugctl == 0 &&
1873 			    vmcs_read(GUEST_DR7) == 0x404 /* &&
1874 			    Commented out: KVM does not support DEBUGCTL so far
1875 			    vmcs_read(GUEST_DEBUGCTL) == 0x2 */)
1876 				vmx_inc_test_stage();
1877 			break;
1878 		}
1879 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1880 		return VMX_TEST_RESUME;
1881 	default:
1882 		report("Unknown exit reason, %d", false, reason);
1883 		print_vmexit_info();
1884 	}
1885 	return VMX_TEST_VMEXIT;
1886 }
1887 
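/*
 * Format of an entry in the VM-entry MSR-load and VM-exit MSR-store and
 * MSR-load areas, per the Intel SDM's MSR-area entry layout.
 */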
1888 struct vmx_msr_entry {
1889 	u32 index;
1890 	u32 reserved;
1891 	u64 value;
1892 } __attribute__((packed));
1893 
1894 #define MSR_MAGIC 0x31415926
1895 struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load;
1896 
1897 static int msr_switch_init(struct vmcs *vmcs)
1898 {
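	/*
	 * Program one-entry MSR lists for all three transitions: VM-entry
	 * load, VM-exit store, and VM-exit load. The guest fills in the
	 * exit-list entries before making its vmcall.
	 */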
1899 	msr_bmp_init();
1900 	exit_msr_store = alloc_page();
1901 	exit_msr_load = alloc_page();
1902 	entry_msr_load = alloc_page();
1903 	entry_msr_load[0].index = MSR_KERNEL_GS_BASE;
1904 	entry_msr_load[0].value = MSR_MAGIC;
1905 
1906 	vmx_set_test_stage(1);
1907 	vmcs_write(ENT_MSR_LD_CNT, 1);
1908 	vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load);
1909 	vmcs_write(EXI_MSR_ST_CNT, 1);
1910 	vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store);
1911 	vmcs_write(EXI_MSR_LD_CNT, 1);
1912 	vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load);
1913 	return VMX_TEST_START;
1914 }
1915 
1916 static void msr_switch_main(void)
1917 {
1918 	if (vmx_get_test_stage() == 1) {
1919 		report("VM entry MSR load",
1920 			rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC);
1921 		vmx_set_test_stage(2);
1922 		wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1);
1923 		exit_msr_store[0].index = MSR_KERNEL_GS_BASE;
1924 		exit_msr_load[0].index = MSR_KERNEL_GS_BASE;
1925 		exit_msr_load[0].value = MSR_MAGIC + 2;
1926 	}
1927 	vmcall();
1928 }
1929 
1930 static int msr_switch_exit_handler(void)
1931 {
1932 	ulong reason;
1933 
1934 	reason = vmcs_read(EXI_REASON);
1935 	if (reason == VMX_VMCALL && vmx_get_test_stage() == 2) {
1936 		report("VM exit MSR store",
1937 			exit_msr_store[0].value == MSR_MAGIC + 1);
1938 		report("VM exit MSR load",
1939 			rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC + 2);
1940 		vmx_set_test_stage(3);
1941 		entry_msr_load[0].index = MSR_FS_BASE;
1942 		return VMX_TEST_RESUME;
1943 	}
1944 	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
1945 		__func__, vmx_get_test_stage(), reason);
1946 	return VMX_TEST_EXIT;
1947 }
1948 
1949 static int msr_switch_entry_failure(struct vmentry_failure *failure)
1950 {
1951 	ulong reason;
1952 
1953 	if (failure->early) {
1954 		printf("ERROR %s: early exit\n", __func__);
1955 		return VMX_TEST_EXIT;
1956 	}
1957 
1958 	reason = vmcs_read(EXI_REASON);
1959 	if (reason == (VMX_ENTRY_FAILURE | VMX_FAIL_MSR) &&
1960 	    vmx_get_test_stage() == 3) {
1961 		report("VM entry MSR load: try to load FS_BASE",
1962 			vmcs_read(EXI_QUALIFICATION) == 1);
1963 		return VMX_TEST_VMEXIT;
1964 	}
1965 	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
1966 		__func__, vmx_get_test_stage(), reason);
1967 	return VMX_TEST_EXIT;
1968 }
1969 
1970 static int vmmcall_init(struct vmcs *vmcs)
1971 {
1972 	vmcs_write(EXC_BITMAP, 1 << UD_VECTOR);
1973 	return VMX_TEST_START;
1974 }
1975 
1976 static void vmmcall_main(void)
1977 {
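	/*
	 * VMMCALL is SVM's hypercall instruction; on a VMX CPU it is
	 * expected to raise #UD, which L1 intercepts via the exception
	 * bitmap set up in vmmcall_init().
	 */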
1978 	asm volatile(
1979 		"mov $0xABCD, %%rax\n\t"
1980 		"vmmcall\n\t"
1981 		::: "rax");
1982 
1983 	report("VMMCALL", 0);
1984 }
1985 
1986 static int vmmcall_exit_handler(void)
1987 {
1988 	ulong reason;
1989 
1990 	reason = vmcs_read(EXI_REASON);
1991 	switch (reason) {
1992 	case VMX_VMCALL:
1994 		report("VMMCALL triggers #UD", 0);
1995 		break;
1996 	case VMX_EXC_NMI:
1997 		report("VMMCALL triggers #UD",
1998 		       (vmcs_read(EXI_INTR_INFO) & 0xff) == UD_VECTOR);
1999 		break;
2000 	default:
2001 		report("Unknown exit reason, %ld", false, reason);
2002 		print_vmexit_info();
2003 	}
2004 
2005 	return VMX_TEST_VMEXIT;
2006 }
2007 
2008 static int disable_rdtscp_init(struct vmcs *vmcs)
2009 {
2010 	u32 ctrl_cpu1;
2011 
2012 	if (ctrl_cpu_rev[0].clr & CPU_SECONDARY) {
2013 		ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
2014 		ctrl_cpu1 &= ~CPU_RDTSCP;
2015 		vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
2016 	}
2017 
2018 	return VMX_TEST_START;
2019 }
2020 
2021 static void disable_rdtscp_ud_handler(struct ex_regs *regs)
2022 {
2023 	switch (vmx_get_test_stage()) {
2024 	case 0:
2025 		report("RDTSCP triggers #UD", true);
2026 		vmx_inc_test_stage();
2027 		regs->rip += 3;
2028 		break;
2029 	case 2:
2030 		report("RDPID triggers #UD", true);
2031 		vmx_inc_test_stage();
2032 		regs->rip += 4;
2033 		break;
2034 	}
2037 }
2038 
2039 static void disable_rdtscp_main(void)
2040 {
2041 	/* Test that #UD is properly injected in L2.  */
2042 	handle_exception(UD_VECTOR, disable_rdtscp_ud_handler);
2043 
2044 	vmx_set_test_stage(0);
2045 	asm volatile("rdtscp" : : : "eax", "ecx", "edx");
2046 	vmcall();
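	/*
	 * RDPID %rax, emitted as raw bytes (F3 0F C7 F8), presumably
	 * because not all assemblers know the mnemonic.
	 */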
2047 	asm volatile(".byte 0xf3, 0x0f, 0xc7, 0xf8" : : : "eax");
2048 
2049 	handle_exception(UD_VECTOR, 0);
2050 	vmcall();
2051 }
2052 
2053 static int disable_rdtscp_exit_handler(void)
2054 {
2055 	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
2056 
2057 	switch (reason) {
2058 	case VMX_VMCALL:
2059 		switch (vmx_get_test_stage()) {
2060 		case 0:
2061 			report("RDTSCP triggers #UD", false);
2062 			vmx_inc_test_stage();
2063 			/* fallthrough */
2064 		case 1:
2065 			vmx_inc_test_stage();
2066 			vmcs_write(GUEST_RIP, vmcs_read(GUEST_RIP) + 3);
2067 			return VMX_TEST_RESUME;
2068 		case 2:
2069 			report("RDPID triggers #UD", false);
2070 			break;
2071 		}
2072 		break;
2073 
2074 	default:
2075 		report("Unknown exit reason, %d", false, reason);
2076 		print_vmexit_info();
2077 	}
2078 	return VMX_TEST_VMEXIT;
2079 }
2080 
2081 static int int3_init(struct vmcs *vmcs)
2082 {
2083 	vmcs_write(EXC_BITMAP, ~0u);
2084 	return VMX_TEST_START;
2085 }
2086 
2087 static void int3_guest_main(void)
2088 {
2089 	asm volatile ("int3");
2090 }
2091 
2092 static int int3_exit_handler(void)
2093 {
2094 	u32 reason = vmcs_read(EXI_REASON);
2095 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
2096 
2097 	report("L1 intercepts #BP", reason == VMX_EXC_NMI &&
2098 	       (intr_info & INTR_INFO_VALID_MASK) &&
2099 	       (intr_info & INTR_INFO_VECTOR_MASK) == BP_VECTOR &&
2100 	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
2101 		INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION);
2102 
2103 	return VMX_TEST_VMEXIT;
2104 }
2105 
2106 static int into_init(struct vmcs *vmcs)
2107 {
2108 	vmcs_write(EXC_BITMAP, ~0u);
2109 	return VMX_TEST_START;
2110 }
2111 
2112 static void into_guest_main(void)
2113 {
2114 	struct far_pointer32 fp = {
2115 		.offset = (uintptr_t)&&into,
2116 		.selector = KERNEL_CS32,
2117 	};
2118 	register uintptr_t rsp asm("rsp");
2119 
2120 	if (fp.offset != (uintptr_t)&&into) {
2121 		printf("Code address too high.\n");
2122 		return;
2123 	}
2124 	if ((u32)rsp != rsp) {
2125 		printf("Stack address too high.\n");
2126 		return;
2127 	}
2128 
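	/*
	 * INTO is invalid in 64-bit mode, so far-call into a 32-bit code
	 * segment in order to execute it.
	 */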
2129 	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
2130 	return;
2131 into:
2132 	asm volatile (".code32;"
2133 		      "movl $0x7fffffff, %eax;"
2134 		      "addl %eax, %eax;"
2135 		      "into;"
2136 		      "lret;"
2137 		      ".code64");
2138 	__builtin_unreachable();
2139 }
2140 
2141 static int into_exit_handler(void)
2142 {
2143 	u32 reason = vmcs_read(EXI_REASON);
2144 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
2145 
2146 	report("L1 intercepts #OF", reason == VMX_EXC_NMI &&
2147 	       (intr_info & INTR_INFO_VALID_MASK) &&
2148 	       (intr_info & INTR_INFO_VECTOR_MASK) == OF_VECTOR &&
2149 	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
2150 		INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION);
2151 
2152 	return VMX_TEST_VMEXIT;
2153 }
2154 
2155 static void exit_monitor_from_l2_main(void)
2156 {
2157 	printf("Calling exit(0) from l2...\n");
	printf("Calling exit(0) from L2...\n");
2159 }
2160 
2161 static int exit_monitor_from_l2_handler(void)
2162 {
2163 	report("The guest should have killed the VMM", false);
2164 	return VMX_TEST_EXIT;
2165 }
2166 
2167 static void assert_exit_reason(u64 expected)
2168 {
2169 	u64 actual = vmcs_read(EXI_REASON);
2170 
2171 	TEST_ASSERT_EQ_MSG(expected, actual, "Expected %s, got %s.",
2172 			   exit_reason_description(expected),
2173 			   exit_reason_description(actual));
2174 }
2175 
2176 static void skip_exit_insn(void)
2177 {
2178 	u64 guest_rip = vmcs_read(GUEST_RIP);
2179 	u32 insn_len = vmcs_read(EXI_INST_LEN);
2180 	vmcs_write(GUEST_RIP, guest_rip + insn_len);
2181 }
2182 
2183 static void skip_exit_vmcall(void)
2184 {
2185 	assert_exit_reason(VMX_VMCALL);
2186 	skip_exit_insn();
2187 }
2188 
2189 static void v2_null_test_guest(void)
2190 {
2191 }
2192 
2193 static void v2_null_test(void)
2194 {
2195 	test_set_guest(v2_null_test_guest);
2196 	enter_guest();
2197 	report(__func__, 1);
2198 }
2199 
2200 static void v2_multiple_entries_test_guest(void)
2201 {
2202 	vmx_set_test_stage(1);
2203 	vmcall();
2204 	vmx_set_test_stage(2);
2205 }
2206 
2207 static void v2_multiple_entries_test(void)
2208 {
2209 	test_set_guest(v2_multiple_entries_test_guest);
2210 	enter_guest();
2211 	TEST_ASSERT_EQ(vmx_get_test_stage(), 1);
2212 	skip_exit_vmcall();
2213 	enter_guest();
2214 	TEST_ASSERT_EQ(vmx_get_test_stage(), 2);
2215 	report(__func__, 1);
2216 }
2217 
2218 static int fixture_test_data = 1;
2219 
2220 static void fixture_test_teardown(void *data)
2221 {
2222 	*((int *) data) = 1;
2223 }
2224 
2225 static void fixture_test_guest(void)
2226 {
2227 	fixture_test_data++;
2228 }
2229 
2231 static void fixture_test_setup(void)
2232 {
2233 	TEST_ASSERT_EQ_MSG(1, fixture_test_data,
2234 			   "fixture_test_teardown didn't run?!");
2235 	fixture_test_data = 2;
2236 	test_add_teardown(fixture_test_teardown, &fixture_test_data);
2237 	test_set_guest(fixture_test_guest);
2238 }
2239 
2240 static void fixture_test_case1(void)
2241 {
2242 	fixture_test_setup();
2243 	TEST_ASSERT_EQ(2, fixture_test_data);
2244 	enter_guest();
2245 	TEST_ASSERT_EQ(3, fixture_test_data);
2246 	report(__func__, 1);
2247 }
2248 
2249 static void fixture_test_case2(void)
2250 {
2251 	fixture_test_setup();
2252 	TEST_ASSERT_EQ(2, fixture_test_data);
2253 	enter_guest();
2254 	TEST_ASSERT_EQ(3, fixture_test_data);
2255 	report(__func__, 1);
2256 }
2257 
2258 enum ept_access_op {
2259 	OP_READ,
2260 	OP_WRITE,
2261 	OP_EXEC,
2262 	OP_FLUSH_TLB,
2263 	OP_EXIT,
2264 };
2265 
2266 static struct ept_access_test_data {
2267 	unsigned long gpa;
2268 	unsigned long *gva;
2269 	unsigned long hpa;
2270 	unsigned long *hva;
2271 	enum ept_access_op op;
2272 } ept_access_test_data;
2273 
2274 extern unsigned char ret42_start;
2275 extern unsigned char ret42_end;
2276 
2277 /* Returns 42. */
2278 asm(
2279 	".align 64\n"
2280 	"ret42_start:\n"
2281 	"mov $42, %eax\n"
2282 	"ret\n"
2283 	"ret42_end:\n"
2284 );
2285 
2286 static void
2287 diagnose_ept_violation_qual(u64 expected, u64 actual)
2288 {
2290 #define DIAGNOSE(flag)							\
2291 do {									\
2292 	if ((expected & flag) != (actual & flag))			\
2293 		printf(#flag " %sexpected\n",				\
2294 		       (expected & flag) ? "" : "un");			\
2295 } while (0)
2296 
2297 	DIAGNOSE(EPT_VLT_RD);
2298 	DIAGNOSE(EPT_VLT_WR);
2299 	DIAGNOSE(EPT_VLT_FETCH);
2300 	DIAGNOSE(EPT_VLT_PERM_RD);
2301 	DIAGNOSE(EPT_VLT_PERM_WR);
2302 	DIAGNOSE(EPT_VLT_PERM_EX);
2303 	DIAGNOSE(EPT_VLT_LADDR_VLD);
2304 	DIAGNOSE(EPT_VLT_PADDR);
2305 
2306 #undef DIAGNOSE
2307 }
2308 
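/*
 * Ask the guest to perform @op on ept_access_test_data.gva, then run it
 * until the next VM exit.
 */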
2309 static void do_ept_access_op(enum ept_access_op op)
2310 {
2311 	ept_access_test_data.op = op;
2312 	enter_guest();
2313 }
2314 
2315 /*
2316  * Force the guest to flush its TLB (i.e., flush gva -> gpa mappings). Only
2317  * needed by tests that modify guest PTEs.
2318  */
2319 static void ept_access_test_guest_flush_tlb(void)
2320 {
2321 	do_ept_access_op(OP_FLUSH_TLB);
2322 	skip_exit_vmcall();
2323 }
2324 
2325 /*
2326  * Modifies the EPT entry at @level in the mapping of @gpa. First clears the
2327  * bits in @clear then sets the bits in @set. @mkhuge transforms the entry into
2328  * a huge page.
2329  */
2330 static unsigned long ept_twiddle(unsigned long gpa, bool mkhuge, int level,
2331 				 unsigned long clear, unsigned long set)
2332 {
2333 	struct ept_access_test_data *data = &ept_access_test_data;
2334 	unsigned long orig_pte;
2335 	unsigned long pte;
2336 
2337 	/* Screw with the mapping at the requested level. */
2338 	TEST_ASSERT(get_ept_pte(pml4, gpa, level, &orig_pte));
	if (mkhuge)
		pte = (orig_pte & ~EPT_ADDR_MASK) | data->hpa | EPT_LARGE_PAGE;
	else
		pte = orig_pte;
2344 	pte = (pte & ~clear) | set;
2345 	set_ept_pte(pml4, gpa, level, pte);
2346 	ept_sync(INVEPT_SINGLE, eptp);
2347 
2348 	return orig_pte;
2349 }
2350 
2351 static void ept_untwiddle(unsigned long gpa, int level, unsigned long orig_pte)
2352 {
2353 	set_ept_pte(pml4, gpa, level, orig_pte);
2354 }
2355 
2356 static void do_ept_violation(bool leaf, enum ept_access_op op,
2357 			     u64 expected_qual, u64 expected_paddr)
2358 {
2359 	u64 qual;
2360 
2361 	/* Try the access and observe the violation. */
2362 	do_ept_access_op(op);
2363 
2364 	assert_exit_reason(VMX_EPT_VIOLATION);
2365 
2366 	qual = vmcs_read(EXI_QUALIFICATION);
2367 
2368 	diagnose_ept_violation_qual(expected_qual, qual);
2369 	TEST_EXPECT_EQ(expected_qual, qual);
2370 
2371 	#if 0
2372 	/* Disable for now otherwise every test will fail */
2373 	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
2374 		       (unsigned long) (
2375 			       op == OP_EXEC ? data->gva + 1 : data->gva));
2376 	#endif
2377 	/*
2378 	 * TODO: tests that probe expected_paddr in pages other than the one at
2379 	 * the beginning of the 1g region.
2380 	 */
2381 	TEST_EXPECT_EQ(vmcs_read(INFO_PHYS_ADDR), expected_paddr);
2382 }
2383 
2384 static void
2385 ept_violation_at_level_mkhuge(bool mkhuge, int level, unsigned long clear,
2386 			      unsigned long set, enum ept_access_op op,
2387 			      u64 expected_qual)
2388 {
2389 	struct ept_access_test_data *data = &ept_access_test_data;
2390 	unsigned long orig_pte;
2391 
2392 	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);
2393 
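	/*
	 * For OP_EXEC the guest jumps to the ret42 stub at data->gva[1],
	 * one unsigned long into the page, hence the offset expected
	 * physical address.
	 */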
2394 	do_ept_violation(level == 1 || mkhuge, op, expected_qual,
2395 			 op == OP_EXEC ? data->gpa + sizeof(unsigned long) :
2396 					 data->gpa);
2397 
2398 	/* Fix the violation and resume the op loop. */
2399 	ept_untwiddle(data->gpa, level, orig_pte);
2400 	enter_guest();
2401 	skip_exit_vmcall();
2402 }
2403 
2404 static void
2405 ept_violation_at_level(int level, unsigned long clear, unsigned long set,
2406 		       enum ept_access_op op, u64 expected_qual)
2407 {
2408 	ept_violation_at_level_mkhuge(false, level, clear, set, op,
2409 				      expected_qual);
2410 	if (ept_huge_pages_supported(level))
2411 		ept_violation_at_level_mkhuge(true, level, clear, set, op,
2412 					      expected_qual);
2413 }
2414 
2415 static void ept_violation(unsigned long clear, unsigned long set,
2416 			  enum ept_access_op op, u64 expected_qual)
2417 {
2418 	ept_violation_at_level(1, clear, set, op, expected_qual);
2419 	ept_violation_at_level(2, clear, set, op, expected_qual);
2420 	ept_violation_at_level(3, clear, set, op, expected_qual);
2421 	ept_violation_at_level(4, clear, set, op, expected_qual);
2422 }
2423 
2424 static void ept_access_violation(unsigned long access, enum ept_access_op op,
2425 				       u64 expected_qual)
2426 {
2427 	ept_violation(EPT_PRESENT, access, op,
2428 		      expected_qual | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2429 }
2430 
2431 /*
2432  * For translations that don't involve a GVA, that is physical address (paddr)
2433  * accesses, EPT violations don't set the flag EPT_VLT_PADDR.  For a typical
2434  * guest memory access, the hardware does GVA -> GPA -> HPA.  However, certain
2435  * translations don't involve GVAs, such as when the hardware does the guest
2436  * page table walk. For example, in translating GVA_1 -> GPA_1, the guest MMU
2437  * might try to set an A bit on a guest PTE. If the GPA_2 that the PTE resides
2438  * on isn't present in the EPT, then the EPT violation will be for GPA_2 and
2439  * the EPT_VLT_PADDR bit will be clear in the exit qualification.
2440  *
2441  * Note that paddr violations can also be triggered by loading PAE page tables
2442  * with wonky addresses. We don't test that yet.
2443  *
2444  * This function modifies the EPT entry that maps the GPA that the guest page
2445  * table entry mapping ept_access_data.gva resides on.
2446  *
2447  *	@ept_access	EPT permissions to set. Other permissions are cleared.
2448  *
2449  *	@pte_ad		Set the A/D bits on the guest PTE accordingly.
2450  *
2451  *	@op		Guest operation to perform with ept_access_data.gva.
2452  *
2453  *	@expect_violation
2454  *			Is a violation expected during the paddr access?
2455  *
2456  *	@expected_qual	Expected qualification for the EPT violation.
2457  *			EPT_VLT_PADDR should be clear.
2458  */
2459 static void ept_access_paddr(unsigned long ept_access, unsigned long pte_ad,
2460 			     enum ept_access_op op, bool expect_violation,
2461 			     u64 expected_qual)
2462 {
2463 	struct ept_access_test_data *data = &ept_access_test_data;
2464 	unsigned long *ptep;
2465 	unsigned long gpa;
2466 	unsigned long orig_epte;
2467 
2468 	/* Modify the guest PTE mapping data->gva according to @pte_ad.  */
2469 	ptep = get_pte_level(current_page_table(), data->gva, /*level=*/1);
2470 	TEST_ASSERT(ptep);
2471 	TEST_ASSERT_EQ(*ptep & PT_ADDR_MASK, data->gpa);
2472 	*ptep = (*ptep & ~PT_AD_MASK) | pte_ad;
2473 	ept_access_test_guest_flush_tlb();
2474 
2475 	/*
2476 	 * Now modify the access bits on the EPT entry for the GPA that the
2477 	 * guest PTE resides on. Note that by modifying a single EPT entry,
2478 	 * we're potentially affecting 512 guest PTEs. However, we've carefully
2479 	 * constructed our test such that those other 511 PTEs aren't used by
2480 	 * the guest: data->gva is at the beginning of a 1G huge page, thus the
2481 	 * PTE we're modifying is at the beginning of a 4K page and the
	 * following 511 entries are also under our control (and not touched by
2483 	 * the guest).
2484 	 */
2485 	gpa = virt_to_phys(ptep);
2486 	TEST_ASSERT_EQ(gpa & ~PAGE_MASK, 0);
2487 	/*
2488 	 * Make sure the guest page table page is mapped with a 4K EPT entry,
2489 	 * otherwise our level=1 twiddling below will fail. We use the
2490 	 * identity map (gpa = gpa) since page tables are shared with the host.
2491 	 */
2492 	install_ept(pml4, gpa, gpa, EPT_PRESENT);
2493 	orig_epte = ept_twiddle(gpa, /*mkhuge=*/0, /*level=*/1,
2494 				/*clear=*/EPT_PRESENT, /*set=*/ept_access);
2495 
2496 	if (expect_violation) {
2497 		do_ept_violation(/*leaf=*/true, op,
2498 				 expected_qual | EPT_VLT_LADDR_VLD, gpa);
2499 		ept_untwiddle(gpa, /*level=*/1, orig_epte);
2500 		do_ept_access_op(op);
2501 	} else {
2502 		do_ept_access_op(op);
2503 		ept_untwiddle(gpa, /*level=*/1, orig_epte);
2504 	}
2505 
2506 	TEST_ASSERT(*ptep & PT_ACCESSED_MASK);
2507 	if ((pte_ad & PT_DIRTY_MASK) || op == OP_WRITE)
2508 		TEST_ASSERT(*ptep & PT_DIRTY_MASK);
2509 
2510 	skip_exit_vmcall();
2511 }
2512 
2513 static void ept_access_allowed_paddr(unsigned long ept_access,
2514 				     unsigned long pte_ad,
2515 				     enum ept_access_op op)
2516 {
2517 	ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/false,
2518 			 /*expected_qual=*/-1);
2519 }
2520 
2521 static void ept_access_violation_paddr(unsigned long ept_access,
2522 				       unsigned long pte_ad,
2523 				       enum ept_access_op op,
2524 				       u64 expected_qual)
2525 {
2526 	ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/true,
2527 			 expected_qual);
2528 }
2529 
2531 static void ept_allowed_at_level_mkhuge(bool mkhuge, int level,
2532 					unsigned long clear,
2533 					unsigned long set,
2534 					enum ept_access_op op)
2535 {
2536 	struct ept_access_test_data *data = &ept_access_test_data;
2537 	unsigned long orig_pte;
2538 
2539 	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);
2540 
2541 	/* No violation. Should proceed to vmcall. */
2542 	do_ept_access_op(op);
2543 	skip_exit_vmcall();
2544 
2545 	ept_untwiddle(data->gpa, level, orig_pte);
2546 }
2547 
2548 static void ept_allowed_at_level(int level, unsigned long clear,
2549 				 unsigned long set, enum ept_access_op op)
2550 {
2551 	ept_allowed_at_level_mkhuge(false, level, clear, set, op);
2552 	if (ept_huge_pages_supported(level))
2553 		ept_allowed_at_level_mkhuge(true, level, clear, set, op);
2554 }
2555 
2556 static void ept_allowed(unsigned long clear, unsigned long set,
2557 			enum ept_access_op op)
2558 {
2559 	ept_allowed_at_level(1, clear, set, op);
2560 	ept_allowed_at_level(2, clear, set, op);
2561 	ept_allowed_at_level(3, clear, set, op);
2562 	ept_allowed_at_level(4, clear, set, op);
2563 }
2564 
2565 static void ept_ignored_bit(int bit)
2566 {
2567 	/* Set the bit. */
2568 	ept_allowed(0, 1ul << bit, OP_READ);
2569 	ept_allowed(0, 1ul << bit, OP_WRITE);
2570 	ept_allowed(0, 1ul << bit, OP_EXEC);
2571 
2572 	/* Clear the bit. */
2573 	ept_allowed(1ul << bit, 0, OP_READ);
2574 	ept_allowed(1ul << bit, 0, OP_WRITE);
2575 	ept_allowed(1ul << bit, 0, OP_EXEC);
2576 }
2577 
2578 static void ept_access_allowed(unsigned long access, enum ept_access_op op)
2579 {
2580 	ept_allowed(EPT_PRESENT, access, op);
2581 }
2582 
2584 static void ept_misconfig_at_level_mkhuge_op(bool mkhuge, int level,
2585 					     unsigned long clear,
2586 					     unsigned long set,
2587 					     enum ept_access_op op)
2588 {
2589 	struct ept_access_test_data *data = &ept_access_test_data;
2590 	unsigned long orig_pte;
2591 
2592 	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);
2593 
2594 	do_ept_access_op(op);
2595 	assert_exit_reason(VMX_EPT_MISCONFIG);
2596 
2597 	/* Intel 27.2.1, "For all other VM exits, this field is cleared." */
2598 	#if 0
2599 	/* broken: */
2600 	TEST_EXPECT_EQ_MSG(vmcs_read(EXI_QUALIFICATION), 0);
2601 	#endif
2602 	#if 0
2603 	/*
2604 	 * broken:
	 * According to the description of the exit qualification for EPT
	 * violations, EPT_VLT_LADDR_VLD indicates whether
	 * GUEST_LINEAR_ADDRESS is valid. However, I can't find anything
	 * that says GUEST_LINEAR_ADDRESS ought to be set for a misconfig.
2609 	 */
2610 	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
2611 		       (unsigned long) (
2612 			       op == OP_EXEC ? data->gva + 1 : data->gva));
2613 	#endif
2614 
2615 	/* Fix the violation and resume the op loop. */
2616 	ept_untwiddle(data->gpa, level, orig_pte);
2617 	enter_guest();
2618 	skip_exit_vmcall();
2619 }
2620 
2621 static void ept_misconfig_at_level_mkhuge(bool mkhuge, int level,
2622 					  unsigned long clear,
2623 					  unsigned long set)
2624 {
2625 	/* The op shouldn't matter (read, write, exec), so try them all! */
2626 	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_READ);
2627 	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_WRITE);
2628 	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_EXEC);
2629 }
2630 
2631 static void ept_misconfig_at_level(int level, unsigned long clear,
2632 				   unsigned long set)
2633 {
2634 	ept_misconfig_at_level_mkhuge(false, level, clear, set);
2635 	if (ept_huge_pages_supported(level))
2636 		ept_misconfig_at_level_mkhuge(true, level, clear, set);
2637 }
2638 
2639 static void ept_misconfig(unsigned long clear, unsigned long set)
2640 {
2641 	ept_misconfig_at_level(1, clear, set);
2642 	ept_misconfig_at_level(2, clear, set);
2643 	ept_misconfig_at_level(3, clear, set);
2644 	ept_misconfig_at_level(4, clear, set);
2645 }
2646 
2647 static void ept_access_misconfig(unsigned long access)
2648 {
2649 	ept_misconfig(EPT_PRESENT, access);
2650 }
2651 
2652 static void ept_reserved_bit_at_level_nohuge(int level, int bit)
2653 {
2654 	/* Setting the bit causes a misconfig. */
2655 	ept_misconfig_at_level_mkhuge(false, level, 0, 1ul << bit);
2656 
2657 	/* Making the entry non-present turns reserved bits into ignored. */
2658 	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
2659 			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2660 }
2661 
2662 static void ept_reserved_bit_at_level_huge(int level, int bit)
2663 {
2664 	/* Setting the bit causes a misconfig. */
2665 	ept_misconfig_at_level_mkhuge(true, level, 0, 1ul << bit);
2666 
2667 	/* Making the entry non-present turns reserved bits into ignored. */
2668 	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
2669 			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2670 }
2671 
2672 static void ept_reserved_bit_at_level(int level, int bit)
2673 {
2674 	/* Setting the bit causes a misconfig. */
2675 	ept_misconfig_at_level(level, 0, 1ul << bit);
2676 
2677 	/* Making the entry non-present turns reserved bits into ignored. */
2678 	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
2679 			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2680 }
2681 
2682 static void ept_reserved_bit(int bit)
2683 {
2684 	ept_reserved_bit_at_level(1, bit);
2685 	ept_reserved_bit_at_level(2, bit);
2686 	ept_reserved_bit_at_level(3, bit);
2687 	ept_reserved_bit_at_level(4, bit);
2688 }
2689 
2690 #define PAGE_2M_ORDER 9
2691 #define PAGE_1G_ORDER 18
2692 
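/* Allocate the 1G test region once and reuse it across test cases. */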
2693 static void *get_1g_page(void)
2694 {
2695 	static void *alloc;
2696 
2697 	if (!alloc)
2698 		alloc = alloc_pages(PAGE_1G_ORDER);
2699 	return alloc;
2700 }
2701 
2702 static void ept_access_test_teardown(void *unused)
2703 {
2704 	/* Exit the guest cleanly. */
2705 	do_ept_access_op(OP_EXIT);
2706 }
2707 
2708 static void ept_access_test_guest(void)
2709 {
2710 	struct ept_access_test_data *data = &ept_access_test_data;
2711 	int (*code)(void) = (int (*)(void)) &data->gva[1];
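	/* The ret42 stub was copied to hva[1] in setup; gva[1] maps it. */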
2712 
2713 	while (true) {
2714 		switch (data->op) {
2715 		case OP_READ:
2716 			TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_1);
2717 			break;
2718 		case OP_WRITE:
2719 			*data->gva = MAGIC_VAL_2;
2720 			TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_2);
2721 			*data->gva = MAGIC_VAL_1;
2722 			break;
2723 		case OP_EXEC:
2724 			TEST_ASSERT_EQ(42, code());
2725 			break;
2726 		case OP_FLUSH_TLB:
2727 			write_cr3(read_cr3());
2728 			break;
2729 		case OP_EXIT:
2730 			return;
2731 		default:
2732 			TEST_ASSERT_MSG(false, "Unknown op %d", data->op);
2733 		}
2734 		vmcall();
2735 	}
2736 }
2737 
2738 static void ept_access_test_setup(void)
2739 {
2740 	struct ept_access_test_data *data = &ept_access_test_data;
2741 	unsigned long npages = 1ul << PAGE_1G_ORDER;
2742 	unsigned long size = npages * PAGE_SIZE;
2743 	unsigned long *page_table = current_page_table();
2744 	unsigned long pte;
2745 
2746 	if (setup_ept(false))
2747 		test_skip("EPT not supported");
2748 
2749 	/* We use data->gpa = 1 << 39 so that test data has a separate pml4 entry */
2750 	if (cpuid_maxphyaddr() < 40)
2751 		test_skip("Test needs MAXPHYADDR >= 40");
2752 
2753 	test_set_guest(ept_access_test_guest);
2754 	test_add_teardown(ept_access_test_teardown, NULL);
2755 
2756 	data->hva = get_1g_page();
2757 	TEST_ASSERT(data->hva);
2758 	data->hpa = virt_to_phys(data->hva);
2759 
2760 	data->gpa = 1ul << 39;
2761 	data->gva = (void *) ALIGN((unsigned long) alloc_vpages(npages * 2),
2762 				   size);
2763 	TEST_ASSERT(!any_present_pages(page_table, data->gva, size));
2764 	install_pages(page_table, data->gpa, size, data->gva);
2765 
2766 	/*
2767 	 * Make sure nothing's mapped here so the tests that screw with the
2768 	 * pml4 entry don't inadvertently break something.
2769 	 */
2770 	TEST_ASSERT(get_ept_pte(pml4, data->gpa, 4, &pte) && pte == 0);
2771 	TEST_ASSERT(get_ept_pte(pml4, data->gpa + size - 1, 4, &pte) && pte == 0);
2772 	install_ept(pml4, data->hpa, data->gpa, EPT_PRESENT);
2773 
2774 	data->hva[0] = MAGIC_VAL_1;
2775 	memcpy(&data->hva[1], &ret42_start, &ret42_end - &ret42_start);
2776 }
2777 
2778 static void ept_access_test_not_present(void)
2779 {
2780 	ept_access_test_setup();
2781 	/* --- */
2782 	ept_access_violation(0, OP_READ, EPT_VLT_RD);
2783 	ept_access_violation(0, OP_WRITE, EPT_VLT_WR);
2784 	ept_access_violation(0, OP_EXEC, EPT_VLT_FETCH);
2785 }
2786 
2787 static void ept_access_test_read_only(void)
2788 {
2789 	ept_access_test_setup();
2790 
2791 	/* r-- */
2792 	ept_access_allowed(EPT_RA, OP_READ);
2793 	ept_access_violation(EPT_RA, OP_WRITE, EPT_VLT_WR | EPT_VLT_PERM_RD);
2794 	ept_access_violation(EPT_RA, OP_EXEC, EPT_VLT_FETCH | EPT_VLT_PERM_RD);
2795 }
2796 
2797 static void ept_access_test_write_only(void)
2798 {
2799 	ept_access_test_setup();
2800 	/* -w- */
2801 	ept_access_misconfig(EPT_WA);
2802 }
2803 
2804 static void ept_access_test_read_write(void)
2805 {
2806 	ept_access_test_setup();
2807 	/* rw- */
2808 	ept_access_allowed(EPT_RA | EPT_WA, OP_READ);
2809 	ept_access_allowed(EPT_RA | EPT_WA, OP_WRITE);
2810 	ept_access_violation(EPT_RA | EPT_WA, OP_EXEC,
2811 			   EPT_VLT_FETCH | EPT_VLT_PERM_RD | EPT_VLT_PERM_WR);
2812 }
2813 
2815 static void ept_access_test_execute_only(void)
2816 {
2817 	ept_access_test_setup();
2818 	/* --x */
2819 	if (ept_execute_only_supported()) {
2820 		ept_access_violation(EPT_EA, OP_READ,
2821 				     EPT_VLT_RD | EPT_VLT_PERM_EX);
2822 		ept_access_violation(EPT_EA, OP_WRITE,
2823 				     EPT_VLT_WR | EPT_VLT_PERM_EX);
2824 		ept_access_allowed(EPT_EA, OP_EXEC);
2825 	} else {
2826 		ept_access_misconfig(EPT_EA);
2827 	}
2828 }
2829 
2830 static void ept_access_test_read_execute(void)
2831 {
2832 	ept_access_test_setup();
2833 	/* r-x */
2834 	ept_access_allowed(EPT_RA | EPT_EA, OP_READ);
2835 	ept_access_violation(EPT_RA | EPT_EA, OP_WRITE,
2836 			   EPT_VLT_WR | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX);
2837 	ept_access_allowed(EPT_RA | EPT_EA, OP_EXEC);
2838 }
2839 
2840 static void ept_access_test_write_execute(void)
2841 {
2842 	ept_access_test_setup();
2843 	/* -wx */
2844 	ept_access_misconfig(EPT_WA | EPT_EA);
2845 }
2846 
2847 static void ept_access_test_read_write_execute(void)
2848 {
2849 	ept_access_test_setup();
2850 	/* rwx */
2851 	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_READ);
2852 	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_WRITE);
2853 	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_EXEC);
2854 }
2855 
2856 static void ept_access_test_reserved_bits(void)
2857 {
2858 	int i;
2859 	int maxphyaddr;
2860 
2861 	ept_access_test_setup();
2862 
2863 	/* Reserved bits above maxphyaddr. */
2864 	maxphyaddr = cpuid_maxphyaddr();
2865 	for (i = maxphyaddr; i <= 51; i++) {
2866 		report_prefix_pushf("reserved_bit=%d", i);
2867 		ept_reserved_bit(i);
2868 		report_prefix_pop();
2869 	}
2870 
2871 	/* Level-specific reserved bits. */
2872 	ept_reserved_bit_at_level_nohuge(2, 3);
2873 	ept_reserved_bit_at_level_nohuge(2, 4);
2874 	ept_reserved_bit_at_level_nohuge(2, 5);
2875 	ept_reserved_bit_at_level_nohuge(2, 6);
2876 	/* 2M alignment. */
2877 	for (i = 12; i < 20; i++) {
2878 		report_prefix_pushf("reserved_bit=%d", i);
2879 		ept_reserved_bit_at_level_huge(2, i);
2880 		report_prefix_pop();
2881 	}
2882 	ept_reserved_bit_at_level_nohuge(3, 3);
2883 	ept_reserved_bit_at_level_nohuge(3, 4);
2884 	ept_reserved_bit_at_level_nohuge(3, 5);
2885 	ept_reserved_bit_at_level_nohuge(3, 6);
2886 	/* 1G alignment. */
2887 	for (i = 12; i < 29; i++) {
2888 		report_prefix_pushf("reserved_bit=%d", i);
2889 		ept_reserved_bit_at_level_huge(3, i);
2890 		report_prefix_pop();
2891 	}
2892 	ept_reserved_bit_at_level(4, 3);
2893 	ept_reserved_bit_at_level(4, 4);
2894 	ept_reserved_bit_at_level(4, 5);
2895 	ept_reserved_bit_at_level(4, 6);
2896 	ept_reserved_bit_at_level(4, 7);
2897 }
2898 
2899 static void ept_access_test_ignored_bits(void)
2900 {
2901 	ept_access_test_setup();
2902 	/*
2903 	 * Bits ignored at every level. Bits 8 and 9 (A and D) are ignored as
2904 	 * far as translation is concerned even if AD bits are enabled in the
2905 	 * EPTP. Bit 63 is ignored because "EPT-violation #VE" VM-execution
2906 	 * control is 0.
2907 	 */
2908 	ept_ignored_bit(8);
2909 	ept_ignored_bit(9);
2910 	ept_ignored_bit(10);
2911 	ept_ignored_bit(11);
2912 	ept_ignored_bit(52);
2913 	ept_ignored_bit(53);
2914 	ept_ignored_bit(54);
2915 	ept_ignored_bit(55);
2916 	ept_ignored_bit(56);
2917 	ept_ignored_bit(57);
2918 	ept_ignored_bit(58);
2919 	ept_ignored_bit(59);
2920 	ept_ignored_bit(60);
2921 	ept_ignored_bit(61);
2922 	ept_ignored_bit(62);
2923 	ept_ignored_bit(63);
2924 }
2925 
2926 static void ept_access_test_paddr_not_present_ad_disabled(void)
2927 {
2928 	ept_access_test_setup();
2929 	ept_disable_ad_bits();
2930 
2931 	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, EPT_VLT_RD);
2932 	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, EPT_VLT_RD);
2933 	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, EPT_VLT_RD);
2934 }
2935 
2936 static void ept_access_test_paddr_not_present_ad_enabled(void)
2937 {
2938 	u64 qual = EPT_VLT_RD | EPT_VLT_WR;
2939 
2940 	ept_access_test_setup();
2941 	ept_enable_ad_bits_or_skip_test();
2942 
2943 	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, qual);
2944 	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, qual);
2945 	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, qual);
2946 }
2947 
2948 static void ept_access_test_paddr_read_only_ad_disabled(void)
2949 {
2950 	/*
2951 	 * When EPT AD bits are disabled, all accesses to guest paging
2952 	 * structures are reported separately as a read and (after
2953 	 * translation of the GPA to host physical address) a read+write
2954 	 * if the A/D bits have to be set.
2955 	 */
2956 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;
2957 
2958 	ept_access_test_setup();
2959 	ept_disable_ad_bits();
2960 
2961 	/* Can't update A bit, so all accesses fail. */
2962 	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
2963 	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
2964 	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
2965 	/* AD bits disabled, so only writes try to update the D bit. */
2966 	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ);
2967 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
2968 	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC);
2969 	/* Both A and D already set, so read-only is OK. */
2970 	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_READ);
2971 	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_WRITE);
2972 	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_EXEC);
2973 }
2974 
2975 static void ept_access_test_paddr_read_only_ad_enabled(void)
2976 {
2977 	/*
2978 	 * When EPT AD bits are enabled, all accesses to guest paging
2979 	 * structures are considered writes as far as EPT translation
2980 	 * is concerned.
2981 	 */
2982 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;
2983 
2984 	ept_access_test_setup();
2985 	ept_enable_ad_bits_or_skip_test();
2986 
2987 	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
2988 	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
2989 	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
2990 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ, qual);
2991 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
2992 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC, qual);
2993 	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_READ, qual);
2994 	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_WRITE, qual);
2995 	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_EXEC, qual);
2996 }
2997 
2998 static void ept_access_test_paddr_read_write(void)
2999 {
3000 	ept_access_test_setup();
3001 	/* Read-write access to paging structure. */
3002 	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_READ);
3003 	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_WRITE);
3004 	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_EXEC);
3005 }
3006 
3007 static void ept_access_test_paddr_read_write_execute(void)
3008 {
3009 	ept_access_test_setup();
3010 	/* RWX access to paging structure. */
3011 	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_READ);
3012 	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_WRITE);
3013 	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_EXEC);
3014 }
3015 
3016 static void ept_access_test_paddr_read_execute_ad_disabled(void)
3017 {
	/*
3019 	 * When EPT AD bits are disabled, all accesses to guest paging
3020 	 * structures are reported separately as a read and (after
3021 	 * translation of the GPA to host physical address) a read+write
3022 	 * if the A/D bits have to be set.
3023 	 */
3024 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX;
3025 
3026 	ept_access_test_setup();
3027 	ept_disable_ad_bits();
3028 
3029 	/* Can't update A bit, so all accesses fail. */
3030 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual);
3031 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual);
3032 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual);
3033 	/* AD bits disabled, so only writes try to update the D bit. */
3034 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ);
3035 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual);
3036 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC);
3037 	/* Both A and D already set, so read-only is OK. */
3038 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ);
3039 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE);
3040 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC);
3041 }
3042 
3043 static void ept_access_test_paddr_read_execute_ad_enabled(void)
3044 {
3045 	/*
3046 	 * When EPT AD bits are enabled, all accesses to guest paging
3047 	 * structures are considered writes as far as EPT translation
3048 	 * is concerned.
3049 	 */
3050 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX;
3051 
3052 	ept_access_test_setup();
3053 	ept_enable_ad_bits_or_skip_test();
3054 
3055 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual);
3056 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual);
3057 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual);
3058 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ, qual);
3059 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual);
3060 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC, qual);
3061 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ, qual);
3062 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE, qual);
3063 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC, qual);
3064 }
3065 
3066 static void ept_access_test_paddr_not_present_page_fault(void)
3067 {
3068 	ept_access_test_setup();
3069 	/*
	 * TODO: test that no EPT violation occurs as long as a guest #PF
	 * occurs first. E.g., the page at the GPA is read-only in the EPT,
	 * but the GVA is also mapped read-only in the guest page tables, so
	 * the guest page-faults before the host takes an EPT violation for
	 * trying to update the A bit.
3074 	 */
3075 }
3076 
3077 static void ept_access_test_force_2m_page(void)
3078 {
3079 	ept_access_test_setup();
3080 
3081 	TEST_ASSERT_EQ(ept_2m_supported(), true);
3082 	ept_allowed_at_level_mkhuge(true, 2, 0, 0, OP_READ);
3083 	ept_violation_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_RA, OP_WRITE,
3084 				      EPT_VLT_WR | EPT_VLT_PERM_RD |
3085 				      EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
3086 	ept_misconfig_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_WA);
3087 }
3088 
3089 static bool invvpid_valid(u64 type, u64 vpid, u64 gla)
3090 {
3091 	u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3092 
3093 	TEST_ASSERT(msr & VPID_CAP_INVVPID);
3094 
3095 	if (type < INVVPID_ADDR || type > INVVPID_CONTEXT_LOCAL)
3096 		return false;
3097 
3098 	if (!(msr & (1ull << (type + VPID_CAP_INVVPID_TYPES_SHIFT))))
3099 		return false;
3100 
3101 	if (vpid >> 16)
3102 		return false;
3103 
3104 	if (type != INVVPID_ALL && !vpid)
3105 		return false;
3106 
3107 	if (type == INVVPID_ADDR && !is_canonical(gla))
3108 		return false;
3109 
3110 	return true;
3111 }
3112 
3113 static void try_invvpid(u64 type, u64 vpid, u64 gla)
3114 {
3115 	int rc;
3116 	bool valid = invvpid_valid(type, vpid, gla);
3117 	u64 expected = valid ? VMXERR_UNSUPPORTED_VMCS_COMPONENT
3118 		: VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID;
3119 	/*
	 * Set VMX_INST_ERROR to VMXERR_UNSUPPORTED_VMCS_COMPONENT by
	 * reading a nonexistent VMCS field, so that we can tell whether
	 * INVVPID updates it.
3122 	 */
3123 	vmcs_read(~0);
3124 	rc = invvpid(type, vpid, gla);
3125 	report("INVVPID type %ld VPID %lx GLA %lx %s",
3126 	       !rc == valid, type, vpid, gla,
3127 	       valid ? "passes" : "fails");
3128 	report("After %s INVVPID, VMX_INST_ERR is %ld (actual %ld)",
3129 	       vmcs_read(VMX_INST_ERROR) == expected,
3130 	       rc ? "failed" : "successful",
3131 	       expected, vmcs_read(VMX_INST_ERROR));
3132 }
3133 
3134 static void ds_invvpid(void *data)
3135 {
3136 	u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
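	/* Pick the lowest-numbered INVVPID type the CPU supports. */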
3137 	u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1;
3138 
3139 	TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL);
3140 	asm volatile("invvpid %0, %1"
3141 		     :
3142 		     : "m"(*(struct invvpid_operand *)data),
3143 		       "r"(type));
3144 }
3145 
3146 /*
3147  * The SS override is ignored in 64-bit mode, so we use an addressing
3148  * mode with %rsp as the base register to generate an implicit SS
3149  * reference.
3150  */
3151 static void ss_invvpid(void *data)
3152 {
3153 	u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3154 	u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1;
3155 
3156 	TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL);
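	/*
	 * Compute data - rsp in %0, then address the operand as
	 * (%rsp,%0,1) so that the memory reference implicitly uses SS.
	 */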
3157 	asm volatile("sub %%rsp,%0; invvpid (%%rsp,%0,1), %1"
3158 		     : "+r"(data)
3159 		     : "r"(type));
3160 }
3161 
3162 static void invvpid_test_gp(void)
3163 {
3164 	bool fault;
3165 
3166 	fault = test_for_exception(GP_VECTOR, &ds_invvpid,
3167 				   (void *)NONCANONICAL);
3168 	report("INVVPID with non-canonical DS operand raises #GP", fault);
3169 }
3170 
3171 static void invvpid_test_ss(void)
3172 {
3173 	bool fault;
3174 
3175 	fault = test_for_exception(SS_VECTOR, &ss_invvpid,
3176 				   (void *)NONCANONICAL);
3177 	report("INVVPID with non-canonical SS operand raises #SS", fault);
3178 }
3179 
3180 static void invvpid_test_pf(void)
3181 {
3182 	void *vpage = alloc_vpage();
3183 	bool fault;
3184 
3185 	fault = test_for_exception(PF_VECTOR, &ds_invvpid, vpage);
3186 	report("INVVPID with unmapped operand raises #PF", fault);
3187 }
3188 
3189 static void try_compat_invvpid(void *unused)
3190 {
3191 	struct far_pointer32 fp = {
3192 		.offset = (uintptr_t)&&invvpid,
3193 		.selector = KERNEL_CS32,
3194 	};
3195 	register uintptr_t rsp asm("rsp");
3196 
3197 	TEST_ASSERT_MSG(fp.offset == (uintptr_t)&&invvpid,
3198 			"Code address too high.");
3199 	TEST_ASSERT_MSG(rsp == (u32)rsp, "Stack address too high.");
3200 
3201 	asm goto ("lcall *%0" : : "m" (fp) : "rax" : invvpid);
3202 	return;
3203 invvpid:
3204 	asm volatile (".code32;"
3205 		      "invvpid (%eax), %eax;"
3206 		      "lret;"
3207 		      ".code64");
3208 	__builtin_unreachable();
3209 }
3210 
3211 static void invvpid_test_compatibility_mode(void)
3212 {
3213 	bool fault;
3214 
3215 	fault = test_for_exception(UD_VECTOR, &try_compat_invvpid, NULL);
3216 	report("Compatibility mode INVVPID raises #UD", fault);
3217 }
3218 
3219 static void invvpid_test_not_in_vmx_operation(void)
3220 {
3221 	bool fault;
3222 
3223 	TEST_ASSERT(!vmx_off());
3224 	fault = test_for_exception(UD_VECTOR, &ds_invvpid, NULL);
3225 	report("INVVPID outside of VMX operation raises #UD", fault);
3226 	TEST_ASSERT(!vmx_on());
3227 }
3228 
3229 /*
3230  * This does not test real-address mode, virtual-8086 mode, protected mode,
3231  * or CPL > 0.
3232  */
3233 static void invvpid_test_v2(void)
3234 {
3235 	u64 msr;
3236 	int i;
3237 	unsigned types = 0;
3238 	unsigned type;
3239 
3240 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
3241 	    !(ctrl_cpu_rev[1].clr & CPU_VPID))
3242 		test_skip("VPID not supported");
3243 
3244 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3245 
3246 	if (!(msr & VPID_CAP_INVVPID))
		test_skip("INVVPID not supported");
3248 
3249 	if (msr & VPID_CAP_INVVPID_ADDR)
3250 		types |= 1u << INVVPID_ADDR;
3251 	if (msr & VPID_CAP_INVVPID_CXTGLB)
3252 		types |= 1u << INVVPID_CONTEXT_GLOBAL;
3253 	if (msr & VPID_CAP_INVVPID_ALL)
3254 		types |= 1u << INVVPID_ALL;
3255 	if (msr & VPID_CAP_INVVPID_CXTLOC)
3256 		types |= 1u << INVVPID_CONTEXT_LOCAL;
3257 
3258 	if (!types)
		test_skip("No INVVPID types supported");
3260 
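	/* Sweep a range of both valid and bogus INVVPID types. */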
3261 	for (i = -127; i < 128; i++)
3262 		try_invvpid(i, 0xffff, 0);
3263 
3264 	/*
3265 	 * VPID must not be more than 16 bits.
3266 	 */
3267 	for (i = 0; i < 64; i++)
3268 		for (type = 0; type < 4; type++)
3269 			if (types & (1u << type))
3270 				try_invvpid(type, 1ul << i, 0);
3271 
3272 	/*
3273 	 * VPID must not be zero, except for "all contexts."
3274 	 */
3275 	for (type = 0; type < 4; type++)
3276 		if (types & (1u << type))
3277 			try_invvpid(type, 0, 0);
3278 
3279 	/*
3280 	 * The gla operand is only validated for single-address INVVPID.
3281 	 */
3282 	if (types & (1u << INVVPID_ADDR))
3283 		try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL);
3284 
3285 	invvpid_test_gp();
3286 	invvpid_test_ss();
3287 	invvpid_test_pf();
3288 	invvpid_test_compatibility_mode();
3289 	invvpid_test_not_in_vmx_operation();
3290 }
3291 
3292 /*
3293  * Test for early VMLAUNCH failure. Returns true if VMLAUNCH makes it
3294  * at least as far as the guest-state checks. Returns false if the
3295  * VMLAUNCH fails early and execution falls through to the next
3296  * instruction.
3297  */
3298 static bool vmlaunch_succeeds(void)
3299 {
3300 	u32 exit_reason;
3301 
3302 	/*
3303 	 * Indirectly set VMX_INST_ERR to 12 ("VMREAD/VMWRITE from/to
3304 	 * unsupported VMCS component"). The caller can then check
3305 	 * to see if a failed VM-entry sets VMX_INST_ERR as expected.
3306 	 */
3307 	vmcs_write(~0u, 0);
3308 
3309 	vmcs_write(HOST_RIP, (uintptr_t)&&success);
3310 	__asm__ __volatile__ goto ("vmwrite %%rsp, %0; vmlaunch"
3311 				   :
3312 				   : "r" ((u64)HOST_RSP)
3313 				   : "cc", "memory"
3314 				   : success);
3315 	return false;
3316 success:
3317 	exit_reason = vmcs_read(EXI_REASON);
3318 	TEST_ASSERT(exit_reason == (VMX_FAIL_STATE | VMX_ENTRY_FAILURE) ||
3319 		    exit_reason == (VMX_FAIL_MSR | VMX_ENTRY_FAILURE));
3320 	return true;
3321 }
3322 
3323 /*
3324  * Try to launch the current VMCS.
3325  */
3326 static void test_vmx_vmlaunch(u32 xerror, bool xfail)
3327 {
3328 	bool success = vmlaunch_succeeds();
3329 	u32 vmx_inst_err;
3330 
3331 	report_xfail("vmlaunch %s", xfail, success == !xerror,
3332 		     !xerror ? "succeeds" : "fails");
3333 	if (!success && xerror) {
3334 		vmx_inst_err = vmcs_read(VMX_INST_ERROR);
3335 		report("VMX inst error is %d (actual %d)",
3336 		       vmx_inst_err == xerror, xerror, vmx_inst_err);
3337 	}
3338 }
3339 
3340 static void test_vmx_invalid_controls(bool xfail)
3341 {
3342 	test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_CONTROL_FIELD, xfail);
3343 }
3344 
3345 static void test_vmx_valid_controls(bool xfail)
3346 {
3347 	test_vmx_vmlaunch(0, xfail);
3348 }
3349 
3350 /*
3351  * Test a particular value of a VM-execution control bit, if the value
3352  * is required or if the value is zero.
3353  */
3354 static void test_rsvd_ctl_bit_value(const char *name, union vmx_ctrl_msr msr,
3355 				    enum Encoding encoding, unsigned bit,
3356 				    unsigned val)
3357 {
3358 	u32 mask = 1u << bit;
3359 	bool expected;
3360 	u32 controls;
3361 
3362 	if (msr.set & mask)
3363 		TEST_ASSERT(msr.clr & mask);
3364 
3365 	/*
3366 	 * We can't arbitrarily turn on a control bit, because it may
3367 	 * introduce dependencies on other VMCS fields. So, we only
3368 	 * test turning on bits that have a required setting.
3369 	 */
3370 	if (val && (msr.clr & mask) && !(msr.set & mask))
3371 		return;
3372 
3373 	report_prefix_pushf("%s %s bit %d",
3374 			    val ? "Set" : "Clear", name, bit);
3375 
3376 	controls = vmcs_read(encoding);
3377 	if (val) {
3378 		vmcs_write(encoding, msr.set | mask);
3379 		expected = (msr.clr & mask);
3380 	} else {
3381 		vmcs_write(encoding, msr.set & ~mask);
3382 		expected = !(msr.set & mask);
3383 	}
3384 	if (expected)
3385 		test_vmx_valid_controls(false);
3386 	else
3387 		test_vmx_invalid_controls(false);
3388 	vmcs_write(encoding, controls);
3389 	report_prefix_pop();
3390 }
3391 
3392 /*
3393  * Test reserved values of a VM-execution control bit, based on the
3394  * allowed bit settings from the corresponding VMX capability MSR.
3395  */
3396 static void test_rsvd_ctl_bit(const char *name, union vmx_ctrl_msr msr,
3397 			      enum Encoding encoding, unsigned bit)
3398 {
3399 	test_rsvd_ctl_bit_value(name, msr, encoding, bit, 0);
3400 	test_rsvd_ctl_bit_value(name, msr, encoding, bit, 1);
3401 }
3402 
3403 /*
3404  * Reserved bits in the pin-based VM-execution controls must be set
3405  * properly. Software may consult the VMX capability MSRs to determine
3406  * the proper settings.
3407  * [Intel SDM]
3408  */
3409 static void test_pin_based_ctls(void)
3410 {
3411 	unsigned bit;
3412 
3413 	printf("%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PIN" :
3414 	       "MSR_IA32_VMX_PINBASED_CTLS", ctrl_pin_rev.val);
3415 	for (bit = 0; bit < 32; bit++)
3416 		test_rsvd_ctl_bit("pin-based controls",
3417 				  ctrl_pin_rev, PIN_CONTROLS, bit);
3418 }
3419 
3420 /*
3421  * Reserved bits in the primary processor-based VM-execution controls
3422  * must be set properly. Software may consult the VMX capability MSRs
3423  * to determine the proper settings.
3424  * [Intel SDM]
3425  */
3426 static void test_primary_processor_based_ctls(void)
3427 {
3428 	unsigned bit;
3429 
3430 	printf("\n%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PROC" :
3431 	       "MSR_IA32_VMX_PROCBASED_CTLS", ctrl_cpu_rev[0].val);
3432 	for (bit = 0; bit < 32; bit++)
3433 		test_rsvd_ctl_bit("primary processor-based controls",
3434 				  ctrl_cpu_rev[0], CPU_EXEC_CTRL0, bit);
3435 }
3436 
3437 /*
3438  * If the "activate secondary controls" primary processor-based
3439  * VM-execution control is 1, reserved bits in the secondary
3440  * processor-based VM-execution controls must be cleared. Software may
3441  * consult the VMX capability MSRs to determine which bits are
3442  * reserved.
3443  * If the "activate secondary controls" primary processor-based
3444  * VM-execution control is 0 (or if the processor does not support the
3445  * 1-setting of that control), no checks are performed on the
3446  * secondary processor-based VM-execution controls.
3447  * [Intel SDM]
3448  */
3449 static void test_secondary_processor_based_ctls(void)
3450 {
3451 	u32 primary;
3452 	u32 secondary;
3453 	unsigned bit;
3454 
3455 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY))
3456 		return;
3457 
3458 	primary = vmcs_read(CPU_EXEC_CTRL0);
3459 	secondary = vmcs_read(CPU_EXEC_CTRL1);
3460 
3461 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY);
3462 	printf("\nMSR_IA32_VMX_PROCBASED_CTLS2: %lx\n", ctrl_cpu_rev[1].val);
3463 	for (bit = 0; bit < 32; bit++)
3464 		test_rsvd_ctl_bit("secondary processor-based controls",
3465 				  ctrl_cpu_rev[1], CPU_EXEC_CTRL1, bit);
3466 
3467 	/*
3468 	 * When the "activate secondary controls" VM-execution control
3469 	 * is clear, there are no checks on the secondary controls.
3470 	 */
3471 	vmcs_write(CPU_EXEC_CTRL0, primary & ~CPU_SECONDARY);
3472 	vmcs_write(CPU_EXEC_CTRL1, ~0);
3473 	report("Secondary processor-based controls ignored",
3474 	       vmlaunch_succeeds());
3475 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3476 	vmcs_write(CPU_EXEC_CTRL0, primary);
3477 }
3478 
3479 static void try_cr3_target_count(unsigned i, unsigned max)
3480 {
3481 	report_prefix_pushf("CR3 target count 0x%x", i);
3482 	vmcs_write(CR3_TARGET_COUNT, i);
3483 	if (i <= max)
3484 		test_vmx_valid_controls(false);
3485 	else
3486 		test_vmx_invalid_controls(false);
3487 	report_prefix_pop();
3488 }
3489 
3490 /*
3491  * The CR3-target count must not be greater than 4. Future processors
3492  * may support a different number of CR3-target values. Software
3493  * should read the VMX capability MSR IA32_VMX_MISC to determine the
3494  * number of values supported.
3495  * [Intel SDM]
3496  */
3497 static void test_cr3_targets(void)
3498 {
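	/* Bits 24:16 of IA32_VMX_MISC report the number of CR3-target values. */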
3499 	unsigned supported_targets = (rdmsr(MSR_IA32_VMX_MISC) >> 16) & 0x1ff;
3500 	u32 cr3_targets = vmcs_read(CR3_TARGET_COUNT);
3501 	unsigned i;
3502 
3503 	printf("\nSupported CR3 targets: %d\n", supported_targets);
3504 	TEST_ASSERT(supported_targets <= 256);
3505 
3506 	try_cr3_target_count(-1u, supported_targets);
3507 	try_cr3_target_count(0x80000000, supported_targets);
3508 	try_cr3_target_count(0x7fffffff, supported_targets);
3509 	for (i = 0; i <= supported_targets + 1; i++)
3510 		try_cr3_target_count(i, supported_targets);
3511 	vmcs_write(CR3_TARGET_COUNT, cr3_targets);
3512 }
3513 
3514 /*
3515  * Test a particular address setting in the VMCS
3516  */
3517 static void test_vmcs_addr(const char *name,
3518 			   enum Encoding encoding,
3519 			   u64 align,
3520 			   bool ignored,
3521 			   bool xfail_beyond_mapped_ram,
3522 			   u64 addr)
3523 {
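	/*
	 * An address above mapped RAM but below MAXPHYADDR passes the
	 * architectural checks, yet VM entry may still fail because the
	 * page is not backed by RAM; callers mark those cases as expected
	 * failures rather than hard errors.
	 */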
3524 	bool xfail =
3525 		(xfail_beyond_mapped_ram &&
3526 		 addr > fwcfg_get_u64(FW_CFG_RAM_SIZE) - align &&
3527 		 addr < (1ul << cpuid_maxphyaddr()));
3528 
3529 	report_prefix_pushf("%s = %lx", name, addr);
3530 	vmcs_write(encoding, addr);
3531 	if (ignored || (IS_ALIGNED(addr, align) &&
3532 	    addr < (1ul << cpuid_maxphyaddr())))
3533 		test_vmx_valid_controls(xfail);
3534 	else
3535 		test_vmx_invalid_controls(xfail);
3536 	report_prefix_pop();
3538 }
3539 
3540 /*
3541  * Test interesting values for a VMCS address
3542  */
3543 static void test_vmcs_addr_values(const char *name,
3544 				  enum Encoding encoding,
3545 				  u64 align,
3546 				  bool ignored,
3547 				  bool xfail_beyond_mapped_ram,
3548 				  u32 bit_start, u32 bit_end)
3549 {
3550 	unsigned i;
3551 	u64 orig_val = vmcs_read(encoding);
3552 
3553 	for (i = bit_start; i <= bit_end; i++)
3554 		test_vmcs_addr(name, encoding, align, ignored,
3555 			       xfail_beyond_mapped_ram, 1ul << i);
3556 
3557 	test_vmcs_addr(name, encoding, align, ignored,
3558 		       xfail_beyond_mapped_ram, PAGE_SIZE - 1);
3559 	test_vmcs_addr(name, encoding, align, ignored,
3560 		       xfail_beyond_mapped_ram, PAGE_SIZE);
3561 	test_vmcs_addr(name, encoding, align, ignored,
3562 		       xfail_beyond_mapped_ram,
3563 		      (1ul << cpuid_maxphyaddr()) - PAGE_SIZE);
3564 	test_vmcs_addr(name, encoding, align, ignored,
3565 		       xfail_beyond_mapped_ram, -1ul);
3566 
3567 	vmcs_write(encoding, orig_val);
3568 }
3569 
3570 /*
3571  * Test a physical address reference in the VMCS, when the corresponding
3572  * feature is enabled and when the corresponding feature is disabled.
3573  */
3574 static void test_vmcs_addr_reference(u32 control_bit, enum Encoding field,
3575 				     const char *field_name,
3576 				     const char *control_name, u64 align,
3577 				     bool xfail_beyond_mapped_ram,
3578 				     bool control_primary)
3579 {
3580 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
3581 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
3582 	u64 page_addr;
3583 
3584 	if (control_primary) {
3585 		if (!(ctrl_cpu_rev[0].clr & control_bit))
3586 			return;
3587 	} else {
3588 		if (!(ctrl_cpu_rev[1].clr & control_bit))
3589 			return;
3590 	}
3591 
3592 	page_addr = vmcs_read(field);
3593 
3594 	report_prefix_pushf("%s enabled", control_name);
3595 	if (control_primary) {
3596 		vmcs_write(CPU_EXEC_CTRL0, primary | control_bit);
3597 	} else {
3598 		vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY);
3599 		vmcs_write(CPU_EXEC_CTRL1, secondary | control_bit);
3600 	}
3601 
3602 	test_vmcs_addr_values(field_name, field, align, false,
3603 			      xfail_beyond_mapped_ram, 0, 63);
3604 	report_prefix_pop();
3605 
3606 	report_prefix_pushf("%s disabled", control_name);
3607 	if (control_primary) {
3608 		vmcs_write(CPU_EXEC_CTRL0, primary & ~control_bit);
3609 	} else {
3610 		vmcs_write(CPU_EXEC_CTRL0, primary & ~CPU_SECONDARY);
3611 		vmcs_write(CPU_EXEC_CTRL1, secondary & ~control_bit);
3612 	}
3613 
3614 	test_vmcs_addr_values(field_name, field, align, true, false, 0, 63);
3615 	report_prefix_pop();
3616 
3617 	vmcs_write(field, page_addr);
3618 	vmcs_write(CPU_EXEC_CTRL0, primary);
3619 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3620 }
3621 
3622 /*
3623  * If the "use I/O bitmaps" VM-execution control is 1, bits 11:0 of
3624  * each I/O-bitmap address must be 0. Neither address should set any
3625  * bits beyond the processor's physical-address width.
3626  * [Intel SDM]
3627  */
3628 static void test_io_bitmaps(void)
3629 {
3630 	test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_A,
3631 				 "I/O bitmap A", "Use I/O bitmaps",
3632 				 PAGE_SIZE, false, true);
3633 	test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_B,
3634 				 "I/O bitmap B", "Use I/O bitmaps",
3635 				 PAGE_SIZE, false, true);
3636 }
3637 
3638 /*
3639  * If the "use MSR bitmaps" VM-execution control is 1, bits 11:0 of
3640  * the MSR-bitmap address must be 0. The address should not set any
3641  * bits beyond the processor's physical-address width.
3642  * [Intel SDM]
3643  */
3644 static void test_msr_bitmap(void)
3645 {
3646 	test_vmcs_addr_reference(CPU_MSR_BITMAP, MSR_BITMAP,
3647 				 "MSR bitmap", "Use MSR bitmaps",
3648 				 PAGE_SIZE, false, true);
3649 }
3650 
3651 /*
3652  * If the "use TPR shadow" VM-execution control is 1, the virtual-APIC
3653  * address must satisfy the following checks:
3654  * - Bits 11:0 of the address must be 0.
3655  * - The address should not set any bits beyond the processor's
3656  *   physical-address width.
3657  * [Intel SDM]
3658  */
3659 static void test_apic_virt_addr(void)
3660 {
3661 	/*
3662 	 * Ensure the processor will never use the virtual-APIC page, since
3663 	 * we will point it to invalid RAM.  Otherwise KVM is puzzled about
3664 	 * what we're trying to achieve and fails vmentry.
3665 	 */
3666 	u32 cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0);
3667 	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0 | CPU_CR8_LOAD | CPU_CR8_STORE);
3668 	test_vmcs_addr_reference(CPU_TPR_SHADOW, APIC_VIRT_ADDR,
3669 				 "virtual-APIC address", "Use TPR shadow",
3670 				 PAGE_SIZE, false, true);
3671 	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0);
3672 }
3673 
3674 /*
3675  * If the "virtualize APIC-accesses" VM-execution control is 1, the
3676  * APIC-access address must satisfy the following checks:
3677  *  - Bits 11:0 of the address must be 0.
3678  *  - The address should not set any bits beyond the processor's
3679  *    physical-address width.
3680  * [Intel SDM]
3681  */
3682 static void test_apic_access_addr(void)
3683 {
3684 	void *apic_access_page = alloc_page();
3685 
3686 	vmcs_write(APIC_ACCS_ADDR, virt_to_phys(apic_access_page));
3687 
3688 	test_vmcs_addr_reference(CPU_VIRT_APIC_ACCESSES, APIC_ACCS_ADDR,
3689 				 "APIC-access address",
3690 				 "virtualize APIC-accesses", PAGE_SIZE,
3691 				 false, false);
3692 }
3693 
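/*
 * Set the bits in *secondary selected by 'mask' from the set {virtualize
 * x2APIC mode, APIC-register virtualization, virtual-interrupt delivery},
 * skipping any control the CPU does not allow to be set.  Returns true if
 * at least one of the selected controls was actually set.
 */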
3694 static bool set_bit_pattern(u8 mask, u32 *secondary)
3695 {
3696 	u8 i;
3697 	bool flag = false;
3698 	u32 test_bits[3] = {
3699 		CPU_VIRT_X2APIC,
3700 		CPU_APIC_REG_VIRT,
3701 		CPU_VINTD
3702 	};
3703 
3704 	for (i = 0; i < ARRAY_SIZE(test_bits); i++) {
3705 		if ((mask & (1u << i)) &&
3706 		    (ctrl_cpu_rev[1].clr & test_bits[i])) {
3707 			*secondary |= test_bits[i];
3708 			flag = true;
3709 		}
3710 	}
3711 
3712 	return flag;
3713 }
3714 
3715 /*
3716  * 1. If the "use TPR shadow" VM-execution control is 0, the following
3717  *    VM-execution controls must also be 0:
3718  *    - virtualize x2APIC mode
3719  *    - APIC-register virtualization
3720  *    - virtual-interrupt delivery
3721  *    [Intel SDM]
3722  *
3723  * 2. If the "virtualize x2APIC mode" VM-execution control is 1, the
3724  *    "virtualize APIC accesses" VM-execution control must be 0.
3725  *    [Intel SDM]
3726  */
3727 static void test_apic_virtual_ctls(void)
3728 {
3729 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
3730 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
3731 	u32 primary = saved_primary;
3732 	u32 secondary = saved_secondary;
3733 	bool ctrl = false;
3734 	char str[10] = "disabled";
3735 	u8 i = 0, j;
3736 
3737 	/*
3738 	 * First test
3739 	 */
3740 	if (!((ctrl_cpu_rev[0].clr & (CPU_SECONDARY | CPU_TPR_SHADOW)) ==
3741 	    (CPU_SECONDARY | CPU_TPR_SHADOW)))
3742 		return;
3743 
3744 	primary |= CPU_SECONDARY;
3745 	primary &= ~CPU_TPR_SHADOW;
3746 	vmcs_write(CPU_EXEC_CTRL0, primary);
3747 
3748 	while (1) {
3749 		for (j = 1; j < 8; j++) {
3750 			secondary &= ~(CPU_VIRT_X2APIC | CPU_APIC_REG_VIRT | CPU_VINTD);
3751 			if (primary & CPU_TPR_SHADOW) {
3752 				ctrl = true;
3753 			} else {
3754 				if (!set_bit_pattern(j, &secondary))
3755 					ctrl = true;
3756 				else
3757 					ctrl = false;
3758 			}
3759 
3760 			vmcs_write(CPU_EXEC_CTRL1, secondary);
3761 			report_prefix_pushf("Use TPR shadow %s, virtualize x2APIC mode %s, APIC-register virtualization %s, virtual-interrupt delivery %s",
3762 				str, (secondary & CPU_VIRT_X2APIC) ? "enabled" : "disabled", (secondary & CPU_APIC_REG_VIRT) ? "enabled" : "disabled", (secondary & CPU_VINTD) ? "enabled" : "disabled");
3763 			if (ctrl)
3764 				test_vmx_valid_controls(false);
3765 			else
3766 				test_vmx_invalid_controls(false);
3767 			report_prefix_pop();
3768 		}
3769 
3770 		if (i == 1)
3771 			break;
3772 		i++;
3773 
3774 		primary |= CPU_TPR_SHADOW;
3775 		vmcs_write(CPU_EXEC_CTRL0, primary);
3776 		strcpy(str, "enabled");
3777 	}
3778 
3779 	/*
3780 	 * Second test
3781 	 */
3782 	u32 apic_virt_ctls = (CPU_VIRT_X2APIC | CPU_VIRT_APIC_ACCESSES);
3783 
3784 	primary = saved_primary;
3785 	secondary = saved_secondary;
3786 	if (!((ctrl_cpu_rev[1].clr & apic_virt_ctls) == apic_virt_ctls))
3787 		return;
3788 
3789 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY);
3790 	secondary &= ~CPU_VIRT_APIC_ACCESSES;
3791 	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VIRT_X2APIC);
3792 	report_prefix_pushf("Virtualize x2APIC mode disabled; virtualize APIC access disabled");
3793 	test_vmx_valid_controls(false);
3794 	report_prefix_pop();
3795 
3796 	vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VIRT_APIC_ACCESSES);
3797 	report_prefix_pushf("Virtualize x2APIC mode disabled; virtualize APIC access enabled");
3798 	test_vmx_valid_controls(false);
3799 	report_prefix_pop();
3800 
3801 	vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VIRT_X2APIC);
3802 	report_prefix_pushf("Virtualize x2APIC mode enabled; virtualize APIC access enabled");
3803 	test_vmx_invalid_controls(false);
3804 	report_prefix_pop();
3805 
3806 	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VIRT_APIC_ACCESSES);
3807 	report_prefix_pushf("Virtualize x2APIC mode enabled; virtualize APIC access disabled");
3808 	test_vmx_valid_controls(false);
3809 	report_prefix_pop();
3810 
3811 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
3812 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
3813 }
3814 
3815 /*
3816  * If the "virtual-interrupt delivery" VM-execution control is 1, the
3817  * "external-interrupt exiting" VM-execution control must be 1.
3818  * [Intel SDM]
3819  */
3820 static void test_virtual_intr_ctls(void)
3821 {
3822 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
3823 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
3824 	u32 saved_pin = vmcs_read(PIN_CONTROLS);
3825 	u32 primary = saved_primary;
3826 	u32 secondary = saved_secondary;
3827 	u32 pin = saved_pin;
3828 
3829 	if (!((ctrl_cpu_rev[1].clr & CPU_VINTD) &&
3830 	    (ctrl_pin_rev.clr & PIN_EXTINT)))
3831 		return;
3832 
3833 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW);
3834 	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VINTD);
3835 	vmcs_write(PIN_CONTROLS, pin & ~PIN_EXTINT);
3836 	report_prefix_pushf("Virtualize interrupt-delivery disabled; external-interrupt exiting disabled");
3837 	test_vmx_valid_controls(false);
3838 	report_prefix_pop();
3839 
3840 	vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VINTD);
3841 	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting disabled");
3842 	test_vmx_invalid_controls(false);
3843 	report_prefix_pop();
3844 
3845 	vmcs_write(PIN_CONTROLS, pin | PIN_EXTINT);
3846 	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting enabled");
3847 	test_vmx_valid_controls(false);
3848 	report_prefix_pop();
3849 
3850 	vmcs_write(PIN_CONTROLS, pin & ~PIN_EXTINT);
3851 	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting disabled");
3852 	test_vmx_invalid_controls(false);
3853 	report_prefix_pop();
3854 
3855 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
3856 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
3857 	vmcs_write(PIN_CONTROLS, saved_pin);
3858 }
3859 
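/*
 * Write 'addr' to the posted-interrupt descriptor-address field and
 * verify that VM-entry validity matches 'ctrl'.
 */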
3860 static void test_pi_desc_addr(u64 addr, bool ctrl)
3861 {
3862 	vmcs_write(POSTED_INTR_DESC_ADDR, addr);
3863 	report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-descriptor-address 0x%lx", addr);
3864 	if (ctrl)
3865 		test_vmx_valid_controls(false);
3866 	else
3867 		test_vmx_invalid_controls(false);
3868 	report_prefix_pop();
3869 }
3870 
3871 /*
3872  * If the "process posted interrupts" VM-execution control is 1, the
3873  * following must be true:
3874  *
3875  *	- The "virtual-interrupt delivery" VM-execution control is 1.
3876  *	- The "acknowledge interrupt on exit" VM-exit control is 1.
3877  *	- The posted-interrupt notification vector has a value in the
3878  *	  range 0-255 (bits 15:8 are all 0).
3879  *	- Bits 5:0 of the posted-interrupt descriptor address are all 0.
3880  *	- The posted-interrupt descriptor address does not set any bits
3881  *	  beyond the processor's physical-address width.
3882  * [Intel SDM]
3883  */
3884 static void test_posted_intr(void)
3885 {
3886 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
3887 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
3888 	u32 saved_pin = vmcs_read(PIN_CONTROLS);
3889 	u32 exit_ctl_saved = vmcs_read(EXI_CONTROLS);
3890 	u32 primary = saved_primary;
3891 	u32 secondary = saved_secondary;
3892 	u32 pin = saved_pin;
3893 	u32 exit_ctl = exit_ctl_saved;
3894 	u16 vec;
3895 	int i;
3896 
3897 	if (!((ctrl_pin_rev.clr & PIN_POST_INTR) &&
3898 	    (ctrl_cpu_rev[1].clr & CPU_VINTD) &&
3899 	    (ctrl_exit_rev.clr & EXI_INTA)))
3900 		return;
3901 
3902 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW);
3903 
3904 	/*
3905 	 * Test virtual-interrupt-delivery and acknowledge-interrupt-on-exit
3906 	 */
3907 	pin |= PIN_POST_INTR;
3908 	vmcs_write(PIN_CONTROLS, pin);
3909 	secondary &= ~CPU_VINTD;
3910 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3911 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery disabled");
3912 	test_vmx_invalid_controls(false);
3913 	report_prefix_pop();
3914 
3915 	secondary |= CPU_VINTD;
3916 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3917 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled");
3918 	test_vmx_invalid_controls(false);
3919 	report_prefix_pop();
3920 
3921 	exit_ctl &= ~EXI_INTA;
3922 	vmcs_write(EXI_CONTROLS, exit_ctl);
3923 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit disabled");
3924 	test_vmx_invalid_controls(false);
3925 	report_prefix_pop();
3926 
3927 	exit_ctl |= EXI_INTA;
3928 	vmcs_write(EXI_CONTROLS, exit_ctl);
3929 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit enabled");
3930 	test_vmx_valid_controls(false);
3931 	report_prefix_pop();
3932 
3933 	secondary &= ~CPU_VINTD;
3934 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3935 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery disabled; acknowledge-interrupt-on-exit enabled");
3936 	test_vmx_invalid_controls(false);
3937 	report_prefix_pop();
3938 
3939 	secondary |= CPU_VINTD;
3940 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3941 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit enabled");
3942 	test_vmx_valid_controls(false);
3943 	report_prefix_pop();
3944 
3945 	/*
3946 	 * Test posted-interrupt notification vector
3947 	 */
3948 	for (i = 0; i < 8; i++) {
3949 		vec = (1ul << i);
3950 		vmcs_write(PINV, vec);
3951 		report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec);
3952 		test_vmx_valid_controls(false);
3953 		report_prefix_pop();
3954 	}
3955 	for (i = 8; i < 16; i++) {
3956 		vec = (1ul << i);
3957 		vmcs_write(PINV, vec);
3958 		report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec);
3959 		test_vmx_invalid_controls(false);
3960 		report_prefix_pop();
3961 	}
3962 
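	/*
	 * Clearing bits 15:8 of the last vector written above (1 << 15)
	 * leaves vector 0, which is valid.
	 */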
3963 	vec &= ~(0xff << 8);
3964 	vmcs_write(PINV, vec);
3965 	report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec);
3966 	test_vmx_valid_controls(false);
3967 	report_prefix_pop();
3968 
3969 	/*
3970 	 * Test posted-interrupt descriptor address
3971 	 */
3972 	for (i = 0; i < 6; i++) {
3973 		test_pi_desc_addr(1ul << i, false);
3974 	}
3975 
3976 	test_pi_desc_addr(0xf0, false);
3977 	test_pi_desc_addr(0xff, false);
3978 	test_pi_desc_addr(0x0f, false);
3979 	test_pi_desc_addr(0x8000, true);
3980 	test_pi_desc_addr(0x00, true);
3981 	test_pi_desc_addr(0xc000, true);
3982 
3983 	test_vmcs_addr_values("process-posted interrupts",
3984 			       POSTED_INTR_DESC_ADDR, 64,
3985 			       false, false, 0, 63);
3986 
3987 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
3988 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
3989 	vmcs_write(PIN_CONTROLS, saved_pin);
3990 }
3991 
3992 static void test_apic_ctls(void)
3993 {
3994 	test_apic_virt_addr();
3995 	test_apic_access_addr();
3996 	test_apic_virtual_ctls();
3997 	test_virtual_intr_ctls();
3998 	test_posted_intr();
3999 }
4000 
4001 /*
4002  * If the "enable VPID" VM-execution control is 1, the value of the
4003  * VPID VM-execution control field must not be 0000H.
4004  * [Intel SDM]
4005  */
4006 static void test_vpid(void)
4007 {
4008 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
4009 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
4010 	u16 vpid = 0x0000;
4011 	int i;
4012 
4013 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4014 	    (ctrl_cpu_rev[1].clr & CPU_VPID))) {
4015 		test_skip("Secondary controls and/or VPID not supported");
4016 		return;
4017 	}
4018 
4019 	vmcs_write(CPU_EXEC_CTRL0, saved_primary | CPU_SECONDARY);
4020 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary & ~CPU_VPID);
4021 	vmcs_write(VPID, vpid);
4022 	report_prefix_pushf("VPID disabled; VPID value %x", vpid);
4023 	test_vmx_valid_controls(false);
4024 	report_prefix_pop();
4025 
4026 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary | CPU_VPID);
4027 	report_prefix_pushf("VPID enabled; VPID value %x", vpid);
4028 	test_vmx_invalid_controls(false);
4029 	report_prefix_pop();
4030 
4031 	for (i = 0; i < 16; i++) {
4032 		vpid = (short)1 << i;
4033 		vmcs_write(VPID, vpid);
4034 		report_prefix_pushf("VPID enabled; VPID value %x", vpid);
4035 		test_vmx_valid_controls(false);
4036 		report_prefix_pop();
4037 	}
4038 
4039 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
4040 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
4041 }
4042 
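/* Write 'vtpr' to the TPR field (offset 0x80) of the virtual-APIC page. */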
4043 static void set_vtpr(unsigned vtpr)
4044 {
4045 	*(u32 *)phys_to_virt(vmcs_read(APIC_VIRT_ADDR) + APIC_TASKPRI) = vtpr;
4046 }
4047 
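/*
 * Program vTPR and check the result against SDM check (ii): with "use TPR
 * shadow" in effect and neither "virtual-interrupt delivery" nor
 * "virtualize APIC accesses" enabled, TPR-threshold[3:0] must not exceed
 * VTPR[7:4].
 */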
4048 static void try_tpr_threshold_and_vtpr(unsigned threshold, unsigned vtpr)
4049 {
4050 	bool valid = true;
4051 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
4052 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
4053 
4054 	if ((primary & CPU_TPR_SHADOW) &&
4055 	    (!(primary & CPU_SECONDARY) ||
4056 	     !(secondary & (CPU_VINTD | CPU_VIRT_APIC_ACCESSES))))
4057 		valid = (threshold & 0xf) <= ((vtpr >> 4) & 0xf);
4058 
4059 	set_vtpr(vtpr);
4060 	report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0x%x",
4061 	    threshold, (vtpr >> 4) & 0xf);
4062 	if (valid)
4063 		test_vmx_valid_controls(false);
4064 	else
4065 		test_vmx_invalid_controls(false);
4066 	report_prefix_pop();
4067 }
4068 
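/*
 * Exercise the consistency checks on the VM-entry interruption-information,
 * exception error-code, and instruction-length fields that apply when the
 * valid bit (bit 31) of the interruption-information field is 1.
 * [Intel SDM]
 */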
4069 static void test_invalid_event_injection(void)
4070 {
4071 	u32 ent_intr_info_save = vmcs_read(ENT_INTR_INFO);
4072 	u32 ent_intr_error_save = vmcs_read(ENT_INTR_ERROR);
4073 	u32 ent_inst_len_save = vmcs_read(ENT_INST_LEN);
4074 	u32 primary_save = vmcs_read(CPU_EXEC_CTRL0);
4075 	u32 secondary_save = vmcs_read(CPU_EXEC_CTRL1);
4076 	u64 guest_cr0_save = vmcs_read(GUEST_CR0);
4077 	u32 ent_intr_info_base = INTR_INFO_VALID_MASK;
4078 	u32 ent_intr_info, ent_intr_err, ent_intr_len;
4079 	u32 cnt;
4080 
4081 	/* Setup */
4082 	report_prefix_push("invalid event injection");
4083 	vmcs_write(ENT_INTR_ERROR, 0x00000000);
4084 	vmcs_write(ENT_INST_LEN, 0x00000001);
4085 
4086 	/* The field’s interruption type is not set to a reserved value. */
4087 	ent_intr_info = ent_intr_info_base | INTR_TYPE_RESERVED | DE_VECTOR;
4088 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4089 			    "RESERVED interruption type invalid [-]",
4090 			    ent_intr_info);
4091 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4092 	test_vmx_invalid_controls(false);
4093 	report_prefix_pop();
4094 
4095 	ent_intr_info = ent_intr_info_base | INTR_TYPE_EXT_INTR |
4096 			DE_VECTOR;
4097 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4098 			    "RESERVED interruption type invalid [+]",
4099 			    ent_intr_info);
4100 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4101 	test_vmx_valid_controls(false);
4102 	report_prefix_pop();
4103 
4104 	/* If the interruption type is other event, the vector is 0. */
4105 	ent_intr_info = ent_intr_info_base | INTR_TYPE_OTHER_EVENT | DB_VECTOR;
4106 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4107 			    "(OTHER EVENT && vector != 0) invalid [-]",
4108 			    ent_intr_info);
4109 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4110 	test_vmx_invalid_controls(false);
4111 	report_prefix_pop();
4112 
4113 	/* If the interruption type is NMI, the vector is 2 (negative case). */
4114 	ent_intr_info = ent_intr_info_base | INTR_TYPE_NMI_INTR | DE_VECTOR;
4115 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4116 			    "(NMI && vector != 2) invalid [-]", ent_intr_info);
4117 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4118 	test_vmx_invalid_controls(false);
4119 	report_prefix_pop();
4120 
4121 	/* If the interruption type is NMI, the vector is 2 (positive case). */
4122 	ent_intr_info = ent_intr_info_base | INTR_TYPE_NMI_INTR | NMI_VECTOR;
4123 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4124 			    "(NMI && vector == 2) valid [+]", ent_intr_info);
4125 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4126 	test_vmx_valid_controls(false);
4127 	report_prefix_pop();
4128 
4129 	/*
4130 	 * If the interruption type
4131 	 * is HW exception, the vector is at most 31.
4132 	 */
4133 	ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION | 0x20;
4134 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4135 			    "(HW exception && vector > 31) invalid [-]",
4136 			    ent_intr_info);
4137 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4138 	test_vmx_invalid_controls(false);
4139 	report_prefix_pop();
4140 
4141 	/*
4142 	 * deliver-error-code is 1 iff either
4143 	 * (a) the "unrestricted guest" VM-execution control is 0, or
4144 	 * (b) CR0.PE is set.
4145 	 */
4146 
4147 	/* Assert that unrestricted guest is disabled or unsupported */
4148 	assert(!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
4149 	       !(secondary_save & CPU_URG));
4150 
4151 	ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION |
4152 			GP_VECTOR;
4153 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4154 			    "error code <-> (!URG || prot_mode) [-]",
4155 			    ent_intr_info);
4156 	vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG);
4157 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4158 	test_vmx_invalid_controls(false);
4159 	report_prefix_pop();
4160 
4161 	ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK |
4162 			INTR_TYPE_HARD_EXCEPTION | GP_VECTOR;
4163 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4164 			    "error code <-> (!URG || prot_mode) [+]",
4165 			    ent_intr_info);
4166 	vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG);
4167 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4168 	test_vmx_valid_controls(false);
4169 	report_prefix_pop();
4170 
4171 	if (enable_unrestricted_guest())
4172 		goto skip_unrestricted_guest;
4173 
4174 	ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK |
4175 			INTR_TYPE_HARD_EXCEPTION | GP_VECTOR;
4176 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4177 			    "error code <-> (!URG || prot_mode) [-]",
4178 			    ent_intr_info);
4179 	vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG);
4180 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4181 	test_vmx_invalid_controls(false);
4182 	report_prefix_pop();
4183 
4184 	ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION |
4185 			GP_VECTOR;
4186 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4187 			    "error code <-> (!URG || prot_mode) [-]",
4188 			    ent_intr_info);
4189 	vmcs_write(GUEST_CR0, guest_cr0_save | X86_CR0_PE);
4190 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4191 	test_vmx_invalid_controls(false);
4192 	report_prefix_pop();
4193 
4194 	vmcs_write(CPU_EXEC_CTRL1, secondary_save);
4195 	vmcs_write(CPU_EXEC_CTRL0, primary_save);
4196 
4197 skip_unrestricted_guest:
4198 	vmcs_write(GUEST_CR0, guest_cr0_save);
4199 
4200 	/* deliver-error-code is 1 iff the interruption type is HW exception */
4201 	report_prefix_push("error code <-> HW exception");
4202 	for (cnt = 0; cnt < 8; cnt++) {
4203 		u32 exception_type_mask = cnt << 8;
4204 		u32 deliver_error_code_mask =
4205 			exception_type_mask != INTR_TYPE_HARD_EXCEPTION ?
4206 			INTR_INFO_DELIVER_CODE_MASK : 0;
4207 
4208 		ent_intr_info = ent_intr_info_base | deliver_error_code_mask |
4209 				exception_type_mask | GP_VECTOR;
4210 		report_prefix_pushf("VM-entry intr info=0x%x [-]",
4211 				    ent_intr_info);
4212 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4213 		test_vmx_invalid_controls(false);
4214 		report_prefix_pop();
4215 	}
4216 	report_prefix_pop();
4217 
4218 	/*
4219 	 * deliver-error-code is 1 iff the vector
4220 	 * indicates an exception that would normally deliver an error code
4221 	 */
4222 	report_prefix_push("error code <-> vector delivers error code");
4223 	for (cnt = 0; cnt < 32; cnt++) {
4224 		bool has_error_code = false;
4225 		u32 deliver_error_code_mask;
4226 
4227 		switch (cnt) {
4228 		case DF_VECTOR:
4229 		case TS_VECTOR:
4230 		case NP_VECTOR:
4231 		case SS_VECTOR:
4232 		case GP_VECTOR:
4233 		case PF_VECTOR:
4234 		case AC_VECTOR:
4235 			has_error_code = true;
4236 		}
4237 
4238 		/* Negative case */
4239 		deliver_error_code_mask = has_error_code ?
4240 						0 :
4241 						INTR_INFO_DELIVER_CODE_MASK;
4242 		ent_intr_info = ent_intr_info_base | deliver_error_code_mask |
4243 				INTR_TYPE_HARD_EXCEPTION | cnt;
4244 		report_prefix_pushf("VM-entry intr info=0x%x [-]",
4245 				    ent_intr_info);
4246 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4247 		test_vmx_invalid_controls(false);
4248 		report_prefix_pop();
4249 
4250 		/* Positive case */
4251 		deliver_error_code_mask = has_error_code ?
4252 						INTR_INFO_DELIVER_CODE_MASK :
4253 						0;
4254 		ent_intr_info = ent_intr_info_base | deliver_error_code_mask |
4255 				INTR_TYPE_HARD_EXCEPTION | cnt;
4256 		report_prefix_pushf("VM-entry intr info=0x%x [+]",
4257 				    ent_intr_info);
4258 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4259 		test_vmx_valid_controls(false);
4260 		report_prefix_pop();
4261 	}
4262 	report_prefix_pop();
4263 
4264 	/* Reserved bits in the field (30:12) are 0. */
4265 	report_prefix_push("reserved bits clear");
4266 	for (cnt = 12; cnt <= 30; cnt++) {
4267 		ent_intr_info = ent_intr_info_base |
4268 				INTR_INFO_DELIVER_CODE_MASK |
4269 				INTR_TYPE_HARD_EXCEPTION | GP_VECTOR |
4270 				(1U << cnt);
4271 		report_prefix_pushf("VM-entry intr info=0x%x [-]",
4272 				    ent_intr_info);
4273 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4274 		test_vmx_invalid_controls(false);
4275 		report_prefix_pop();
4276 	}
4277 	report_prefix_pop();
4278 
4279 	/*
4280 	 * If deliver-error-code is 1,
4281 	 * bits 31:15 of the VM-entry exception error-code field are 0.
4282 	 */
4283 	ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK |
4284 			INTR_TYPE_HARD_EXCEPTION | GP_VECTOR;
4285 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4286 			    "VM-entry exception error code[31:15] clear",
4287 			    ent_intr_info);
4288 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4289 	for (cnt = 15; cnt <= 31; cnt++) {
4290 		ent_intr_err = 1U << cnt;
4291 		report_prefix_pushf("VM-entry intr error=0x%x [-]",
4292 				    ent_intr_err);
4293 		vmcs_write(ENT_INTR_ERROR, ent_intr_err);
4294 		test_vmx_invalid_controls(false);
4295 		report_prefix_pop();
4296 	}
4297 	vmcs_write(ENT_INTR_ERROR, 0x00000000);
4298 	report_prefix_pop();
4299 
4300 	/*
4301 	 * If the interruption type is software interrupt, software exception,
4302 	 * or privileged software exception, the VM-entry instruction-length
4303 	 * field is in the range 0–15.
4304 	 */
4305 
4306 	for (cnt = 0; cnt < 3; cnt++) {
4307 		switch (cnt) {
4308 		case 0:
4309 			ent_intr_info = ent_intr_info_base |
4310 					INTR_TYPE_SOFT_INTR;
4311 			break;
4312 		case 1:
4313 			ent_intr_info = ent_intr_info_base |
4314 					INTR_TYPE_SOFT_EXCEPTION;
4315 			break;
4316 		case 2:
4317 			ent_intr_info = ent_intr_info_base |
4318 					INTR_TYPE_PRIV_SW_EXCEPTION;
4319 			break;
4320 		}
4321 		report_prefix_pushf("%s, VM-entry intr info=0x%x",
4322 				    "VM-entry instruction-length check",
4323 				    ent_intr_info);
4324 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4325 
4326 		/* Instruction length set to -1 (0xFFFFFFFF) should fail */
4327 		ent_intr_len = -1;
4328 		report_prefix_pushf("VM-entry intr length = 0x%x [-]",
4329 				    ent_intr_len);
4330 		vmcs_write(ENT_INST_LEN, ent_intr_len);
4331 		test_vmx_invalid_controls(false);
4332 		report_prefix_pop();
4333 
4334 		/* Instruction length set to 16 should fail */
4335 		ent_intr_len = 0x00000010;
4336 		report_prefix_pushf("VM-entry intr length = 0x%x [-]",
4337 				    ent_intr_len);
4338 		vmcs_write(ENT_INST_LEN, 0x00000010);
4339 		test_vmx_invalid_controls(false);
4340 		report_prefix_pop();
4341 
4342 		report_prefix_pop();
4343 	}
4344 
4345 	/* Cleanup */
4346 	vmcs_write(ENT_INTR_INFO, ent_intr_info_save);
4347 	vmcs_write(ENT_INTR_ERROR, ent_intr_error_save);
4348 	vmcs_write(ENT_INST_LEN, ent_inst_len_save);
4349 	vmcs_write(CPU_EXEC_CTRL0, primary_save);
4350 	vmcs_write(CPU_EXEC_CTRL1, secondary_save);
4351 	vmcs_write(GUEST_CR0, guest_cr0_save);
4352 	report_prefix_pop();
4353 }
4354 
4355 /*
4356  * Test interesting vTPR values for a given TPR threshold.
4357  */
4358 static void test_vtpr_values(unsigned threshold)
4359 {
4360 	try_tpr_threshold_and_vtpr(threshold, (threshold - 1) << 4);
4361 	try_tpr_threshold_and_vtpr(threshold, threshold << 4);
4362 	try_tpr_threshold_and_vtpr(threshold, (threshold + 1) << 4);
4363 }
4364 
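/*
 * Program the TPR threshold and check it against SDM check (i): with "use
 * TPR shadow" enabled and "virtual-interrupt delivery" not in effect,
 * bits 31:4 of the threshold must be 0.  Valid thresholds are further
 * probed against interesting vTPR values.
 */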
4365 static void try_tpr_threshold(unsigned threshold)
4366 {
4367 	bool valid = true;
4368 
4369 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
4370 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
4371 
4372 	if ((primary & CPU_TPR_SHADOW) && !((primary & CPU_SECONDARY) &&
4373 	    (secondary & CPU_VINTD)))
4374 		valid = !(threshold >> 4);
4375 
4376 	set_vtpr(-1);
4377 	vmcs_write(TPR_THRESHOLD, threshold);
4378 	report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0xf", threshold);
4379 	if (valid)
4380 		test_vmx_valid_controls(false);
4381 	else
4382 		test_vmx_invalid_controls(false);
4383 	report_prefix_pop();
4384 
4385 	if (valid)
4386 		test_vtpr_values(threshold);
4387 }
4388 
4389 /*
4390  * Test interesting TPR threshold values.
4391  */
4392 static void test_tpr_threshold_values(void)
4393 {
4394 	unsigned i;
4395 
4396 	for (i = 0; i < 0x10; i++)
4397 		try_tpr_threshold(i);
4398 	for (i = 4; i < 32; i++)
4399 		try_tpr_threshold(1u << i);
4400 	try_tpr_threshold(-1u);
4401 	try_tpr_threshold(0x7fffffff);
4402 }
4403 
4404 /*
4405  * This test covers the following two VM entry checks:
4406  *
4407  *      i) If the "use TPR shadow" VM-execution control is 1 and the
4408  *         "virtual-interrupt delivery" VM-execution control is 0, bits
4409  *         31:4 of the TPR threshold VM-execution control field must
4410  *         be 0.
4411  *         [Intel SDM]
4412  *
4413  *      ii) If the "use TPR shadow" VM-execution control is 1, the
4414  *          "virtual-interrupt delivery" VM-execution control is 0
4415  *          and the "virtualize APIC accesses" VM-execution control
4416  *          is 0, the value of bits 3:0 of the TPR threshold VM-execution
4417  *          control field must not be greater than the value of bits
4418  *          7:4 of VTPR.
4419  *          [Intel SDM]
4420  */
4421 static void test_tpr_threshold(void)
4422 {
4423 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
4424 	u64 apic_virt_addr = vmcs_read(APIC_VIRT_ADDR);
4425 	u64 threshold = vmcs_read(TPR_THRESHOLD);
4426 	void *virtual_apic_page;
4427 
4428 	if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW))
4429 		return;
4430 
4431 	virtual_apic_page = alloc_page();
4432 	memset(virtual_apic_page, 0xff, PAGE_SIZE);
4433 	vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page));
4434 
4435 	vmcs_write(CPU_EXEC_CTRL0, primary & ~(CPU_TPR_SHADOW | CPU_SECONDARY));
4436 	report_prefix_pushf("Use TPR shadow disabled, secondary controls disabled");
4437 	test_tpr_threshold_values();
4438 	report_prefix_pop();
4439 	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_TPR_SHADOW);
4440 	report_prefix_pushf("Use TPR shadow enabled, secondary controls disabled");
4441 	test_tpr_threshold_values();
4442 	report_prefix_pop();
4443 
4444 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4445 	    (ctrl_cpu_rev[1].clr & (CPU_VINTD  | CPU_VIRT_APIC_ACCESSES))))
4446 		goto out;
4447 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
4448 
4449 	if (ctrl_cpu_rev[1].clr & CPU_VINTD) {
4450 		vmcs_write(CPU_EXEC_CTRL1, CPU_VINTD);
4451 		report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled");
4452 		test_tpr_threshold_values();
4453 		report_prefix_pop();
4454 
4455 		vmcs_write(CPU_EXEC_CTRL0,
4456 			   vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
4457 		report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled");
4458 		test_tpr_threshold_values();
4459 		report_prefix_pop();
4460 	}
4461 
4462 	if (ctrl_cpu_rev[1].clr & CPU_VIRT_APIC_ACCESSES) {
4463 		vmcs_write(CPU_EXEC_CTRL0,
4464 			   vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY);
4465 		vmcs_write(CPU_EXEC_CTRL1, CPU_VIRT_APIC_ACCESSES);
4466 		report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4467 		test_tpr_threshold_values();
4468 		report_prefix_pop();
4469 
4470 		vmcs_write(CPU_EXEC_CTRL0,
4471 			   vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
4472 		report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4473 		test_tpr_threshold_values();
4474 		report_prefix_pop();
4475 	}
4476 
4477 	if ((ctrl_cpu_rev[1].clr &
4478 	     (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) ==
4479 	    (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) {
4480 		vmcs_write(CPU_EXEC_CTRL0,
4481 			   vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY);
4482 		vmcs_write(CPU_EXEC_CTRL1,
4483 			   CPU_VINTD | CPU_VIRT_APIC_ACCESSES);
4484 		report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4485 		test_tpr_threshold_values();
4486 		report_prefix_pop();
4487 
4488 		vmcs_write(CPU_EXEC_CTRL0,
4489 			   vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
4490 		report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4491 		test_tpr_threshold_values();
4492 		report_prefix_pop();
4493 	}
4494 
4495 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4496 out:
4497 	vmcs_write(TPR_THRESHOLD, threshold);
4498 	vmcs_write(APIC_VIRT_ADDR, apic_virt_addr);
4499 	vmcs_write(CPU_EXEC_CTRL0, primary);
4500 }
4501 
4502 /*
4503  * This test verifies the following two vmentry checks:
4504  *
4505  *  If the "NMI exiting" VM-execution control is 0, "Virtual NMIs"
4506  *  VM-execution control must be 0.
4507  *  [Intel SDM]
4508  *
4509  *  If the “virtual NMIs” VM-execution control is 0, the “NMI-window
4510  *  exiting” VM-execution control must be 0.
4511  *  [Intel SDM]
4512  */
4513 static void test_nmi_ctrls(void)
4514 {
4515 	u32 pin_ctrls, cpu_ctrls0, test_pin_ctrls, test_cpu_ctrls0;
4516 
4517 	if ((ctrl_pin_rev.clr & (PIN_NMI | PIN_VIRT_NMI)) !=
4518 	    (PIN_NMI | PIN_VIRT_NMI)) {
4519 		test_skip("NMI exiting and Virtual NMIs are not supported!");
4520 		return;
4521 	}
4522 
4523 	/* Save the controls so that we can restore them after our tests */
4524 	pin_ctrls = vmcs_read(PIN_CONTROLS);
4525 	cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0);
4526 
4527 	test_pin_ctrls = pin_ctrls & ~(PIN_NMI | PIN_VIRT_NMI);
4528 	test_cpu_ctrls0 = cpu_ctrls0 & ~CPU_NMI_WINDOW;
4529 
4530 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
4531 	report_prefix_pushf("NMI-exiting disabled, virtual-NMIs disabled");
4532 	test_vmx_valid_controls(false);
4533 	report_prefix_pop();
4534 
4535 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_VIRT_NMI);
4536 	report_prefix_pushf("NMI-exiting disabled, virtual-NMIs enabled");
4537 	test_vmx_invalid_controls(false);
4538 	report_prefix_pop();
4539 
4540 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI));
4541 	report_prefix_pushf("NMI-exiting enabled, virtual-NMIs enabled");
4542 	test_vmx_valid_controls(false);
4543 	report_prefix_pop();
4544 
4545 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_NMI);
4546 	report_prefix_pushf("NMI-exiting enabled, virtual-NMIs disabled");
4547 	test_vmx_valid_controls(false);
4548 	report_prefix_pop();
4549 
4550 	if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) {
4551 		report_info("NMI-window exiting is not supported, skipping...");
4552 		goto done;
4553 	}
4554 
4555 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
4556 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW);
4557 	report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting enabled");
4558 	test_vmx_invalid_controls(false);
4559 	report_prefix_pop();
4560 
4561 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
4562 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0);
4563 	report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting disabled");
4564 	test_vmx_valid_controls(false);
4565 	report_prefix_pop();
4566 
4567 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI));
4568 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW);
4569 	report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting enabled");
4570 	test_vmx_valid_controls(false);
4571 	report_prefix_pop();
4572 
4573 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI));
4574 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0);
4575 	report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting disabled");
4576 	test_vmx_valid_controls(false);
4577 	report_prefix_pop();
4578 
4579 	/* Restore the controls to their original values */
4580 	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0);
4581 done:
4582 	vmcs_write(PIN_CONTROLS, pin_ctrls);
4583 }
4584 
4585 static void test_eptp_ad_bit(u64 eptp, bool ctrl)
4586 {
4587 	vmcs_write(EPTP, eptp);
4588 	report_prefix_pushf("Enable-EPT enabled; EPT accessed and dirty flag %s",
4589 	    (eptp & EPTP_AD_FLAG) ? "1": "0");
4590 	if (ctrl)
4591 		test_vmx_valid_controls(false);
4592 	else
4593 		test_vmx_invalid_controls(false);
4594 	report_prefix_pop();
4596 }
4597 
4598 /*
4599  * 1. If the "enable EPT" VM-execution control is 1, the EPTP VM-execution
4600  *    control field must satisfy the following checks:
4601  *
4602  *     - The EPT memory type (bits 2:0) must be a value supported by the
4603  *	 processor as indicated in the IA32_VMX_EPT_VPID_CAP MSR.
4604  *     - Bits 5:3 (1 less than the EPT page-walk length) must be 3,
4605  *	 indicating an EPT page-walk length of 4.
4606  *     - Bit 6 (enable bit for accessed and dirty flags for EPT) must be
4607  *	 0 if bit 21 of the IA32_VMX_EPT_VPID_CAP MSR is read as 0,
4608  *	 indicating that the processor does not support accessed and dirty
4609  *	 dirty flags for EPT.
4610  *     - Reserved bits 11:7 and 63:N (where N is the processor's
4611  *	 physical-address width) must all be 0.
4612  *
4613  * 2. If the "unrestricted guest" VM-execution control is 1, the
4614  *    "enable EPT" VM-execution control must also be 1.
4615  */
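/*
 * EPTP layout exercised by the loops below (per the SDM; N is the
 * processor's physical-address width):
 *
 *	bits  2:0	EPT paging-structure memory type (0 = UC, 6 = WB)
 *	bits  5:3	EPT page-walk length minus 1 (must be 3)
 *	bit   6		enable accessed and dirty flags
 *	bits 11:7	reserved, must be 0
 *	bits N-1:12	physical address of the EPT PML4 table
 *	bits 63:N	reserved, must be 0
 */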
4616 static void test_ept_eptp(void)
4617 {
4618 	u32 primary_saved = vmcs_read(CPU_EXEC_CTRL0);
4619 	u32 secondary_saved = vmcs_read(CPU_EXEC_CTRL1);
4620 	u64 eptp_saved = vmcs_read(EPTP);
4621 	u32 primary = primary_saved;
4622 	u32 secondary = secondary_saved;
4623 	u64 msr, eptp = eptp_saved;
4624 	bool un_cache = false;
4625 	bool wr_bk = false;
4626 	bool ctrl;
4627 	u32 i, maxphysaddr;
4628 	u64 j, resv_bits_mask = 0;
4629 
4630 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4631 	    (ctrl_cpu_rev[1].clr & CPU_EPT))) {
4632 		test_skip("\"CPU secondary\" and/or \"enable EPT\" execution controls are not supported!");
4633 		return;
4634 	}
4635 
4636 	/*
4637 	 * Memory type (bits 2:0)
4638 	 */
4639 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
4640 	if (msr & EPT_CAP_UC)
4641 		un_cache = true;
4642 	if (msr & EPT_CAP_WB)
4643 		wr_bk = true;
4644 
4645 	primary |= CPU_SECONDARY;
4646 	vmcs_write(CPU_EXEC_CTRL0, primary);
4647 	secondary |= CPU_EPT;
4648 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4649 	eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) |
4650 	    (3ul << EPTP_PG_WALK_LEN_SHIFT);
4651 	vmcs_write(EPTP, eptp);
4652 
4653 	for (i = 0; i < 8; i++) {
4654 		if (i == 0) {
4655 			if (un_cache) {
4656 				report_info("EPT paging structure memory-type is Un-cacheable\n");
4657 				ctrl = true;
4658 			} else {
4659 				ctrl = false;
4660 			}
4661 		} else if (i == 6) {
4662 			if (wr_bk) {
4663 				report_info("EPT paging structure memory-type is Write-back\n");
4664 				ctrl = true;
4665 			} else {
4666 				ctrl = false;
4667 			}
4668 		} else {
4669 			ctrl = false;
4670 		}
4671 
4672 		eptp = (eptp & ~EPT_MEM_TYPE_MASK) | i;
4673 		vmcs_write(EPTP, eptp);
4674 		report_prefix_pushf("Enable-EPT enabled; EPT memory type %lu",
4675 		    eptp & EPT_MEM_TYPE_MASK);
4676 		if (ctrl)
4677 			test_vmx_valid_controls(false);
4678 		else
4679 			test_vmx_invalid_controls(false);
4680 		report_prefix_pop();
4681 	}
4682 
4683 	eptp = (eptp & ~EPT_MEM_TYPE_MASK) | 6ul;
4684 
4685 	/*
4686 	 * Page walk length (bits 5:3)
4687 	 */
4688 	for (i = 0; i < 8; i++) {
4689 		eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) |
4690 		    (i << EPTP_PG_WALK_LEN_SHIFT);
4691 		if (i == 3)
4692 			ctrl = true;
4693 		else
4694 			ctrl = false;
4695 
4696 		vmcs_write(EPTP, eptp);
4697 		report_prefix_pushf("Enable-EPT enabled; EPT page walk length %lu",
4698 		    eptp & EPTP_PG_WALK_LEN_MASK);
4699 		if (ctrl)
4700 			test_vmx_valid_controls(false);
4701 		else
4702 			test_vmx_invalid_controls(false);
4703 		report_prefix_pop();
4704 	}
4705 
4706 	eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) |
4707 	    3ul << EPTP_PG_WALK_LEN_SHIFT;
4708 
4709 	/*
4710 	 * Accessed and dirty flag (bit 6)
4711 	 */
4712 	if (msr & EPT_CAP_AD_FLAG) {
4713 		report_info("Processor supports accessed and dirty flag");
4714 		eptp &= ~EPTP_AD_FLAG;
4715 		test_eptp_ad_bit(eptp, true);
4716 
4717 		eptp |= EPTP_AD_FLAG;
4718 		test_eptp_ad_bit(eptp, true);
4719 	} else {
4720 		report_info("Processor does not supports accessed and dirty flag");
4721 		eptp &= ~EPTP_AD_FLAG;
4722 		test_eptp_ad_bit(eptp, true);
4723 
4724 		eptp |= EPTP_AD_FLAG;
4725 		test_eptp_ad_bit(eptp, false);
4726 	}
4727 
4728 	/*
4729 	 * Reserved bits [11:7] and [63:N]
4730 	 */
4731 	for (i = 0; i < 32; i++) {
4732 		eptp = (eptp &
4733 		    ~(EPTP_RESERV_BITS_MASK << EPTP_RESERV_BITS_SHIFT)) |
4734 		    (i << EPTP_RESERV_BITS_SHIFT);
4735 		vmcs_write(EPTP, eptp);
4736 		report_prefix_pushf("Enable-EPT enabled; reserved bits [11:7] %lu",
4737 		    (eptp >> EPTP_RESERV_BITS_SHIFT) &
4738 		    EPTP_RESERV_BITS_MASK);
4739 		if (i == 0)
4740 			test_vmx_valid_controls(false);
4741 		else
4742 			test_vmx_invalid_controls(false);
4743 		report_prefix_pop();
4744 	}
4745 
4746 	eptp = (eptp & ~(EPTP_RESERV_BITS_MASK << EPTP_RESERV_BITS_SHIFT));
4747 
4748 	maxphysaddr = cpuid_maxphyaddr();
4749 	for (i = 0; i < (63 - maxphysaddr + 1); i++) {
4750 		resv_bits_mask |= 1ul << i;
4751 	}
4752 
4753 	for (j = maxphysaddr - 1; j <= 63; j++) {
4754 		eptp = (eptp & ~(resv_bits_mask << maxphysaddr)) |
4755 		    (j < maxphysaddr ? 0 : 1ul << j);
4756 		vmcs_write(EPTP, eptp);
4757 		report_prefix_pushf("Enable-EPT enabled; reserved bits [63:N] %lu",
4758 		    (eptp >> maxphysaddr) & resv_bits_mask);
4759 		if (j < maxphysaddr)
4760 			test_vmx_valid_controls(false);
4761 		else
4762 			test_vmx_invalid_controls(false);
4763 		report_prefix_pop();
4764 	}
4765 
4766 	secondary &= ~(CPU_EPT | CPU_URG);
4767 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4768 	report_prefix_pushf("Enable-EPT disabled, unrestricted-guest disabled");
4769 	test_vmx_valid_controls(false);
4770 	report_prefix_pop();
4771 
4772 	secondary |= CPU_URG;
4773 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4774 	report_prefix_pushf("Enable-EPT disabled, unrestricted-guest enabled");
4775 	test_vmx_invalid_controls(false);
4776 	report_prefix_pop();
4777 
4778 	secondary |= CPU_EPT;
4779 	setup_dummy_ept();
4780 	report_prefix_pushf("Enable-EPT enabled, unrestricted-guest enabled");
4781 	test_vmx_valid_controls(false);
4782 	report_prefix_pop();
4783 
4784 	secondary &= ~CPU_URG;
4785 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4786 	report_prefix_pushf("Enable-EPT enabled, unrestricted-guest disabled");
4787 	test_vmx_valid_controls(false);
4788 	report_prefix_pop();
4789 
4790 	vmcs_write(CPU_EXEC_CTRL0, primary_saved);
4791 	vmcs_write(CPU_EXEC_CTRL1, secondary_saved);
4792 	vmcs_write(EPTP, eptp_saved);
4793 }
4794 
4795 /*
4796  * If the 'enable PML' VM-execution control is 1, the 'enable EPT'
4797  * VM-execution control must also be 1. In addition, the PML address
4798  * must satisfy the following checks:
4799  *
4800  *    * Bits 11:0 of the address must be 0.
4801  *    * The address should not set any bits beyond the processor's
4802  *	physical-address width.
4803  *
4804  *  [Intel SDM]
4805  */
4806 static void test_pml(void)
4807 {
4808 	u32 primary_saved = vmcs_read(CPU_EXEC_CTRL0);
4809 	u32 secondary_saved = vmcs_read(CPU_EXEC_CTRL1);
4810 	u32 primary = primary_saved;
4811 	u32 secondary = secondary_saved;
4812 
4813 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4814 	    (ctrl_cpu_rev[1].clr & CPU_EPT) && (ctrl_cpu_rev[1].clr & CPU_PML))) {
4815 		test_skip("\"Secondary execution\", \"enable EPT\" or \"enable PML\" control is not supported!");
4816 		return;
4817 	}
4818 
4819 	primary |= CPU_SECONDARY;
4820 	vmcs_write(CPU_EXEC_CTRL0, primary);
4821 	secondary &= ~(CPU_PML | CPU_EPT);
4822 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4823 	report_prefix_pushf("enable-PML disabled, enable-EPT disabled");
4824 	test_vmx_valid_controls(false);
4825 	report_prefix_pop();
4826 
4827 	secondary |= CPU_PML;
4828 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4829 	report_prefix_pushf("enable-PML enabled, enable-EPT disabled");
4830 	test_vmx_invalid_controls(false);
4831 	report_prefix_pop();
4832 
4833 	secondary |= CPU_EPT;
4834 	setup_dummy_ept();
4835 	report_prefix_pushf("enable-PML enabled, enable-EPT enabled");
4836 	test_vmx_valid_controls(false);
4837 	report_prefix_pop();
4838 
4839 	secondary &= ~CPU_PML;
4840 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4841 	report_prefix_pushf("enable-PML disabled, enable EPT enabled");
4842 	test_vmx_valid_controls(false);
4843 	report_prefix_pop();
4844 
4845 	test_vmcs_addr_reference(CPU_PML, PMLADDR, "PML address", "PML",
4846 				 PAGE_SIZE, false, false);
4847 
4848 	vmcs_write(CPU_EXEC_CTRL0, primary_saved);
4849 	vmcs_write(CPU_EXEC_CTRL1, secondary_saved);
4850 }
4851 
4852 /*
4853  * If the "activate VMX-preemption timer" VM-execution control is 0,
4854  * the "save VMX-preemption timer value" VM-exit control must also be 0.
4855  *
4856  *  [Intel SDM]
4857  */
4858 static void test_vmx_preemption_timer(void)
4859 {
4860 	u32 saved_pin = vmcs_read(PIN_CONTROLS);
4861 	u32 saved_exit = vmcs_read(EXI_CONTROLS);
4862 	u32 pin = saved_pin;
4863 	u32 exit = saved_exit;
4864 
4865 	if (!((ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) ||
4866 	    (ctrl_pin_rev.clr & PIN_PREEMPT))) {
4867 		printf("\"Save-VMX-preemption-timer\" control and/or \"Enable-VMX-preemption-timer\" control is not supported\n");
4868 		return;
4869 	}
4870 
4871 	pin |= PIN_PREEMPT;
4872 	vmcs_write(PIN_CONTROLS, pin);
4873 	exit &= ~EXI_SAVE_PREEMPT;
4874 	vmcs_write(EXI_CONTROLS, exit);
4875 	report_prefix_pushf("enable-VMX-preemption-timer enabled, save-VMX-preemption-timer disabled");
4876 	test_vmx_valid_controls(false);
4877 	report_prefix_pop();
4878 
4879 	exit |= EXI_SAVE_PREEMPT;
4880 	vmcs_write(EXI_CONTROLS, exit);
4881 	report_prefix_pushf("enable-VMX-preemption-timer enabled, save-VMX-preemption-timer enabled");
4882 	test_vmx_valid_controls(false);
4883 	report_prefix_pop();
4884 
4885 	pin &= ~PIN_PREEMPT;
4886 	vmcs_write(PIN_CONTROLS, pin);
4887 	report_prefix_pushf("enable-VMX-preemption-timer disabled, save-VMX-preemption-timer enabled");
4888 	test_vmx_invalid_controls(false);
4889 	report_prefix_pop();
4890 
4891 	exit &= ~EXI_SAVE_PREEMPT;
4892 	vmcs_write(EXI_CONTROLS, exit);
4893 	report_prefix_pushf("enable-VMX-preemption-timer disabled, save-VMX-preemption-timer disabled");
4894 	test_vmx_valid_controls(false);
4895 	report_prefix_pop();
4896 
4897 	vmcs_write(PIN_CONTROLS, saved_pin);
4898 	vmcs_write(EXI_CONTROLS, saved_exit);
4899 }
4900 
4901 /*
4902  * Tests for VM-execution control fields
4903  */
4904 static void test_vm_execution_ctls(void)
4905 {
4906 	test_pin_based_ctls();
4907 	test_primary_processor_based_ctls();
4908 	test_secondary_processor_based_ctls();
4909 	test_cr3_targets();
4910 	test_io_bitmaps();
4911 	test_msr_bitmap();
4912 	test_apic_ctls();
4913 	test_tpr_threshold();
4914 	test_nmi_ctrls();
4915 	test_pml();
4916 	test_vpid();
4917 	test_ept_eptp();
4918 	test_vmx_preemption_timer();
4919 }
4920 
4921 /*
4922  * The following checks are performed for the VM-entry MSR-load address if
4923  * the VM-entry MSR-load count field is non-zero:
4924  *
4925  *    - The lower 4 bits of the VM-entry MSR-load address must be 0.
4926  *      The address should not set any bits beyond the processor’s
4927  *      physical-address width.
4928  *
4929  *    - The address of the last byte in the VM-entry MSR-load area
4930  *      should not set any bits beyond the processor’s physical-address
4931  *      width. The address of this last byte is VM-entry MSR-load address
4932  *      + (MSR count * 16) - 1. (The arithmetic used for the computation
4933  *      uses more bits than the processor’s physical-address width.)
4934  *
4935  *  [Intel SDM]
4936  */
4938 static void test_entry_msr_load(void)
4939 {
4940 	entry_msr_load = alloc_page();
4941 	u64 tmp;
4942 	u32 entry_msr_ld_cnt = 1;
4943 	int i;
4944 	u32 addr_len = 64;
4945 
4946 	vmcs_write(ENT_MSR_LD_CNT, entry_msr_ld_cnt);
4947 
4948 	/* Check first 4 bits of VM-entry MSR-load address */
4949 	for (i = 0; i < 4; i++) {
4950 		tmp = (u64)entry_msr_load | 1ull << i;
4951 		vmcs_write(ENTER_MSR_LD_ADDR, tmp);
4952 		report_prefix_pushf("VM-entry MSR-load addr [4:0] %lx",
4953 				    tmp & 0xf);
4954 		test_vmx_invalid_controls(false);
4955 		report_prefix_pop();
4956 	}
4957 
4958 	if (basic.val & (1ul << 48))
4959 		addr_len = 32;
4960 
4961 	test_vmcs_addr_values("VM-entry-MSR-load address",
4962 				ENTER_MSR_LD_ADDR, 16, false, false,
4963 				4, addr_len - 1);
4964 
4965 	/*
4966 	 * Check last byte of VM-entry MSR-load address
4967 	 */
4968 	entry_msr_load = (struct vmx_msr_entry *)((u64)entry_msr_load & ~0xf);
4969 
4970 	for (i = (addr_len == 64 ? cpuid_maxphyaddr() : addr_len);
4971 	     i < 64; i++) {
4972 		tmp = ((u64)entry_msr_load + entry_msr_ld_cnt * 16 - 1) |
4973 			1ul << i;
4974 		vmcs_write(ENTER_MSR_LD_ADDR,
4975 			   tmp - (entry_msr_ld_cnt * 16 - 1));
4976 		test_vmx_invalid_controls(false);
4977 	}
4978 
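	/*
	 * Last-byte checks: with an MSR count of 2, the last byte of the
	 * load area is at address + 31.  An address of 2^maxphyaddr - 16
	 * puts that byte beyond the physical-address width (invalid);
	 * 2^maxphyaddr - 32 keeps it just inside (valid).
	 */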
4979 	vmcs_write(ENT_MSR_LD_CNT, 2);
4980 	vmcs_write(ENTER_MSR_LD_ADDR, (1ULL << cpuid_maxphyaddr()) - 16);
4981 	test_vmx_invalid_controls(false);
4982 	vmcs_write(ENTER_MSR_LD_ADDR, (1ULL << cpuid_maxphyaddr()) - 32);
4983 	test_vmx_valid_controls(false);
4984 	vmcs_write(ENTER_MSR_LD_ADDR, (1ULL << cpuid_maxphyaddr()) - 48);
4985 	test_vmx_valid_controls(false);
4986 }
4987 
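/*
 * L2 guest used by the PAT tests: exit to L1 via vmcall until the test
 * stage reaches 2, then execute an fnop and return.
 */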
4988 static void guest_pat_main(void)
4989 {
4990 	while (1) {
4991 		if (vmx_get_test_stage() != 2)
4992 			vmcall();
4993 		else
4994 			break;
4995 	}
4996 
4997 	asm volatile("fnop");
4998 }
4999 
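/*
 * Check that the current exit reason matches 'xreason', and advance the
 * guest RIP past the exiting instruction for ordinary exits.  The
 * 0x80000021 mask skips the RIP adjustment for failed VM-entries and
 * certain fault-like exits.
 */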
5000 static void report_guest_pat_test(const char *test, u32 xreason, u64 guest_pat)
5001 {
5002 	u32 reason = vmcs_read(EXI_REASON);
5003 	u64 guest_rip;
5004 	u32 insn_len;
5005 
5006 	report("%s, GUEST_PAT %lx", reason == xreason, test, guest_pat);
5007 
5008 	guest_rip = vmcs_read(GUEST_RIP);
5009 	insn_len = vmcs_read(EXI_INST_LEN);
5010 	if (!(reason & 0x80000021))
5011 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
5012 }
5013 
5014 /*
5015  * Tests for VM-entry control fields
5016  */
5017 static void test_vm_entry_ctls(void)
5018 {
5019 	test_invalid_event_injection();
5020 	test_entry_msr_load();
5021 }
5022 
5023 /*
5024  * The following checks are performed for the VM-exit MSR-store address if
5025  * the VM-exit MSR-store count field is non-zero:
5026  *
5027  *    - The lower 4 bits of the VM-exit MSR-store address must be 0.
5028  *      The address should not set any bits beyond the processor’s
5029  *      physical-address width.
5030  *
5031  *    - The address of the last byte in the VM-exit MSR-store area
5032  *      should not set any bits beyond the processor’s physical-address
5033  *      width. The address of this last byte is VM-exit MSR-store address
5034  *      + (MSR count * 16) - 1. (The arithmetic used for the computation
5035  *      uses more bits than the processor’s physical-address width.)
5036  *
5037  * If IA32_VMX_BASIC[48] is read as 1, neither address should set any bits
5038  * in the range 63:32.
5039  *
5040  *  [Intel SDM]
5041  */
5042 static void test_exit_msr_store(void)
5043 {
5044 	exit_msr_store = alloc_page();
5045 	u64 tmp;
5046 	u32 exit_msr_st_cnt = 1;
5047 	int i;
5048 	u32 addr_len = 64;
5049 
5050 	vmcs_write(EXI_MSR_ST_CNT, exit_msr_st_cnt);
5051 
5052 	/* Check first 4 bits of VM-exit MSR-store address */
5053 	for (i = 0; i < 4; i++) {
5054 		tmp = (u64)exit_msr_store | 1ull << i;
5055 		vmcs_write(EXIT_MSR_ST_ADDR, tmp);
5056 		report_prefix_pushf("VM-exit MSR-store addr [4:0] %lx",
5057 				    tmp & 0xf);
5058 		test_vmx_invalid_controls(false);
5059 		report_prefix_pop();
5060 	}
5061 
5062 	if (basic.val & (1ul << 48))
5063 		addr_len = 32;
5064 
5065 	test_vmcs_addr_values("VM-exit-MSR-store address",
5066 				EXIT_MSR_ST_ADDR, 16, false, false,
5067 				4, addr_len - 1);
5068 
5069 	/*
5070 	 * Check last byte of VM-exit MSR-store address
5071 	 */
5072 	exit_msr_store = (struct vmx_msr_entry *)((u64)exit_msr_store & ~0xf);
5073 
5074 	for (i = (addr_len == 64 ? cpuid_maxphyaddr() : addr_len);
5075 	     i < 64; i++) {
5076 		tmp = ((u64)exit_msr_store + exit_msr_st_cnt * 16 - 1) |
5077 			1ul << i;
5078 		vmcs_write(EXIT_MSR_ST_ADDR,
5079 			   tmp - (exit_msr_st_cnt * 16 - 1));
5080 		test_vmx_invalid_controls(false);
5081 	}
5082 
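	/*
	 * As above: with an MSR count of 2, the last byte of the store
	 * area is at address + 31, so 2^maxphyaddr - 16 is invalid while
	 * 2^maxphyaddr - 32 and below are valid.
	 */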
5083 	vmcs_write(EXI_MSR_ST_CNT, 2);
5084 	vmcs_write(EXIT_MSR_ST_ADDR, (1ULL << cpuid_maxphyaddr()) - 16);
5085 	test_vmx_invalid_controls(false);
5086 	vmcs_write(EXIT_MSR_ST_ADDR, (1ULL << cpuid_maxphyaddr()) - 32);
5087 	test_vmx_valid_controls(false);
5088 	vmcs_write(EXIT_MSR_ST_ADDR, (1ULL << cpuid_maxphyaddr()) - 48);
5089 	test_vmx_valid_controls(false);
5090 }
5091 
5092 /*
5093  * Tests for VM-exit controls
5094  */
5095 static void test_vm_exit_ctls(void)
5096 {
5097 	test_exit_msr_store();
5098 }
5099 
5100 /*
5101  * Check that the virtual CPU checks all of the VMX controls as
5102  * documented in the Intel SDM.
5103  */
5104 static void vmx_controls_test(void)
5105 {
5106 	/*
5107 	 * Bit 1 of the guest's RFLAGS must be 1, or VM-entry will
5108 	 * fail due to invalid guest state, should we make it that
5109 	 * far.
5110 	 */
5111 	vmcs_write(GUEST_RFLAGS, 0);
5112 
5113 	test_vm_execution_ctls();
5114 	test_vm_exit_ctls();
5115 	test_vm_entry_ctls();
5116 }
5117 
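/*
 * One combination of the APIC-virtualization VM-execution controls under
 * test; each flag corresponds to the VM-execution control of the same
 * name.
 */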
5118 struct apic_reg_virt_config {
5119 	bool apic_register_virtualization;
5120 	bool use_tpr_shadow;
5121 	bool virtualize_apic_accesses;
5122 	bool virtualize_x2apic_mode;
5123 	bool activate_secondary_controls;
5124 };
5125 
5126 struct apic_reg_test {
5127 	const char *name;
5128 	struct apic_reg_virt_config apic_reg_virt_config;
5129 };
5130 
5131 struct apic_reg_virt_expectation {
5132 	enum Reason rd_exit_reason;
5133 	enum Reason wr_exit_reason;
5134 	u32 val;
5135 	u32 (*virt_fn)(u32);
5136 
5137 	/*
5138 	 * If false, accessing the APIC access address from L2 is treated as a
5139 	 * normal memory operation, rather than triggering virtualization.
5140 	 */
5141 	bool virtualize_apic_accesses;
5142 };
5143 
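/*
 * Expected transformations applied to a value written to an APIC register
 * when the access is virtualized: identity, the priority class in bits
 * 7:4 (as for VTPR), or the top byte, bits 31:24 (as for the ICR2
 * destination field).
 */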
5144 static u32 apic_virt_identity(u32 val)
5145 {
5146 	return val;
5147 }
5148 
5149 static u32 apic_virt_nibble1(u32 val)
5150 {
5151 	return val & 0xf0;
5152 }
5153 
5154 static u32 apic_virt_byte3(u32 val)
5155 {
5156 	return val & (0xff << 24);
5157 }
5158 
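/*
 * Derive the expected read/write exit reasons and virtualization behavior
 * for APIC register 'reg' under 'config'.  Returns false if the config is
 * not one of the recognized combinations.
 */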
5159 static bool apic_reg_virt_exit_expectation(
5160 	u32 reg, struct apic_reg_virt_config *config,
5161 	struct apic_reg_virt_expectation *expectation)
5162 {
5163 	/* Good configs, where some L2 APIC accesses are virtualized. */
5164 	bool virtualize_apic_accesses_only =
5165 		config->virtualize_apic_accesses &&
5166 		!config->use_tpr_shadow &&
5167 		!config->apic_register_virtualization &&
5168 		!config->virtualize_x2apic_mode &&
5169 		config->activate_secondary_controls;
5170 	bool virtualize_apic_accesses_and_use_tpr_shadow =
5171 		config->virtualize_apic_accesses &&
5172 		config->use_tpr_shadow &&
5173 		!config->apic_register_virtualization &&
5174 		!config->virtualize_x2apic_mode &&
5175 		config->activate_secondary_controls;
5176 	bool apic_register_virtualization =
5177 		config->virtualize_apic_accesses &&
5178 		config->use_tpr_shadow &&
5179 		config->apic_register_virtualization &&
5180 		!config->virtualize_x2apic_mode &&
5181 		config->activate_secondary_controls;
5182 
5183 	expectation->val = MAGIC_VAL_1;
5184 	expectation->virt_fn = apic_virt_identity;
5185 	expectation->virtualize_apic_accesses =
5186 		config->virtualize_apic_accesses &&
5187 		config->activate_secondary_controls;
5188 	if (virtualize_apic_accesses_only) {
5189 		expectation->rd_exit_reason = VMX_APIC_ACCESS;
5190 		expectation->wr_exit_reason = VMX_APIC_ACCESS;
5191 	} else if (virtualize_apic_accesses_and_use_tpr_shadow) {
5192 		switch (reg) {
5193 		case APIC_TASKPRI:
5194 			expectation->rd_exit_reason = VMX_VMCALL;
5195 			expectation->wr_exit_reason = VMX_VMCALL;
5196 			expectation->virt_fn = apic_virt_nibble1;
5197 			break;
5198 		default:
5199 			expectation->rd_exit_reason = VMX_APIC_ACCESS;
5200 			expectation->wr_exit_reason = VMX_APIC_ACCESS;
5201 		}
5202 	} else if (apic_register_virtualization) {
5203 		expectation->rd_exit_reason = VMX_VMCALL;
5204 
5205 		switch (reg) {
5206 		case APIC_ID:
5207 		case APIC_EOI:
5208 		case APIC_LDR:
5209 		case APIC_DFR:
5210 		case APIC_SPIV:
5211 		case APIC_ESR:
5212 		case APIC_ICR:
5213 		case APIC_LVTT:
5214 		case APIC_LVTTHMR:
5215 		case APIC_LVTPC:
5216 		case APIC_LVT0:
5217 		case APIC_LVT1:
5218 		case APIC_LVTERR:
5219 		case APIC_TMICT:
5220 		case APIC_TDCR:
5221 			expectation->wr_exit_reason = VMX_APIC_WRITE;
5222 			break;
5223 		case APIC_LVR:
5224 		case APIC_ISR ... APIC_ISR + 0x70:
5225 		case APIC_TMR ... APIC_TMR + 0x70:
5226 		case APIC_IRR ... APIC_IRR + 0x70:
5227 			expectation->wr_exit_reason = VMX_APIC_ACCESS;
5228 			break;
5229 		case APIC_TASKPRI:
5230 			expectation->wr_exit_reason = VMX_VMCALL;
5231 			expectation->virt_fn = apic_virt_nibble1;
5232 			break;
5233 		case APIC_ICR2:
5234 			expectation->wr_exit_reason = VMX_VMCALL;
5235 			expectation->virt_fn = apic_virt_byte3;
5236 			break;
5237 		default:
5238 			expectation->rd_exit_reason = VMX_APIC_ACCESS;
5239 			expectation->wr_exit_reason = VMX_APIC_ACCESS;
5240 		}
5241 	} else if (!expectation->virtualize_apic_accesses) {
5242 		/*
5243 		 * No APIC registers are directly virtualized. This includes
5244 		 * VTPR, which can be virtualized through MOV to/from CR8 via
5245 		 * the use TPR shadow control, but not through directly
5246 		 * accessing VTPR.
5247 		 */
5248 		expectation->rd_exit_reason = VMX_VMCALL;
5249 		expectation->wr_exit_reason = VMX_VMCALL;
5250 	} else {
5251 		printf("Cannot parse APIC register virtualization config:\n"
5252 		       "\tvirtualize_apic_accesses: %d\n"
5253 		       "\tuse_tpr_shadow: %d\n"
5254 		       "\tapic_register_virtualization: %d\n"
5255 		       "\tvirtualize_x2apic_mode: %d\n"
5256 		       "\tactivate_secondary_controls: %d\n",
5257 		       config->virtualize_apic_accesses,
5258 		       config->use_tpr_shadow,
5259 		       config->apic_register_virtualization,
5260 		       config->virtualize_x2apic_mode,
5261 		       config->activate_secondary_controls);
5262 
5263 		return false;
5264 	}
5265 
5266 	return true;
5267 }
5268 
5269 struct apic_reg_test apic_reg_tests[] = {
5270 	/* Good configs, where some L2 APIC accesses are virtualized. */
5271 	{
5272 		.name = "Virtualize APIC accesses",
5273 		.apic_reg_virt_config = {
5274 			.virtualize_apic_accesses = true,
5275 			.use_tpr_shadow = false,
5276 			.apic_register_virtualization = false,
5277 			.virtualize_x2apic_mode = false,
5278 			.activate_secondary_controls = true,
5279 		},
5280 	},
5281 	{
5282 		.name = "Virtualize APIC accesses + Use TPR shadow",
5283 		.apic_reg_virt_config = {
5284 			.virtualize_apic_accesses = true,
5285 			.use_tpr_shadow = true,
5286 			.apic_register_virtualization = false,
5287 			.virtualize_x2apic_mode = false,
5288 			.activate_secondary_controls = true,
5289 		},
5290 	},
5291 	{
5292 		.name = "APIC-register virtualization",
5293 		.apic_reg_virt_config = {
5294 			.virtualize_apic_accesses = true,
5295 			.use_tpr_shadow = true,
5296 			.apic_register_virtualization = true,
5297 			.virtualize_x2apic_mode = false,
5298 			.activate_secondary_controls = true,
5299 		},
5300 	},
5301 
5302 	/*
5303 	 * Test that the secondary processor-based VM-execution controls are
5304 	 * correctly ignored when "activate secondary controls" is disabled.
5305 	 */
5306 	{
5307 		.name = "Activate secondary controls off",
5308 		.apic_reg_virt_config = {
5309 			.virtualize_apic_accesses = true,
5310 			.use_tpr_shadow = false,
5311 			.apic_register_virtualization = true,
5312 			.virtualize_x2apic_mode = true,
5313 			.activate_secondary_controls = false,
5314 		},
5315 	},
5316 	{
5317 		.name = "Activate secondary controls off + Use TPR shadow",
5318 		.apic_reg_virt_config = {
5319 			.virtualize_apic_accesses = true,
5320 			.use_tpr_shadow = true,
5321 			.apic_register_virtualization = true,
5322 			.virtualize_x2apic_mode = true,
5323 			.activate_secondary_controls = false,
5324 		},
5325 	},
5326 
5327 	/*
5328 	 * Test that the APIC access address is treated like an arbitrary memory
5329 	 * address when "virtualize APIC accesses" is disabled.
5330 	 */
5331 	{
5332 		.name = "Virtualize APIC accesses off + Use TPR shadow",
5333 		.apic_reg_virt_config = {
5334 			.virtualize_apic_accesses = false,
5335 			.use_tpr_shadow = true,
5336 			.apic_register_virtualization = true,
5337 			.virtualize_x2apic_mode = true,
5338 			.activate_secondary_controls = true,
5339 		},
5340 	},
5341 
5342 	/*
5343 	 * Test that VM entry fails due to invalid controls when
5344 	 * "APIC-register virtualization" is enabled while "use TPR shadow" is
5345 	 * disabled.
5346 	 */
5347 	{
5348 		.name = "APIC-register virtualization + Use TPR shadow off",
5349 		.apic_reg_virt_config = {
5350 			.virtualize_apic_accesses = true,
5351 			.use_tpr_shadow = false,
5352 			.apic_register_virtualization = true,
5353 			.virtualize_x2apic_mode = false,
5354 			.activate_secondary_controls = true,
5355 		},
5356 	},
5357 
5358 	/*
5359 	 * Test that VM entry fails due to invalid controls when
5360 	 * "Virtualize x2APIC mode" is enabled while "use TPR shadow" is
5361 	 * disabled.
5362 	 */
5363 	{
5364 		.name = "Virtualize x2APIC mode + Use TPR shadow off",
5365 		.apic_reg_virt_config = {
5366 			.virtualize_apic_accesses = false,
5367 			.use_tpr_shadow = false,
5368 			.apic_register_virtualization = false,
5369 			.virtualize_x2apic_mode = true,
5370 			.activate_secondary_controls = true,
5371 		},
5372 	},
5373 	{
5374 		.name = "Virtualize x2APIC mode + Use TPR shadow off v2",
5375 		.apic_reg_virt_config = {
5376 			.virtualize_apic_accesses = false,
5377 			.use_tpr_shadow = false,
5378 			.apic_register_virtualization = true,
5379 			.virtualize_x2apic_mode = true,
5380 			.activate_secondary_controls = true,
5381 		},
5382 	},
5383 
5384 	/*
5385 	 * Test that VM entry fails due to invalid controls when
5386 	 * "virtualize x2APIC mode" is enabled while "virtualize APIC accesses"
5387 	 * is enabled.
5388 	 */
5389 	{
5390 		.name = "Virtualize x2APIC mode + Virtualize APIC accesses",
5391 		.apic_reg_virt_config = {
5392 			.virtualize_apic_accesses = true,
5393 			.use_tpr_shadow = true,
5394 			.apic_register_virtualization = false,
5395 			.virtualize_x2apic_mode = true,
5396 			.activate_secondary_controls = true,
5397 		},
5398 	},
5399 	{
5400 		.name = "Virtualize x2APIC mode + Virtualize APIC accesses v2",
5401 		.apic_reg_virt_config = {
5402 			.virtualize_apic_accesses = true,
5403 			.use_tpr_shadow = true,
5404 			.apic_register_virtualization = true,
5405 			.virtualize_x2apic_mode = true,
5406 			.activate_secondary_controls = true,
5407 		},
5408 	},
5409 };
5410 
5411 enum Apic_op {
5412 	APIC_OP_XAPIC_RD,
5413 	APIC_OP_XAPIC_WR,
5414 	TERMINATE,
5415 };
5416 
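/*
 * Access a 32-bit xAPIC register through the APIC-access page mapped into
 * L2. The volatile cast forces a real memory access, which is what triggers
 * the virtualization behavior under test.
 */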
5417 static u32 vmx_xapic_read(u32 *apic_access_address, u32 reg)
5418 {
5419 	return *(volatile u32 *)((uintptr_t)apic_access_address + reg);
5420 }
5421 
5422 static void vmx_xapic_write(u32 *apic_access_address, u32 reg, u32 val)
5423 {
5424 	*(volatile u32 *)((uintptr_t)apic_access_address + reg) = val;
5425 }
5426 
5427 struct apic_reg_virt_guest_args {
5428 	enum Apic_op op;
5429 	u32 *apic_access_address;
5430 	u32 reg;
5431 	u32 val;
5432 	bool check_rd;
5433 	u32 (*virt_fn)(u32);
5434 } apic_reg_virt_guest_args;
5435 
5436 static void apic_reg_virt_guest(void)
5437 {
5438 	volatile struct apic_reg_virt_guest_args *args =
5439 		&apic_reg_virt_guest_args;
5440 
5441 	for (;;) {
5442 		enum Apic_op op = args->op;
5443 		u32 *apic_access_address = args->apic_access_address;
5444 		u32 reg = args->reg;
5445 		u32 val = args->val;
5446 		bool check_rd = args->check_rd;
5447 		u32 (*virt_fn)(u32) = args->virt_fn;
5448 
5449 		if (op == TERMINATE)
5450 			break;
5451 
5452 		if (op == APIC_OP_XAPIC_RD) {
5453 			u32 ret = vmx_xapic_read(apic_access_address, reg);
5454 
5455 			if (check_rd) {
5456 				u32 want = virt_fn(val);
5457 				u32 got = virt_fn(ret);
5458 
5459 				report("read 0x%x, expected 0x%x.",
5460 				       got == want, got, want);
5461 			}
5462 		} else if (op == APIC_OP_XAPIC_WR) {
5463 			vmx_xapic_write(apic_access_address, reg, val);
5464 		}
5465 
5466 		/*
5467 		 * L2 should always execute a vmcall after it's done testing an
5468 		 * individual APIC operation. This helps to validate that L1
5469 		 * and L2 stay in sync with each other, as expected.
5470 		 */
5471 		vmcall();
5472 	}
5473 }
5474 
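/*
 * Have the L2 guest read xAPIC register 'reg' and verify both the resulting
 * exit reason and, for virtualized accesses, the value read.
 */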
5475 static void test_xapic_rd(
5476 	u32 reg, struct apic_reg_virt_expectation *expectation,
5477 	u32 *apic_access_address, u32 *virtual_apic_page)
5478 {
5479 	u32 val = expectation->val;
5480 	u32 exit_reason_want = expectation->rd_exit_reason;
5481 	struct apic_reg_virt_guest_args *args = &apic_reg_virt_guest_args;
5482 
5483 	report_prefix_pushf("xapic - reading 0x%03x", reg);
5484 
5485 	/* Configure guest to do an xapic read */
5486 	args->op = APIC_OP_XAPIC_RD;
5487 	args->apic_access_address = apic_access_address;
5488 	args->reg = reg;
5489 	args->val = val;
5490 	args->check_rd = exit_reason_want == VMX_VMCALL;
5491 	args->virt_fn = expectation->virt_fn;
5492 
5493 	/* Setup virtual APIC page */
5494 	if (!expectation->virtualize_apic_accesses) {
5495 		apic_access_address[apic_reg_index(reg)] = val;
5496 		virtual_apic_page[apic_reg_index(reg)] = 0;
5497 	} else if (exit_reason_want == VMX_VMCALL) {
5498 		apic_access_address[apic_reg_index(reg)] = 0;
5499 		virtual_apic_page[apic_reg_index(reg)] = val;
5500 	}
5501 
5502 	/* Enter guest */
5503 	enter_guest();
5504 
5505 	/* Validate the exit behavior. */
5509 	if (exit_reason_want == VMX_APIC_ACCESS) {
5510 		u32 apic_page_offset = vmcs_read(EXI_QUALIFICATION) & 0xfff;
5511 
5512 		assert_exit_reason(exit_reason_want);
5513 		report("got APIC access exit @ page offset 0x%03x, want 0x%03x",
5514 		       apic_page_offset == reg, apic_page_offset, reg);
5515 		skip_exit_insn();
5516 
5517 		/* Reenter guest so it can execute the synchronizing vmcall. */
5518 		enter_guest();
5519 	} else if (exit_reason_want != VMX_VMCALL) {
5520 		report("Oops, bad exit expectation: %u.", false,
5521 		       exit_reason_want);
5522 	}
5523 
5524 	skip_exit_vmcall();
5525 	report_prefix_pop();
5526 }
5527 
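/*
 * Have the L2 guest write 'val' to xAPIC register 'reg' and verify the exit
 * reason as well as where the written value lands: the virtual-APIC page for
 * virtualized writes, or the APIC-access backing page otherwise.
 */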
5528 static void test_xapic_wr(
5529 	u32 reg, struct apic_reg_virt_expectation *expectation,
5530 	u32 *apic_access_address, u32 *virtual_apic_page)
5531 {
5532 	u32 val = expectation->val;
5533 	u32 exit_reason_want = expectation->wr_exit_reason;
5534 	struct apic_reg_virt_guest_args *args = &apic_reg_virt_guest_args;
5535 	bool virtualized =
5536 		expectation->virtualize_apic_accesses &&
5537 		(exit_reason_want == VMX_APIC_WRITE ||
5538 		 exit_reason_want == VMX_VMCALL);
5539 	bool checked = false;
5540 
5541 	report_prefix_pushf("xapic - writing 0x%x to 0x%03x", val, reg);
5542 
5543 	/* Configure guest to do an xapic write */
5544 	args->op = APIC_OP_XAPIC_WR;
5545 	args->apic_access_address = apic_access_address;
5546 	args->reg = reg;
5547 	args->val = val;
5548 
5549 	/* Setup virtual APIC page */
5550 	if (virtualized || !expectation->virtualize_apic_accesses) {
5551 		apic_access_address[apic_reg_index(reg)] = 0;
5552 		virtual_apic_page[apic_reg_index(reg)] = 0;
5553 	}
5554 
5555 	/* Enter guest */
5556 	enter_guest();
5557 
5558 	/* Validate the exit behavior. */
5562 	if (exit_reason_want == VMX_APIC_ACCESS) {
5563 		u32 apic_page_offset = vmcs_read(EXI_QUALIFICATION) & 0xfff;
5564 
5565 		assert_exit_reason(exit_reason_want);
5566 		report("got APIC access exit @ page offset 0x%03x, want 0x%03x",
5567 		       apic_page_offset == reg, apic_page_offset, reg);
5568 		skip_exit_insn();
5569 
5570 		/* Reenter guest so it can execute the synchronizing vmcall. */
5571 		enter_guest();
5572 	} else if (exit_reason_want == VMX_APIC_WRITE) {
5573 		assert_exit_reason(exit_reason_want);
5574 		report("got APIC write exit @ page offset 0x%03x; val is 0x%x, want 0x%x",
5575 		       virtual_apic_page[apic_reg_index(reg)] == val,
5576 		       apic_reg_index(reg),
5577 		       virtual_apic_page[apic_reg_index(reg)], val);
5578 		checked = true;
5579 
5580 		/* Reenter guest so it can execute the synchronizing vmcall. */
5581 		enter_guest();
5582 	} else if (exit_reason_want != VMX_VMCALL) {
5583 		report("Oops, bad exit expectation: %u.", false,
5584 		       exit_reason_want);
5585 	}
5586 
5587 	assert_exit_reason(VMX_VMCALL);
5588 	if (virtualized && !checked) {
5589 		u32 want = expectation->virt_fn(val);
5590 		u32 got = virtual_apic_page[apic_reg_index(reg)];
5591 		got = expectation->virt_fn(got);
5592 
5593 		report("exitless write; val is 0x%x, want 0x%x",
5594 		       got == want, got, want);
5595 	} else if (!expectation->virtualize_apic_accesses && !checked) {
5596 		u32 got = apic_access_address[apic_reg_index(reg)];
5597 
5598 		report("non-virtualized write; val is 0x%x, want 0x%x",
5599 		       got == val, got, val);
5600 	} else if (!expectation->virtualize_apic_accesses && checked) {
5601 		report("Non-virtualized write was prematurely checked!", false);
5602 	}
5603 
5604 	skip_exit_vmcall();
5605 	report_prefix_pop();
5606 }
5607 
5608 enum Config_type {
5609 	CONFIG_TYPE_GOOD,
5610 	CONFIG_TYPE_UNSUPPORTED,
5611 	CONFIG_TYPE_VMENTRY_FAILS_EARLY,
5612 };
5613 
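/*
 * Program the VM-execution controls for one APIC-virtualization config.
 * Returns CONFIG_TYPE_UNSUPPORTED if the CPU lacks a required control, and
 * CONFIG_TYPE_VMENTRY_FAILS_EARLY for control combinations the SDM defines
 * as invalid.
 */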
5614 static enum Config_type configure_apic_reg_virt_test(
5615 	struct apic_reg_virt_config *apic_reg_virt_config)
5616 {
5617 	u32 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
5618 	u32 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
5619 	/* Configs where L2 entry fails early, due to invalid controls. */
5620 	bool use_tpr_shadow_incorrectly_off =
5621 		!apic_reg_virt_config->use_tpr_shadow &&
5622 		(apic_reg_virt_config->apic_register_virtualization ||
5623 		 apic_reg_virt_config->virtualize_x2apic_mode) &&
5624 		apic_reg_virt_config->activate_secondary_controls;
5625 	bool virtualize_apic_accesses_incorrectly_on =
5626 		apic_reg_virt_config->virtualize_apic_accesses &&
5627 		apic_reg_virt_config->virtualize_x2apic_mode &&
5628 		apic_reg_virt_config->activate_secondary_controls;
5629 	bool vmentry_fails_early =
5630 		use_tpr_shadow_incorrectly_off ||
5631 		virtualize_apic_accesses_incorrectly_on;
5632 
5633 	if (apic_reg_virt_config->activate_secondary_controls) {
5634 		if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) {
5635 			printf("VM-execution control \"activate secondary controls\" NOT supported.\n");
5636 			return CONFIG_TYPE_UNSUPPORTED;
5637 		}
5638 		cpu_exec_ctrl0 |= CPU_SECONDARY;
5639 	} else {
5640 		cpu_exec_ctrl0 &= ~CPU_SECONDARY;
5641 	}
5642 
5643 	if (apic_reg_virt_config->virtualize_apic_accesses) {
5644 		if (!(ctrl_cpu_rev[1].clr & CPU_VIRT_APIC_ACCESSES)) {
5645 			printf("VM-execution control \"virtualize APIC accesses\" NOT supported.\n");
5646 			return CONFIG_TYPE_UNSUPPORTED;
5647 		}
5648 		cpu_exec_ctrl1 |= CPU_VIRT_APIC_ACCESSES;
5649 	} else {
5650 		cpu_exec_ctrl1 &= ~CPU_VIRT_APIC_ACCESSES;
5651 	}
5652 
5653 	if (apic_reg_virt_config->use_tpr_shadow) {
5654 		if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) {
5655 			printf("VM-execution control \"use TPR shadow\" NOT supported.\n");
5656 			return CONFIG_TYPE_UNSUPPORTED;
5657 		}
5658 		cpu_exec_ctrl0 |= CPU_TPR_SHADOW;
5659 	} else {
5660 		cpu_exec_ctrl0 &= ~CPU_TPR_SHADOW;
5661 	}
5662 
5663 	if (apic_reg_virt_config->apic_register_virtualization) {
5664 		if (!(ctrl_cpu_rev[1].clr & CPU_APIC_REG_VIRT)) {
5665 			printf("VM-execution control \"APIC-register virtualization\" NOT supported.\n");
5666 			return CONFIG_TYPE_UNSUPPORTED;
5667 		}
5668 		cpu_exec_ctrl1 |= CPU_APIC_REG_VIRT;
5669 	} else {
5670 		cpu_exec_ctrl1 &= ~CPU_APIC_REG_VIRT;
5671 	}
5672 
5673 	if (apic_reg_virt_config->virtualize_x2apic_mode) {
5674 		if (!(ctrl_cpu_rev[1].clr & CPU_VIRT_X2APIC)) {
5675 			printf("VM-execution control \"virtualize x2APIC mode\" NOT supported.\n");
5676 			return CONFIG_TYPE_UNSUPPORTED;
5677 		}
5678 		cpu_exec_ctrl1 |= CPU_VIRT_X2APIC;
5679 	} else {
5680 		cpu_exec_ctrl1 &= ~CPU_VIRT_X2APIC;
5681 	}
5682 
5683 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
5684 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
5685 
5686 	if (vmentry_fails_early)
5687 		return CONFIG_TYPE_VMENTRY_FAILS_EARLY;
5688 
5689 	return CONFIG_TYPE_GOOD;
5690 }
5691 
5692 static bool cpu_has_apicv(void)
5693 {
5694 	return ((ctrl_cpu_rev[1].clr & CPU_APIC_REG_VIRT) &&
5695 		(ctrl_cpu_rev[1].clr & CPU_VINTD) &&
5696 		(ctrl_pin_rev.clr & PIN_POST_INTR));
5697 }
5698 
5699 /* Validates APIC register access across valid virtualization configurations. */
5700 static void apic_reg_virt_test(void)
5701 {
5702 	u32 *apic_access_address;
5703 	u32 *virtual_apic_page;
5704 	u64 control;
5705 	u64 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
5706 	u64 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
5707 	int i;
5708 	struct apic_reg_virt_guest_args *args = &apic_reg_virt_guest_args;
5709 
5710 	if (!cpu_has_apicv()) {
5711 		report_skip(__func__);
5712 		return;
5713 	}
5714 
5715 	control = cpu_exec_ctrl1;
5716 	control &= ~CPU_VINTD;
5717 	vmcs_write(CPU_EXEC_CTRL1, control);
5718 
5719 	test_set_guest(apic_reg_virt_guest);
5720 
5721 	/*
5722 	 * From the SDM: The 1-setting of the "virtualize APIC accesses"
5723 	 * VM-execution control is guaranteed to apply only if translations to the
5724 	 * APIC-access address use a 4-KByte page.
5725 	 */
5726 	apic_access_address = alloc_page();
5727 	force_4k_page(apic_access_address);
5728 	vmcs_write(APIC_ACCS_ADDR, virt_to_phys(apic_access_address));
5729 
5730 	virtual_apic_page = alloc_page();
5731 	vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page));
5732 
5733 	for (i = 0; i < ARRAY_SIZE(apic_reg_tests); i++) {
5734 		struct apic_reg_test *apic_reg_test = &apic_reg_tests[i];
5735 		struct apic_reg_virt_config *apic_reg_virt_config =
5736 				&apic_reg_test->apic_reg_virt_config;
5737 		enum Config_type config_type;
5738 		u32 reg;
5739 
5740 		printf("--- %s test ---\n", apic_reg_test->name);
5741 		config_type =
5742 			configure_apic_reg_virt_test(apic_reg_virt_config);
5743 		if (config_type == CONFIG_TYPE_UNSUPPORTED) {
5744 			printf("Skip because of missing features.\n");
5745 			continue;
5746 		}
5747 
5748 		if (config_type == CONFIG_TYPE_VMENTRY_FAILS_EARLY) {
5749 			enter_guest_with_bad_controls();
5750 			continue;
5751 		}
5752 
5753 		for (reg = 0; reg < PAGE_SIZE / sizeof(u32); reg += 0x10) {
5754 			struct apic_reg_virt_expectation expectation = {};
5755 			bool ok;
5756 
5757 			ok = apic_reg_virt_exit_expectation(
5758 				reg, apic_reg_virt_config, &expectation);
5759 			if (!ok) {
5760 				report("Malformed test.", false);
5761 				break;
5762 			}
5763 
5764 			test_xapic_rd(reg, &expectation, apic_access_address,
5765 				      virtual_apic_page);
5766 			test_xapic_wr(reg, &expectation, apic_access_address,
5767 				      virtual_apic_page);
5768 		}
5769 	}
5770 
5771 	/* Terminate the guest */
5772 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
5773 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
5774 	args->op = TERMINATE;
5775 	enter_guest();
5776 	assert_exit_reason(VMX_VMCALL);
5777 }
5778 
5779 struct virt_x2apic_mode_config {
5780 	struct apic_reg_virt_config apic_reg_virt_config;
5781 	bool virtual_interrupt_delivery;
5782 	bool use_msr_bitmaps;
5783 	bool disable_x2apic_msr_intercepts;
5784 	bool disable_x2apic;
5785 };
5786 
5787 struct virt_x2apic_mode_test_case {
5788 	const char *name;
5789 	struct virt_x2apic_mode_config virt_x2apic_mode_config;
5790 };
5791 
5792 enum Virt_x2apic_mode_behavior_type {
5793 	X2APIC_ACCESS_VIRTUALIZED,
5794 	X2APIC_ACCESS_PASSED_THROUGH,
5795 	X2APIC_ACCESS_TRIGGERS_GP,
5796 };
5797 
5798 struct virt_x2apic_mode_expectation {
5799 	enum Reason rd_exit_reason;
5800 	enum Reason wr_exit_reason;
5801 
5802 	/*
5803 	 * RDMSR and WRMSR handle 64-bit values. However, except for ICR, all of
5804 	 * the x2APIC registers are 32 bits. Notice:
5805 	 *   1. vmx_x2apic_read() clears the upper 32 bits for 32-bit registers.
5806 	 *   2. vmx_x2apic_write() expects the val arg to be well-formed.
5807 	 */
5808 	u64 rd_val;
5809 	u64 wr_val;
5810 
5811 	/*
5812 	 * Maps an input value to its expected virtualized output; applied to
5813 	 * both the written value and the value read back before comparing.
5814 	 */
5815 	u64 (*virt_fn)(u64);
5816 
5817 	enum Virt_x2apic_mode_behavior_type rd_behavior;
5818 	enum Virt_x2apic_mode_behavior_type wr_behavior;
5819 	bool wr_only;
5820 };
5821 
5822 static u64 virt_x2apic_mode_identity(u64 val)
5823 {
5824 	return val;
5825 }
5826 
5827 static u64 virt_x2apic_mode_nibble1(u64 val)
5828 {
5829 	return val & 0xf0;
5830 }
5831 
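/*
 * Compute the expected result of an L2 RDMSR of an x2APIC register:
 * virtualized (served from the virtual-APIC page), passed through to L1's
 * APIC, or #GP when x2APIC is disabled or the register is not readable.
 */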
5832 static void virt_x2apic_mode_rd_expectation(
5833 	u32 reg, bool virt_x2apic_mode_on, bool disable_x2apic,
5834 	bool apic_register_virtualization, bool virtual_interrupt_delivery,
5835 	struct virt_x2apic_mode_expectation *expectation)
5836 {
5837 	bool readable =
5838 		!x2apic_reg_reserved(reg) &&
5839 		reg != APIC_EOI &&
5840 		reg != APIC_CMCI;
5841 
5842 	expectation->rd_exit_reason = VMX_VMCALL;
5843 	expectation->virt_fn = virt_x2apic_mode_identity;
5844 	if (virt_x2apic_mode_on && apic_register_virtualization) {
5845 		expectation->rd_val = MAGIC_VAL_1;
5846 		if (reg == APIC_PROCPRI && virtual_interrupt_delivery)
5847 			expectation->virt_fn = virt_x2apic_mode_nibble1;
5848 		else if (reg == APIC_TASKPRI)
5849 			expectation->virt_fn = virt_x2apic_mode_nibble1;
5850 		expectation->rd_behavior = X2APIC_ACCESS_VIRTUALIZED;
5851 	} else if (virt_x2apic_mode_on && !apic_register_virtualization &&
5852 		   reg == APIC_TASKPRI) {
5853 		expectation->rd_val = MAGIC_VAL_1;
5854 		expectation->virt_fn = virt_x2apic_mode_nibble1;
5855 		expectation->rd_behavior = X2APIC_ACCESS_VIRTUALIZED;
5856 	} else if (!disable_x2apic && readable) {
5857 		expectation->rd_val = apic_read(reg);
5858 		expectation->rd_behavior = X2APIC_ACCESS_PASSED_THROUGH;
5859 	} else {
5860 		expectation->rd_behavior = X2APIC_ACCESS_TRIGGERS_GP;
5861 	}
5862 }
5863 
5864 /*
5865  * get_x2apic_wr_val() creates an innocuous write value for an x2APIC register.
5866  *
5867  * For writable registers, get_x2apic_wr_val() deposits the write value into the
5868  * val pointer arg and returns true. For non-writable registers, val is not
5869  * modified and get_x2apic_wr_val() returns false.
5870  *
5871  * CMCI, including the LVT CMCI register, is disabled by default. Thus,
5872  * get_x2apic_wr_val() treats this register as non-writable.
5873  */
5874 static bool get_x2apic_wr_val(u32 reg, u64 *val)
5875 {
5876 	switch (reg) {
5877 	case APIC_TASKPRI:
5878 		/* Bits 31:8 are reserved. */
5879 		*val &= 0xff;
5880 		break;
5881 	case APIC_EOI:
5882 	case APIC_ESR:
5883 	case APIC_TMICT:
5884 		/*
5885 		 * EOI, ESR: WRMSR of a non-zero value causes #GP(0).
5886 		 * TMICT: A write of 0 to the initial-count register effectively
5887 		 *        stops the local APIC timer, in both one-shot and
5888 		 *        periodic mode.
5889 		 */
5890 		*val = 0;
5891 		break;
5892 	case APIC_SPIV:
5893 	case APIC_LVTT:
5894 	case APIC_LVTTHMR:
5895 	case APIC_LVTPC:
5896 	case APIC_LVT0:
5897 	case APIC_LVT1:
5898 	case APIC_LVTERR:
5899 	case APIC_TDCR:
5900 		/*
5901 		 * To avoid writing a 1 to a reserved bit or causing some other
5902 		 * unintended side effect, read the current value and use it as
5903 		 * the write value.
5904 		 */
5905 		*val = apic_read(reg);
5906 		break;
5907 	case APIC_ICR:
5908 		*val = 0x40000 | 0xf1;
5909 		break;
5910 	case APIC_SELF_IPI:
5911 		/*
5912 		 * With special processing (i.e., virtualize x2APIC mode +
5913 		 * virtual interrupt delivery), writing zero causes an
5914 		 * APIC-write VM exit. We plan to add a test for enabling
5915 		 * "virtual-interrupt delivery" in VMCS12, and that's where we
5916 		 * will test a self IPI with special processing.
5917 		 */
5918 		*val = 0x0;
5919 		break;
5920 	default:
5921 		return false;
5922 	}
5923 
5924 	return true;
5925 }
5926 
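/*
 * Per the SDM, "virtualize x2APIC mode" applies special processing to WRMSR
 * of TPR and, when "virtual-interrupt delivery" is also enabled, to EOI and
 * SELF_IPI. On a match, substitute an innocuous write value.
 */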
5927 static bool special_processing_applies(u32 reg, u64 *val,
5928 				       bool virt_int_delivery)
5929 {
5930 	bool special_processing =
5931 		(reg == APIC_TASKPRI) ||
5932 		(virt_int_delivery &&
5933 		 (reg == APIC_EOI || reg == APIC_SELF_IPI));
5934 
5935 	if (special_processing) {
5936 		TEST_ASSERT(get_x2apic_wr_val(reg, val));
5937 		return true;
5938 	}
5939 
5940 	return false;
5941 }
5942 
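/*
 * Compute the expected result of an L2 WRMSR to an x2APIC register:
 * virtualized, passed through to L1's APIC (where an ICR self-IPI then
 * causes an external-interrupt exit), or #GP for non-writable registers or
 * when x2APIC is disabled.
 */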
5943 static void virt_x2apic_mode_wr_expectation(
5944 	u32 reg, bool virt_x2apic_mode_on, bool disable_x2apic,
5945 	bool virt_int_delivery,
5946 	struct virt_x2apic_mode_expectation *expectation)
5947 {
5948 	expectation->wr_exit_reason = VMX_VMCALL;
5949 	expectation->wr_val = MAGIC_VAL_1;
5950 	expectation->wr_only = false;
5951 
5952 	if (virt_x2apic_mode_on &&
5953 	    special_processing_applies(reg, &expectation->wr_val,
5954 				       virt_int_delivery)) {
5955 		expectation->wr_behavior = X2APIC_ACCESS_VIRTUALIZED;
5956 		if (reg == APIC_SELF_IPI)
5957 			expectation->wr_exit_reason = VMX_APIC_WRITE;
5958 	} else if (!disable_x2apic &&
5959 		   get_x2apic_wr_val(reg, &expectation->wr_val)) {
5960 		expectation->wr_behavior = X2APIC_ACCESS_PASSED_THROUGH;
5961 		if (reg == APIC_EOI || reg == APIC_SELF_IPI)
5962 			expectation->wr_only = true;
5963 		if (reg == APIC_ICR)
5964 			expectation->wr_exit_reason = VMX_EXTINT;
5965 	} else {
5966 		expectation->wr_behavior = X2APIC_ACCESS_TRIGGERS_GP;
5967 		/*
5968 		 * Writing 1 to a reserved bit triggers a #GP.
5969 		 * Thus, set the write value to 0, which seems
5970 		 * the most likely to detect a missed #GP.
5971 		 */
5972 		expectation->wr_val = 0;
5973 	}
5974 }
5975 
5976 static void virt_x2apic_mode_exit_expectation(
5977 	u32 reg, struct virt_x2apic_mode_config *config,
5978 	struct virt_x2apic_mode_expectation *expectation)
5979 {
5980 	struct apic_reg_virt_config *base_config =
5981 		&config->apic_reg_virt_config;
5982 	bool virt_x2apic_mode_on =
5983 		base_config->virtualize_x2apic_mode &&
5984 		config->use_msr_bitmaps &&
5985 		config->disable_x2apic_msr_intercepts &&
5986 		base_config->activate_secondary_controls;
5987 
5988 	virt_x2apic_mode_wr_expectation(
5989 		reg, virt_x2apic_mode_on, config->disable_x2apic,
5990 		config->virtual_interrupt_delivery, expectation);
5991 	virt_x2apic_mode_rd_expectation(
5992 		reg, virt_x2apic_mode_on, config->disable_x2apic,
5993 		base_config->apic_register_virtualization,
5994 		config->virtual_interrupt_delivery, expectation);
5995 }
5996 
5997 struct virt_x2apic_mode_test_case virt_x2apic_mode_tests[] = {
5998 	/*
5999 	 * Baseline "virtualize x2APIC mode" configuration:
6000 	 *   - virtualize x2APIC mode
6001 	 *   - virtual-interrupt delivery
6002 	 *   - APIC-register virtualization
6003 	 *   - x2APIC MSR intercepts disabled
6004 	 *
6005 	 * Reads come from virtual APIC page, special processing applies to
6006 	 * VTPR, EOI, and SELF IPI, and all other writes pass through to L1
6007 	 * APIC.
6008 	 */
6009 	{
6010 		.name = "Baseline",
6011 		.virt_x2apic_mode_config = {
6012 			.virtual_interrupt_delivery = true,
6013 			.use_msr_bitmaps = true,
6014 			.disable_x2apic_msr_intercepts = true,
6015 			.disable_x2apic = false,
6016 			.apic_reg_virt_config = {
6017 				.apic_register_virtualization = true,
6018 				.use_tpr_shadow = true,
6019 				.virtualize_apic_accesses = false,
6020 				.virtualize_x2apic_mode = true,
6021 				.activate_secondary_controls = true,
6022 			},
6023 		},
6024 	},
6025 	{
6026 		.name = "Baseline w/ x2apic disabled",
6027 		.virt_x2apic_mode_config = {
6028 			.virtual_interrupt_delivery = true,
6029 			.use_msr_bitmaps = true,
6030 			.disable_x2apic_msr_intercepts = true,
6031 			.disable_x2apic = true,
6032 			.apic_reg_virt_config = {
6033 				.apic_register_virtualization = true,
6034 				.use_tpr_shadow = true,
6035 				.virtualize_apic_accesses = false,
6036 				.virtualize_x2apic_mode = true,
6037 				.activate_secondary_controls = true,
6038 			},
6039 		},
6040 	},
6041 
6042 	/*
6043 	 * Baseline, minus virtual-interrupt delivery. Reads come from virtual
6044 	 * APIC page, special processing applies to VTPR, and all other writes
6045 	 * pass through to L1 APIC.
6046 	 */
6047 	{
6048 		.name = "Baseline - virtual interrupt delivery",
6049 		.virt_x2apic_mode_config = {
6050 			.virtual_interrupt_delivery = false,
6051 			.use_msr_bitmaps = true,
6052 			.disable_x2apic_msr_intercepts = true,
6053 			.disable_x2apic = false,
6054 			.apic_reg_virt_config = {
6055 				.apic_register_virtualization = true,
6056 				.use_tpr_shadow = true,
6057 				.virtualize_apic_accesses = false,
6058 				.virtualize_x2apic_mode = true,
6059 				.activate_secondary_controls = true,
6060 			},
6061 		},
6062 	},
6063 
6064 	/*
6065 	 * Baseline, minus APIC-register virtualization. x2APIC reads pass
6066 	 * through to L1's APIC, unless reading VTPR.
6067 	 */
6068 	{
6069 		.name = "Virtualize x2APIC mode, no APIC reg virt",
6070 		.virt_x2apic_mode_config = {
6071 			.virtual_interrupt_delivery = true,
6072 			.use_msr_bitmaps = true,
6073 			.disable_x2apic_msr_intercepts = true,
6074 			.disable_x2apic = false,
6075 			.apic_reg_virt_config = {
6076 				.apic_register_virtualization = false,
6077 				.use_tpr_shadow = true,
6078 				.virtualize_apic_accesses = false,
6079 				.virtualize_x2apic_mode = true,
6080 				.activate_secondary_controls = true,
6081 			},
6082 		},
6083 	},
6084 	{
6085 		.name = "Virtualize x2APIC mode, no APIC reg virt, x2APIC off",
6086 		.virt_x2apic_mode_config = {
6087 			.virtual_interrupt_delivery = true,
6088 			.use_msr_bitmaps = true,
6089 			.disable_x2apic_msr_intercepts = true,
6090 			.disable_x2apic = true,
6091 			.apic_reg_virt_config = {
6092 				.apic_register_virtualization = false,
6093 				.use_tpr_shadow = true,
6094 				.virtualize_apic_accesses = false,
6095 				.virtualize_x2apic_mode = true,
6096 				.activate_secondary_controls = true,
6097 			},
6098 		},
6099 	},
6100 
6101 	/*
6102 	 * Enable "virtualize x2APIC mode" and "APIC-register virtualization",
6103 	 * and disable intercepts for the x2APIC MSRs, but fail to enable
6104 	 * "activate secondary controls" (i.e. L2 gets access to L1's x2APIC
6105 	 * MSRs).
6106 	 */
6107 	{
6108 		.name = "Fail to enable activate secondary controls",
6109 		.virt_x2apic_mode_config = {
6110 			.virtual_interrupt_delivery = true,
6111 			.use_msr_bitmaps = true,
6112 			.disable_x2apic_msr_intercepts = true,
6113 			.disable_x2apic = false,
6114 			.apic_reg_virt_config = {
6115 				.apic_register_virtualization = true,
6116 				.use_tpr_shadow = true,
6117 				.virtualize_apic_accesses = false,
6118 				.virtualize_x2apic_mode = true,
6119 				.activate_secondary_controls = false,
6120 			},
6121 		},
6122 	},
6123 
6124 	/*
6125 	 * Enable "APIC-register virtualization" and enable "activate secondary
6126 	 * controls" and disable intercepts for the x2APIC MSRs, but do not
6127 	 * enable the "virtualize x2APIC mode" VM-execution control (i.e. L2
6128 	 * gets access to L1's x2APIC MSRs).
6129 	 */
6130 	{
6131 		.name = "Fail to enable virtualize x2APIC mode",
6132 		.virt_x2apic_mode_config = {
6133 			.virtual_interrupt_delivery = true,
6134 			.use_msr_bitmaps = true,
6135 			.disable_x2apic_msr_intercepts = true,
6136 			.disable_x2apic = false,
6137 			.apic_reg_virt_config = {
6138 				.apic_register_virtualization = true,
6139 				.use_tpr_shadow = true,
6140 				.virtualize_apic_accesses = false,
6141 				.virtualize_x2apic_mode = false,
6142 				.activate_secondary_controls = true,
6143 			},
6144 		},
6145 	},
6146 
6147 	/*
6148 	 * Disable "Virtualize x2APIC mode", disable x2APIC MSR intercepts, and
6149 	 * enable "APIC-register virtualization" --> L2 gets L1's x2APIC MSRs.
6150 	 */
6151 	{
6152 		.name = "Virtualize x2APIC mode off",
6153 		.virt_x2apic_mode_config = {
6154 			.virtual_interrupt_delivery = true,
6155 			.use_msr_bitmaps = true,
6156 			.disable_x2apic_msr_intercepts = true,
6157 			.disable_x2apic = false,
6158 			.apic_reg_virt_config = {
6159 				.apic_register_virtualization = true,
6160 				.use_tpr_shadow = true,
6161 				.virtualize_apic_accesses = false,
6162 				.virtualize_x2apic_mode = false,
6163 				.activate_secondary_controls = true,
6164 			},
6165 		},
6166 	},
6167 };
6168 
6169 enum X2apic_op {
6170 	X2APIC_OP_RD,
6171 	X2APIC_OP_WR,
6172 	X2APIC_TERMINATE,
6173 };
6174 
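/*
 * x2apic_msr() maps an xAPIC MMIO offset to its x2APIC MSR index, i.e.,
 * 0x800 + (offset >> 4).
 */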
6175 static u64 vmx_x2apic_read(u32 reg)
6176 {
6177 	u32 msr_addr = x2apic_msr(reg);
6178 	u64 val;
6179 
6180 	val = rdmsr(msr_addr);
6181 
6182 	return val;
6183 }
6184 
6185 static void vmx_x2apic_write(u32 reg, u64 val)
6186 {
6187 	u32 msr_addr = x2apic_msr(reg);
6188 
6189 	wrmsr(msr_addr, val);
6190 }
6191 
6192 struct virt_x2apic_mode_guest_args {
6193 	enum X2apic_op op;
6194 	u32 reg;
6195 	u64 val;
6196 	bool should_gp;
6197 	u64 (*virt_fn)(u64);
6198 } virt_x2apic_mode_guest_args;
6199 
6200 static volatile bool handle_x2apic_gp_ran;
6201 static volatile u32 handle_x2apic_gp_insn_len;
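/* #GP handler: record that the fault fired and skip the faulting MSR access. */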
6202 static void handle_x2apic_gp(struct ex_regs *regs)
6203 {
6204 	handle_x2apic_gp_ran = true;
6205 	regs->rip += handle_x2apic_gp_insn_len;
6206 }
6207 
6208 static handler setup_x2apic_gp_handler(void)
6209 {
6210 	handler old_handler;
6211 
6212 	old_handler = handle_exception(GP_VECTOR, handle_x2apic_gp);
6213 	/* RDMSR and WRMSR are both 2 bytes, assuming no prefixes. */
6214 	handle_x2apic_gp_insn_len = 2;
6215 
6216 	return old_handler;
6217 }
6218 
6219 static void teardown_x2apic_gp_handler(handler old_handler)
6220 {
6221 	handle_exception(GP_VECTOR, old_handler);
6222 
6223 	/*
6224 	 * Defensively reset instruction length, so that if the handler is
6225 	 * incorrectly used, it will loop infinitely, rather than run off into
6226 	 * la la land.
6227 	 */
6228 	handle_x2apic_gp_insn_len = 0;
6229 	handle_x2apic_gp_ran = false;
6230 }
6231 
6232 static void virt_x2apic_mode_guest(void)
6233 {
6234 	volatile struct virt_x2apic_mode_guest_args *args =
6235 		&virt_x2apic_mode_guest_args;
6236 
6237 	for (;;) {
6238 		enum X2apic_op op = args->op;
6239 		u32 reg = args->reg;
6240 		u64 val = args->val;
6241 		bool should_gp = args->should_gp;
6242 		u64 (*virt_fn)(u64) = args->virt_fn;
6243 		handler old_handler;
6244 
6245 		if (op == X2APIC_TERMINATE)
6246 			break;
6247 
6248 		if (should_gp) {
6249 			TEST_ASSERT(!handle_x2apic_gp_ran);
6250 			old_handler = setup_x2apic_gp_handler();
6251 		}
6252 
6253 		if (op == X2APIC_OP_RD) {
6254 			u64 ret = vmx_x2apic_read(reg);
6255 
6256 			if (!should_gp) {
6257 				u64 want = virt_fn(val);
6258 				u64 got = virt_fn(ret);
6259 
6260 				report("APIC read; got 0x%lx, want 0x%lx.",
6261 				       got == want, got, want);
6262 			}
6263 		} else if (op == X2APIC_OP_WR) {
6264 			vmx_x2apic_write(reg, val);
6265 		}
6266 
6267 		if (should_gp) {
6268 			report("x2APIC op triggered GP.",
6269 			       handle_x2apic_gp_ran);
6270 			teardown_x2apic_gp_handler(old_handler);
6271 		}
6272 
6273 		/*
6274 		 * L2 should always execute a vmcall after it's done testing an
6275 		 * individual APIC operation. This helps to validate that L1
6276 		 * and L2 stay in sync with each other, as expected.
6277 		 */
6278 		vmcall();
6279 	}
6280 }
6281 
6282 static void test_x2apic_rd(
6283 	u32 reg, struct virt_x2apic_mode_expectation *expectation,
6284 	u32 *virtual_apic_page)
6285 {
6286 	u64 val = expectation->rd_val;
6287 	u32 exit_reason_want = expectation->rd_exit_reason;
6288 	struct virt_x2apic_mode_guest_args *args = &virt_x2apic_mode_guest_args;
6289 
6290 	report_prefix_pushf("x2apic - reading 0x%03x", reg);
6291 
6292 	/* Configure guest to do an x2apic read */
6293 	args->op = X2APIC_OP_RD;
6294 	args->reg = reg;
6295 	args->val = val;
6296 	args->should_gp = expectation->rd_behavior == X2APIC_ACCESS_TRIGGERS_GP;
6297 	args->virt_fn = expectation->virt_fn;
6298 
6299 	/* Setup virtual APIC page */
6300 	if (expectation->rd_behavior == X2APIC_ACCESS_VIRTUALIZED)
6301 		virtual_apic_page[apic_reg_index(reg)] = (u32)val;
6302 
6303 	/* Enter guest */
6304 	enter_guest();
6305 
6306 	if (exit_reason_want != VMX_VMCALL) {
6307 		report("Oops, bad exit expectation: %u.", false,
6308 		       exit_reason_want);
6309 	}
6310 
6311 	skip_exit_vmcall();
6312 	report_prefix_pop();
6313 }
6314 
6315 static volatile bool handle_x2apic_ipi_ran;
6316 static void handle_x2apic_ipi(isr_regs_t *regs)
6317 {
6318 	handle_x2apic_ipi_ran = true;
6319 	eoi();
6320 }
6321 
6322 static void test_x2apic_wr(
6323 	u32 reg, struct virt_x2apic_mode_expectation *expectation,
6324 	u32 *virtual_apic_page)
6325 {
6326 	u64 val = expectation->wr_val;
6327 	u32 exit_reason_want = expectation->wr_exit_reason;
6328 	struct virt_x2apic_mode_guest_args *args = &virt_x2apic_mode_guest_args;
6329 	int ipi_vector = 0xf1;
6330 	u32 restore_val = 0;
6331 
6332 	report_prefix_pushf("x2apic - writing 0x%lx to 0x%03x", val, reg);
6333 
6334 	/* Configure guest to do an x2apic write */
6335 	args->op = X2APIC_OP_WR;
6336 	args->reg = reg;
6337 	args->val = val;
6338 	args->should_gp = expectation->wr_behavior == X2APIC_ACCESS_TRIGGERS_GP;
6339 
6340 	/* Setup virtual APIC page */
6341 	if (expectation->wr_behavior == X2APIC_ACCESS_VIRTUALIZED)
6342 		virtual_apic_page[apic_reg_index(reg)] = 0;
6343 	if (expectation->wr_behavior == X2APIC_ACCESS_PASSED_THROUGH && !expectation->wr_only)
6344 		restore_val = apic_read(reg);
6345 
6346 	/* Setup IPI handler */
6347 	handle_x2apic_ipi_ran = false;
6348 	handle_irq(ipi_vector, handle_x2apic_ipi);
6349 
6350 	/* Enter guest */
6351 	enter_guest();
6352 
6353 	/* Validate the exit behavior. */
6357 	if (exit_reason_want == VMX_EXTINT) {
6358 		assert_exit_reason(exit_reason_want);
6359 
6360 		/* Clear the external interrupt. */
6361 		irq_enable();
6362 		asm volatile ("nop");
6363 		irq_disable();
6364 		report("Got pending interrupt after IRQ enabled.",
6365 		       handle_x2apic_ipi_ran);
6366 
6367 		enter_guest();
6368 	} else if (exit_reason_want == VMX_APIC_WRITE) {
6369 		assert_exit_reason(exit_reason_want);
6370 		report("got APIC write exit @ page offset 0x%03x; val is 0x%x, want 0x%lx",
6371 		       virtual_apic_page[apic_reg_index(reg)] == val,
6372 		       apic_reg_index(reg),
6373 		       virtual_apic_page[apic_reg_index(reg)], val);
6374 
6375 		/* Reenter guest so it can execute the synchronizing vmcall. */
6376 		enter_guest();
6377 	} else if (exit_reason_want != VMX_VMCALL) {
6378 		report("Oops, bad exit expectation: %u.", false,
6379 		       exit_reason_want);
6380 	}
6381 
6382 	assert_exit_reason(VMX_VMCALL);
6383 	if (expectation->wr_behavior == X2APIC_ACCESS_VIRTUALIZED) {
6384 		u64 want = val;
6385 		u32 got = virtual_apic_page[apic_reg_index(reg)];
6386 
6387 		report("x2APIC write; got 0x%x, want 0x%lx",
6388 		       got == want, got, want);
6389 	} else if (expectation->wr_behavior == X2APIC_ACCESS_PASSED_THROUGH) {
6390 		if (!expectation->wr_only) {
6391 			u32 got = apic_read(reg);
6392 			bool ok;
6393 
6394 			/*
6395 			 * When L1's TPR is passed through to L2, the lower
6396 			 * nibble can be lost. For example, if L2 executes
6397 			 * WRMSR(0x808, 0x78), then, L1 might read 0x70.
6398 			 *
6399 			 * Here's how the lower nibble can get lost:
6400 			 *   1. L2 executes WRMSR(0x808, 0x78).
6401 			 *   2. L2 exits to L0 with a WRMSR exit.
6402 			 *   3. L0 emulates WRMSR, by writing L1's TPR.
6403 			 *   4. L0 re-enters L2.
6404 			 *   5. L2 exits to L0 (reason doesn't matter).
6405 			 *   6. L0 reflects L2's exit to L1.
6406 			 *   7. Before entering L1, L0 exits to user-space
6407 			 *      (e.g., to satisfy TPR access reporting).
6408 			 *   8. User-space executes KVM_SET_REGS ioctl, which
6409 			 *      clears the lower nibble of L1's TPR.
6410 			 */
6411 			if (reg == APIC_TASKPRI) {
6412 				got = apic_virt_nibble1(got);
6413 				val = apic_virt_nibble1(val);
6414 			}
6415 
6416 			ok = got == val;
6417 			report("non-virtualized write; val is 0x%x, want 0x%lx",
6418 			       ok, got, val);
6419 			apic_write(reg, restore_val);
6420 		} else {
6421 			report("non-virtualized and write-only OK", true);
6422 		}
6423 	}
6424 	skip_exit_insn();
6425 
6426 	report_prefix_pop();
6427 }
6428 
6429 static enum Config_type configure_virt_x2apic_mode_test(
6430 	struct virt_x2apic_mode_config *virt_x2apic_mode_config,
6431 	u8 *msr_bitmap_page)
6432 {
6433 	int msr;
6434 	u32 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
6435 	u64 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
6436 
6437 	/* x2apic-specific VMCS config */
6438 	if (virt_x2apic_mode_config->use_msr_bitmaps) {
6439 		/* virt_x2apic_mode_test() checks for MSR bitmaps support */
6440 		cpu_exec_ctrl0 |= CPU_MSR_BITMAP;
6441 	} else {
6442 		cpu_exec_ctrl0 &= ~CPU_MSR_BITMAP;
6443 	}
6444 
6445 	if (virt_x2apic_mode_config->virtual_interrupt_delivery) {
6446 		if (!(ctrl_cpu_rev[1].clr & CPU_VINTD)) {
6447 			report_skip("VM-execution control \"virtual-interrupt delivery\" NOT supported.\n");
6448 			return CONFIG_TYPE_UNSUPPORTED;
6449 		}
6450 		cpu_exec_ctrl1 |= CPU_VINTD;
6451 	} else {
6452 		cpu_exec_ctrl1 &= ~CPU_VINTD;
6453 	}
6454 
6455 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
6456 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
6457 
6458 	/* x2APIC MSR intercepts are usually off for "Virtualize x2APIC mode" */
6459 	for (msr = 0x800; msr <= 0x8ff; msr++) {
6460 		if (virt_x2apic_mode_config->disable_x2apic_msr_intercepts) {
6461 			clear_bit(msr, msr_bitmap_page + 0x000);
6462 			clear_bit(msr, msr_bitmap_page + 0x800);
6463 		} else {
6464 			set_bit(msr, msr_bitmap_page + 0x000);
6465 			set_bit(msr, msr_bitmap_page + 0x800);
6466 		}
6467 	}
6468 
6469 	/* Whether L1 is in x2APIC mode affects the virtualization behavior. */
6470 	reset_apic();
6471 	if (!virt_x2apic_mode_config->disable_x2apic)
6472 		enable_x2apic();
6473 
6474 	return configure_apic_reg_virt_test(
6475 		&virt_x2apic_mode_config->apic_reg_virt_config);
6476 }
6477 
6478 static void virt_x2apic_mode_test(void)
6479 {
6480 	u32 *virtual_apic_page;
6481 	u8 *msr_bitmap_page;
6482 	u64 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
6483 	u64 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
6484 	int i;
6485 	struct virt_x2apic_mode_guest_args *args = &virt_x2apic_mode_guest_args;
6486 
6487 	if (!cpu_has_apicv()) {
6488 		report_skip(__func__);
6489 		return;
6490 	}
6491 
6492 	/*
6493 	 * This is to exercise an issue in KVM's logic to merge L0's and L1's
6494 	 * MSR bitmaps. Previously, an L1 could get at L0's x2APIC MSRs by
6495 	 * writing the IA32_SPEC_CTRL MSR or the IA32_PRED_CMD MSR. KVM would
6496 	 * then proceed to manipulate the MSR bitmaps, as if VMCS12 had the
6497 	 * "virtualize x2APIC mode" control set, even when it didn't.
6498 	 */
6499 	if (has_spec_ctrl())
6500 		wrmsr(MSR_IA32_SPEC_CTRL, 1);
6501 
6502 	/*
6503 	 * Check that VMCS12 supports:
6504 	 *   - "Virtual-APIC address", indicated by "use TPR shadow"
6505 	 *   - "MSR-bitmap address", indicated by "use MSR bitmaps"
6506 	 */
6507 	if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) {
6508 		report_skip("VM-execution control \"use TPR shadow\" NOT supported.\n");
6509 		return;
6510 	} else if (!(ctrl_cpu_rev[0].clr & CPU_MSR_BITMAP)) {
6511 		report_skip("VM-execution control \"use MSR bitmaps\" NOT supported.\n");
6512 		return;
6513 	}
6514 
6515 	test_set_guest(virt_x2apic_mode_guest);
6516 
6517 	virtual_apic_page = alloc_page();
6518 	vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page));
6519 
6520 	msr_bitmap_page = alloc_page();
6521 	memset(msr_bitmap_page, 0xff, PAGE_SIZE);
6522 	vmcs_write(MSR_BITMAP, virt_to_phys(msr_bitmap_page));
6523 
6524 	for (i = 0; i < ARRAY_SIZE(virt_x2apic_mode_tests); i++) {
6525 		struct virt_x2apic_mode_test_case *virt_x2apic_mode_test_case =
6526 			&virt_x2apic_mode_tests[i];
6527 		struct virt_x2apic_mode_config *virt_x2apic_mode_config =
6528 			&virt_x2apic_mode_test_case->virt_x2apic_mode_config;
6529 		enum Config_type config_type;
6530 		u32 reg;
6531 
6532 		printf("--- %s test ---\n", virt_x2apic_mode_test_case->name);
6533 		config_type =
6534 			configure_virt_x2apic_mode_test(virt_x2apic_mode_config,
6535 							msr_bitmap_page);
6536 		if (config_type == CONFIG_TYPE_UNSUPPORTED) {
6537 			report_skip("Skip because of missing features.\n");
6538 			continue;
6539 		} else if (config_type == CONFIG_TYPE_VMENTRY_FAILS_EARLY) {
6540 			enter_guest_with_bad_controls();
6541 			continue;
6542 		}
6543 
6544 		for (reg = 0; reg < PAGE_SIZE / sizeof(u32); reg += 0x10) {
6545 			struct virt_x2apic_mode_expectation expectation;
6546 
6547 			virt_x2apic_mode_exit_expectation(
6548 				reg, virt_x2apic_mode_config, &expectation);
6549 
6550 			test_x2apic_rd(reg, &expectation, virtual_apic_page);
6551 			test_x2apic_wr(reg, &expectation, virtual_apic_page);
6552 		}
6553 	}
6554 
6556 	/* Terminate the guest */
6557 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
6558 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
6559 	args->op = X2APIC_TERMINATE;
6560 	enter_guest();
6561 	assert_exit_reason(VMX_VMCALL);
6562 }
6563 
6564 /*
6565  * On processors that support Intel 64 architecture, the IA32_SYSENTER_ESP
6566  * field and the IA32_SYSENTER_EIP field must each contain a canonical
6567  * address.
6568  *
6569  *  [Intel SDM]
6570  */
6571 static void test_sysenter_field(u32 field, const char *name)
6572 {
6573 	u64 addr_saved = vmcs_read(field);
6574 
6575 	vmcs_write(field, NONCANONICAL);
6576 	report_prefix_pushf("%s non-canonical", name);
6577 	test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD, false);
6578 	report_prefix_pop();
6579 
6580 	vmcs_write(field, 0xffffffff);
6581 	report_prefix_pushf("%s canonical", name);
6582 	test_vmx_vmlaunch(0, false);
6583 	report_prefix_pop();
6584 
6585 	vmcs_write(field, addr_saved);
6586 }
6587 
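/*
 * Exhaustively test a host control-register field against its VMX fixed-bit
 * MSRs: a bit set in fixed0 must be 1 and a bit clear in fixed1 must be 0,
 * so flipping any such bit must fail VM entry with an invalid-host-state
 * error.
 */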
6588 static void test_ctl_reg(const char *cr_name, u64 cr, u64 fixed0, u64 fixed1)
6589 {
6590 	u64 val;
6591 	u64 cr_saved = vmcs_read(cr);
6592 	int i;
6593 
6594 	val = fixed0 & fixed1;
6595 	if (cr == HOST_CR4)
6596 		vmcs_write(cr, val | X86_CR4_PAE);
6597 	else
6598 		vmcs_write(cr, val);
6599 	report_prefix_pushf("%s %lx", cr_name, val);
6600 	if (val == fixed0)
6601 		test_vmx_vmlaunch(0, false);
6602 	else
6603 		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,
6604 				  false);
6605 	report_prefix_pop();
6606 
6607 	for (i = 0; i < 64; i++) {
6608 
6609 		/* Set a bit when the corresponding bit in fixed1 is 0 */
6610 		if ((fixed1 & (1ull << i)) == 0) {
6611 			if (cr == HOST_CR4 && ((1ull << i) & X86_CR4_SMEP ||
6612 					       (1ull << i) & X86_CR4_SMAP))
6613 				continue;
6614 
6615 			vmcs_write(cr, cr_saved | (1ull << i));
6616 			report_prefix_pushf("%s %llx", cr_name,
6617 						cr_saved | (1ull << i));
6618 			test_vmx_vmlaunch(
6619 					VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,
6620 					false);
6621 			report_prefix_pop();
6622 		}
6623 
6624 		/* Unset a bit when the corresponding bit in fixed0 is 1 */
6625 		if (fixed0 & (1ull << i)) {
6626 			vmcs_write(cr, cr_saved & ~(1ull << i));
6627 			report_prefix_pushf("%s %llx", cr_name,
6628 						cr_saved & ~(1ull << i));
6629 			test_vmx_vmlaunch(
6630 					VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,
6631 					false);
6632 			report_prefix_pop();
6633 		}
6634 	}
6635 
6636 	vmcs_write(cr, cr_saved);
6637 }
6638 
6639 /*
6640  * 1. The CR0 field must not set any bit to a value not supported in VMX
6641  *    operation.
6642  * 2. The CR4 field must not set any bit to a value not supported in VMX
6643  *    operation.
6644  * 3. On processors that support Intel 64 architecture, the CR3 field must
6645  *    be such that bits 63:52 and bits in the range 51:32 beyond the
6646  *    processor's physical-address width are 0.
6647  *
6648  *  [Intel SDM]
6649  */
6650 static void test_host_ctl_regs(void)
6651 {
6652 	u64 fixed0, fixed1, cr3, cr3_saved;
6653 	int i;
6654 
6655 	/* Test CR0 */
6656 	fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
6657 	fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
6658 	test_ctl_reg("HOST_CR0", HOST_CR0, fixed0, fixed1);
6659 
6660 	/* Test CR4 */
6661 	fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
6662 	fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1) &
6663 		 ~(X86_CR4_SMEP | X86_CR4_SMAP);
6664 	test_ctl_reg("HOST_CR4", HOST_CR4, fixed0, fixed1);
6665 
6666 	/* Test CR3 */
6667 	cr3_saved = vmcs_read(HOST_CR3);
6668 	for (i = cpuid_maxphyaddr(); i < 64; i++) {
6669 		cr3 = cr3_saved | (1ul << i);
6670 		vmcs_write(HOST_CR3, cr3);
6671 		report_prefix_pushf("HOST_CR3 %lx", cr3);
6672 		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,
6673 				  false);
6674 		report_prefix_pop();
6675 	}
6676 
6677 	vmcs_write(HOST_CR3, cr3_saved);
6678 }
6679 
6680 /*
6681  * PAT values higher than 8 are uninteresting since they're likely lumped
6682  * in with "8". We only test values above 8 one bit at a time,
6683  * in order to reduce the number of VM-Entries and keep the runtime reasonable.
6684  */
6685 #define	PAT_VAL_LIMIT	8
6686 
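/*
 * Sweep candidate IA32_PAT values through each of the eight PAT fields,
 * first with the load-PAT control clear (any value must be tolerated), then
 * with it set (memory types 2, 3, and 8 and above must be rejected).
 */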
6687 static void test_pat(u32 field, const char * field_name, u32 ctrl_field,
6688 		     u64 ctrl_bit)
6689 {
6690 	u32 ctrl_saved = vmcs_read(ctrl_field);
6691 	u64 pat_saved = vmcs_read(field);
6692 	u64 i, val;
6693 	u32 j;
6694 	int error;
6695 
6696 	vmcs_clear_bits(ctrl_field, ctrl_bit);
6697 	if (field == GUEST_PAT) {
6698 		vmx_set_test_stage(1);
6699 		test_set_guest(guest_pat_main);
6700 	}
6701 
6702 	for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) {
6703 		/* Test PAT0..PAT7 fields */
6704 		for (j = 0; j < (i ? 8 : 1); j++) {
6705 			val = i << j * 8;
6706 			vmcs_write(field, val);
6707 			if (field == HOST_PAT) {
6708 				report_prefix_pushf("%s %lx", field_name, val);
6709 				test_vmx_vmlaunch(0, false);
6710 				report_prefix_pop();
6711 
6712 			} else {	// GUEST_PAT
6713 				enter_guest_with_invalid_guest_state();
6714 				report_guest_pat_test("ENT_LOAD_PAT enabled",
6715 						       VMX_VMCALL, val);
6716 			}
6717 		}
6718 	}
6719 
6720 	vmcs_set_bits(ctrl_field, ctrl_bit);
6721 	for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) {
6722 		/* Test PAT0..PAT7 fields */
6723 		for (j = 0; j < (i ? 8 : 1); j++) {
6724 			val = i << j * 8;
6725 			vmcs_write(field, val);
6726 
6727 			if (field == HOST_PAT) {
6728 				report_prefix_pushf("%s %lx", field_name, val);
6729 				if (i == 0x2 || i == 0x3 || i >= 0x8)
6730 					error =
6731 					VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
6732 				else
6733 					error = 0;
6734 
6735 				test_vmx_vmlaunch(error, false);
6736 				report_prefix_pop();
6737 
6738 			} else {	// GUEST_PAT
6739 				if (i == 0x2 || i == 0x3 || i >= 0x8) {
6740 					enter_guest_with_invalid_guest_state();
6741 					report_guest_pat_test("ENT_LOAD_PAT "
6742 								"enabled",
6743 							     VMX_FAIL_STATE |
6744 							     VMX_ENTRY_FAILURE,
6745 							     val);
6746 				} else {
6747 					enter_guest();
6748 					report_guest_pat_test("ENT_LOAD_PAT "
6749 							      "enabled",
6750 							      VMX_VMCALL,
6751 							      val);
6752 				}
6753 			}
6754 
6755 		}
6756 	}
6757 
6758 	if (field == GUEST_PAT) {
6759 		/*
6760 		 * Let the guest finish execution
6761 		 */
6762 		vmx_set_test_stage(2);
6763 		vmcs_write(field, pat_saved);
6764 		enter_guest();
6765 	}
6766 
6767 	vmcs_write(ctrl_field, ctrl_saved);
6768 	vmcs_write(field, pat_saved);
6769 }
6770 
6771 /*
6772  *  If the "load IA32_PAT" VM-exit control is 1, the value of the field
6773  *  for the IA32_PAT MSR must be one that could be written by WRMSR
6774  *  without fault at CPL 0. Specifically, each of the 8 bytes in the
6775  *  field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
6776  *  6 (WB), or 7 (UC-).
6777  *
6778  *  [Intel SDM]
6779  */
6780 static void test_load_host_pat(void)
6781 {
6782 	/*
6783 	 * "load IA32_PAT" VM-exit control
6784 	 */
6785 	if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT)) {
6786 		printf("\"Load-IA32-PAT\" exit control not supported\n");
6787 		return;
6788 	}
6789 
6790 	test_pat(HOST_PAT, "HOST_PAT", EXI_CONTROLS, EXI_LOAD_PAT);
6791 }
6792 
6793 /*
6794  * Check that the virtual CPU checks the VMX Host State Area as
6795  * documented in the Intel SDM.
6796  */
6797 static void vmx_host_state_area_test(void)
6798 {
6799 	/*
6800 	 * Clear bit 1 of the guest's RFLAGS so that, should a host-state
6801 	 * check be missed and VM entry proceed, the entry still fails due
6802 	 * to invalid guest state.
6803 	 */
6804 	vmcs_write(GUEST_RFLAGS, 0);
6805 
6806 	test_host_ctl_regs();
6807 
6808 	test_sysenter_field(HOST_SYSENTER_ESP, "HOST_SYSENTER_ESP");
6809 	test_sysenter_field(HOST_SYSENTER_EIP, "HOST_SYSENTER_EIP");
6810 
6811 	test_load_host_pat();
6812 }
6813 
6814 /*
6815  *  If the "load IA32_PAT" VM-entry control is 1, the value of the field
6816  *  for the IA32_PAT MSR must be one that could be written by WRMSR
6817  *  without fault at CPL 0. Specifically, each of the 8 bytes in the
6818  *  field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
6819  *  6 (WB), or 7 (UC-).
6820  *
6821  *  [Intel SDM]
6822  */
6823 static void test_load_guest_pat(void)
6824 {
6825 	/*
6826 	 * "load IA32_PAT" VM-entry control
6827 	 */
6828 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
6829 		printf("\"Load-IA32-PAT\" entry control not supported\n");
6830 		return;
6831 	}
6832 
6833 	test_pat(GUEST_PAT, "GUEST_PAT", ENT_CONTROLS, ENT_LOAD_PAT);
6834 }
6835 
6836 /*
6837  * Check that the virtual CPU checks the VMX Guest State Area as
6838  * documented in the Intel SDM.
6839  */
6840 static void vmx_guest_state_area_test(void)
6841 {
6842 	test_load_guest_pat();
6843 }
6844 
6845 static bool valid_vmcs_for_vmentry(void)
6846 {
6847 	struct vmcs *current_vmcs = NULL;
6848 
6849 	if (vmcs_save(&current_vmcs))
6850 		return false;
6851 
6852 	return current_vmcs && !current_vmcs->hdr.shadow_vmcs;
6853 }
6854 
6855 static void try_vmentry_in_movss_shadow(void)
6856 {
6857 	u32 vm_inst_err;
6858 	u32 flags;
6859 	bool early_failure = false;
6860 	u32 expected_flags = X86_EFLAGS_FIXED;
6861 	bool valid_vmcs = valid_vmcs_for_vmentry();
6862 
6863 	expected_flags |= valid_vmcs ? X86_EFLAGS_ZF : X86_EFLAGS_CF;
6864 
6865 	/*
6866 	 * Indirectly set VM_INST_ERR to 12 ("VMREAD/VMWRITE from/to
6867 	 * unsupported VMCS component").
6868 	 */
6869 	vmcs_write(~0u, 0);
6870 
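	/*
	 * Write HOST_RSP/HOST_RIP in case VM entry were to succeed, pre-set
	 * the arithmetic flags via SAHF, and execute VMLAUNCH in the
	 * one-instruction shadow created by MOV to SS. The entry is expected
	 * to fail early, falling through with RFLAGS indicating VMfailValid
	 * (ZF) or VMfailInvalid (CF).
	 */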
6871 	__asm__ __volatile__ ("mov %[host_rsp], %%edx;"
6872 			      "vmwrite %%rsp, %%rdx;"
6873 			      "mov 0f, %%rax;"
6874 			      "mov %[host_rip], %%edx;"
6875 			      "vmwrite %%rax, %%rdx;"
6876 			      "mov $-1, %%ah;"
6877 			      "sahf;"
6878 			      "mov %%ss, %%ax;"
6879 			      "mov %%ax, %%ss;"
6880 			      "vmlaunch;"
6881 			      "mov $1, %[early_failure];"
6882 			      "0: lahf;"
6883 			      "movzbl %%ah, %[flags]"
6884 			      : [early_failure] "+r" (early_failure),
6885 				[flags] "=&a" (flags)
6886 			      : [host_rsp] "i" (HOST_RSP),
6887 				[host_rip] "i" (HOST_RIP)
6888 			      : "rdx", "cc", "memory");
6889 	vm_inst_err = vmcs_read(VMX_INST_ERROR);
6890 
6891 	report("Early VM-entry failure", early_failure);
6892 	report("RFLAGS[8:0] is %x (actual %x)", flags == expected_flags,
6893 	       expected_flags, flags);
6894 	if (valid_vmcs)
6895 		report("VM-instruction error is %d (actual %d)",
6896 		       vm_inst_err == VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS,
6897 		       VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, vm_inst_err);
6898 }
6899 
6900 static void vmentry_movss_shadow_test(void)
6901 {
6902 	struct vmcs *orig_vmcs;
6903 
6904 	TEST_ASSERT(!vmcs_save(&orig_vmcs));
6905 
6906 	/*
6907 	 * Set the launched flag on the current VMCS to verify the correct
6908 	 * error priority, below.
6909 	 */
6910 	test_set_guest(v2_null_test_guest);
6911 	enter_guest();
6912 
6913 	/*
6914 	 * With bit 1 of the guest's RFLAGS clear, VM-entry should
6915 	 * fail due to invalid guest state (if we make it that far).
6916 	 */
6917 	vmcs_write(GUEST_RFLAGS, 0);
6918 
6919 	/*
6920 	 * "VM entry with events blocked by MOV SS" takes precedence over
6921 	 * "VMLAUNCH with non-clear VMCS."
6922 	 */
6923 	report_prefix_push("valid current-VMCS");
6924 	try_vmentry_in_movss_shadow();
6925 	report_prefix_pop();
6926 
6927 	/*
6928 	 * VMfailInvalid takes precedence over "VM entry with events
6929 	 * blocked by MOV SS."
6930 	 */
6931 	TEST_ASSERT(!vmcs_clear(orig_vmcs));
6932 	report_prefix_push("no current-VMCS");
6933 	try_vmentry_in_movss_shadow();
6934 	report_prefix_pop();
6935 
6936 	TEST_ASSERT(!make_vmcs_current(orig_vmcs));
6937 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
6938 }
6939 
6940 #define X86_FEATURE_PCID       (1 << 17)
6941 #define X86_FEATURE_MCE        (1 << 7)
6942 
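/* Attempt a CR4 write; returns the resulting exception vector, or 0 if none. */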
6943 static int write_cr4_checking(unsigned long val)
6944 {
6945 	asm volatile(ASM_TRY("1f")
6946 		     "mov %0, %%cr4\n\t"
6947 		     "1:": : "r" (val));
6948 	return exception_vector();
6949 }
6950 
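/*
 * Verify that L1's CR3 (with a PCID) and CR4.PCIDE survive a round trip
 * through L2, i.e., that VM exit restores L1's control registers without
 * faulting on subsequent CR writes.
 */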
6951 static void vmx_cr_load_test(void)
6952 {
6953 	struct cpuid _cpuid = cpuid(1);
6954 	unsigned long cr4 = read_cr4(), cr3 = read_cr3();
6955 
6956 	if (!(_cpuid.c & X86_FEATURE_PCID)) {
6957 		report_skip("PCID not detected");
6958 		return;
6959 	}
6960 	if (!(_cpuid.d & X86_FEATURE_MCE)) {
6961 		report_skip("MCE not detected");
6962 		return;
6963 	}
6964 
6965 	TEST_ASSERT(!(cr4 & (X86_CR4_PCIDE | X86_CR4_MCE)));
6966 	TEST_ASSERT(!(cr3 & X86_CR3_PCID_MASK));
6967 
6968 	/* Enable PCID for L1. */
6969 	cr4 |= X86_CR4_PCIDE;
6970 	cr3 |= 0x1;
6971 	TEST_ASSERT(!write_cr4_checking(cr4));
6972 	write_cr3(cr3);
6973 
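	/*
	 * Mirror the new CR3/CR4 values into the host-state area so that
	 * VM-exit restores L1's PCID-enabled state.
	 */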
6974 	test_set_guest(v2_null_test_guest);
6975 	vmcs_write(HOST_CR4, cr4);
6976 	vmcs_write(HOST_CR3, cr3);
6977 	enter_guest();
6978 
6979 	/*
6980 	 * No exception is expected.
6981 	 *
	 * NB. KVM loads the last guest write to CR4 into the CR4 read
	 *     shadow. To trigger an exit to KVM, we can set a bit that
	 *     was zero in the CR4 write above and that is owned by KVM.
	 *     We choose CR4.MCE, which should have no side effects, since
	 *     no guest machine-check (e.g., from bad memory) is expected
	 *     during this test.
6988 	 */
6989 	TEST_ASSERT(!write_cr4_checking(cr4 | X86_CR4_MCE));
6990 
6991 	/* Cleanup L1 state: disable PCID. */
6992 	write_cr3(cr3 & ~X86_CR3_PCID_MASK);
6993 	TEST_ASSERT(!write_cr4_checking(cr4 & ~X86_CR4_PCIDE));
6994 }
6995 
6996 static void vmx_nm_test_guest(void)
6997 {
6998 	write_cr0(read_cr0() | X86_CR0_TS);
6999 	asm volatile("fnop");
7000 }
7001 
7002 static void check_nm_exit(const char *test)
7003 {
7004 	u32 reason = vmcs_read(EXI_REASON);
7005 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
7006 	const u32 expected = INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
7007 		NM_VECTOR;
7008 
7009 	report("%s", reason == VMX_EXC_NMI && intr_info == expected, test);
7010 }
7011 
7012 /*
7013  * This test checks that:
7014  *
7015  * (a) If L2 launches with CR0.TS clear, but later sets CR0.TS, then
7016  *     a subsequent #NM VM-exit is reflected to L1.
7017  *
7018  * (b) If L2 launches with CR0.TS clear and CR0.EM set, then a
7019  *     subsequent #NM VM-exit is reflected to L1.
7020  */
7021 static void vmx_nm_test(void)
7022 {
7023 	unsigned long cr0 = read_cr0();
7024 
7025 	test_set_guest(vmx_nm_test_guest);
7026 
7027 	/*
7028 	 * L1 wants to intercept #NM exceptions encountered in L2.
7029 	 */
7030 	vmcs_write(EXC_BITMAP, 1 << NM_VECTOR);
7031 
7032 	/*
7033 	 * Launch L2 with CR0.TS clear, but don't claim host ownership of
7034 	 * any CR0 bits. L2 will set CR0.TS and then try to execute fnop,
7035 	 * which will raise #NM. L0 should reflect the #NM VM-exit to L1.
7036 	 */
7037 	vmcs_write(CR0_MASK, 0);
7038 	vmcs_write(GUEST_CR0, cr0 & ~X86_CR0_TS);
7039 	enter_guest();
7040 	check_nm_exit("fnop with CR0.TS set in L2 triggers #NM VM-exit to L1");
7041 
7042 	/*
7043 	 * Re-enter L2 at the fnop instruction, with CR0.TS clear but
7044 	 * CR0.EM set. The fnop will still raise #NM, and L0 should
7045 	 * reflect the #NM VM-exit to L1.
7046 	 */
7047 	vmcs_write(GUEST_CR0, (cr0 & ~X86_CR0_TS) | X86_CR0_EM);
7048 	enter_guest();
7049 	check_nm_exit("fnop with CR0.EM set in L2 triggers #NM VM-exit to L1");
7050 
7051 	/*
7052 	 * Re-enter L2 at the fnop instruction, with both CR0.TS and
7053 	 * CR0.EM clear. There will be no #NM, and the L2 guest should
7054 	 * exit normally.
7055 	 */
7056 	vmcs_write(GUEST_CR0, cr0 & ~(X86_CR0_TS | X86_CR0_EM));
7057 	enter_guest();
7058 }
7059 
7060 bool vmx_pending_event_ipi_fired;
7061 static void vmx_pending_event_ipi_isr(isr_regs_t *regs)
7062 {
7063 	vmx_pending_event_ipi_fired = true;
7064 	eoi();
7065 }
7066 
7067 bool vmx_pending_event_guest_run;
7068 static void vmx_pending_event_guest(void)
7069 {
7070 	vmcall();
7071 	vmx_pending_event_guest_run = true;
7072 }
7073 
7074 static void vmx_pending_event_test_core(bool guest_hlt)
7075 {
7076 	int ipi_vector = 0xf1;
7077 
7078 	vmx_pending_event_ipi_fired = false;
7079 	handle_irq(ipi_vector, vmx_pending_event_ipi_isr);
7080 
7081 	vmx_pending_event_guest_run = false;
7082 	test_set_guest(vmx_pending_event_guest);
7083 
7084 	vmcs_set_bits(PIN_CONTROLS, PIN_EXTINT);
7085 
7086 	enter_guest();
7087 	skip_exit_vmcall();
7088 
7089 	if (guest_hlt)
7090 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
7091 
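	/*
	 * With interrupts masked in L1, the self-IPI below stays pending
	 * across the VM entry. Since "external-interrupt exiting" is set,
	 * the pending interrupt should force a VM-exit before any L2
	 * code runs.
	 */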
7092 	irq_disable();
7093 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
7094 				   APIC_DM_FIXED | ipi_vector,
7095 				   0);
7096 
7097 	enter_guest();
7098 
7099 	assert_exit_reason(VMX_EXTINT);
7100 	report("Guest did not run before host received IPI",
7101 		   !vmx_pending_event_guest_run);
7102 
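	/*
	 * Briefly open an interrupt window in L1; the nop gives the
	 * pending IPI one instruction in which to be delivered.
	 */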
7103 	irq_enable();
7104 	asm volatile ("nop");
7105 	irq_disable();
7106 	report("Got pending interrupt after IRQ enabled",
7107 		   vmx_pending_event_ipi_fired);
7108 
7109 	if (guest_hlt)
7110 		vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
7111 
7112 	enter_guest();
7113 	report("Guest finished running when no interrupt",
7114 		   vmx_pending_event_guest_run);
7115 }
7116 
7117 static void vmx_pending_event_test(void)
7118 {
7119 	vmx_pending_event_test_core(false);
7120 }
7121 
7122 static void vmx_pending_event_hlt_test(void)
7123 {
7124 	vmx_pending_event_test_core(true);
7125 }
7126 
7127 static int vmx_window_test_ud_count;
7128 
7129 static void vmx_window_test_ud_handler(struct ex_regs *regs)
7130 {
7131 	vmx_window_test_ud_count++;
7132 }
7133 
7134 static void vmx_nmi_window_test_guest(void)
7135 {
7136 	handle_exception(UD_VECTOR, vmx_window_test_ud_handler);
7137 
7138 	asm volatile("vmcall\n\t"
7139 		     "nop\n\t");
7140 
7141 	handle_exception(UD_VECTOR, NULL);
7142 }
7143 
7144 static void verify_nmi_window_exit(u64 rip)
7145 {
7146 	u32 exit_reason = vmcs_read(EXI_REASON);
7147 
7148 	report("Exit reason (%d) is 'NMI window'",
7149 	       exit_reason == VMX_NMI_WINDOW, exit_reason);
7150 	report("RIP (%#lx) is %#lx", vmcs_read(GUEST_RIP) == rip,
7151 	       vmcs_read(GUEST_RIP), rip);
7152 	report("Activity state (%ld) is 'ACTIVE'",
7153 	       vmcs_read(GUEST_ACTV_STATE) == ACTV_ACTIVE,
7154 	       vmcs_read(GUEST_ACTV_STATE));
7155 }
7156 
7157 static void vmx_nmi_window_test(void)
7158 {
7159 	u64 nop_addr;
7160 	void *ud_fault_addr = get_idt_addr(&boot_idt[UD_VECTOR]);
7161 
7162 	if (!(ctrl_pin_rev.clr & PIN_VIRT_NMI)) {
7163 		report_skip("CPU does not support the \"Virtual NMIs\" VM-execution control.");
7164 		return;
7165 	}
7166 
7167 	if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) {
7168 		report_skip("CPU does not support the \"NMI-window exiting\" VM-execution control.");
7169 		return;
7170 	}
7171 
7172 	vmx_window_test_ud_count = 0;
7173 
7174 	report_prefix_push("NMI-window");
7175 	test_set_guest(vmx_nmi_window_test_guest);
7176 	vmcs_set_bits(PIN_CONTROLS, PIN_VIRT_NMI);
7177 	enter_guest();
7178 	skip_exit_vmcall();
7179 	nop_addr = vmcs_read(GUEST_RIP);
7180 
7181 	/*
7182 	 * Ask for "NMI-window exiting," and expect an immediate VM-exit.
7183 	 * RIP will not advance.
7184 	 */
7185 	report_prefix_push("active, no blocking");
7186 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW);
7187 	enter_guest();
7188 	verify_nmi_window_exit(nop_addr);
7189 	report_prefix_pop();
7190 
7191 	/*
7192 	 * Ask for "NMI-window exiting" in a MOV-SS shadow, and expect
7193 	 * a VM-exit on the next instruction after the nop. (The nop
7194 	 * is one byte.)
7195 	 */
7196 	report_prefix_push("active, blocking by MOV-SS");
7197 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS);
7198 	enter_guest();
7199 	verify_nmi_window_exit(nop_addr + 1);
7200 	report_prefix_pop();
7201 
7202 	/*
7203 	 * Ask for "NMI-window exiting" (with event injection), and
7204 	 * expect a VM-exit after the event is injected. (RIP should
7205 	 * be at the address specified in the IDT entry for #UD.)
7206 	 */
7207 	report_prefix_push("active, no blocking, injecting #UD");
7208 	vmcs_write(ENT_INTR_INFO,
7209 		   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | UD_VECTOR);
7210 	enter_guest();
7211 	verify_nmi_window_exit((u64)ud_fault_addr);
7212 	report_prefix_pop();
7213 
7214 	/*
7215 	 * Ask for "NMI-window exiting" with NMI blocking, and expect
7216 	 * a VM-exit after the next IRET (i.e. after the #UD handler
7217 	 * returns). So, RIP should be back at one byte past the nop.
7218 	 */
7219 	report_prefix_push("active, blocking by NMI");
7220 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_NMI);
7221 	enter_guest();
7222 	verify_nmi_window_exit(nop_addr + 1);
7223 	report("#UD handler executed once (actual %d times)",
7224 	       vmx_window_test_ud_count == 1,
7225 	       vmx_window_test_ud_count);
7226 	report_prefix_pop();
7227 
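	/* Bit 6 of IA32_VMX_MISC reports support for activity state HLT. */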
7228 	if (!(rdmsr(MSR_IA32_VMX_MISC) & (1 << 6))) {
7229 		report_skip("CPU does not support activity state HLT.");
7230 	} else {
7231 		/*
7232 		 * Ask for "NMI-window exiting" when entering activity
7233 		 * state HLT, and expect an immediate VM-exit. RIP is
7234 		 * still one byte past the nop.
7235 		 */
7236 		report_prefix_push("halted, no blocking");
7237 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
7238 		enter_guest();
7239 		verify_nmi_window_exit(nop_addr + 1);
7240 		report_prefix_pop();
7241 
7242 		/*
7243 		 * Ask for "NMI-window exiting" when entering activity
7244 		 * state HLT (with event injection), and expect a
7245 		 * VM-exit after the event is injected. (RIP should be
7246 		 * at the address specified in the IDT entry for #UD.)
7247 		 */
7248 		report_prefix_push("halted, no blocking, injecting #UD");
7249 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
7250 		vmcs_write(ENT_INTR_INFO,
7251 			   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
7252 			   UD_VECTOR);
7253 		enter_guest();
7254 		verify_nmi_window_exit((u64)ud_fault_addr);
7255 		report_prefix_pop();
7256 	}
7257 
7258 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW);
7259 	enter_guest();
7260 	report_prefix_pop();
7261 }
7262 
7263 static void vmx_intr_window_test_guest(void)
7264 {
7265 	handle_exception(UD_VECTOR, vmx_window_test_ud_handler);
7266 
7267 	/*
7268 	 * The two consecutive STIs are to ensure that only the first
	 * one has a shadow. Note that NOP and STI are one-byte
7270 	 * instructions.
7271 	 */
7272 	asm volatile("vmcall\n\t"
7273 		     "nop\n\t"
7274 		     "sti\n\t"
7275 		     "sti\n\t");
7276 
7277 	handle_exception(UD_VECTOR, NULL);
7278 }
7279 
7280 static void verify_intr_window_exit(u64 rip)
7281 {
7282 	u32 exit_reason = vmcs_read(EXI_REASON);
7283 
7284 	report("Exit reason (%d) is 'interrupt window'",
7285 	       exit_reason == VMX_INTR_WINDOW, exit_reason);
7286 	report("RIP (%#lx) is %#lx", vmcs_read(GUEST_RIP) == rip,
7287 	       vmcs_read(GUEST_RIP), rip);
7288 	report("Activity state (%ld) is 'ACTIVE'",
7289 	       vmcs_read(GUEST_ACTV_STATE) == ACTV_ACTIVE,
7290 	       vmcs_read(GUEST_ACTV_STATE));
7291 }
7292 
7293 static void vmx_intr_window_test(void)
7294 {
7295 	u64 vmcall_addr;
7296 	u64 nop_addr;
7297 	unsigned int orig_ud_gate_type;
7298 	void *ud_fault_addr = get_idt_addr(&boot_idt[UD_VECTOR]);
7299 
7300 	if (!(ctrl_cpu_rev[0].clr & CPU_INTR_WINDOW)) {
7301 		report_skip("CPU does not support the \"interrupt-window exiting\" VM-execution control.");
7302 		return;
7303 	}
7304 
7305 	/*
7306 	 * Change the IDT entry for #UD from interrupt gate to trap gate,
7307 	 * so that it won't clear RFLAGS.IF. We don't want interrupts to
7308 	 * be disabled after vectoring a #UD.
7309 	 */
7310 	orig_ud_gate_type = boot_idt[UD_VECTOR].type;
	boot_idt[UD_VECTOR].type = 15;	/* 64-bit trap gate */
7312 
7313 	report_prefix_push("interrupt-window");
7314 	test_set_guest(vmx_intr_window_test_guest);
7315 	enter_guest();
7316 	assert_exit_reason(VMX_VMCALL);
7317 	vmcall_addr = vmcs_read(GUEST_RIP);
7318 
7319 	/*
7320 	 * Ask for "interrupt-window exiting" with RFLAGS.IF set and
7321 	 * no blocking; expect an immediate VM-exit. Note that we have
7322 	 * not advanced past the vmcall instruction yet, so RIP should
7323 	 * point to the vmcall instruction.
7324 	 */
7325 	report_prefix_push("active, no blocking, RFLAGS.IF=1");
7326 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
7327 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_IF);
7328 	enter_guest();
7329 	verify_intr_window_exit(vmcall_addr);
7330 	report_prefix_pop();
7331 
7332 	/*
7333 	 * Ask for "interrupt-window exiting" (with event injection)
7334 	 * with RFLAGS.IF set and no blocking; expect a VM-exit after
	 * the event is injected. That is, RIP should be at the
7336 	 * address specified in the IDT entry for #UD.
7337 	 */
7338 	report_prefix_push("active, no blocking, RFLAGS.IF=1, injecting #UD");
7339 	vmcs_write(ENT_INTR_INFO,
7340 		   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | UD_VECTOR);
7341 	vmcall_addr = vmcs_read(GUEST_RIP);
7342 	enter_guest();
7343 	verify_intr_window_exit((u64)ud_fault_addr);
7344 	report_prefix_pop();
7345 
7346 	/*
7347 	 * Let the L2 guest run through the IRET, back to the VMCALL.
7348 	 * We have to clear the "interrupt-window exiting"
7349 	 * VM-execution control, or it would just keep causing
7350 	 * VM-exits. Then, advance past the VMCALL and set the
7351 	 * "interrupt-window exiting" VM-execution control again.
7352 	 */
7353 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
7354 	enter_guest();
7355 	skip_exit_vmcall();
7356 	nop_addr = vmcs_read(GUEST_RIP);
7357 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
7358 
7359 	/*
7360 	 * Ask for "interrupt-window exiting" in a MOV-SS shadow with
7361 	 * RFLAGS.IF set, and expect a VM-exit on the next
7362 	 * instruction. (NOP is one byte.)
7363 	 */
7364 	report_prefix_push("active, blocking by MOV-SS, RFLAGS.IF=1");
7365 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS);
7366 	enter_guest();
7367 	verify_intr_window_exit(nop_addr + 1);
7368 	report_prefix_pop();
7369 
7370 	/*
7371 	 * Back up to the NOP and ask for "interrupt-window exiting"
7372 	 * in an STI shadow with RFLAGS.IF set, and expect a VM-exit
7373 	 * on the next instruction. (NOP is one byte.)
7374 	 */
7375 	report_prefix_push("active, blocking by STI, RFLAGS.IF=1");
7376 	vmcs_write(GUEST_RIP, nop_addr);
7377 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_STI);
7378 	enter_guest();
7379 	verify_intr_window_exit(nop_addr + 1);
7380 	report_prefix_pop();
7381 
7382 	/*
7383 	 * Ask for "interrupt-window exiting" with RFLAGS.IF clear,
7384 	 * and expect a VM-exit on the instruction following the STI
7385 	 * shadow. Only the first STI (which is one byte past the NOP)
7386 	 * should have a shadow. The second STI (which is two bytes
7387 	 * past the NOP) has no shadow. Therefore, the interrupt
7388 	 * window opens at three bytes past the NOP.
7389 	 */
	report_prefix_push("active, RFLAGS.IF=0");
7391 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
7392 	enter_guest();
7393 	verify_intr_window_exit(nop_addr + 3);
7394 	report_prefix_pop();
7395 
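	/* Bit 6 of IA32_VMX_MISC reports support for activity state HLT. */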
7396 	if (!(rdmsr(MSR_IA32_VMX_MISC) & (1 << 6))) {
7397 		report_skip("CPU does not support activity state HLT.");
7398 	} else {
7399 		/*
7400 		 * Ask for "interrupt-window exiting" when entering
7401 		 * activity state HLT, and expect an immediate
7402 		 * VM-exit. RIP is still three bytes past the nop.
7403 		 */
7404 		report_prefix_push("halted, no blocking");
7405 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
7406 		enter_guest();
7407 		verify_intr_window_exit(nop_addr + 3);
7408 		report_prefix_pop();
7409 
7410 		/*
7411 		 * Ask for "interrupt-window exiting" when entering
7412 		 * activity state HLT (with event injection), and
7413 		 * expect a VM-exit after the event is injected. That
	 * is, RIP should be at the address specified
7415 		 * in the IDT entry for #UD.
7416 		 */
7417 		report_prefix_push("halted, no blocking, injecting #UD");
7418 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
7419 		vmcs_write(ENT_INTR_INFO,
7420 			   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
7421 			   UD_VECTOR);
7422 		enter_guest();
7423 		verify_intr_window_exit((u64)ud_fault_addr);
7424 		report_prefix_pop();
7425 	}
7426 
7427 	boot_idt[UD_VECTOR].type = orig_ud_gate_type;
7428 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
7429 	enter_guest();
7430 	report_prefix_pop();
7431 }
7432 
7433 #define GUEST_TSC_OFFSET (1u << 30)
7434 
7435 static u64 guest_tsc;
7436 
7437 static void vmx_store_tsc_test_guest(void)
7438 {
7439 	guest_tsc = rdtsc();
7440 }
7441 
7442 /*
7443  * This test ensures that when IA32_TSC is in the VM-exit MSR-store
7444  * list, the value saved is not subject to the TSC offset that is
7445  * applied to RDTSC/RDTSCP/RDMSR(IA32_TSC) in guest execution.
7446  */
7447 static void vmx_store_tsc_test(void)
7448 {
7449 	struct vmx_msr_entry msr_entry = { .index = MSR_IA32_TSC };
7450 	u64 low, high;
7451 
7452 	if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET)) {
7453 		report_skip("'Use TSC offsetting' not supported");
7454 		return;
7455 	}
7456 
7457 	test_set_guest(vmx_store_tsc_test_guest);
7458 
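	/*
	 * Apply a TSC offset to L2 and ask the CPU to store IA32_TSC on
	 * VM-exit via a single-entry MSR-store list; the stored value
	 * should be the raw TSC, without the offset.
	 */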
7459 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_USE_TSC_OFFSET);
7460 	vmcs_write(EXI_MSR_ST_CNT, 1);
7461 	vmcs_write(EXIT_MSR_ST_ADDR, virt_to_phys(&msr_entry));
7462 	vmcs_write(TSC_OFFSET, GUEST_TSC_OFFSET);
7463 
7464 	low = rdtsc();
7465 	enter_guest();
7466 	high = rdtsc();
7467 
7468 	report("RDTSC value in the guest (%lu) is in range [%lu, %lu]",
7469 	       low + GUEST_TSC_OFFSET <= guest_tsc &&
7470 	       guest_tsc <= high + GUEST_TSC_OFFSET,
7471 	       guest_tsc, low + GUEST_TSC_OFFSET, high + GUEST_TSC_OFFSET);
7472 	report("IA32_TSC value saved in the VM-exit MSR-store list (%lu) is in range [%lu, %lu]",
7473 	       low <= msr_entry.value && msr_entry.value <= high,
7474 	       msr_entry.value, low, high);
7475 }
7476 
7477 static void vmx_db_test_guest(void)
7478 {
7479 	/*
7480 	 * For a hardware generated single-step #DB.
7481 	 */
7482 	asm volatile("vmcall;"
7483 		     "nop;"
7484 		     ".Lpost_nop:");
7485 	/*
7486 	 * ...in a MOVSS shadow, with pending debug exceptions.
7487 	 */
7488 	asm volatile("vmcall;"
7489 		     "nop;"
7490 		     ".Lpost_movss_nop:");
7491 	/*
7492 	 * For an L0 synthesized single-step #DB. (L0 intercepts WBINVD and
7493 	 * emulates it in software.)
7494 	 */
7495 	asm volatile("vmcall;"
7496 		     "wbinvd;"
7497 		     ".Lpost_wbinvd:");
7498 	/*
7499 	 * ...in a MOVSS shadow, with pending debug exceptions.
7500 	 */
7501 	asm volatile("vmcall;"
7502 		     "wbinvd;"
7503 		     ".Lpost_movss_wbinvd:");
7504 	/*
7505 	 * For a hardware generated single-step #DB in a transactional region.
7506 	 */
7507 	asm volatile("vmcall;"
7508 		     ".Lxbegin: xbegin .Lskip_rtm;"
7509 		     "xend;"
7510 		     ".Lskip_rtm:");
7511 }
7512 
7513 /*
7514  * Clear the pending debug exceptions and RFLAGS.TF and re-enter
7515  * L2. No #DB is delivered and L2 continues to the next point of
7516  * interest.
7517  */
7518 static void dismiss_db(void)
7519 {
7520 	vmcs_write(GUEST_PENDING_DEBUG, 0);
7521 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
7522 	enter_guest();
7523 }
7524 
7525 /*
7526  * Check a variety of VMCS fields relevant to an intercepted #DB exception.
7527  * Then throw away the #DB exception and resume L2.
7528  */
7529 static void check_db_exit(bool xfail_qual, bool xfail_dr6, bool xfail_pdbg,
7530 			  void *expected_rip, u64 expected_exit_qual,
7531 			  u64 expected_dr6)
7532 {
7533 	u32 reason = vmcs_read(EXI_REASON);
7534 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
7535 	u64 exit_qual = vmcs_read(EXI_QUALIFICATION);
7536 	u64 guest_rip = vmcs_read(GUEST_RIP);
7537 	u64 guest_pending_dbg = vmcs_read(GUEST_PENDING_DEBUG);
7538 	u64 dr6 = read_dr6();
7539 	const u32 expected_intr_info = INTR_INFO_VALID_MASK |
7540 		INTR_TYPE_HARD_EXCEPTION | DB_VECTOR;
7541 
7542 	report("Expected #DB VM-exit",
7543 	       reason == VMX_EXC_NMI && intr_info == expected_intr_info);
7544 	report("Expected RIP %p (actual %lx)", (u64)expected_rip == guest_rip,
7545 	       expected_rip, guest_rip);
7546 	report_xfail("Expected pending debug exceptions 0 (actual %lx)",
7547 		     xfail_pdbg, 0 == guest_pending_dbg, guest_pending_dbg);
7548 	report_xfail("Expected exit qualification %lx (actual %lx)", xfail_qual,
7549 		     expected_exit_qual == exit_qual,
7550 		     expected_exit_qual, exit_qual);
7551 	report_xfail("Expected DR6 %lx (actual %lx)", xfail_dr6,
7552 		     expected_dr6 == dr6, expected_dr6, dr6);
7553 	dismiss_db();
7554 }
7555 
7556 /*
7557  * Assuming the guest has just exited on a VMCALL instruction, skip
7558  * over the vmcall, and set the guest's RFLAGS.TF in the VMCS. If
7559  * pending debug exceptions are non-zero, set the VMCS up as if the
7560  * previous instruction was a MOVSS that generated the indicated
7561  * pending debug exceptions. Then enter L2.
7562  */
7563 static void single_step_guest(const char *test_name, u64 starting_dr6,
7564 			      u64 pending_debug_exceptions)
7565 {
7566 	printf("\n%s\n", test_name);
7567 	skip_exit_vmcall();
7568 	write_dr6(starting_dr6);
7569 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_TF);
7570 	if (pending_debug_exceptions) {
7571 		vmcs_write(GUEST_PENDING_DEBUG, pending_debug_exceptions);
7572 		vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS);
7573 	}
7574 	enter_guest();
7575 }
7576 
7577 /*
7578  * When L1 intercepts #DB, verify that a single-step trap clears
7579  * pending debug exceptions, populates the exit qualification field
7580  * properly, and that DR6 is not prematurely clobbered. In a
7581  * (simulated) MOVSS shadow, make sure that the pending debug
7582  * exception bits are properly accumulated into the exit qualification
7583  * field.
7584  */
7585 static void vmx_db_test(void)
7586 {
7587 	/*
7588 	 * We are going to set a few arbitrary bits in DR6 to verify that
7589 	 * (a) DR6 is not modified by an intercepted #DB, and
7590 	 * (b) stale bits in DR6 (DR6.BD, in particular) don't leak into
	 *     the exit qualification field for a subsequent #DB exception.
7592 	 */
7593 	const u64 starting_dr6 = DR6_RESERVED | BIT(13) | DR_TRAP3 | DR_TRAP1;
7594 	extern char post_nop asm(".Lpost_nop");
7595 	extern char post_movss_nop asm(".Lpost_movss_nop");
7596 	extern char post_wbinvd asm(".Lpost_wbinvd");
7597 	extern char post_movss_wbinvd asm(".Lpost_movss_wbinvd");
7598 	extern char xbegin asm(".Lxbegin");
7599 	extern char skip_rtm asm(".Lskip_rtm");
7600 
7601 	/*
7602 	 * L1 wants to intercept #DB exceptions encountered in L2.
7603 	 */
7604 	vmcs_write(EXC_BITMAP, BIT(DB_VECTOR));
7605 
7606 	/*
7607 	 * Start L2 and run it up to the first point of interest.
7608 	 */
7609 	test_set_guest(vmx_db_test_guest);
7610 	enter_guest();
7611 
7612 	/*
7613 	 * Hardware-delivered #DB trap for single-step sets the
7614 	 * standard that L0 has to follow for emulated instructions.
7615 	 */
7616 	single_step_guest("Hardware delivered single-step", starting_dr6, 0);
7617 	check_db_exit(false, false, false, &post_nop, DR_STEP, starting_dr6);
7618 
7619 	/*
7620 	 * Hardware-delivered #DB trap for single-step in MOVSS shadow
7621 	 * also sets the standard that L0 has to follow for emulated
7622 	 * instructions. Here, we establish the VMCS pending debug
7623 	 * exceptions to indicate that the simulated MOVSS triggered a
7624 	 * data breakpoint as well as the single-step trap.
7625 	 */
7626 	single_step_guest("Hardware delivered single-step in MOVSS shadow",
			  starting_dr6, BIT(12) | DR_STEP | DR_TRAP0);
7628 	check_db_exit(false, false, false, &post_movss_nop, DR_STEP | DR_TRAP0,
7629 		      starting_dr6);
7630 
7631 	/*
7632 	 * L0 synthesized #DB trap for single-step is buggy, because
7633 	 * kvm (a) clobbers DR6 too early, and (b) tries its best to
7634 	 * reconstitute the exit qualification from the prematurely
7635 	 * modified DR6, but fails miserably.
7636 	 */
7637 	single_step_guest("Software synthesized single-step", starting_dr6, 0);
7638 	check_db_exit(true, true, false, &post_wbinvd, DR_STEP, starting_dr6);
7639 
7640 	/*
7641 	 * L0 synthesized #DB trap for single-step in MOVSS shadow is
7642 	 * even worse, because L0 also leaves the pending debug
7643 	 * exceptions in the VMCS instead of accumulating them into
7644 	 * the exit qualification field for the #DB exception.
7645 	 */
7646 	single_step_guest("Software synthesized single-step in MOVSS shadow",
7647 			  starting_dr6, BIT(12) | DR_STEP | DR_TRAP0);
7648 	check_db_exit(true, true, true, &post_movss_wbinvd, DR_STEP | DR_TRAP0,
7649 		      starting_dr6);
7650 
7651 	/*
7652 	 * Optional RTM test for hardware that supports RTM, to
7653 	 * demonstrate that the current volume 3 of the SDM
7654 	 * (325384-067US), table 27-1 is incorrect. Bit 16 of the exit
7655 	 * qualification for debug exceptions is not reserved. It is
7656 	 * set to 1 if a debug exception (#DB) or a breakpoint
7657 	 * exception (#BP) occurs inside an RTM region while advanced
7658 	 * debugging of RTM transactional regions is enabled.
7659 	 */
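	/* CPUID.(EAX=7,ECX=0):EBX.RTM[bit 11] advertises RTM support. */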
7660 	if (cpuid(7).b & BIT(11)) {
7661 		vmcs_write(ENT_CONTROLS,
7662 			   vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
7663 		/*
7664 		 * Set DR7.RTM[bit 11] and IA32_DEBUGCTL.RTM[bit 15]
7665 		 * in the guest to enable advanced debugging of RTM
7666 		 * transactional regions.
7667 		 */
7668 		vmcs_write(GUEST_DR7, BIT(11));
7669 		vmcs_write(GUEST_DEBUGCTL, BIT(15));
7670 		single_step_guest("Hardware delivered single-step in "
7671 				  "transactional region", starting_dr6, 0);
7672 		check_db_exit(false, false, false, &xbegin, BIT(16),
7673 			      starting_dr6);
7674 	} else {
7675 		vmcs_write(GUEST_RIP, (u64)&skip_rtm);
7676 		enter_guest();
7677 	}
7678 }
7679 
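/*
 * Enable virtual-interrupt delivery on the current VMCS: install a
 * virtual-APIC page, clear all four EOI-exit bitmaps, and set the
 * "virtual-interrupt delivery" and "virtualize x2APIC mode" secondary
 * controls along with their TPR-shadow and external-interrupt
 * prerequisites.
 */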
7680 static void enable_vid(void)
7681 {
7682 	void *virtual_apic_page;
7683 
7684 	assert(cpu_has_apicv());
7685 
7686 	disable_intercept_for_x2apic_msrs();
7687 
7688 	virtual_apic_page = alloc_page();
7689 	vmcs_write(APIC_VIRT_ADDR, (u64)virtual_apic_page);
7690 
7691 	vmcs_set_bits(PIN_CONTROLS, PIN_EXTINT);
7692 
7693 	vmcs_write(EOI_EXIT_BITMAP0, 0x0);
7694 	vmcs_write(EOI_EXIT_BITMAP1, 0x0);
7695 	vmcs_write(EOI_EXIT_BITMAP2, 0x0);
7696 	vmcs_write(EOI_EXIT_BITMAP3, 0x0);
7697 
7698 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY | CPU_TPR_SHADOW);
7699 	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_VINTD | CPU_VIRT_X2APIC);
7700 }
7701 
7702 static void trigger_ioapic_scan_thread(void *data)
7703 {
	/* Wait until the other CPU has entered L2 */
7705 	while (vmx_get_test_stage() != 1)
7706 		;
7707 
7708 	/* Trigger ioapic scan */
7709 	ioapic_set_redir(0xf, 0x79, TRIGGER_LEVEL);
7710 	vmx_set_test_stage(2);
7711 }
7712 
7713 static void irq_79_handler_guest(isr_regs_t *regs)
7714 {
7715 	eoi();
7716 
7717 	/* L1 expects vmexit on VMX_VMCALL and not VMX_EOI_INDUCED */
7718 	vmcall();
7719 }
7720 
7721 /*
 * Number of busy-loop iterations after which a timer interrupt
 * should have fired in the host.
7724  */
7725 #define TIMER_INTERRUPT_DELAY 100000000
7726 
7727 static void vmx_eoi_bitmap_ioapic_scan_test_guest(void)
7728 {
7729 	handle_irq(0x79, irq_79_handler_guest);
7730 	irq_enable();
7731 
7732 	/* Signal to L1 CPU to trigger ioapic scan */
7733 	vmx_set_test_stage(1);
	/* Wait for the L1 CPU to trigger the ioapic scan */
7735 	while (vmx_get_test_stage() != 2)
7736 		;
7737 
7738 	/*
7739 	 * Wait for L0 timer interrupt to be raised while we run in L2
7740 	 * such that L0 will process the IOAPIC scan request before
7741 	 * resuming L2
7742 	 */
7743 	delay(TIMER_INTERRUPT_DELAY);
7744 
7745 	asm volatile ("int $0x79");
7746 }
7747 
7748 static void vmx_eoi_bitmap_ioapic_scan_test(void)
7749 {
7750 	if (!cpu_has_apicv() || (cpu_count() < 2)) {
7751 		report_skip(__func__);
7752 		return;
7753 	}
7754 
7755 	enable_vid();
7756 
7757 	on_cpu_async(1, trigger_ioapic_scan_thread, NULL);
7758 	test_set_guest(vmx_eoi_bitmap_ioapic_scan_test_guest);
7759 
7760 	/*
7761 	 * Launch L2.
7762 	 * We expect the exit reason to be VMX_VMCALL (and not EOI INDUCED).
	 * If the reason isn't VMX_VMCALL, the assertion inside
7764 	 * skip_exit_vmcall() will fail.
7765 	 */
7766 	enter_guest();
7767 	skip_exit_vmcall();
7768 
7769 	/* Let L2 finish */
7770 	enter_guest();
7771 	report(__func__, 1);
7772 }
7773 
7774 #define HLT_WITH_RVI_VECTOR		(0xf1)
7775 
7776 bool vmx_hlt_with_rvi_guest_isr_fired;
7777 static void vmx_hlt_with_rvi_guest_isr(isr_regs_t *regs)
7778 {
7779 	vmx_hlt_with_rvi_guest_isr_fired = true;
7780 	eoi();
7781 }
7782 
7783 static void vmx_hlt_with_rvi_guest(void)
7784 {
7785 	handle_irq(HLT_WITH_RVI_VECTOR, vmx_hlt_with_rvi_guest_isr);
7786 
7787 	irq_enable();
7788 	asm volatile ("nop");
7789 
7790 	vmcall();
7791 }
7792 
7793 static void vmx_hlt_with_rvi_test(void)
7794 {
7795 	if (!cpu_has_apicv()) {
7796 		report_skip(__func__);
7797 		return;
7798 	}
7799 
7800 	enable_vid();
7801 
7802 	vmx_hlt_with_rvi_guest_isr_fired = false;
7803 	test_set_guest(vmx_hlt_with_rvi_guest);
7804 
7805 	enter_guest();
7806 	skip_exit_vmcall();
7807 
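	/*
	 * Halt L2 and make the test vector pending in RVI (the low byte
	 * of the guest interrupt status); virtual-interrupt delivery
	 * should wake the halted guest and invoke the ISR.
	 */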
7808 	vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
7809 	vmcs_write(GUEST_INT_STATUS, HLT_WITH_RVI_VECTOR);
7810 	enter_guest();
7811 
7812 	report("Interrupt raised in guest", vmx_hlt_with_rvi_guest_isr_fired);
7813 }
7814 
7815 static void set_irq_line_thread(void *data)
7816 {
	/* Wait until the other CPU has entered L2 */
7818 	while (vmx_get_test_stage() != 1)
7819 		;
7820 
7821 	/* Set irq-line 0xf to raise vector 0x78 for vCPU 0 */
7822 	ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL);
7823 	vmx_set_test_stage(2);
7824 }
7825 
7826 static bool irq_78_handler_vmcall_before_eoi;
7827 static void irq_78_handler_guest(isr_regs_t *regs)
7828 {
7829 	set_irq_line(0xf, 0);
7830 	if (irq_78_handler_vmcall_before_eoi)
7831 		vmcall();
7832 	eoi();
7833 	vmcall();
7834 }
7835 
7836 static void vmx_apic_passthrough_guest(void)
7837 {
7838 	handle_irq(0x78, irq_78_handler_guest);
7839 	irq_enable();
7840 
7841 	/* If requested, wait for other CPU to trigger ioapic scan */
7842 	if (vmx_get_test_stage() < 1) {
7843 		vmx_set_test_stage(1);
7844 		while (vmx_get_test_stage() != 2)
7845 			;
7846 	}
7847 
7848 	set_irq_line(0xf, 1);
7849 }
7850 
7851 static void vmx_apic_passthrough(bool set_irq_line_from_thread)
7852 {
7853 	if (set_irq_line_from_thread && (cpu_count() < 2)) {
7854 		report_skip(__func__);
7855 		return;
7856 	}
7857 
7858 	u64 cpu_ctrl_0 = CPU_SECONDARY;
7859 	u64 cpu_ctrl_1 = 0;
7860 
7861 	disable_intercept_for_x2apic_msrs();
7862 
7863 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
7864 
7865 	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | cpu_ctrl_0);
7866 	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | cpu_ctrl_1);
7867 
7868 	if (set_irq_line_from_thread) {
7869 		irq_78_handler_vmcall_before_eoi = false;
7870 		on_cpu_async(1, set_irq_line_thread, NULL);
7871 	} else {
7872 		irq_78_handler_vmcall_before_eoi = true;
7873 		ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL);
7874 		vmx_set_test_stage(2);
7875 	}
7876 	test_set_guest(vmx_apic_passthrough_guest);
7877 
7878 	if (irq_78_handler_vmcall_before_eoi) {
7879 		/* Before EOI remote_irr should still be set */
7880 		enter_guest();
7881 		skip_exit_vmcall();
7882 		TEST_ASSERT_EQ_MSG(1, (int)ioapic_read_redir(0xf).remote_irr,
7883 			"IOAPIC pass-through: remote_irr=1 before EOI");
7884 	}
7885 
7886 	/* After EOI remote_irr should be cleared */
7887 	enter_guest();
7888 	skip_exit_vmcall();
7889 	TEST_ASSERT_EQ_MSG(0, (int)ioapic_read_redir(0xf).remote_irr,
7890 		"IOAPIC pass-through: remote_irr=0 after EOI");
7891 
7892 	/* Let L2 finish */
7893 	enter_guest();
7894 	report(__func__, 1);
7895 }
7896 
7897 static void vmx_apic_passthrough_test(void)
7898 {
7899 	vmx_apic_passthrough(false);
7900 }
7901 
7902 static void vmx_apic_passthrough_thread_test(void)
7903 {
7904 	vmx_apic_passthrough(true);
7905 }
7906 
7907 enum vmcs_access {
7908 	ACCESS_VMREAD,
7909 	ACCESS_VMWRITE,
7910 	ACCESS_NONE,
7911 };
7912 
7913 struct vmcs_shadow_test_common {
7914 	enum vmcs_access op;
7915 	enum Reason reason;
7916 	u64 field;
7917 	u64 value;
7918 	u64 flags;
7919 	u64 time;
7920 } l1_l2_common;
7921 
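/*
 * Helpers that execute VMREAD/VMWRITE and return the arithmetic flags
 * set; a nonzero result indicates VMfail (CF for VMfailInvalid, ZF for
 * VMfailValid).
 */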
7922 static inline u64 vmread_flags(u64 field, u64 *val)
7923 {
7924 	u64 flags;
7925 
7926 	asm volatile ("vmread %2, %1; pushf; pop %0"
7927 		      : "=r" (flags), "=rm" (*val) : "r" (field) : "cc");
7928 	return flags & X86_EFLAGS_ALU;
7929 }
7930 
7931 static inline u64 vmwrite_flags(u64 field, u64 val)
7932 {
7933 	u64 flags;
7934 
7935 	asm volatile ("vmwrite %1, %2; pushf; pop %0"
7936 		      : "=r"(flags) : "rm" (val), "r" (field) : "cc");
7937 	return flags & X86_EFLAGS_ALU;
7938 }
7939 
7940 static void vmx_vmcs_shadow_test_guest(void)
7941 {
7942 	struct vmcs_shadow_test_common *c = &l1_l2_common;
7943 	u64 start;
7944 
7945 	while (c->op != ACCESS_NONE) {
7946 		start = rdtsc();
7947 		switch (c->op) {
7948 		default:
7949 			c->flags = -1ull;
7950 			break;
7951 		case ACCESS_VMREAD:
7952 			c->flags = vmread_flags(c->field, &c->value);
7953 			break;
7954 		case ACCESS_VMWRITE:
7955 			c->flags = vmwrite_flags(c->field, 0);
7956 			break;
7957 		}
7958 		c->time = rdtsc() - start;
7959 		vmcall();
7960 	}
7961 }
7962 
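/*
 * Temporarily make the shadow VMCS (taken from the current VMCS's link
 * pointer) current in order to read a field directly, then restore the
 * primary VMCS.
 */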
7963 static u64 vmread_from_shadow(u64 field)
7964 {
7965 	struct vmcs *primary;
7966 	struct vmcs *shadow;
7967 	u64 value;
7968 
7969 	TEST_ASSERT(!vmcs_save(&primary));
7970 	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
7971 	TEST_ASSERT(!make_vmcs_current(shadow));
7972 	value = vmcs_read(field);
7973 	TEST_ASSERT(!make_vmcs_current(primary));
7974 	return value;
7975 }
7976 
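/*
 * As above, but write @value to @field in the shadow VMCS, returning
 * the value read back.
 */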
7977 static u64 vmwrite_to_shadow(u64 field, u64 value)
7978 {
7979 	struct vmcs *primary;
7980 	struct vmcs *shadow;
7981 
7982 	TEST_ASSERT(!vmcs_save(&primary));
7983 	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
7984 	TEST_ASSERT(!make_vmcs_current(shadow));
7985 	vmcs_write(field, value);
7986 	value = vmcs_read(field);
7987 	TEST_ASSERT(!make_vmcs_current(primary));
7988 	return value;
7989 }
7990 
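/*
 * Have L2 attempt a single VMREAD or VMWRITE and record the exit
 * reason. If the access was intercepted (i.e., the exit was not the
 * guest's own VMCALL), skip the intercepted instruction and re-enter
 * L2 to reach the VMCALL that follows it.
 */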
7991 static void vmcs_shadow_test_access(u8 *bitmap[2], enum vmcs_access access)
7992 {
7993 	struct vmcs_shadow_test_common *c = &l1_l2_common;
7994 
7995 	c->op = access;
7996 	vmcs_write(VMX_INST_ERROR, 0);
7997 	enter_guest();
7998 	c->reason = vmcs_read(EXI_REASON) & 0xffff;
7999 	if (c->reason != VMX_VMCALL) {
8000 		skip_exit_insn();
8001 		enter_guest();
8002 	}
8003 	skip_exit_vmcall();
8004 }
8005 
8006 static void vmcs_shadow_test_field(u8 *bitmap[2], u64 field)
8007 {
8008 	struct vmcs_shadow_test_common *c = &l1_l2_common;
8009 	struct vmcs *shadow;
8010 	u64 value;
8011 	uintptr_t flags[2];
8012 	bool good_shadow;
8013 	u32 vmx_inst_error;
8014 
8015 	report_prefix_pushf("field %lx", field);
8016 	c->field = field;
8017 
8018 	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
8019 	if (shadow != (struct vmcs *)-1ull) {
8020 		flags[ACCESS_VMREAD] = vmread_flags(field, &value);
8021 		flags[ACCESS_VMWRITE] = vmwrite_flags(field, value);
8022 		good_shadow = !flags[ACCESS_VMREAD] && !flags[ACCESS_VMWRITE];
8023 	} else {
8024 		/*
		 * When the VMCS link pointer is -1ull, VMREAD/VMWRITE to
		 * shadowed fields should fail, setting RFLAGS.CF.
8027 		 */
8028 		flags[ACCESS_VMREAD] = X86_EFLAGS_CF;
8029 		flags[ACCESS_VMWRITE] = X86_EFLAGS_CF;
8030 		good_shadow = false;
8031 	}
8032 
8033 	/* Intercept both VMREAD and VMWRITE. */
8034 	report_prefix_push("no VMREAD/VMWRITE permission");
	/* VMREAD/VMWRITE to a field with a reserved bit set is always intercepted */
8036 	if (!(field >> VMCS_FIELD_RESERVED_SHIFT)) {
8037 		set_bit(field, bitmap[ACCESS_VMREAD]);
8038 		set_bit(field, bitmap[ACCESS_VMWRITE]);
8039 	}
8040 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
8041 	report("not shadowed for VMWRITE", c->reason == VMX_VMWRITE);
8042 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
8043 	report("not shadowed for VMREAD", c->reason == VMX_VMREAD);
8044 	report_prefix_pop();
8045 
8046 	if (field >> VMCS_FIELD_RESERVED_SHIFT)
8047 		goto out;
8048 
8049 	/* Permit shadowed VMREAD. */
8050 	report_prefix_push("VMREAD permission only");
8051 	clear_bit(field, bitmap[ACCESS_VMREAD]);
8052 	set_bit(field, bitmap[ACCESS_VMWRITE]);
8053 	if (good_shadow)
8054 		value = vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
8055 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
8056 	report("not shadowed for VMWRITE", c->reason == VMX_VMWRITE);
8057 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
8058 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
8059 	report("shadowed for VMREAD (in %ld cycles)", c->reason == VMX_VMCALL,
8060 	       c->time);
8061 	report("ALU flags after VMREAD (%lx) are as expected (%lx)",
8062 	       c->flags == flags[ACCESS_VMREAD],
8063 	       c->flags, flags[ACCESS_VMREAD]);
8064 	if (good_shadow)
8065 		report("value read from shadow (%lx) is as expected (%lx)",
8066 		       c->value == value, c->value, value);
8067 	else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMREAD])
8068 		report("VMX_INST_ERROR (%d) is as expected (%d)",
8069 		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
8070 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8071 	report_prefix_pop();
8072 
8073 	/* Permit shadowed VMWRITE. */
8074 	report_prefix_push("VMWRITE permission only");
8075 	set_bit(field, bitmap[ACCESS_VMREAD]);
8076 	clear_bit(field, bitmap[ACCESS_VMWRITE]);
8077 	if (good_shadow)
8078 		vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
8079 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
8080 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
8081 	report("shadowed for VMWRITE (in %ld cycles)", c->reason == VMX_VMCALL,
8082 		c->time);
8083 	report("ALU flags after VMWRITE (%lx) are as expected (%lx)",
	       c->flags == flags[ACCESS_VMWRITE],
	       c->flags, flags[ACCESS_VMWRITE]);
8086 	if (good_shadow) {
8087 		value = vmread_from_shadow(field);
8088 		report("shadow VMCS value (%lx) is as expected (%lx)",
8089 		       value == 0, value, 0ul);
8090 	} else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMWRITE]) {
8091 		report("VMX_INST_ERROR (%d) is as expected (%d)",
8092 		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
8093 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8094 	}
8095 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
8096 	report("not shadowed for VMREAD", c->reason == VMX_VMREAD);
8097 	report_prefix_pop();
8098 
8099 	/* Permit shadowed VMREAD and VMWRITE. */
8100 	report_prefix_push("VMREAD and VMWRITE permission");
8101 	clear_bit(field, bitmap[ACCESS_VMREAD]);
8102 	clear_bit(field, bitmap[ACCESS_VMWRITE]);
8103 	if (good_shadow)
8104 		vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
8105 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
8106 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
8107 	report("shadowed for VMWRITE (in %ld cycles)", c->reason == VMX_VMCALL,
8108 		c->time);
8109 	report("ALU flags after VMWRITE (%lx) are as expected (%lx)",
	       c->flags == flags[ACCESS_VMWRITE],
	       c->flags, flags[ACCESS_VMWRITE]);
8112 	if (good_shadow) {
8113 		value = vmread_from_shadow(field);
8114 		report("shadow VMCS value (%lx) is as expected (%lx)",
8115 		       value == 0, value, 0ul);
8116 	} else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMWRITE]) {
8117 		report("VMX_INST_ERROR (%d) is as expected (%d)",
8118 		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
8119 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8120 	}
8121 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
8122 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
8123 	report("shadowed for VMREAD (in %ld cycles)", c->reason == VMX_VMCALL,
8124 	       c->time);
8125 	report("ALU flags after VMREAD (%lx) are as expected (%lx)",
8126 	       c->flags == flags[ACCESS_VMREAD],
8127 	       c->flags, flags[ACCESS_VMREAD]);
8128 	if (good_shadow)
8129 		report("value read from shadow (%lx) is as expected (%lx)",
8130 		       c->value == 0, c->value, 0ul);
8131 	else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMREAD])
8132 		report("VMX_INST_ERROR (%d) is as expected (%d)",
8133 		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
8134 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
8135 	report_prefix_pop();
8136 
8137 out:
8138 	report_prefix_pop();
8139 }
8140 
8141 static void vmx_vmcs_shadow_test_body(u8 *bitmap[2])
8142 {
8143 	unsigned base;
8144 	unsigned index;
8145 	unsigned bit;
8146 	unsigned highest_index = rdmsr(MSR_IA32_VMX_VMCS_ENUM);
8147 
8148 	/* Run test on all possible valid VMCS fields */
8149 	for (base = 0;
8150 	     base < (1 << VMCS_FIELD_RESERVED_SHIFT);
8151 	     base += (1 << VMCS_FIELD_TYPE_SHIFT))
8152 		for (index = 0; index <= highest_index; index++)
8153 			vmcs_shadow_test_field(bitmap, base + index);
8154 
8155 	/*
8156 	 * Run tests on some invalid VMCS fields
	 * (i.e., fields with a reserved bit set).
8158 	 */
8159 	for (bit = VMCS_FIELD_RESERVED_SHIFT; bit < VMCS_FIELD_BIT_SIZE; bit++)
8160 		vmcs_shadow_test_field(bitmap, (1ull << bit));
8161 }
8162 
8163 static void vmx_vmcs_shadow_test(void)
8164 {
8165 	u8 *bitmap[2];
8166 	struct vmcs *shadow;
8167 
8168 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) {
8169 		printf("\t'Activate secondary controls' not supported.\n");
8170 		return;
8171 	}
8172 
8173 	if (!(ctrl_cpu_rev[1].clr & CPU_SHADOW_VMCS)) {
8174 		printf("\t'VMCS shadowing' not supported.\n");
8175 		return;
8176 	}
8177 
8178 	if (!(rdmsr(MSR_IA32_VMX_MISC) &
8179 	      MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) {
8180 		printf("\tVMWRITE can't modify VM-exit information fields.\n");
8181 		return;
8182 	}
8183 
8184 	test_set_guest(vmx_vmcs_shadow_test_guest);
8185 
8186 	bitmap[ACCESS_VMREAD] = alloc_page();
8187 	bitmap[ACCESS_VMWRITE] = alloc_page();
8188 
8189 	vmcs_write(VMREAD_BITMAP, virt_to_phys(bitmap[ACCESS_VMREAD]));
8190 	vmcs_write(VMWRITE_BITMAP, virt_to_phys(bitmap[ACCESS_VMWRITE]));
8191 
8192 	shadow = alloc_page();
8193 	shadow->hdr.revision_id = basic.revision;
8194 	shadow->hdr.shadow_vmcs = 1;
8195 	TEST_ASSERT(!vmcs_clear(shadow));
8196 
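	/*
	 * Let L2 use RDTSC for its cycle counts, and enable VMCS
	 * shadowing via the secondary processor-based controls.
	 */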
8197 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_RDTSC);
8198 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY);
8199 	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_SHADOW_VMCS);
8200 
8201 	vmcs_write(VMCS_LINK_PTR, virt_to_phys(shadow));
8202 	report_prefix_push("valid link pointer");
8203 	vmx_vmcs_shadow_test_body(bitmap);
8204 	report_prefix_pop();
8205 
8206 	vmcs_write(VMCS_LINK_PTR, -1ull);
8207 	report_prefix_push("invalid link pointer");
8208 	vmx_vmcs_shadow_test_body(bitmap);
8209 	report_prefix_pop();
8210 
8211 	l1_l2_common.op = ACCESS_NONE;
8212 	enter_guest();
8213 }
8214 
8217 static int invalid_msr_init(struct vmcs *vmcs)
8218 {
8219 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
8220 		printf("\tPreemption timer is not supported\n");
8221 		return VMX_TEST_EXIT;
8222 	}
8223 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
8224 	preempt_val = 10000000;
8225 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
8226 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
8227 
8228 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
8229 		printf("\tSave preemption value is not supported\n");
8230 
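	/*
	 * Point the VM-entry MSR-load list at an arbitrary (and
	 * presumably unbacked) physical address, so that VM entry fails
	 * during MSR loading.
	 */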
8231 	vmcs_write(ENT_MSR_LD_CNT, 1);
8232 	vmcs_write(ENTER_MSR_LD_ADDR, (u64)0x13370000);
8233 
8234 	return VMX_TEST_START;
8235 }
8236 
8238 static void invalid_msr_main(void)
8239 {
8240 	report("Invalid MSR load", 0);
8241 }
8242 
8243 static int invalid_msr_exit_handler(void)
8244 {
8245 	report("Invalid MSR load", 0);
8246 	print_vmexit_info();
8247 	return VMX_TEST_EXIT;
8248 }
8249 
8250 static int invalid_msr_entry_failure(struct vmentry_failure *failure)
8251 {
8252 	ulong reason;
8253 
8254 	reason = vmcs_read(EXI_REASON);
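	/* Bit 31 of the exit reason indicates a VM-entry failure. */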
8255 	report("Invalid MSR load", reason == (0x80000000u | VMX_FAIL_MSR));
8256 	return VMX_TEST_VMEXIT;
8257 }
8258 
8260 #define TEST(name) { #name, .v2 = name }
8261 
8262 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
8263 struct vmx_test vmx_tests[] = {
8264 	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
8265 	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
8266 	{ "preemption timer", preemption_timer_init, preemption_timer_main,
8267 		preemption_timer_exit_handler, NULL, {0} },
8268 	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
8269 		test_ctrl_pat_exit_handler, NULL, {0} },
8270 	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
8271 		test_ctrl_efer_exit_handler, NULL, {0} },
8272 	{ "CR shadowing", NULL, cr_shadowing_main,
8273 		cr_shadowing_exit_handler, NULL, {0} },
8274 	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
8275 		NULL, {0} },
8276 	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
8277 		insn_intercept_exit_handler, NULL, {0} },
8278 	{ "EPT A/D disabled", ept_init, ept_main, ept_exit_handler, NULL, {0} },
8279 	{ "EPT A/D enabled", eptad_init, eptad_main, eptad_exit_handler, NULL, {0} },
8280 	{ "PML", pml_init, pml_main, pml_exit_handler, NULL, {0} },
8281 	{ "VPID", vpid_init, vpid_main, vpid_exit_handler, NULL, {0} },
8282 	{ "interrupt", interrupt_init, interrupt_main,
8283 		interrupt_exit_handler, NULL, {0} },
8284 	{ "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler,
8285 		NULL, {0} },
8286 	{ "MSR switch", msr_switch_init, msr_switch_main,
8287 		msr_switch_exit_handler, NULL, {0}, msr_switch_entry_failure },
8288 	{ "vmmcall", vmmcall_init, vmmcall_main, vmmcall_exit_handler, NULL, {0} },
8289 	{ "disable RDTSCP", disable_rdtscp_init, disable_rdtscp_main,
8290 		disable_rdtscp_exit_handler, NULL, {0} },
8291 	{ "int3", int3_init, int3_guest_main, int3_exit_handler, NULL, {0} },
8292 	{ "into", into_init, into_guest_main, into_exit_handler, NULL, {0} },
8293 	{ "exit_monitor_from_l2_test", NULL, exit_monitor_from_l2_main,
8294 		exit_monitor_from_l2_handler, NULL, {0} },
8295 	{ "invalid_msr", invalid_msr_init, invalid_msr_main,
8296 		invalid_msr_exit_handler, NULL, {0}, invalid_msr_entry_failure},
8297 	/* Basic V2 tests. */
8298 	TEST(v2_null_test),
8299 	TEST(v2_multiple_entries_test),
8300 	TEST(fixture_test_case1),
8301 	TEST(fixture_test_case2),
8302 	/* Opcode tests. */
8303 	TEST(invvpid_test_v2),
8304 	/* VM-entry tests */
8305 	TEST(vmx_controls_test),
8306 	TEST(vmx_host_state_area_test),
8307 	TEST(vmx_guest_state_area_test),
8308 	TEST(vmentry_movss_shadow_test),
8309 	/* APICv tests */
8310 	TEST(vmx_eoi_bitmap_ioapic_scan_test),
8311 	TEST(vmx_hlt_with_rvi_test),
8312 	TEST(apic_reg_virt_test),
8313 	TEST(virt_x2apic_mode_test),
8314 	/* APIC pass-through tests */
8315 	TEST(vmx_apic_passthrough_test),
8316 	TEST(vmx_apic_passthrough_thread_test),
8317 	/* VMCS Shadowing tests */
8318 	TEST(vmx_vmcs_shadow_test),
8319 	/* Regression tests */
8320 	TEST(vmx_cr_load_test),
8321 	TEST(vmx_nm_test),
8322 	TEST(vmx_db_test),
8323 	TEST(vmx_nmi_window_test),
8324 	TEST(vmx_intr_window_test),
8325 	TEST(vmx_pending_event_test),
8326 	TEST(vmx_pending_event_hlt_test),
8327 	TEST(vmx_store_tsc_test),
8328 	/* EPT access tests. */
8329 	TEST(ept_access_test_not_present),
8330 	TEST(ept_access_test_read_only),
8331 	TEST(ept_access_test_write_only),
8332 	TEST(ept_access_test_read_write),
8333 	TEST(ept_access_test_execute_only),
8334 	TEST(ept_access_test_read_execute),
8335 	TEST(ept_access_test_write_execute),
8336 	TEST(ept_access_test_read_write_execute),
8337 	TEST(ept_access_test_reserved_bits),
8338 	TEST(ept_access_test_ignored_bits),
8339 	TEST(ept_access_test_paddr_not_present_ad_disabled),
8340 	TEST(ept_access_test_paddr_not_present_ad_enabled),
8341 	TEST(ept_access_test_paddr_read_only_ad_disabled),
8342 	TEST(ept_access_test_paddr_read_only_ad_enabled),
8343 	TEST(ept_access_test_paddr_read_write),
8344 	TEST(ept_access_test_paddr_read_write_execute),
8345 	TEST(ept_access_test_paddr_read_execute_ad_disabled),
8346 	TEST(ept_access_test_paddr_read_execute_ad_enabled),
8347 	TEST(ept_access_test_paddr_not_present_page_fault),
8348 	TEST(ept_access_test_force_2m_page),
8349 	{ NULL, NULL, NULL, NULL, NULL, {0} },
8350 };
8351