xref: /kvm-unit-tests/x86/vmx_tests.c (revision dc54cb48438e2b5f66f8bbefe688d56fcc1268ff)
1 /*
2  * All test cases of nested virtualization should be in this file
3  *
4  * Author : Arthur Chunqi Li <yzt356@gmail.com>
5  */
6 
7 #include <asm/debugreg.h>
8 
9 #include "vmx.h"
10 #include "msr.h"
11 #include "processor.h"
12 #include "vm.h"
13 #include "pci.h"
14 #include "fwcfg.h"
15 #include "isr.h"
16 #include "desc.h"
17 #include "apic.h"
18 #include "types.h"
19 #include "vmalloc.h"
20 #include "alloc_page.h"
21 #include "smp.h"
22 #include "delay.h"
23 
24 #define NONCANONICAL            0xaaaaaaaaaaaaaaaaull
25 
26 #define VPID_CAP_INVVPID_TYPES_SHIFT 40
27 
28 u64 ia32_pat;
29 u64 ia32_efer;
30 void *io_bitmap_a, *io_bitmap_b;
31 u16 ioport;
32 
33 unsigned long *pml4;
34 u64 eptp;
35 void *data_page1, *data_page2;
36 
37 phys_addr_t pci_physaddr;
38 
39 void *pml_log;
40 #define PML_INDEX 512
41 
42 static inline unsigned ffs(unsigned x)
43 {
44 	int pos = -1;
45 
46 	__asm__ __volatile__("bsf %1, %%eax; cmovnz %%eax, %0"
47 			     : "+r"(pos) : "rm"(x) : "eax");
48 	return pos + 1;
49 }
50 
51 static inline void vmcall(void)
52 {
53 	asm volatile("vmcall");
54 }
55 
56 static void basic_guest_main(void)
57 {
58 	report(1, "Basic VMX test");
59 }
60 
61 static int basic_exit_handler(void)
62 {
63 	report(0, "Basic VMX test");
64 	print_vmexit_info();
65 	return VMX_TEST_EXIT;
66 }
67 
68 static void vmenter_main(void)
69 {
70 	u64 rax;
71 	u64 rsp, resume_rsp;
72 
73 	report(1, "test vmlaunch");
74 
75 	asm volatile(
76 		"mov %%rsp, %0\n\t"
77 		"mov %3, %%rax\n\t"
78 		"vmcall\n\t"
79 		"mov %%rax, %1\n\t"
80 		"mov %%rsp, %2\n\t"
81 		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
82 		: "g"(0xABCD));
83 	report((rax == 0xFFFF) && (rsp == resume_rsp), "test vmresume");
84 }
85 
86 static int vmenter_exit_handler(void)
87 {
88 	u64 guest_rip;
89 	ulong reason;
90 
91 	guest_rip = vmcs_read(GUEST_RIP);
92 	reason = vmcs_read(EXI_REASON) & 0xff;
93 	switch (reason) {
94 	case VMX_VMCALL:
95 		if (regs.rax != 0xABCD) {
96 			report(0, "test vmresume");
97 			return VMX_TEST_VMEXIT;
98 		}
99 		regs.rax = 0xFFFF;
100 		vmcs_write(GUEST_RIP, guest_rip + 3);
101 		return VMX_TEST_RESUME;
102 	default:
103 		report(0, "test vmresume");
104 		print_vmexit_info();
105 	}
106 	return VMX_TEST_VMEXIT;
107 }
108 
109 u32 preempt_scale;
110 volatile unsigned long long tsc_val;
111 volatile u32 preempt_val;
112 u64 saved_rip;
113 
114 static int preemption_timer_init(struct vmcs *vmcs)
115 {
116 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
117 		printf("\tPreemption timer is not supported\n");
118 		return VMX_TEST_EXIT;
119 	}
120 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
121 	preempt_val = 10000000;
122 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
123 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
124 
125 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
126 		printf("\tSave preemption value is not supported\n");
127 
128 	return VMX_TEST_START;
129 }
130 
131 static void preemption_timer_main(void)
132 {
133 	tsc_val = rdtsc();
134 	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
135 		vmx_set_test_stage(0);
136 		vmcall();
137 		if (vmx_get_test_stage() == 1)
138 			vmcall();
139 	}
140 	vmx_set_test_stage(1);
141 	while (vmx_get_test_stage() == 1) {
142 		if (((rdtsc() - tsc_val) >> preempt_scale)
143 				> 10 * preempt_val) {
144 			vmx_set_test_stage(2);
145 			vmcall();
146 		}
147 	}
148 	tsc_val = rdtsc();
149 	asm volatile ("hlt");
150 	vmcall();
151 	vmx_set_test_stage(5);
152 	vmcall();
153 }
154 
155 static int preemption_timer_exit_handler(void)
156 {
157 	bool guest_halted;
158 	u64 guest_rip;
159 	ulong reason;
160 	u32 insn_len;
161 	u32 ctrl_exit;
162 
163 	guest_rip = vmcs_read(GUEST_RIP);
164 	reason = vmcs_read(EXI_REASON) & 0xff;
165 	insn_len = vmcs_read(EXI_INST_LEN);
166 	switch (reason) {
167 	case VMX_PREEMPT:
168 		switch (vmx_get_test_stage()) {
169 		case 1:
170 		case 2:
171 			report(((rdtsc() - tsc_val) >> preempt_scale) >= preempt_val,
172 			       "busy-wait for preemption timer");
173 			vmx_set_test_stage(3);
174 			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
175 			return VMX_TEST_RESUME;
176 		case 3:
177 			guest_halted =
178 				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
179 			report(((rdtsc() - tsc_val) >> preempt_scale) >= preempt_val
180 			        && guest_halted,
181 			       "preemption timer during hlt");
182 			vmx_set_test_stage(4);
183 			vmcs_write(PIN_CONTROLS,
184 				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
185 			vmcs_write(EXI_CONTROLS,
186 				   vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_PREEMPT);
187 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
188 			return VMX_TEST_RESUME;
189 		case 4:
190 			report(saved_rip == guest_rip,
191 			       "preemption timer with 0 value");
192 			break;
193 		default:
194 			report(false, "Invalid stage.");
195 			print_vmexit_info();
196 			break;
197 		}
198 		break;
199 	case VMX_VMCALL:
200 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
201 		switch (vmx_get_test_stage()) {
202 		case 0:
203 			report(vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val,
204 			       "Keep preemption value");
205 			vmx_set_test_stage(1);
206 			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
207 			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
208 				EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
209 			vmcs_write(EXI_CONTROLS, ctrl_exit);
210 			return VMX_TEST_RESUME;
211 		case 1:
212 			report(vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val,
213 			       "Save preemption value");
214 			return VMX_TEST_RESUME;
215 		case 2:
216 			report(0, "busy-wait for preemption timer");
217 			vmx_set_test_stage(3);
218 			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
219 			return VMX_TEST_RESUME;
220 		case 3:
221 			report(0, "preemption timer during hlt");
222 			vmx_set_test_stage(4);
223 			/* fall through */
224 		case 4:
225 			vmcs_write(PIN_CONTROLS,
226 				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
227 			vmcs_write(PREEMPT_TIMER_VALUE, 0);
228 			saved_rip = guest_rip + insn_len;
229 			return VMX_TEST_RESUME;
230 		case 5:
231 			report(0,
232 			       "preemption timer with 0 value (vmcall stage 5)");
233 			break;
234 		default:
235 			// Should not reach here
236 			report(false, "unexpected stage, %d",
237 			       vmx_get_test_stage());
238 			print_vmexit_info();
239 			return VMX_TEST_VMEXIT;
240 		}
241 		break;
242 	default:
243 		report(false, "Unknown exit reason, %ld", reason);
244 		print_vmexit_info();
245 	}
246 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
247 	return VMX_TEST_VMEXIT;
248 }
249 
250 static void msr_bmp_init(void)
251 {
252 	void *msr_bitmap;
253 	u32 ctrl_cpu0;
254 
255 	msr_bitmap = alloc_page();
256 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
257 	ctrl_cpu0 |= CPU_MSR_BITMAP;
258 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
259 	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
260 }
261 
262 static void *get_msr_bitmap(void)
263 {
264 	void *msr_bitmap;
265 
266 	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_MSR_BITMAP) {
267 		msr_bitmap = (void *)vmcs_read(MSR_BITMAP);
268 	} else {
269 		msr_bitmap = alloc_page();
270 		memset(msr_bitmap, 0xff, PAGE_SIZE);
271 		vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
272 		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_MSR_BITMAP);
273 	}
274 
275 	return msr_bitmap;
276 }
277 
278 static void disable_intercept_for_x2apic_msrs(void)
279 {
280 	unsigned long *msr_bitmap = (unsigned long *)get_msr_bitmap();
281 	u32 msr;
282 
283 	for (msr = APIC_BASE_MSR;
284 		 msr < (APIC_BASE_MSR+0xff);
285 		 msr += BITS_PER_LONG) {
286 		unsigned int word = msr / BITS_PER_LONG;
287 
288 		msr_bitmap[word] = 0;
289 		msr_bitmap[word + (0x800 / sizeof(long))] = 0;
290 	}
291 }
292 
293 static int test_ctrl_pat_init(struct vmcs *vmcs)
294 {
295 	u64 ctrl_ent;
296 	u64 ctrl_exi;
297 
298 	msr_bmp_init();
299 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) &&
300 	    !(ctrl_exit_rev.clr & EXI_LOAD_PAT) &&
301 	    !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
302 		printf("\tSave/load PAT is not supported\n");
303 		return 1;
304 	}
305 
306 	ctrl_ent = vmcs_read(ENT_CONTROLS);
307 	ctrl_exi = vmcs_read(EXI_CONTROLS);
308 	ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
309 	ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT);
310 	vmcs_write(ENT_CONTROLS, ctrl_ent);
311 	vmcs_write(EXI_CONTROLS, ctrl_exi);
312 	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
313 	vmcs_write(GUEST_PAT, 0x0);
314 	vmcs_write(HOST_PAT, ia32_pat);
315 	return VMX_TEST_START;
316 }
317 
318 static void test_ctrl_pat_main(void)
319 {
320 	u64 guest_ia32_pat;
321 
322 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
323 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
324 		printf("\tENT_LOAD_PAT is not supported.\n");
325 	else {
326 		if (guest_ia32_pat != 0) {
327 			report(0, "Entry load PAT");
328 			return;
329 		}
330 	}
331 	wrmsr(MSR_IA32_CR_PAT, 0x6);
332 	vmcall();
333 	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
334 	if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
335 		report(guest_ia32_pat == ia32_pat, "Entry load PAT");
336 }
337 
338 static int test_ctrl_pat_exit_handler(void)
339 {
340 	u64 guest_rip;
341 	ulong reason;
342 	u64 guest_pat;
343 
344 	guest_rip = vmcs_read(GUEST_RIP);
345 	reason = vmcs_read(EXI_REASON) & 0xff;
346 	switch (reason) {
347 	case VMX_VMCALL:
348 		guest_pat = vmcs_read(GUEST_PAT);
349 		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
350 			printf("\tEXI_SAVE_PAT is not supported\n");
351 			vmcs_write(GUEST_PAT, 0x6);
352 		} else {
353 			report(guest_pat == 0x6, "Exit save PAT");
354 		}
355 		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
356 			printf("\tEXI_LOAD_PAT is not supported\n");
357 		else
358 			report(rdmsr(MSR_IA32_CR_PAT) == ia32_pat,
359 			       "Exit load PAT");
360 		vmcs_write(GUEST_PAT, ia32_pat);
361 		vmcs_write(GUEST_RIP, guest_rip + 3);
362 		return VMX_TEST_RESUME;
363 	default:
364 		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
365 		break;
366 	}
367 	return VMX_TEST_VMEXIT;
368 }
369 
370 static int test_ctrl_efer_init(struct vmcs *vmcs)
371 {
372 	u64 ctrl_ent;
373 	u64 ctrl_exi;
374 
375 	msr_bmp_init();
376 	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
377 	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
378 	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
379 	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
380 	ia32_efer = rdmsr(MSR_EFER);
381 	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
382 	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
383 	return VMX_TEST_START;
384 }
385 
386 static void test_ctrl_efer_main(void)
387 {
388 	u64 guest_ia32_efer;
389 
390 	guest_ia32_efer = rdmsr(MSR_EFER);
391 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
392 		printf("\tENT_LOAD_EFER is not supported.\n");
393 	else {
394 		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
395 			report(0, "Entry load EFER");
396 			return;
397 		}
398 	}
399 	wrmsr(MSR_EFER, ia32_efer);
400 	vmcall();
401 	guest_ia32_efer = rdmsr(MSR_EFER);
402 	if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
403 		report(guest_ia32_efer == ia32_efer, "Entry load EFER");
404 }
405 
406 static int test_ctrl_efer_exit_handler(void)
407 {
408 	u64 guest_rip;
409 	ulong reason;
410 	u64 guest_efer;
411 
412 	guest_rip = vmcs_read(GUEST_RIP);
413 	reason = vmcs_read(EXI_REASON) & 0xff;
414 	switch (reason) {
415 	case VMX_VMCALL:
416 		guest_efer = vmcs_read(GUEST_EFER);
417 		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
418 			printf("\tEXI_SAVE_EFER is not supported\n");
419 			vmcs_write(GUEST_EFER, ia32_efer);
420 		} else {
421 			report(guest_efer == ia32_efer, "Exit save EFER");
422 		}
423 		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
424 			printf("\tEXI_LOAD_EFER is not supported\n");
425 			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
426 		} else {
427 			report(rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX),
428 			       "Exit load EFER");
429 		}
430 		vmcs_write(GUEST_PAT, ia32_efer);
431 		vmcs_write(GUEST_RIP, guest_rip + 3);
432 		return VMX_TEST_RESUME;
433 	default:
434 		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
435 		break;
436 	}
437 	return VMX_TEST_VMEXIT;
438 }
439 
440 u32 guest_cr0, guest_cr4;
441 
442 static void cr_shadowing_main(void)
443 {
444 	u32 cr0, cr4, tmp;
445 
446 	// Test read through
447 	vmx_set_test_stage(0);
448 	guest_cr0 = read_cr0();
449 	if (vmx_get_test_stage() == 1)
450 		report(0, "Read through CR0");
451 	else
452 		vmcall();
453 	vmx_set_test_stage(1);
454 	guest_cr4 = read_cr4();
455 	if (vmx_get_test_stage() == 2)
456 		report(0, "Read through CR4");
457 	else
458 		vmcall();
459 	// Test write through
460 	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
461 	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
462 	vmx_set_test_stage(2);
463 	write_cr0(guest_cr0);
464 	if (vmx_get_test_stage() == 3)
465 		report(0, "Write throuth CR0");
466 	else
467 		vmcall();
468 	vmx_set_test_stage(3);
469 	write_cr4(guest_cr4);
470 	if (vmx_get_test_stage() == 4)
471 		report(0, "Write through CR4");
472 	else
473 		vmcall();
474 	// Test read shadow
475 	vmx_set_test_stage(4);
476 	vmcall();
477 	cr0 = read_cr0();
478 	if (vmx_get_test_stage() != 5)
479 		report(cr0 == guest_cr0, "Read shadowing CR0");
480 	vmx_set_test_stage(5);
481 	cr4 = read_cr4();
482 	if (vmx_get_test_stage() != 6)
483 		report(cr4 == guest_cr4, "Read shadowing CR4");
484 	// Test write shadow (same value with shadow)
485 	vmx_set_test_stage(6);
486 	write_cr0(guest_cr0);
487 	if (vmx_get_test_stage() == 7)
488 		report(0, "Write shadowing CR0 (same value with shadow)");
489 	else
490 		vmcall();
491 	vmx_set_test_stage(7);
492 	write_cr4(guest_cr4);
493 	if (vmx_get_test_stage() == 8)
494 		report(0, "Write shadowing CR4 (same value with shadow)");
495 	else
496 		vmcall();
497 	// Test write shadow (different value)
498 	vmx_set_test_stage(8);
499 	tmp = guest_cr0 ^ X86_CR0_TS;
500 	asm volatile("mov %0, %%rsi\n\t"
501 		"mov %%rsi, %%cr0\n\t"
502 		::"m"(tmp)
503 		:"rsi", "memory", "cc");
504 	report(vmx_get_test_stage() == 9,
505 	       "Write shadowing different X86_CR0_TS");
506 	vmx_set_test_stage(9);
507 	tmp = guest_cr0 ^ X86_CR0_MP;
508 	asm volatile("mov %0, %%rsi\n\t"
509 		"mov %%rsi, %%cr0\n\t"
510 		::"m"(tmp)
511 		:"rsi", "memory", "cc");
512 	report(vmx_get_test_stage() == 10,
513 	       "Write shadowing different X86_CR0_MP");
514 	vmx_set_test_stage(10);
515 	tmp = guest_cr4 ^ X86_CR4_TSD;
516 	asm volatile("mov %0, %%rsi\n\t"
517 		"mov %%rsi, %%cr4\n\t"
518 		::"m"(tmp)
519 		:"rsi", "memory", "cc");
520 	report(vmx_get_test_stage() == 11,
521 	       "Write shadowing different X86_CR4_TSD");
522 	vmx_set_test_stage(11);
523 	tmp = guest_cr4 ^ X86_CR4_DE;
524 	asm volatile("mov %0, %%rsi\n\t"
525 		"mov %%rsi, %%cr4\n\t"
526 		::"m"(tmp)
527 		:"rsi", "memory", "cc");
528 	report(vmx_get_test_stage() == 12,
529 	       "Write shadowing different X86_CR4_DE");
530 }
531 
532 static int cr_shadowing_exit_handler(void)
533 {
534 	u64 guest_rip;
535 	ulong reason;
536 	u32 insn_len;
537 	u32 exit_qual;
538 
539 	guest_rip = vmcs_read(GUEST_RIP);
540 	reason = vmcs_read(EXI_REASON) & 0xff;
541 	insn_len = vmcs_read(EXI_INST_LEN);
542 	exit_qual = vmcs_read(EXI_QUALIFICATION);
543 	switch (reason) {
544 	case VMX_VMCALL:
545 		switch (vmx_get_test_stage()) {
546 		case 0:
547 			report(guest_cr0 == vmcs_read(GUEST_CR0),
548 			       "Read through CR0");
549 			break;
550 		case 1:
551 			report(guest_cr4 == vmcs_read(GUEST_CR4),
552 			       "Read through CR4");
553 			break;
554 		case 2:
555 			report(guest_cr0 == vmcs_read(GUEST_CR0),
556 			       "Write through CR0");
557 			break;
558 		case 3:
559 			report(guest_cr4 == vmcs_read(GUEST_CR4),
560 			       "Write through CR4");
561 			break;
562 		case 4:
563 			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
564 			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
565 			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
566 			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
567 			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
568 			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
569 			break;
570 		case 6:
571 			report(guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)),
572 			       "Write shadowing CR0 (same value)");
573 			break;
574 		case 7:
575 			report(guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)),
576 			       "Write shadowing CR4 (same value)");
577 			break;
578 		default:
579 			// Should not reach here
580 			report(false, "unexpected stage, %d",
581 			       vmx_get_test_stage());
582 			print_vmexit_info();
583 			return VMX_TEST_VMEXIT;
584 		}
585 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
586 		return VMX_TEST_RESUME;
587 	case VMX_CR:
588 		switch (vmx_get_test_stage()) {
589 		case 4:
590 			report(0, "Read shadowing CR0");
591 			vmx_inc_test_stage();
592 			break;
593 		case 5:
594 			report(0, "Read shadowing CR4");
595 			vmx_inc_test_stage();
596 			break;
597 		case 6:
598 			report(0, "Write shadowing CR0 (same value)");
599 			vmx_inc_test_stage();
600 			break;
601 		case 7:
602 			report(0, "Write shadowing CR4 (same value)");
603 			vmx_inc_test_stage();
604 			break;
605 		case 8:
606 		case 9:
607 			// 0x600 encodes "mov %esi, %cr0"
608 			if (exit_qual == 0x600)
609 				vmx_inc_test_stage();
610 			break;
611 		case 10:
612 		case 11:
613 			// 0x604 encodes "mov %esi, %cr4"
614 			if (exit_qual == 0x604)
615 				vmx_inc_test_stage();
616 			break;
617 		default:
618 			// Should not reach here
619 			report(false, "unexpected stage, %d",
620 			       vmx_get_test_stage());
621 			print_vmexit_info();
622 			return VMX_TEST_VMEXIT;
623 		}
624 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
625 		return VMX_TEST_RESUME;
626 	default:
627 		report(false, "Unknown exit reason, %ld", reason);
628 		print_vmexit_info();
629 	}
630 	return VMX_TEST_VMEXIT;
631 }
632 
633 static int iobmp_init(struct vmcs *vmcs)
634 {
635 	u32 ctrl_cpu0;
636 
637 	io_bitmap_a = alloc_page();
638 	io_bitmap_b = alloc_page();
639 	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
640 	ctrl_cpu0 |= CPU_IO_BITMAP;
641 	ctrl_cpu0 &= (~CPU_IO);
642 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
643 	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
644 	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
645 	return VMX_TEST_START;
646 }
647 
648 static void iobmp_main(void)
649 {
650 	// stage 0, test IO pass
651 	vmx_set_test_stage(0);
652 	inb(0x5000);
653 	outb(0x0, 0x5000);
654 	report(vmx_get_test_stage() == 0, "I/O bitmap - I/O pass");
655 	// test IO width, in/out
656 	((u8 *)io_bitmap_a)[0] = 0xFF;
657 	vmx_set_test_stage(2);
658 	inb(0x0);
659 	report(vmx_get_test_stage() == 3, "I/O bitmap - trap in");
660 	vmx_set_test_stage(3);
661 	outw(0x0, 0x0);
662 	report(vmx_get_test_stage() == 4, "I/O bitmap - trap out");
663 	vmx_set_test_stage(4);
664 	inl(0x0);
665 	report(vmx_get_test_stage() == 5, "I/O bitmap - I/O width, long");
666 	// test low/high IO port
667 	vmx_set_test_stage(5);
668 	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
669 	inb(0x5000);
670 	report(vmx_get_test_stage() == 6, "I/O bitmap - I/O port, low part");
671 	vmx_set_test_stage(6);
672 	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
673 	inb(0x9000);
674 	report(vmx_get_test_stage() == 7, "I/O bitmap - I/O port, high part");
675 	// test partial pass
676 	vmx_set_test_stage(7);
677 	inl(0x4FFF);
678 	report(vmx_get_test_stage() == 8, "I/O bitmap - partial pass");
679 	// test overrun
680 	vmx_set_test_stage(8);
681 	memset(io_bitmap_a, 0x0, PAGE_SIZE);
682 	memset(io_bitmap_b, 0x0, PAGE_SIZE);
683 	inl(0xFFFF);
684 	report(vmx_get_test_stage() == 9, "I/O bitmap - overrun");
685 	vmx_set_test_stage(9);
686 	vmcall();
687 	outb(0x0, 0x0);
688 	report(vmx_get_test_stage() == 9,
689 	       "I/O bitmap - ignore unconditional exiting");
690 	vmx_set_test_stage(10);
691 	vmcall();
692 	outb(0x0, 0x0);
693 	report(vmx_get_test_stage() == 11,
694 	       "I/O bitmap - unconditional exiting");
695 }
696 
697 static int iobmp_exit_handler(void)
698 {
699 	u64 guest_rip;
700 	ulong reason, exit_qual;
701 	u32 insn_len, ctrl_cpu0;
702 
703 	guest_rip = vmcs_read(GUEST_RIP);
704 	reason = vmcs_read(EXI_REASON) & 0xff;
705 	exit_qual = vmcs_read(EXI_QUALIFICATION);
706 	insn_len = vmcs_read(EXI_INST_LEN);
707 	switch (reason) {
708 	case VMX_IO:
709 		switch (vmx_get_test_stage()) {
710 		case 0:
711 		case 1:
712 			vmx_inc_test_stage();
713 			break;
714 		case 2:
715 			report((exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_BYTE,
716 			       "I/O bitmap - I/O width, byte");
717 			report(exit_qual & VMX_IO_IN,
718 			       "I/O bitmap - I/O direction, in");
719 			vmx_inc_test_stage();
720 			break;
721 		case 3:
722 			report((exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_WORD,
723 			       "I/O bitmap - I/O width, word");
724 			report(!(exit_qual & VMX_IO_IN),
725 			       "I/O bitmap - I/O direction, out");
726 			vmx_inc_test_stage();
727 			break;
728 		case 4:
729 			report((exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_LONG,
730 			       "I/O bitmap - I/O width, long");
731 			vmx_inc_test_stage();
732 			break;
733 		case 5:
734 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
735 				vmx_inc_test_stage();
736 			break;
737 		case 6:
738 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
739 				vmx_inc_test_stage();
740 			break;
741 		case 7:
742 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
743 				vmx_inc_test_stage();
744 			break;
745 		case 8:
746 			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
747 				vmx_inc_test_stage();
748 			break;
749 		case 9:
750 		case 10:
751 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
752 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
753 			vmx_inc_test_stage();
754 			break;
755 		default:
756 			// Should not reach here
757 			report(false, "unexpected stage, %d",
758 			       vmx_get_test_stage());
759 			print_vmexit_info();
760 			return VMX_TEST_VMEXIT;
761 		}
762 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
763 		return VMX_TEST_RESUME;
764 	case VMX_VMCALL:
765 		switch (vmx_get_test_stage()) {
766 		case 9:
767 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
768 			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
769 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
770 			break;
771 		case 10:
772 			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
773 			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
774 			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
775 			break;
776 		default:
777 			// Should not reach here
778 			report(false, "unexpected stage, %d",
779 			       vmx_get_test_stage());
780 			print_vmexit_info();
781 			return VMX_TEST_VMEXIT;
782 		}
783 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
784 		return VMX_TEST_RESUME;
785 	default:
786 		printf("guest_rip = %#lx\n", guest_rip);
787 		printf("\tERROR : Undefined exit reason, reason = %ld.\n", reason);
788 		break;
789 	}
790 	return VMX_TEST_VMEXIT;
791 }
792 
793 #define INSN_CPU0		0
794 #define INSN_CPU1		1
795 #define INSN_ALWAYS_TRAP	2
796 
797 #define FIELD_EXIT_QUAL		(1 << 0)
798 #define FIELD_INSN_INFO		(1 << 1)
799 
800 asm(
801 	"insn_hlt: hlt;ret\n\t"
802 	"insn_invlpg: invlpg 0x12345678;ret\n\t"
803 	"insn_mwait: xor %eax, %eax; xor %ecx, %ecx; mwait;ret\n\t"
804 	"insn_rdpmc: xor %ecx, %ecx; rdpmc;ret\n\t"
805 	"insn_rdtsc: rdtsc;ret\n\t"
806 	"insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t"
807 	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
808 #ifdef __x86_64__
809 	"insn_cr8_load: xor %eax, %eax; mov %rax,%cr8;ret\n\t"
810 	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
811 #endif
812 	"insn_monitor: xor %eax, %eax; xor %ecx, %ecx; xor %edx, %edx; monitor;ret\n\t"
813 	"insn_pause: pause;ret\n\t"
814 	"insn_wbinvd: wbinvd;ret\n\t"
815 	"insn_cpuid: mov $10, %eax; cpuid;ret\n\t"
816 	"insn_invd: invd;ret\n\t"
817 	"insn_sgdt: sgdt gdt64_desc;ret\n\t"
818 	"insn_lgdt: lgdt gdt64_desc;ret\n\t"
819 	"insn_sidt: sidt idt_descr;ret\n\t"
820 	"insn_lidt: lidt idt_descr;ret\n\t"
821 	"insn_sldt: sldt %ax;ret\n\t"
822 	"insn_lldt: xor %eax, %eax; lldt %ax;ret\n\t"
823 	"insn_str: str %ax;ret\n\t"
824 	"insn_rdrand: rdrand %rax;ret\n\t"
825 	"insn_rdseed: rdseed %rax;ret\n\t"
826 );
827 extern void insn_hlt(void);
828 extern void insn_invlpg(void);
829 extern void insn_mwait(void);
830 extern void insn_rdpmc(void);
831 extern void insn_rdtsc(void);
832 extern void insn_cr3_load(void);
833 extern void insn_cr3_store(void);
834 #ifdef __x86_64__
835 extern void insn_cr8_load(void);
836 extern void insn_cr8_store(void);
837 #endif
838 extern void insn_monitor(void);
839 extern void insn_pause(void);
840 extern void insn_wbinvd(void);
841 extern void insn_sgdt(void);
842 extern void insn_lgdt(void);
843 extern void insn_sidt(void);
844 extern void insn_lidt(void);
845 extern void insn_sldt(void);
846 extern void insn_lldt(void);
847 extern void insn_str(void);
848 extern void insn_cpuid(void);
849 extern void insn_invd(void);
850 extern void insn_rdrand(void);
851 extern void insn_rdseed(void);
852 
853 u32 cur_insn;
854 u64 cr3;
855 
856 #define X86_FEATURE_MONITOR	(1 << 3)
857 
858 typedef bool (*supported_fn)(void);
859 
860 static bool monitor_supported(void)
861 {
862 	return this_cpu_has(X86_FEATURE_MWAIT);
863 }
864 
865 struct insn_table {
866 	const char *name;
867 	u32 flag;
868 	void (*insn_func)(void);
869 	u32 type;
870 	u32 reason;
871 	ulong exit_qual;
872 	u32 insn_info;
873 	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
874 	// which field need to be tested, reason is always tested
875 	u32 test_field;
876 	const supported_fn supported_fn;
877 	u8 disabled;
878 };
879 
880 /*
881  * Add more test cases of instruction intercept here. Elements in this
882  * table is:
883  *	name/control flag/insn function/type/exit reason/exit qulification/
884  *	instruction info/field to test
885  * The last field defines which fields (exit_qual and insn_info) need to be
886  * tested in exit handler. If set to 0, only "reason" is checked.
887  */
888 static struct insn_table insn_table[] = {
889 	// Flags for Primary Processor-Based VM-Execution Controls
890 	{"HLT",  CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
891 	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
892 		0x12345678, 0, FIELD_EXIT_QUAL},
893 	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0, &monitor_supported},
894 	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
895 	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
896 	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
897 		FIELD_EXIT_QUAL},
898 	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
899 		FIELD_EXIT_QUAL},
900 #ifdef __x86_64__
901 	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
902 		FIELD_EXIT_QUAL},
903 	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
904 		FIELD_EXIT_QUAL},
905 #endif
906 	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0, &monitor_supported},
907 	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
908 	// Flags for Secondary Processor-Based VM-Execution Controls
909 	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
910 	{"DESC_TABLE (SGDT)", CPU_DESC_TABLE, insn_sgdt, INSN_CPU1, 46, 0, 0, 0},
911 	{"DESC_TABLE (LGDT)", CPU_DESC_TABLE, insn_lgdt, INSN_CPU1, 46, 0, 0, 0},
912 	{"DESC_TABLE (SIDT)", CPU_DESC_TABLE, insn_sidt, INSN_CPU1, 46, 0, 0, 0},
913 	{"DESC_TABLE (LIDT)", CPU_DESC_TABLE, insn_lidt, INSN_CPU1, 46, 0, 0, 0},
914 	{"DESC_TABLE (SLDT)", CPU_DESC_TABLE, insn_sldt, INSN_CPU1, 47, 0, 0, 0},
915 	{"DESC_TABLE (LLDT)", CPU_DESC_TABLE, insn_lldt, INSN_CPU1, 47, 0, 0, 0},
916 	{"DESC_TABLE (STR)", CPU_DESC_TABLE, insn_str, INSN_CPU1, 47, 0, 0, 0},
917 	/* LTR causes a #GP if done with a busy selector, so it is not tested.  */
918 	{"RDRAND", CPU_RDRAND, insn_rdrand, INSN_CPU1, VMX_RDRAND, 0, 0, 0},
919 	{"RDSEED", CPU_RDSEED, insn_rdseed, INSN_CPU1, VMX_RDSEED, 0, 0, 0},
920 	// Instructions always trap
921 	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
922 	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
923 	// Instructions never trap
924 	{NULL},
925 };
926 
927 static int insn_intercept_init(struct vmcs *vmcs)
928 {
929 	u32 ctrl_cpu, cur_insn;
930 
931 	ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY;
932 	ctrl_cpu &= ctrl_cpu_rev[0].clr;
933 	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu);
934 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set);
935 	cr3 = read_cr3();
936 
937 	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
938 		if (insn_table[cur_insn].supported_fn == NULL)
939 			continue;
940 		insn_table[cur_insn].disabled = !insn_table[cur_insn].supported_fn();
941 	}
942 	return VMX_TEST_START;
943 }
944 
945 static void insn_intercept_main(void)
946 {
947 	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
948 		vmx_set_test_stage(cur_insn * 2);
949 		if ((insn_table[cur_insn].type == INSN_CPU0 &&
950 		     !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) ||
951 		    (insn_table[cur_insn].type == INSN_CPU1 &&
952 		     !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
953 			printf("\tCPU_CTRL%d.CPU_%s is not supported.\n",
954 			       insn_table[cur_insn].type - INSN_CPU0,
955 			       insn_table[cur_insn].name);
956 			continue;
957 		}
958 
959 		if (insn_table[cur_insn].disabled) {
960 			printf("\tFeature required for %s is not supported.\n",
961 			       insn_table[cur_insn].name);
962 			continue;
963 		}
964 
965 		if ((insn_table[cur_insn].type == INSN_CPU0 &&
966 		     !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) ||
967 		    (insn_table[cur_insn].type == INSN_CPU1 &&
968 		     !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) {
969 			/* skip hlt, it stalls the guest and is tested below */
970 			if (insn_table[cur_insn].insn_func != insn_hlt)
971 				insn_table[cur_insn].insn_func();
972 			report(vmx_get_test_stage() == cur_insn * 2,
973 					"execute %s",
974 					insn_table[cur_insn].name);
975 		} else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP)
976 			printf("\tCPU_CTRL%d.CPU_%s always traps.\n",
977 			       insn_table[cur_insn].type - INSN_CPU0,
978 			       insn_table[cur_insn].name);
979 
980 		vmcall();
981 
982 		insn_table[cur_insn].insn_func();
983 		report(vmx_get_test_stage() == cur_insn * 2 + 1,
984 				"intercept %s",
985 				insn_table[cur_insn].name);
986 
987 		vmx_set_test_stage(cur_insn * 2 + 1);
988 		vmcall();
989 	}
990 }
991 
992 static int insn_intercept_exit_handler(void)
993 {
994 	u64 guest_rip;
995 	u32 reason;
996 	ulong exit_qual;
997 	u32 insn_len;
998 	u32 insn_info;
999 	bool pass;
1000 
1001 	guest_rip = vmcs_read(GUEST_RIP);
1002 	reason = vmcs_read(EXI_REASON) & 0xff;
1003 	exit_qual = vmcs_read(EXI_QUALIFICATION);
1004 	insn_len = vmcs_read(EXI_INST_LEN);
1005 	insn_info = vmcs_read(EXI_INST_INFO);
1006 
1007 	if (reason == VMX_VMCALL) {
1008 		u32 val = 0;
1009 
1010 		if (insn_table[cur_insn].type == INSN_CPU0)
1011 			val = vmcs_read(CPU_EXEC_CTRL0);
1012 		else if (insn_table[cur_insn].type == INSN_CPU1)
1013 			val = vmcs_read(CPU_EXEC_CTRL1);
1014 
1015 		if (vmx_get_test_stage() & 1)
1016 			val &= ~insn_table[cur_insn].flag;
1017 		else
1018 			val |= insn_table[cur_insn].flag;
1019 
1020 		if (insn_table[cur_insn].type == INSN_CPU0)
1021 			vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set);
1022 		else if (insn_table[cur_insn].type == INSN_CPU1)
1023 			vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set);
1024 	} else {
1025 		pass = (cur_insn * 2 == vmx_get_test_stage()) &&
1026 			insn_table[cur_insn].reason == reason;
1027 		if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL &&
1028 		    insn_table[cur_insn].exit_qual != exit_qual)
1029 			pass = false;
1030 		if (insn_table[cur_insn].test_field & FIELD_INSN_INFO &&
1031 		    insn_table[cur_insn].insn_info != insn_info)
1032 			pass = false;
1033 		if (pass)
1034 			vmx_inc_test_stage();
1035 	}
1036 	vmcs_write(GUEST_RIP, guest_rip + insn_len);
1037 	return VMX_TEST_RESUME;
1038 }
1039 
1040 /**
1041  * __setup_ept - Setup the VMCS fields to enable Extended Page Tables (EPT)
1042  * @hpa:	Host physical address of the top-level, a.k.a. root, EPT table
1043  * @enable_ad:	Whether or not to enable Access/Dirty bits for EPT entries
1044  *
1045  * Returns 0 on success, 1 on failure.
1046  *
1047  * Note that @hpa doesn't need to point at actual memory if VM-Launch is
1048  * expected to fail, e.g. setup_dummy_ept() arbitrarily passes '0' to satisfy
1049  * the various EPTP consistency checks, but doesn't ensure backing for HPA '0'.
1050  */
1051 static int __setup_ept(u64 hpa, bool enable_ad)
1052 {
1053 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
1054 	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
1055 		printf("\tEPT is not supported");
1056 		return 1;
1057 	}
1058 	if (!(ept_vpid.val & EPT_CAP_WB)) {
1059 		printf("WB memtype for EPT walks not supported\n");
1060 		return 1;
1061 	}
1062 	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
1063 		printf("\tPWL4 is not supported\n");
1064 		return 1;
1065 	}
1066 
1067 	eptp = EPT_MEM_TYPE_WB;
1068 	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
1069 	eptp |= hpa;
1070 	if (enable_ad)
1071 		eptp |= EPTP_AD_FLAG;
1072 
1073 	vmcs_write(EPTP, eptp);
1074 	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0)| CPU_SECONDARY);
1075 	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1)| CPU_EPT);
1076 
1077 	return 0;
1078 }
1079 
1080 /**
1081  * setup_ept - Enable Extended Page Tables (EPT) and setup an identity map
1082  * @enable_ad:	Whether or not to enable Access/Dirty bits for EPT entries
1083  *
1084  * Returns 0 on success, 1 on failure.
1085  *
1086  * This is the "real" function for setting up EPT tables, i.e. use this for
1087  * tests that need to run code in the guest with EPT enabled.
1088  */
1089 static int setup_ept(bool enable_ad)
1090 {
1091 	unsigned long end_of_memory;
1092 
1093 	pml4 = alloc_page();
1094 
1095 	if (__setup_ept(virt_to_phys(pml4), enable_ad))
1096 		return 1;
1097 
1098 	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
1099 	if (end_of_memory < (1ul << 32))
1100 		end_of_memory = (1ul << 32);
1101 	/* Cannot use large EPT pages if we need to track EPT
1102 	 * accessed/dirty bits at 4K granularity.
1103 	 */
1104 	setup_ept_range(pml4, 0, end_of_memory, 0,
1105 			!enable_ad && ept_2m_supported(),
1106 			EPT_WA | EPT_RA | EPT_EA);
1107 	return 0;
1108 }
1109 
1110 /**
1111  * setup_dummy_ept - Enable Extended Page Tables (EPT) with a dummy root HPA
1112  *
1113  * Setup EPT using a semi-arbitrary dummy root HPA.  This function is intended
1114  * for use by tests that need EPT enabled to verify dependent VMCS controls
1115  * but never expect to fully enter the guest, i.e. don't need setup the actual
1116  * EPT tables.
1117  */
1118 static void setup_dummy_ept(void)
1119 {
1120 	if (__setup_ept(0, false))
1121 		report_abort("EPT setup unexpectedly failed");
1122 }
1123 
1124 static int enable_unrestricted_guest(void)
1125 {
1126 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
1127 	    !(ctrl_cpu_rev[1].clr & CPU_URG) ||
1128 	    !(ctrl_cpu_rev[1].clr & CPU_EPT))
1129 		return 1;
1130 
1131 	setup_dummy_ept();
1132 
1133 	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
1134 	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | CPU_URG);
1135 
1136 	return 0;
1137 }
1138 
1139 static void ept_enable_ad_bits(void)
1140 {
1141 	eptp |= EPTP_AD_FLAG;
1142 	vmcs_write(EPTP, eptp);
1143 }
1144 
1145 static void ept_disable_ad_bits(void)
1146 {
1147 	eptp &= ~EPTP_AD_FLAG;
1148 	vmcs_write(EPTP, eptp);
1149 }
1150 
1151 static int ept_ad_enabled(void)
1152 {
1153 	return eptp & EPTP_AD_FLAG;
1154 }
1155 
1156 static void ept_enable_ad_bits_or_skip_test(void)
1157 {
1158 	if (!ept_ad_bits_supported())
1159 		test_skip("EPT AD bits not supported.");
1160 	ept_enable_ad_bits();
1161 }
1162 
1163 static int apic_version;
1164 
1165 static int ept_init_common(bool have_ad)
1166 {
1167 	int ret;
1168 	struct pci_dev pcidev;
1169 
1170 	if (setup_ept(have_ad))
1171 		return VMX_TEST_EXIT;
1172 	data_page1 = alloc_page();
1173 	data_page2 = alloc_page();
1174 	*((u32 *)data_page1) = MAGIC_VAL_1;
1175 	*((u32 *)data_page2) = MAGIC_VAL_2;
1176 	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
1177 			EPT_RA | EPT_WA | EPT_EA);
1178 
1179 	apic_version = apic_read(APIC_LVR);
1180 
1181 	ret = pci_find_dev(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_TEST);
1182 	if (ret != PCIDEVADDR_INVALID) {
1183 		pci_dev_init(&pcidev, ret);
1184 		pci_physaddr = pcidev.resource[PCI_TESTDEV_BAR_MEM];
1185 	}
1186 
1187 	return VMX_TEST_START;
1188 }
1189 
1190 static int ept_init(struct vmcs *vmcs)
1191 {
1192 	return ept_init_common(false);
1193 }
1194 
1195 static void ept_common(void)
1196 {
1197 	vmx_set_test_stage(0);
1198 	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
1199 			*((u32 *)data_page1) != MAGIC_VAL_1)
1200 		report(0, "EPT basic framework - read");
1201 	else {
1202 		*((u32 *)data_page2) = MAGIC_VAL_3;
1203 		vmcall();
1204 		if (vmx_get_test_stage() == 1) {
1205 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1206 					*((u32 *)data_page2) == MAGIC_VAL_2)
1207 				report(1, "EPT basic framework");
1208 			else
1209 				report(1, "EPT basic framework - remap");
1210 		}
1211 	}
1212 	// Test EPT Misconfigurations
1213 	vmx_set_test_stage(1);
1214 	vmcall();
1215 	*((u32 *)data_page1) = MAGIC_VAL_1;
1216 	if (vmx_get_test_stage() != 2) {
1217 		report(0, "EPT misconfigurations");
1218 		goto t1;
1219 	}
1220 	vmx_set_test_stage(2);
1221 	vmcall();
1222 	*((u32 *)data_page1) = MAGIC_VAL_1;
1223 	report(vmx_get_test_stage() == 3, "EPT misconfigurations");
1224 t1:
1225 	// Test EPT violation
1226 	vmx_set_test_stage(3);
1227 	vmcall();
1228 	*((u32 *)data_page1) = MAGIC_VAL_1;
1229 	report(vmx_get_test_stage() == 4, "EPT violation - page permission");
1230 	// Violation caused by EPT paging structure
1231 	vmx_set_test_stage(4);
1232 	vmcall();
1233 	*((u32 *)data_page1) = MAGIC_VAL_2;
1234 	report(vmx_get_test_stage() == 5, "EPT violation - paging structure");
1235 
1236 	// MMIO Read/Write
1237 	vmx_set_test_stage(5);
1238 	vmcall();
1239 
1240 	*(u32 volatile *)pci_physaddr;
1241 	report(vmx_get_test_stage() == 6, "MMIO EPT violation - read");
1242 
1243 	*(u32 volatile *)pci_physaddr = MAGIC_VAL_1;
1244 	report(vmx_get_test_stage() == 7, "MMIO EPT violation - write");
1245 }
1246 
1247 static void ept_main(void)
1248 {
1249 	ept_common();
1250 
1251 	// Test EPT access to L1 MMIO
1252 	vmx_set_test_stage(7);
1253 	report(*((u32 *)0xfee00030UL) == apic_version, "EPT - MMIO access");
1254 
1255 	// Test invalid operand for INVEPT
1256 	vmcall();
1257 	report(vmx_get_test_stage() == 8, "EPT - unsupported INVEPT");
1258 }
1259 
1260 static bool invept_test(int type, u64 eptp)
1261 {
1262 	bool ret, supported;
1263 
1264 	supported = ept_vpid.val & (EPT_CAP_INVEPT_SINGLE >> INVEPT_SINGLE << type);
1265 	ret = invept(type, eptp);
1266 
1267 	if (ret == !supported)
1268 		return false;
1269 
1270 	if (!supported)
1271 		printf("WARNING: unsupported invept passed!\n");
1272 	else
1273 		printf("WARNING: invept failed!\n");
1274 
1275 	return true;
1276 }
1277 
1278 static int pml_exit_handler(void)
1279 {
1280 	u16 index, count;
1281 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
1282 	u64 *pmlbuf = pml_log;
1283 	u64 guest_rip = vmcs_read(GUEST_RIP);;
1284 	u64 guest_cr3 = vmcs_read(GUEST_CR3);
1285 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1286 
1287 	switch (reason) {
1288 	case VMX_VMCALL:
1289 		switch (vmx_get_test_stage()) {
1290 		case 0:
1291 			index = vmcs_read(GUEST_PML_INDEX);
1292 			for (count = index + 1; count < PML_INDEX; count++) {
1293 				if (pmlbuf[count] == (u64)data_page2) {
1294 					vmx_inc_test_stage();
1295 					clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
1296 					break;
1297 				}
1298 			}
1299 			break;
1300 		case 1:
1301 			index = vmcs_read(GUEST_PML_INDEX);
1302 			/* Keep clearing the dirty bit till a overflow */
1303 			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
1304 			break;
1305 		default:
1306 			report(false, "unexpected stage, %d.",
1307 			       vmx_get_test_stage());
1308 			print_vmexit_info();
1309 			return VMX_TEST_VMEXIT;
1310 		}
1311 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1312 		return VMX_TEST_RESUME;
1313 	case VMX_PML_FULL:
1314 		vmx_inc_test_stage();
1315 		vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);
1316 		return VMX_TEST_RESUME;
1317 	default:
1318 		report(false, "Unknown exit reason, %ld", reason);
1319 		print_vmexit_info();
1320 	}
1321 	return VMX_TEST_VMEXIT;
1322 }
1323 
1324 static int ept_exit_handler_common(bool have_ad)
1325 {
1326 	u64 guest_rip;
1327 	u64 guest_cr3;
1328 	ulong reason;
1329 	u32 insn_len;
1330 	u32 exit_qual;
1331 	static unsigned long data_page1_pte, data_page1_pte_pte, memaddr_pte;
1332 
1333 	guest_rip = vmcs_read(GUEST_RIP);
1334 	guest_cr3 = vmcs_read(GUEST_CR3);
1335 	reason = vmcs_read(EXI_REASON) & 0xff;
1336 	insn_len = vmcs_read(EXI_INST_LEN);
1337 	exit_qual = vmcs_read(EXI_QUALIFICATION);
1338 	switch (reason) {
1339 	case VMX_VMCALL:
1340 		switch (vmx_get_test_stage()) {
1341 		case 0:
1342 			check_ept_ad(pml4, guest_cr3,
1343 				     (unsigned long)data_page1,
1344 				     have_ad ? EPT_ACCESS_FLAG : 0,
1345 				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
1346 			check_ept_ad(pml4, guest_cr3,
1347 				     (unsigned long)data_page2,
1348 				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0,
1349 				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
1350 			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
1351 			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
1352 			if (have_ad)
1353 				ept_sync(INVEPT_SINGLE, eptp);;
1354 			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
1355 					*((u32 *)data_page2) == MAGIC_VAL_2) {
1356 				vmx_inc_test_stage();
1357 				install_ept(pml4, (unsigned long)data_page2,
1358 						(unsigned long)data_page2,
1359 						EPT_RA | EPT_WA | EPT_EA);
1360 			} else
1361 				report(0, "EPT basic framework - write");
1362 			break;
1363 		case 1:
1364 			install_ept(pml4, (unsigned long)data_page1,
1365  				(unsigned long)data_page1, EPT_WA);
1366 			ept_sync(INVEPT_SINGLE, eptp);
1367 			break;
1368 		case 2:
1369 			install_ept(pml4, (unsigned long)data_page1,
1370  				(unsigned long)data_page1,
1371  				EPT_RA | EPT_WA | EPT_EA |
1372  				(2 << EPT_MEM_TYPE_SHIFT));
1373 			ept_sync(INVEPT_SINGLE, eptp);
1374 			break;
1375 		case 3:
1376 			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
1377 			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
1378 						1, &data_page1_pte));
1379 			set_ept_pte(pml4, (unsigned long)data_page1,
1380 				1, data_page1_pte & ~EPT_PRESENT);
1381 			ept_sync(INVEPT_SINGLE, eptp);
1382 			break;
1383 		case 4:
1384 			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
1385 						2, &data_page1_pte));
1386 			data_page1_pte &= PAGE_MASK;
1387 			TEST_ASSERT(get_ept_pte(pml4, data_page1_pte,
1388 						2, &data_page1_pte_pte));
1389 			set_ept_pte(pml4, data_page1_pte, 2,
1390 				data_page1_pte_pte & ~EPT_PRESENT);
1391 			ept_sync(INVEPT_SINGLE, eptp);
1392 			break;
1393 		case 5:
1394 			install_ept(pml4, (unsigned long)pci_physaddr,
1395 				(unsigned long)pci_physaddr, 0);
1396 			ept_sync(INVEPT_SINGLE, eptp);
1397 			break;
1398 		case 7:
1399 			if (!invept_test(0, eptp))
1400 				vmx_inc_test_stage();
1401 			break;
1402 		// Should not reach here
1403 		default:
1404 			report(false, "ERROR - unexpected stage, %d.",
1405 			       vmx_get_test_stage());
1406 			print_vmexit_info();
1407 			return VMX_TEST_VMEXIT;
1408 		}
1409 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1410 		return VMX_TEST_RESUME;
1411 	case VMX_EPT_MISCONFIG:
1412 		switch (vmx_get_test_stage()) {
1413 		case 1:
1414 		case 2:
1415 			vmx_inc_test_stage();
1416 			install_ept(pml4, (unsigned long)data_page1,
1417  				(unsigned long)data_page1,
1418  				EPT_RA | EPT_WA | EPT_EA);
1419 			ept_sync(INVEPT_SINGLE, eptp);
1420 			break;
1421 		// Should not reach here
1422 		default:
1423 			report(false, "ERROR - unexpected stage, %d.",
1424 			       vmx_get_test_stage());
1425 			print_vmexit_info();
1426 			return VMX_TEST_VMEXIT;
1427 		}
1428 		return VMX_TEST_RESUME;
1429 	case VMX_EPT_VIOLATION:
1430 		/*
1431 		 * Exit-qualifications are masked not to account for advanced
1432 		 * VM-exit information. Once KVM supports this feature, this
1433 		 * masking should be removed.
1434 		 */
1435 		exit_qual &= ~EPT_VLT_GUEST_MASK;
1436 
1437 		switch(vmx_get_test_stage()) {
1438 		case 3:
1439 			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
1440 				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
1441 			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
1442 			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
1443 					EPT_VLT_PADDR))
1444 				vmx_inc_test_stage();
1445 			set_ept_pte(pml4, (unsigned long)data_page1,
1446 				1, data_page1_pte | (EPT_PRESENT));
1447 			ept_sync(INVEPT_SINGLE, eptp);
1448 			break;
1449 		case 4:
1450 			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
1451 				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
1452 			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
1453 			if (exit_qual == (EPT_VLT_RD |
1454 					  (have_ad ? EPT_VLT_WR : 0) |
1455 					  EPT_VLT_LADDR_VLD))
1456 				vmx_inc_test_stage();
1457 			set_ept_pte(pml4, data_page1_pte, 2,
1458 				data_page1_pte_pte | (EPT_PRESENT));
1459 			ept_sync(INVEPT_SINGLE, eptp);
1460 			break;
1461 		case 5:
1462 			if (exit_qual & EPT_VLT_RD)
1463 				vmx_inc_test_stage();
1464 			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)pci_physaddr,
1465 						1, &memaddr_pte));
1466 			set_ept_pte(pml4, memaddr_pte, 1, memaddr_pte | EPT_RA);
1467 			ept_sync(INVEPT_SINGLE, eptp);
1468 			break;
1469 		case 6:
1470 			if (exit_qual & EPT_VLT_WR)
1471 				vmx_inc_test_stage();
1472 			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)pci_physaddr,
1473 						1, &memaddr_pte));
1474 			set_ept_pte(pml4, memaddr_pte, 1, memaddr_pte | EPT_RA | EPT_WA);
1475 			ept_sync(INVEPT_SINGLE, eptp);
1476 			break;
1477 		default:
1478 			// Should not reach here
1479 			report(false, "ERROR : unexpected stage, %d",
1480 			       vmx_get_test_stage());
1481 			print_vmexit_info();
1482 			return VMX_TEST_VMEXIT;
1483 		}
1484 		return VMX_TEST_RESUME;
1485 	default:
1486 		report(false, "Unknown exit reason, %ld", reason);
1487 		print_vmexit_info();
1488 	}
1489 	return VMX_TEST_VMEXIT;
1490 }
1491 
1492 static int ept_exit_handler(void)
1493 {
1494 	return ept_exit_handler_common(false);
1495 }
1496 
1497 static int eptad_init(struct vmcs *vmcs)
1498 {
1499 	int r = ept_init_common(true);
1500 
1501 	if (r == VMX_TEST_EXIT)
1502 		return r;
1503 
1504 	if ((rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & EPT_CAP_AD_FLAG) == 0) {
1505 		printf("\tEPT A/D bits are not supported");
1506 		return VMX_TEST_EXIT;
1507 	}
1508 
1509 	return r;
1510 }
1511 
1512 static int pml_init(struct vmcs *vmcs)
1513 {
1514 	u32 ctrl_cpu;
1515 	int r = eptad_init(vmcs);
1516 
1517 	if (r == VMX_TEST_EXIT)
1518 		return r;
1519 
1520 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
1521 		!(ctrl_cpu_rev[1].clr & CPU_PML)) {
1522 		printf("\tPML is not supported");
1523 		return VMX_TEST_EXIT;
1524 	}
1525 
1526 	pml_log = alloc_page();
1527 	vmcs_write(PMLADDR, (u64)pml_log);
1528 	vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);
1529 
1530 	ctrl_cpu = vmcs_read(CPU_EXEC_CTRL1) | CPU_PML;
1531 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu);
1532 
1533 	return VMX_TEST_START;
1534 }
1535 
1536 static void pml_main(void)
1537 {
1538 	int count = 0;
1539 
1540 	vmx_set_test_stage(0);
1541 	*((u32 *)data_page2) = 0x1;
1542 	vmcall();
1543 	report(vmx_get_test_stage() == 1, "PML - Dirty GPA Logging");
1544 
1545 	while (vmx_get_test_stage() == 1) {
1546 		vmcall();
1547 		*((u32 *)data_page2) = 0x1;
1548 		if (count++ > PML_INDEX)
1549 			break;
1550 	}
1551 	report(vmx_get_test_stage() == 2, "PML Full Event");
1552 }
1553 
1554 static void eptad_main(void)
1555 {
1556 	ept_common();
1557 }
1558 
1559 static int eptad_exit_handler(void)
1560 {
1561 	return ept_exit_handler_common(true);
1562 }
1563 
1564 static bool invvpid_test(int type, u16 vpid)
1565 {
1566 	bool ret, supported;
1567 
1568 	supported = ept_vpid.val &
1569 		(VPID_CAP_INVVPID_ADDR >> INVVPID_ADDR << type);
1570 	ret = invvpid(type, vpid, 0);
1571 
1572 	if (ret == !supported)
1573 		return false;
1574 
1575 	if (!supported)
1576 		printf("WARNING: unsupported invvpid passed!\n");
1577 	else
1578 		printf("WARNING: invvpid failed!\n");
1579 
1580 	return true;
1581 }
1582 
1583 static int vpid_init(struct vmcs *vmcs)
1584 {
1585 	u32 ctrl_cpu1;
1586 
1587 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
1588 		!(ctrl_cpu_rev[1].clr & CPU_VPID)) {
1589 		printf("\tVPID is not supported");
1590 		return VMX_TEST_EXIT;
1591 	}
1592 
1593 	ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
1594 	ctrl_cpu1 |= CPU_VPID;
1595 	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
1596 	return VMX_TEST_START;
1597 }
1598 
1599 static void vpid_main(void)
1600 {
1601 	vmx_set_test_stage(0);
1602 	vmcall();
1603 	report(vmx_get_test_stage() == 1, "INVVPID SINGLE ADDRESS");
1604 	vmx_set_test_stage(2);
1605 	vmcall();
1606 	report(vmx_get_test_stage() == 3, "INVVPID SINGLE");
1607 	vmx_set_test_stage(4);
1608 	vmcall();
1609 	report(vmx_get_test_stage() == 5, "INVVPID ALL");
1610 }
1611 
1612 static int vpid_exit_handler(void)
1613 {
1614 	u64 guest_rip;
1615 	ulong reason;
1616 	u32 insn_len;
1617 
1618 	guest_rip = vmcs_read(GUEST_RIP);
1619 	reason = vmcs_read(EXI_REASON) & 0xff;
1620 	insn_len = vmcs_read(EXI_INST_LEN);
1621 
1622 	switch (reason) {
1623 	case VMX_VMCALL:
1624 		switch(vmx_get_test_stage()) {
1625 		case 0:
1626 			if (!invvpid_test(INVVPID_ADDR, 1))
1627 				vmx_inc_test_stage();
1628 			break;
1629 		case 2:
1630 			if (!invvpid_test(INVVPID_CONTEXT_GLOBAL, 1))
1631 				vmx_inc_test_stage();
1632 			break;
1633 		case 4:
1634 			if (!invvpid_test(INVVPID_ALL, 1))
1635 				vmx_inc_test_stage();
1636 			break;
1637 		default:
1638 			report(false, "ERROR: unexpected stage, %d",
1639 					vmx_get_test_stage());
1640 			print_vmexit_info();
1641 			return VMX_TEST_VMEXIT;
1642 		}
1643 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1644 		return VMX_TEST_RESUME;
1645 	default:
1646 		report(false, "Unknown exit reason, %ld", reason);
1647 		print_vmexit_info();
1648 	}
1649 	return VMX_TEST_VMEXIT;
1650 }
1651 
1652 #define TIMER_VECTOR	222
1653 
1654 static volatile bool timer_fired;
1655 
1656 static void timer_isr(isr_regs_t *regs)
1657 {
1658 	timer_fired = true;
1659 	apic_write(APIC_EOI, 0);
1660 }
1661 
1662 static int interrupt_init(struct vmcs *vmcs)
1663 {
1664 	msr_bmp_init();
1665 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1666 	handle_irq(TIMER_VECTOR, timer_isr);
1667 	return VMX_TEST_START;
1668 }
1669 
1670 static void interrupt_main(void)
1671 {
1672 	long long start, loops;
1673 
1674 	vmx_set_test_stage(0);
1675 
1676 	apic_write(APIC_LVTT, TIMER_VECTOR);
1677 	irq_enable();
1678 
1679 	apic_write(APIC_TMICT, 1);
1680 	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1681 		asm volatile ("nop");
1682 	report(timer_fired, "direct interrupt while running guest");
1683 
1684 	apic_write(APIC_TMICT, 0);
1685 	irq_disable();
1686 	vmcall();
1687 	timer_fired = false;
1688 	apic_write(APIC_TMICT, 1);
1689 	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1690 		asm volatile ("nop");
1691 	report(timer_fired, "intercepted interrupt while running guest");
1692 
1693 	irq_enable();
1694 	apic_write(APIC_TMICT, 0);
1695 	irq_disable();
1696 	vmcall();
1697 	timer_fired = false;
1698 	start = rdtsc();
1699 	apic_write(APIC_TMICT, 1000000);
1700 
1701 	asm volatile ("sti; hlt");
1702 
1703 	report(rdtsc() - start > 1000000 && timer_fired,
1704 	       "direct interrupt + hlt");
1705 
1706 	apic_write(APIC_TMICT, 0);
1707 	irq_disable();
1708 	vmcall();
1709 	timer_fired = false;
1710 	start = rdtsc();
1711 	apic_write(APIC_TMICT, 1000000);
1712 
1713 	asm volatile ("sti; hlt");
1714 
1715 	report(rdtsc() - start > 10000 && timer_fired,
1716 	       "intercepted interrupt + hlt");
1717 
1718 	apic_write(APIC_TMICT, 0);
1719 	irq_disable();
1720 	vmcall();
1721 	timer_fired = false;
1722 	start = rdtsc();
1723 	apic_write(APIC_TMICT, 1000000);
1724 
1725 	irq_enable();
1726 	asm volatile ("nop");
1727 	vmcall();
1728 
1729 	report(rdtsc() - start > 10000 && timer_fired,
1730 	       "direct interrupt + activity state hlt");
1731 
1732 	apic_write(APIC_TMICT, 0);
1733 	irq_disable();
1734 	vmcall();
1735 	timer_fired = false;
1736 	start = rdtsc();
1737 	apic_write(APIC_TMICT, 1000000);
1738 
1739 	irq_enable();
1740 	asm volatile ("nop");
1741 	vmcall();
1742 
1743 	report(rdtsc() - start > 10000 && timer_fired,
1744 	       "intercepted interrupt + activity state hlt");
1745 
1746 	apic_write(APIC_TMICT, 0);
1747 	irq_disable();
1748 	vmx_set_test_stage(7);
1749 	vmcall();
1750 	timer_fired = false;
1751 	apic_write(APIC_TMICT, 1);
1752 	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1753 		asm volatile ("nop");
1754 	report(timer_fired,
1755 	       "running a guest with interrupt acknowledgement set");
1756 
1757 	apic_write(APIC_TMICT, 0);
1758 	irq_enable();
1759 	timer_fired = false;
1760 	vmcall();
1761 	report(timer_fired, "Inject an event to a halted guest");
1762 }
1763 
1764 static int interrupt_exit_handler(void)
1765 {
1766 	u64 guest_rip = vmcs_read(GUEST_RIP);
1767 	ulong reason = vmcs_read(EXI_REASON) & 0xff;
1768 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1769 
1770 	switch (reason) {
1771 	case VMX_VMCALL:
1772 		switch (vmx_get_test_stage()) {
1773 		case 0:
1774 		case 2:
1775 		case 5:
1776 			vmcs_write(PIN_CONTROLS,
1777 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1778 			break;
1779 		case 7:
1780 			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
1781 			vmcs_write(PIN_CONTROLS,
1782 				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
1783 			break;
1784 		case 1:
1785 		case 3:
1786 			vmcs_write(PIN_CONTROLS,
1787 				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
1788 			break;
1789 		case 4:
1790 		case 6:
1791 			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
1792 			break;
1793 
1794 		case 8:
1795 			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
1796 			vmcs_write(ENT_INTR_INFO,
1797 				   TIMER_VECTOR |
1798 				   (VMX_INTR_TYPE_EXT_INTR << INTR_INFO_INTR_TYPE_SHIFT) |
1799 				   INTR_INFO_VALID_MASK);
1800 			break;
1801 		}
1802 		vmx_inc_test_stage();
1803 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1804 		return VMX_TEST_RESUME;
1805 	case VMX_EXTINT:
1806 		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
1807 			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
1808 			handle_external_interrupt(vector);
1809 		} else {
1810 			irq_enable();
1811 			asm volatile ("nop");
1812 			irq_disable();
1813 		}
1814 		if (vmx_get_test_stage() >= 2)
1815 			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
1816 		return VMX_TEST_RESUME;
1817 	default:
1818 		report(false, "Unknown exit reason, %ld", reason);
1819 		print_vmexit_info();
1820 	}
1821 
1822 	return VMX_TEST_VMEXIT;
1823 }
1824 
1825 static int dbgctls_init(struct vmcs *vmcs)
1826 {
1827 	u64 dr7 = 0x402;
1828 	u64 zero = 0;
1829 
1830 	msr_bmp_init();
1831 	asm volatile(
1832 		"mov %0,%%dr0\n\t"
1833 		"mov %0,%%dr1\n\t"
1834 		"mov %0,%%dr2\n\t"
1835 		"mov %1,%%dr7\n\t"
1836 		: : "r" (zero), "r" (dr7));
1837 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
1838 	vmcs_write(GUEST_DR7, 0x404);
1839 	vmcs_write(GUEST_DEBUGCTL, 0x2);
1840 
1841 	vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
1842 	vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_SAVE_DBGCTLS);
1843 
1844 	return VMX_TEST_START;
1845 }
1846 
1847 static void dbgctls_main(void)
1848 {
1849 	u64 dr7, debugctl;
1850 
1851 	asm volatile("mov %%dr7,%0" : "=r" (dr7));
1852 	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1853 	/* Commented out: KVM does not support DEBUGCTL so far */
1854 	(void)debugctl;
1855 	report(dr7 == 0x404, "Load debug controls" /* && debugctl == 0x2 */);
1856 
1857 	dr7 = 0x408;
1858 	asm volatile("mov %0,%%dr7" : : "r" (dr7));
1859 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);
1860 
1861 	vmx_set_test_stage(0);
1862 	vmcall();
1863 	report(vmx_get_test_stage() == 1, "Save debug controls");
1864 
1865 	if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS ||
1866 	    ctrl_exit_rev.set & EXI_SAVE_DBGCTLS) {
1867 		printf("\tDebug controls are always loaded/saved\n");
1868 		return;
1869 	}
1870 	vmx_set_test_stage(2);
1871 	vmcall();
1872 
1873 	asm volatile("mov %%dr7,%0" : "=r" (dr7));
1874 	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1875 	/* Commented out: KVM does not support DEBUGCTL so far */
1876 	(void)debugctl;
1877 	report(dr7 == 0x402,
1878 	       "Guest=host debug controls" /* && debugctl == 0x1 */);
1879 
1880 	dr7 = 0x408;
1881 	asm volatile("mov %0,%%dr7" : : "r" (dr7));
1882 	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);
1883 
1884 	vmx_set_test_stage(3);
1885 	vmcall();
1886 	report(vmx_get_test_stage() == 4, "Don't save debug controls");
1887 }
1888 
1889 static int dbgctls_exit_handler(void)
1890 {
1891 	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
1892 	u32 insn_len = vmcs_read(EXI_INST_LEN);
1893 	u64 guest_rip = vmcs_read(GUEST_RIP);
1894 	u64 dr7, debugctl;
1895 
1896 	asm volatile("mov %%dr7,%0" : "=r" (dr7));
1897 	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1898 
1899 	switch (reason) {
1900 	case VMX_VMCALL:
1901 		switch (vmx_get_test_stage()) {
1902 		case 0:
1903 			if (dr7 == 0x400 && debugctl == 0 &&
1904 			    vmcs_read(GUEST_DR7) == 0x408 /* &&
1905 			    Commented out: KVM does not support DEBUGCTL so far
1906 			    vmcs_read(GUEST_DEBUGCTL) == 0x3 */)
1907 				vmx_inc_test_stage();
1908 			break;
1909 		case 2:
1910 			dr7 = 0x402;
1911 			asm volatile("mov %0,%%dr7" : : "r" (dr7));
1912 			wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
1913 			vmcs_write(GUEST_DR7, 0x404);
1914 			vmcs_write(GUEST_DEBUGCTL, 0x2);
1915 
1916 			vmcs_write(ENT_CONTROLS,
1917 				vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS);
1918 			vmcs_write(EXI_CONTROLS,
1919 				vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS);
1920 			break;
1921 		case 3:
1922 			if (dr7 == 0x400 && debugctl == 0 &&
1923 			    vmcs_read(GUEST_DR7) == 0x404 /* &&
1924 			    Commented out: KVM does not support DEBUGCTL so far
1925 			    vmcs_read(GUEST_DEBUGCTL) == 0x2 */)
1926 				vmx_inc_test_stage();
1927 			break;
1928 		}
1929 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
1930 		return VMX_TEST_RESUME;
1931 	default:
1932 		report(false, "Unknown exit reason, %d", reason);
1933 		print_vmexit_info();
1934 	}
1935 	return VMX_TEST_VMEXIT;
1936 }
1937 
1938 struct vmx_msr_entry {
1939 	u32 index;
1940 	u32 reserved;
1941 	u64 value;
1942 } __attribute__((packed));
1943 
1944 #define MSR_MAGIC 0x31415926
1945 struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load;
1946 
1947 static int msr_switch_init(struct vmcs *vmcs)
1948 {
1949 	msr_bmp_init();
1950 	exit_msr_store = alloc_page();
1951 	exit_msr_load = alloc_page();
1952 	entry_msr_load = alloc_page();
1953 	entry_msr_load[0].index = MSR_KERNEL_GS_BASE;
1954 	entry_msr_load[0].value = MSR_MAGIC;
1955 
1956 	vmx_set_test_stage(1);
1957 	vmcs_write(ENT_MSR_LD_CNT, 1);
1958 	vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load);
1959 	vmcs_write(EXI_MSR_ST_CNT, 1);
1960 	vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store);
1961 	vmcs_write(EXI_MSR_LD_CNT, 1);
1962 	vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load);
1963 	return VMX_TEST_START;
1964 }
1965 
1966 static void msr_switch_main(void)
1967 {
1968 	if (vmx_get_test_stage() == 1) {
1969 		report(rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC,
1970 		       "VM entry MSR load");
1971 		vmx_set_test_stage(2);
1972 		wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1);
1973 		exit_msr_store[0].index = MSR_KERNEL_GS_BASE;
1974 		exit_msr_load[0].index = MSR_KERNEL_GS_BASE;
1975 		exit_msr_load[0].value = MSR_MAGIC + 2;
1976 	}
1977 	vmcall();
1978 }
1979 
1980 static int msr_switch_exit_handler(void)
1981 {
1982 	ulong reason;
1983 
1984 	reason = vmcs_read(EXI_REASON);
1985 	if (reason == VMX_VMCALL && vmx_get_test_stage() == 2) {
1986 		report(exit_msr_store[0].value == MSR_MAGIC + 1,
1987 		       "VM exit MSR store");
1988 		report(rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC + 2,
1989 		       "VM exit MSR load");
1990 		vmx_set_test_stage(3);
1991 		entry_msr_load[0].index = MSR_FS_BASE;
1992 		return VMX_TEST_RESUME;
1993 	}
1994 	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
1995 		__func__, vmx_get_test_stage(), reason);
1996 	return VMX_TEST_EXIT;
1997 }
1998 
1999 static int msr_switch_entry_failure(struct vmentry_failure *failure)
2000 {
2001 	ulong reason;
2002 
2003 	if (failure->early) {
2004 		printf("ERROR %s: early exit\n", __func__);
2005 		return VMX_TEST_EXIT;
2006 	}
2007 
2008 	reason = vmcs_read(EXI_REASON);
2009 	if (reason == (VMX_ENTRY_FAILURE | VMX_FAIL_MSR) &&
2010 	    vmx_get_test_stage() == 3) {
2011 		report(vmcs_read(EXI_QUALIFICATION) == 1,
2012 		       "VM entry MSR load: try to load FS_BASE");
2013 		return VMX_TEST_VMEXIT;
2014 	}
2015 	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
2016 		__func__, vmx_get_test_stage(), reason);
2017 	return VMX_TEST_EXIT;
2018 }
2019 
2020 static int vmmcall_init(struct vmcs *vmcs)
2021 {
2022 	vmcs_write(EXC_BITMAP, 1 << UD_VECTOR);
2023 	return VMX_TEST_START;
2024 }
2025 
2026 static void vmmcall_main(void)
2027 {
2028 	asm volatile(
2029 		"mov $0xABCD, %%rax\n\t"
2030 		"vmmcall\n\t"
2031 		::: "rax");
2032 
2033 	report(0, "VMMCALL");
2034 }
2035 
2036 static int vmmcall_exit_handler(void)
2037 {
2038 	ulong reason;
2039 
2040 	reason = vmcs_read(EXI_REASON);
2041 	switch (reason) {
2042 	case VMX_VMCALL:
2043 		printf("here\n");
2044 		report(0, "VMMCALL triggers #UD");
2045 		break;
2046 	case VMX_EXC_NMI:
2047 		report((vmcs_read(EXI_INTR_INFO) & 0xff) == UD_VECTOR,
2048 		       "VMMCALL triggers #UD");
2049 		break;
2050 	default:
2051 		report(false, "Unknown exit reason, %ld", reason);
2052 		print_vmexit_info();
2053 	}
2054 
2055 	return VMX_TEST_VMEXIT;
2056 }
2057 
2058 static int disable_rdtscp_init(struct vmcs *vmcs)
2059 {
2060 	u32 ctrl_cpu1;
2061 
2062 	if (ctrl_cpu_rev[0].clr & CPU_SECONDARY) {
2063 		ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
2064 		ctrl_cpu1 &= ~CPU_RDTSCP;
2065 		vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
2066 	}
2067 
2068 	return VMX_TEST_START;
2069 }
2070 
2071 static void disable_rdtscp_ud_handler(struct ex_regs *regs)
2072 {
2073 	switch (vmx_get_test_stage()) {
2074 	case 0:
2075 		report(true, "RDTSCP triggers #UD");
2076 		vmx_inc_test_stage();
2077 		regs->rip += 3;
2078 		break;
2079 	case 2:
2080 		report(true, "RDPID triggers #UD");
2081 		vmx_inc_test_stage();
2082 		regs->rip += 4;
2083 		break;
2084 	}
2085 	return;
2086 
2087 }
2088 
2089 static void disable_rdtscp_main(void)
2090 {
2091 	/* Test that #UD is properly injected in L2.  */
2092 	handle_exception(UD_VECTOR, disable_rdtscp_ud_handler);
2093 
2094 	vmx_set_test_stage(0);
2095 	asm volatile("rdtscp" : : : "eax", "ecx", "edx");
2096 	vmcall();
2097 	asm volatile(".byte 0xf3, 0x0f, 0xc7, 0xf8" : : : "eax");
2098 
2099 	handle_exception(UD_VECTOR, 0);
2100 	vmcall();
2101 }
2102 
2103 static int disable_rdtscp_exit_handler(void)
2104 {
2105 	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
2106 
2107 	switch (reason) {
2108 	case VMX_VMCALL:
2109 		switch (vmx_get_test_stage()) {
2110 		case 0:
2111 			report(false, "RDTSCP triggers #UD");
2112 			vmx_inc_test_stage();
2113 			/* fallthrough */
2114 		case 1:
2115 			vmx_inc_test_stage();
2116 			vmcs_write(GUEST_RIP, vmcs_read(GUEST_RIP) + 3);
2117 			return VMX_TEST_RESUME;
2118 		case 2:
2119 			report(false, "RDPID triggers #UD");
2120 			break;
2121 		}
2122 		break;
2123 
2124 	default:
2125 		report(false, "Unknown exit reason, %d", reason);
2126 		print_vmexit_info();
2127 	}
2128 	return VMX_TEST_VMEXIT;
2129 }
2130 
2131 static int int3_init(struct vmcs *vmcs)
2132 {
2133 	vmcs_write(EXC_BITMAP, ~0u);
2134 	return VMX_TEST_START;
2135 }
2136 
2137 static void int3_guest_main(void)
2138 {
2139 	asm volatile ("int3");
2140 }
2141 
2142 static int int3_exit_handler(void)
2143 {
2144 	u32 reason = vmcs_read(EXI_REASON);
2145 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
2146 
2147 	report(reason == VMX_EXC_NMI && (intr_info & INTR_INFO_VALID_MASK) &&
2148 	       (intr_info & INTR_INFO_VECTOR_MASK) == BP_VECTOR &&
2149 	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
2150 	        INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION,
2151 	       "L1 intercepts #BP");
2152 
2153 	return VMX_TEST_VMEXIT;
2154 }
2155 
2156 static int into_init(struct vmcs *vmcs)
2157 {
2158 	vmcs_write(EXC_BITMAP, ~0u);
2159 	return VMX_TEST_START;
2160 }
2161 
2162 static void into_guest_main(void)
2163 {
2164 	struct far_pointer32 fp = {
2165 		.offset = (uintptr_t)&&into,
2166 		.selector = KERNEL_CS32,
2167 	};
2168 	register uintptr_t rsp asm("rsp");
2169 
2170 	if (fp.offset != (uintptr_t)&&into) {
2171 		printf("Code address too high.\n");
2172 		return;
2173 	}
2174 	if ((u32)rsp != rsp) {
2175 		printf("Stack address too high.\n");
2176 		return;
2177 	}
2178 
2179 	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
2180 	return;
2181 into:
2182 	asm volatile (".code32;"
2183 		      "movl $0x7fffffff, %eax;"
2184 		      "addl %eax, %eax;"
2185 		      "into;"
2186 		      "lret;"
2187 		      ".code64");
2188 	__builtin_unreachable();
2189 }
2190 
2191 static int into_exit_handler(void)
2192 {
2193 	u32 reason = vmcs_read(EXI_REASON);
2194 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
2195 
2196 	report(reason == VMX_EXC_NMI && (intr_info & INTR_INFO_VALID_MASK) &&
2197 	       (intr_info & INTR_INFO_VECTOR_MASK) == OF_VECTOR &&
2198 	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
2199 	        INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION,
2200 	       "L1 intercepts #OF");
2201 
2202 	return VMX_TEST_VMEXIT;
2203 }
2204 
2205 static void exit_monitor_from_l2_main(void)
2206 {
2207 	printf("Calling exit(0) from l2...\n");
2208 	exit(0);
2209 }
2210 
2211 static int exit_monitor_from_l2_handler(void)
2212 {
2213 	report(false, "The guest should have killed the VMM");
2214 	return VMX_TEST_EXIT;
2215 }
2216 
2217 static void assert_exit_reason(u64 expected)
2218 {
2219 	u64 actual = vmcs_read(EXI_REASON);
2220 
2221 	TEST_ASSERT_EQ_MSG(expected, actual, "Expected %s, got %s.",
2222 			   exit_reason_description(expected),
2223 			   exit_reason_description(actual));
2224 }
2225 
2226 static void skip_exit_insn(void)
2227 {
2228 	u64 guest_rip = vmcs_read(GUEST_RIP);
2229 	u32 insn_len = vmcs_read(EXI_INST_LEN);
2230 	vmcs_write(GUEST_RIP, guest_rip + insn_len);
2231 }
2232 
2233 static void skip_exit_vmcall(void)
2234 {
2235 	assert_exit_reason(VMX_VMCALL);
2236 	skip_exit_insn();
2237 }
2238 
2239 static void v2_null_test_guest(void)
2240 {
2241 }
2242 
2243 static void v2_null_test(void)
2244 {
2245 	test_set_guest(v2_null_test_guest);
2246 	enter_guest();
2247 	report(1, __func__);
2248 }
2249 
2250 static void v2_multiple_entries_test_guest(void)
2251 {
2252 	vmx_set_test_stage(1);
2253 	vmcall();
2254 	vmx_set_test_stage(2);
2255 }
2256 
2257 static void v2_multiple_entries_test(void)
2258 {
2259 	test_set_guest(v2_multiple_entries_test_guest);
2260 	enter_guest();
2261 	TEST_ASSERT_EQ(vmx_get_test_stage(), 1);
2262 	skip_exit_vmcall();
2263 	enter_guest();
2264 	TEST_ASSERT_EQ(vmx_get_test_stage(), 2);
2265 	report(1, __func__);
2266 }
2267 
2268 static int fixture_test_data = 1;
2269 
2270 static void fixture_test_teardown(void *data)
2271 {
2272 	*((int *) data) = 1;
2273 }
2274 
2275 static void fixture_test_guest(void)
2276 {
2277 	fixture_test_data++;
2278 }
2279 
2280 
2281 static void fixture_test_setup(void)
2282 {
2283 	TEST_ASSERT_EQ_MSG(1, fixture_test_data,
2284 			   "fixture_test_teardown didn't run?!");
2285 	fixture_test_data = 2;
2286 	test_add_teardown(fixture_test_teardown, &fixture_test_data);
2287 	test_set_guest(fixture_test_guest);
2288 }
2289 
2290 static void fixture_test_case1(void)
2291 {
2292 	fixture_test_setup();
2293 	TEST_ASSERT_EQ(2, fixture_test_data);
2294 	enter_guest();
2295 	TEST_ASSERT_EQ(3, fixture_test_data);
2296 	report(1, __func__);
2297 }
2298 
2299 static void fixture_test_case2(void)
2300 {
2301 	fixture_test_setup();
2302 	TEST_ASSERT_EQ(2, fixture_test_data);
2303 	enter_guest();
2304 	TEST_ASSERT_EQ(3, fixture_test_data);
2305 	report(1, __func__);
2306 }
2307 
2308 enum ept_access_op {
2309 	OP_READ,
2310 	OP_WRITE,
2311 	OP_EXEC,
2312 	OP_FLUSH_TLB,
2313 	OP_EXIT,
2314 };
2315 
2316 static struct ept_access_test_data {
2317 	unsigned long gpa;
2318 	unsigned long *gva;
2319 	unsigned long hpa;
2320 	unsigned long *hva;
2321 	enum ept_access_op op;
2322 } ept_access_test_data;
2323 
2324 extern unsigned char ret42_start;
2325 extern unsigned char ret42_end;
2326 
2327 /* Returns 42. */
2328 asm(
2329 	".align 64\n"
2330 	"ret42_start:\n"
2331 	"mov $42, %eax\n"
2332 	"ret\n"
2333 	"ret42_end:\n"
2334 );
2335 
2336 static void
2337 diagnose_ept_violation_qual(u64 expected, u64 actual)
2338 {
2339 
2340 #define DIAGNOSE(flag)							\
2341 do {									\
2342 	if ((expected & flag) != (actual & flag))			\
2343 		printf(#flag " %sexpected\n",				\
2344 		       (expected & flag) ? "" : "un");			\
2345 } while (0)
2346 
2347 	DIAGNOSE(EPT_VLT_RD);
2348 	DIAGNOSE(EPT_VLT_WR);
2349 	DIAGNOSE(EPT_VLT_FETCH);
2350 	DIAGNOSE(EPT_VLT_PERM_RD);
2351 	DIAGNOSE(EPT_VLT_PERM_WR);
2352 	DIAGNOSE(EPT_VLT_PERM_EX);
2353 	DIAGNOSE(EPT_VLT_LADDR_VLD);
2354 	DIAGNOSE(EPT_VLT_PADDR);
2355 
2356 #undef DIAGNOSE
2357 }
2358 
2359 static void do_ept_access_op(enum ept_access_op op)
2360 {
2361 	ept_access_test_data.op = op;
2362 	enter_guest();
2363 }
2364 
2365 /*
2366  * Force the guest to flush its TLB (i.e., flush gva -> gpa mappings). Only
2367  * needed by tests that modify guest PTEs.
2368  */
2369 static void ept_access_test_guest_flush_tlb(void)
2370 {
2371 	do_ept_access_op(OP_FLUSH_TLB);
2372 	skip_exit_vmcall();
2373 }
2374 
2375 /*
2376  * Modifies the EPT entry at @level in the mapping of @gpa. First clears the
2377  * bits in @clear then sets the bits in @set. @mkhuge transforms the entry into
2378  * a huge page.
2379  */
2380 static unsigned long ept_twiddle(unsigned long gpa, bool mkhuge, int level,
2381 				 unsigned long clear, unsigned long set)
2382 {
2383 	struct ept_access_test_data *data = &ept_access_test_data;
2384 	unsigned long orig_pte;
2385 	unsigned long pte;
2386 
2387 	/* Screw with the mapping at the requested level. */
2388 	TEST_ASSERT(get_ept_pte(pml4, gpa, level, &orig_pte));
2389 	pte = orig_pte;
2390 	if (mkhuge)
2391 		pte = (orig_pte & ~EPT_ADDR_MASK) | data->hpa | EPT_LARGE_PAGE;
2392 	else
2393 		pte = orig_pte;
2394 	pte = (pte & ~clear) | set;
2395 	set_ept_pte(pml4, gpa, level, pte);
2396 	ept_sync(INVEPT_SINGLE, eptp);
2397 
2398 	return orig_pte;
2399 }
2400 
2401 static void ept_untwiddle(unsigned long gpa, int level, unsigned long orig_pte)
2402 {
2403 	set_ept_pte(pml4, gpa, level, orig_pte);
2404 	ept_sync(INVEPT_SINGLE, eptp);
2405 }
2406 
2407 static void do_ept_violation(bool leaf, enum ept_access_op op,
2408 			     u64 expected_qual, u64 expected_paddr)
2409 {
2410 	u64 qual;
2411 
2412 	/* Try the access and observe the violation. */
2413 	do_ept_access_op(op);
2414 
2415 	assert_exit_reason(VMX_EPT_VIOLATION);
2416 
2417 	qual = vmcs_read(EXI_QUALIFICATION);
2418 
2419 	/* Mask undefined bits (which may later be defined in certain cases). */
2420 	qual &= ~(EPT_VLT_GUEST_USER | EPT_VLT_GUEST_RW | EPT_VLT_GUEST_EX |
2421 		 EPT_VLT_PERM_USER_EX);
2422 
2423 	diagnose_ept_violation_qual(expected_qual, qual);
2424 	TEST_EXPECT_EQ(expected_qual, qual);
2425 
2426 	#if 0
2427 	/* Disable for now otherwise every test will fail */
2428 	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
2429 		       (unsigned long) (
2430 			       op == OP_EXEC ? data->gva + 1 : data->gva));
2431 	#endif
2432 	/*
2433 	 * TODO: tests that probe expected_paddr in pages other than the one at
2434 	 * the beginning of the 1g region.
2435 	 */
2436 	TEST_EXPECT_EQ(vmcs_read(INFO_PHYS_ADDR), expected_paddr);
2437 }
2438 
2439 static void
2440 ept_violation_at_level_mkhuge(bool mkhuge, int level, unsigned long clear,
2441 			      unsigned long set, enum ept_access_op op,
2442 			      u64 expected_qual)
2443 {
2444 	struct ept_access_test_data *data = &ept_access_test_data;
2445 	unsigned long orig_pte;
2446 
2447 	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);
2448 
2449 	do_ept_violation(level == 1 || mkhuge, op, expected_qual,
2450 			 op == OP_EXEC ? data->gpa + sizeof(unsigned long) :
2451 					 data->gpa);
2452 
2453 	/* Fix the violation and resume the op loop. */
2454 	ept_untwiddle(data->gpa, level, orig_pte);
2455 	enter_guest();
2456 	skip_exit_vmcall();
2457 }
2458 
2459 static void
2460 ept_violation_at_level(int level, unsigned long clear, unsigned long set,
2461 		       enum ept_access_op op, u64 expected_qual)
2462 {
2463 	ept_violation_at_level_mkhuge(false, level, clear, set, op,
2464 				      expected_qual);
2465 	if (ept_huge_pages_supported(level))
2466 		ept_violation_at_level_mkhuge(true, level, clear, set, op,
2467 					      expected_qual);
2468 }
2469 
2470 static void ept_violation(unsigned long clear, unsigned long set,
2471 			  enum ept_access_op op, u64 expected_qual)
2472 {
2473 	ept_violation_at_level(1, clear, set, op, expected_qual);
2474 	ept_violation_at_level(2, clear, set, op, expected_qual);
2475 	ept_violation_at_level(3, clear, set, op, expected_qual);
2476 	ept_violation_at_level(4, clear, set, op, expected_qual);
2477 }
2478 
2479 static void ept_access_violation(unsigned long access, enum ept_access_op op,
2480 				       u64 expected_qual)
2481 {
2482 	ept_violation(EPT_PRESENT, access, op,
2483 		      expected_qual | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2484 }
2485 
2486 /*
2487  * For translations that don't involve a GVA, that is physical address (paddr)
2488  * accesses, EPT violations don't set the flag EPT_VLT_PADDR.  For a typical
2489  * guest memory access, the hardware does GVA -> GPA -> HPA.  However, certain
2490  * translations don't involve GVAs, such as when the hardware does the guest
2491  * page table walk. For example, in translating GVA_1 -> GPA_1, the guest MMU
2492  * might try to set an A bit on a guest PTE. If the GPA_2 that the PTE resides
2493  * on isn't present in the EPT, then the EPT violation will be for GPA_2 and
2494  * the EPT_VLT_PADDR bit will be clear in the exit qualification.
2495  *
2496  * Note that paddr violations can also be triggered by loading PAE page tables
2497  * with wonky addresses. We don't test that yet.
2498  *
2499  * This function modifies the EPT entry that maps the GPA that the guest page
2500  * table entry mapping ept_access_test_data.gva resides on.
2501  *
2502  *	@ept_access	EPT permissions to set. Other permissions are cleared.
2503  *
2504  *	@pte_ad		Set the A/D bits on the guest PTE accordingly.
2505  *
2506  *	@op		Guest operation to perform with
2507  *			ept_access_test_data.gva.
2508  *
2509  *	@expect_violation
2510  *			Is a violation expected during the paddr access?
2511  *
2512  *	@expected_qual	Expected qualification for the EPT violation.
2513  *			EPT_VLT_PADDR should be clear.
2514  */
2515 static void ept_access_paddr(unsigned long ept_access, unsigned long pte_ad,
2516 			     enum ept_access_op op, bool expect_violation,
2517 			     u64 expected_qual)
2518 {
2519 	struct ept_access_test_data *data = &ept_access_test_data;
2520 	unsigned long *ptep;
2521 	unsigned long gpa;
2522 	unsigned long orig_epte;
2523 	unsigned long epte;
2524 	int i;
2525 
2526 	/* Modify the guest PTE mapping data->gva according to @pte_ad.  */
2527 	ptep = get_pte_level(current_page_table(), data->gva, /*level=*/1);
2528 	TEST_ASSERT(ptep);
2529 	TEST_ASSERT_EQ(*ptep & PT_ADDR_MASK, data->gpa);
2530 	*ptep = (*ptep & ~PT_AD_MASK) | pte_ad;
2531 	ept_access_test_guest_flush_tlb();
2532 
2533 	/*
2534 	 * Now modify the access bits on the EPT entry for the GPA that the
2535 	 * guest PTE resides on. Note that by modifying a single EPT entry,
2536 	 * we're potentially affecting 512 guest PTEs. However, we've carefully
2537 	 * constructed our test such that those other 511 PTEs aren't used by
2538 	 * the guest: data->gva is at the beginning of a 1G huge page, thus the
2539 	 * PTE we're modifying is at the beginning of a 4K page and the
2540 	 * following 511 entires are also under our control (and not touched by
2541 	 * the guest).
2542 	 */
2543 	gpa = virt_to_phys(ptep);
2544 	TEST_ASSERT_EQ(gpa & ~PAGE_MASK, 0);
2545 	/*
2546 	 * Make sure the guest page table page is mapped with a 4K EPT entry,
2547 	 * otherwise our level=1 twiddling below will fail. We use the
2548 	 * identity map (gpa = gpa) since page tables are shared with the host.
2549 	 */
2550 	install_ept(pml4, gpa, gpa, EPT_PRESENT);
2551 	orig_epte = ept_twiddle(gpa, /*mkhuge=*/0, /*level=*/1,
2552 				/*clear=*/EPT_PRESENT, /*set=*/ept_access);
2553 
2554 	if (expect_violation) {
2555 		do_ept_violation(/*leaf=*/true, op,
2556 				 expected_qual | EPT_VLT_LADDR_VLD, gpa);
2557 		ept_untwiddle(gpa, /*level=*/1, orig_epte);
2558 		do_ept_access_op(op);
2559 	} else {
2560 		do_ept_access_op(op);
2561 		if (ept_ad_enabled()) {
2562 			for (i = EPT_PAGE_LEVEL; i > 0; i--) {
2563 				TEST_ASSERT(get_ept_pte(pml4, gpa, i, &epte));
2564 				TEST_ASSERT(epte & EPT_ACCESS_FLAG);
2565 				if (i == 1)
2566 					TEST_ASSERT(epte & EPT_DIRTY_FLAG);
2567 				else
2568 					TEST_ASSERT_EQ(epte & EPT_DIRTY_FLAG, 0);
2569 			}
2570 		}
2571 
2572 		ept_untwiddle(gpa, /*level=*/1, orig_epte);
2573 	}
2574 
2575 	TEST_ASSERT(*ptep & PT_ACCESSED_MASK);
2576 	if ((pte_ad & PT_DIRTY_MASK) || op == OP_WRITE)
2577 		TEST_ASSERT(*ptep & PT_DIRTY_MASK);
2578 
2579 	skip_exit_vmcall();
2580 }
2581 
2582 static void ept_access_allowed_paddr(unsigned long ept_access,
2583 				     unsigned long pte_ad,
2584 				     enum ept_access_op op)
2585 {
2586 	ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/false,
2587 			 /*expected_qual=*/-1);
2588 }
2589 
2590 static void ept_access_violation_paddr(unsigned long ept_access,
2591 				       unsigned long pte_ad,
2592 				       enum ept_access_op op,
2593 				       u64 expected_qual)
2594 {
2595 	ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/true,
2596 			 expected_qual);
2597 }
2598 
2599 
2600 static void ept_allowed_at_level_mkhuge(bool mkhuge, int level,
2601 					unsigned long clear,
2602 					unsigned long set,
2603 					enum ept_access_op op)
2604 {
2605 	struct ept_access_test_data *data = &ept_access_test_data;
2606 	unsigned long orig_pte;
2607 
2608 	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);
2609 
2610 	/* No violation. Should proceed to vmcall. */
2611 	do_ept_access_op(op);
2612 	skip_exit_vmcall();
2613 
2614 	ept_untwiddle(data->gpa, level, orig_pte);
2615 }
2616 
2617 static void ept_allowed_at_level(int level, unsigned long clear,
2618 				 unsigned long set, enum ept_access_op op)
2619 {
2620 	ept_allowed_at_level_mkhuge(false, level, clear, set, op);
2621 	if (ept_huge_pages_supported(level))
2622 		ept_allowed_at_level_mkhuge(true, level, clear, set, op);
2623 }
2624 
2625 static void ept_allowed(unsigned long clear, unsigned long set,
2626 			enum ept_access_op op)
2627 {
2628 	ept_allowed_at_level(1, clear, set, op);
2629 	ept_allowed_at_level(2, clear, set, op);
2630 	ept_allowed_at_level(3, clear, set, op);
2631 	ept_allowed_at_level(4, clear, set, op);
2632 }
2633 
2634 static void ept_ignored_bit(int bit)
2635 {
2636 	/* Set the bit. */
2637 	ept_allowed(0, 1ul << bit, OP_READ);
2638 	ept_allowed(0, 1ul << bit, OP_WRITE);
2639 	ept_allowed(0, 1ul << bit, OP_EXEC);
2640 
2641 	/* Clear the bit. */
2642 	ept_allowed(1ul << bit, 0, OP_READ);
2643 	ept_allowed(1ul << bit, 0, OP_WRITE);
2644 	ept_allowed(1ul << bit, 0, OP_EXEC);
2645 }
2646 
2647 static void ept_access_allowed(unsigned long access, enum ept_access_op op)
2648 {
2649 	ept_allowed(EPT_PRESENT, access, op);
2650 }
2651 
2652 
2653 static void ept_misconfig_at_level_mkhuge_op(bool mkhuge, int level,
2654 					     unsigned long clear,
2655 					     unsigned long set,
2656 					     enum ept_access_op op)
2657 {
2658 	struct ept_access_test_data *data = &ept_access_test_data;
2659 	unsigned long orig_pte;
2660 
2661 	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);
2662 
2663 	do_ept_access_op(op);
2664 	assert_exit_reason(VMX_EPT_MISCONFIG);
2665 
2666 	/* Intel 27.2.1, "For all other VM exits, this field is cleared." */
2667 	#if 0
2668 	/* broken: */
2669 	TEST_EXPECT_EQ_MSG(vmcs_read(EXI_QUALIFICATION), 0);
2670 	#endif
2671 	#if 0
2672 	/*
2673 	 * broken:
2674 	 * According to description of exit qual for EPT violation,
2675 	 * EPT_VLT_LADDR_VLD indicates if GUEST_LINEAR_ADDRESS is valid.
2676 	 * However, I can't find anything that says GUEST_LINEAR_ADDRESS ought
2677 	 * to be set for msiconfig.
2678 	 */
2679 	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
2680 		       (unsigned long) (
2681 			       op == OP_EXEC ? data->gva + 1 : data->gva));
2682 	#endif
2683 
2684 	/* Fix the violation and resume the op loop. */
2685 	ept_untwiddle(data->gpa, level, orig_pte);
2686 	enter_guest();
2687 	skip_exit_vmcall();
2688 }
2689 
2690 static void ept_misconfig_at_level_mkhuge(bool mkhuge, int level,
2691 					  unsigned long clear,
2692 					  unsigned long set)
2693 {
2694 	/* The op shouldn't matter (read, write, exec), so try them all! */
2695 	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_READ);
2696 	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_WRITE);
2697 	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_EXEC);
2698 }
2699 
2700 static void ept_misconfig_at_level(int level, unsigned long clear,
2701 				   unsigned long set)
2702 {
2703 	ept_misconfig_at_level_mkhuge(false, level, clear, set);
2704 	if (ept_huge_pages_supported(level))
2705 		ept_misconfig_at_level_mkhuge(true, level, clear, set);
2706 }
2707 
2708 static void ept_misconfig(unsigned long clear, unsigned long set)
2709 {
2710 	ept_misconfig_at_level(1, clear, set);
2711 	ept_misconfig_at_level(2, clear, set);
2712 	ept_misconfig_at_level(3, clear, set);
2713 	ept_misconfig_at_level(4, clear, set);
2714 }
2715 
2716 static void ept_access_misconfig(unsigned long access)
2717 {
2718 	ept_misconfig(EPT_PRESENT, access);
2719 }
2720 
2721 static void ept_reserved_bit_at_level_nohuge(int level, int bit)
2722 {
2723 	/* Setting the bit causes a misconfig. */
2724 	ept_misconfig_at_level_mkhuge(false, level, 0, 1ul << bit);
2725 
2726 	/* Making the entry non-present turns reserved bits into ignored. */
2727 	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
2728 			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2729 }
2730 
2731 static void ept_reserved_bit_at_level_huge(int level, int bit)
2732 {
2733 	/* Setting the bit causes a misconfig. */
2734 	ept_misconfig_at_level_mkhuge(true, level, 0, 1ul << bit);
2735 
2736 	/* Making the entry non-present turns reserved bits into ignored. */
2737 	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
2738 			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2739 }
2740 
2741 static void ept_reserved_bit_at_level(int level, int bit)
2742 {
2743 	/* Setting the bit causes a misconfig. */
2744 	ept_misconfig_at_level(level, 0, 1ul << bit);
2745 
2746 	/* Making the entry non-present turns reserved bits into ignored. */
2747 	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
2748 			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
2749 }
2750 
2751 static void ept_reserved_bit(int bit)
2752 {
2753 	ept_reserved_bit_at_level(1, bit);
2754 	ept_reserved_bit_at_level(2, bit);
2755 	ept_reserved_bit_at_level(3, bit);
2756 	ept_reserved_bit_at_level(4, bit);
2757 }
2758 
2759 #define PAGE_2M_ORDER 9
2760 #define PAGE_1G_ORDER 18
2761 
2762 static void *get_1g_page(void)
2763 {
2764 	static void *alloc;
2765 
2766 	if (!alloc)
2767 		alloc = alloc_pages(PAGE_1G_ORDER);
2768 	return alloc;
2769 }
2770 
2771 static void ept_access_test_teardown(void *unused)
2772 {
2773 	/* Exit the guest cleanly. */
2774 	do_ept_access_op(OP_EXIT);
2775 }
2776 
2777 static void ept_access_test_guest(void)
2778 {
2779 	struct ept_access_test_data *data = &ept_access_test_data;
2780 	int (*code)(void) = (int (*)(void)) &data->gva[1];
2781 
2782 	while (true) {
2783 		switch (data->op) {
2784 		case OP_READ:
2785 			TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_1);
2786 			break;
2787 		case OP_WRITE:
2788 			*data->gva = MAGIC_VAL_2;
2789 			TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_2);
2790 			*data->gva = MAGIC_VAL_1;
2791 			break;
2792 		case OP_EXEC:
2793 			TEST_ASSERT_EQ(42, code());
2794 			break;
2795 		case OP_FLUSH_TLB:
2796 			write_cr3(read_cr3());
2797 			break;
2798 		case OP_EXIT:
2799 			return;
2800 		default:
2801 			TEST_ASSERT_MSG(false, "Unknown op %d", data->op);
2802 		}
2803 		vmcall();
2804 	}
2805 }
2806 
2807 static void ept_access_test_setup(void)
2808 {
2809 	struct ept_access_test_data *data = &ept_access_test_data;
2810 	unsigned long npages = 1ul << PAGE_1G_ORDER;
2811 	unsigned long size = npages * PAGE_SIZE;
2812 	unsigned long *page_table = current_page_table();
2813 	unsigned long pte;
2814 
2815 	if (setup_ept(false))
2816 		test_skip("EPT not supported");
2817 
2818 	/* We use data->gpa = 1 << 39 so that test data has a separate pml4 entry */
2819 	if (cpuid_maxphyaddr() < 40)
2820 		test_skip("Test needs MAXPHYADDR >= 40");
2821 
2822 	test_set_guest(ept_access_test_guest);
2823 	test_add_teardown(ept_access_test_teardown, NULL);
2824 
2825 	data->hva = get_1g_page();
2826 	TEST_ASSERT(data->hva);
2827 	data->hpa = virt_to_phys(data->hva);
2828 
2829 	data->gpa = 1ul << 39;
2830 	data->gva = (void *) ALIGN((unsigned long) alloc_vpages(npages * 2),
2831 				   size);
2832 	TEST_ASSERT(!any_present_pages(page_table, data->gva, size));
2833 	install_pages(page_table, data->gpa, size, data->gva);
2834 
2835 	/*
2836 	 * Make sure nothing's mapped here so the tests that screw with the
2837 	 * pml4 entry don't inadvertently break something.
2838 	 */
2839 	TEST_ASSERT(get_ept_pte(pml4, data->gpa, 4, &pte) && pte == 0);
2840 	TEST_ASSERT(get_ept_pte(pml4, data->gpa + size - 1, 4, &pte) && pte == 0);
2841 	install_ept(pml4, data->hpa, data->gpa, EPT_PRESENT);
2842 
2843 	data->hva[0] = MAGIC_VAL_1;
2844 	memcpy(&data->hva[1], &ret42_start, &ret42_end - &ret42_start);
2845 }
2846 
2847 static void ept_access_test_not_present(void)
2848 {
2849 	ept_access_test_setup();
2850 	/* --- */
2851 	ept_access_violation(0, OP_READ, EPT_VLT_RD);
2852 	ept_access_violation(0, OP_WRITE, EPT_VLT_WR);
2853 	ept_access_violation(0, OP_EXEC, EPT_VLT_FETCH);
2854 }
2855 
2856 static void ept_access_test_read_only(void)
2857 {
2858 	ept_access_test_setup();
2859 
2860 	/* r-- */
2861 	ept_access_allowed(EPT_RA, OP_READ);
2862 	ept_access_violation(EPT_RA, OP_WRITE, EPT_VLT_WR | EPT_VLT_PERM_RD);
2863 	ept_access_violation(EPT_RA, OP_EXEC, EPT_VLT_FETCH | EPT_VLT_PERM_RD);
2864 }
2865 
2866 static void ept_access_test_write_only(void)
2867 {
2868 	ept_access_test_setup();
2869 	/* -w- */
2870 	ept_access_misconfig(EPT_WA);
2871 }
2872 
2873 static void ept_access_test_read_write(void)
2874 {
2875 	ept_access_test_setup();
2876 	/* rw- */
2877 	ept_access_allowed(EPT_RA | EPT_WA, OP_READ);
2878 	ept_access_allowed(EPT_RA | EPT_WA, OP_WRITE);
2879 	ept_access_violation(EPT_RA | EPT_WA, OP_EXEC,
2880 			   EPT_VLT_FETCH | EPT_VLT_PERM_RD | EPT_VLT_PERM_WR);
2881 }
2882 
2883 
2884 static void ept_access_test_execute_only(void)
2885 {
2886 	ept_access_test_setup();
2887 	/* --x */
2888 	if (ept_execute_only_supported()) {
2889 		ept_access_violation(EPT_EA, OP_READ,
2890 				     EPT_VLT_RD | EPT_VLT_PERM_EX);
2891 		ept_access_violation(EPT_EA, OP_WRITE,
2892 				     EPT_VLT_WR | EPT_VLT_PERM_EX);
2893 		ept_access_allowed(EPT_EA, OP_EXEC);
2894 	} else {
2895 		ept_access_misconfig(EPT_EA);
2896 	}
2897 }
2898 
2899 static void ept_access_test_read_execute(void)
2900 {
2901 	ept_access_test_setup();
2902 	/* r-x */
2903 	ept_access_allowed(EPT_RA | EPT_EA, OP_READ);
2904 	ept_access_violation(EPT_RA | EPT_EA, OP_WRITE,
2905 			   EPT_VLT_WR | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX);
2906 	ept_access_allowed(EPT_RA | EPT_EA, OP_EXEC);
2907 }
2908 
2909 static void ept_access_test_write_execute(void)
2910 {
2911 	ept_access_test_setup();
2912 	/* -wx */
2913 	ept_access_misconfig(EPT_WA | EPT_EA);
2914 }
2915 
2916 static void ept_access_test_read_write_execute(void)
2917 {
2918 	ept_access_test_setup();
2919 	/* rwx */
2920 	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_READ);
2921 	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_WRITE);
2922 	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_EXEC);
2923 }
2924 
2925 static void ept_access_test_reserved_bits(void)
2926 {
2927 	int i;
2928 	int maxphyaddr;
2929 
2930 	ept_access_test_setup();
2931 
2932 	/* Reserved bits above maxphyaddr. */
2933 	maxphyaddr = cpuid_maxphyaddr();
2934 	for (i = maxphyaddr; i <= 51; i++) {
2935 		report_prefix_pushf("reserved_bit=%d", i);
2936 		ept_reserved_bit(i);
2937 		report_prefix_pop();
2938 	}
2939 
2940 	/* Level-specific reserved bits. */
2941 	ept_reserved_bit_at_level_nohuge(2, 3);
2942 	ept_reserved_bit_at_level_nohuge(2, 4);
2943 	ept_reserved_bit_at_level_nohuge(2, 5);
2944 	ept_reserved_bit_at_level_nohuge(2, 6);
2945 	/* 2M alignment. */
2946 	for (i = 12; i < 20; i++) {
2947 		report_prefix_pushf("reserved_bit=%d", i);
2948 		ept_reserved_bit_at_level_huge(2, i);
2949 		report_prefix_pop();
2950 	}
2951 	ept_reserved_bit_at_level_nohuge(3, 3);
2952 	ept_reserved_bit_at_level_nohuge(3, 4);
2953 	ept_reserved_bit_at_level_nohuge(3, 5);
2954 	ept_reserved_bit_at_level_nohuge(3, 6);
2955 	/* 1G alignment. */
2956 	for (i = 12; i < 29; i++) {
2957 		report_prefix_pushf("reserved_bit=%d", i);
2958 		ept_reserved_bit_at_level_huge(3, i);
2959 		report_prefix_pop();
2960 	}
2961 	ept_reserved_bit_at_level(4, 3);
2962 	ept_reserved_bit_at_level(4, 4);
2963 	ept_reserved_bit_at_level(4, 5);
2964 	ept_reserved_bit_at_level(4, 6);
2965 	ept_reserved_bit_at_level(4, 7);
2966 }
2967 
2968 static void ept_access_test_ignored_bits(void)
2969 {
2970 	ept_access_test_setup();
2971 	/*
2972 	 * Bits ignored at every level. Bits 8 and 9 (A and D) are ignored as
2973 	 * far as translation is concerned even if AD bits are enabled in the
2974 	 * EPTP. Bit 63 is ignored because "EPT-violation #VE" VM-execution
2975 	 * control is 0.
2976 	 */
2977 	ept_ignored_bit(8);
2978 	ept_ignored_bit(9);
2979 	ept_ignored_bit(10);
2980 	ept_ignored_bit(11);
2981 	ept_ignored_bit(52);
2982 	ept_ignored_bit(53);
2983 	ept_ignored_bit(54);
2984 	ept_ignored_bit(55);
2985 	ept_ignored_bit(56);
2986 	ept_ignored_bit(57);
2987 	ept_ignored_bit(58);
2988 	ept_ignored_bit(59);
2989 	ept_ignored_bit(60);
2990 	ept_ignored_bit(61);
2991 	ept_ignored_bit(62);
2992 	ept_ignored_bit(63);
2993 }
2994 
2995 static void ept_access_test_paddr_not_present_ad_disabled(void)
2996 {
2997 	ept_access_test_setup();
2998 	ept_disable_ad_bits();
2999 
3000 	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, EPT_VLT_RD);
3001 	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, EPT_VLT_RD);
3002 	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, EPT_VLT_RD);
3003 }
3004 
3005 static void ept_access_test_paddr_not_present_ad_enabled(void)
3006 {
3007 	u64 qual = EPT_VLT_RD | EPT_VLT_WR;
3008 
3009 	ept_access_test_setup();
3010 	ept_enable_ad_bits_or_skip_test();
3011 
3012 	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, qual);
3013 	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, qual);
3014 	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, qual);
3015 }
3016 
3017 static void ept_access_test_paddr_read_only_ad_disabled(void)
3018 {
3019 	/*
3020 	 * When EPT AD bits are disabled, all accesses to guest paging
3021 	 * structures are reported separately as a read and (after
3022 	 * translation of the GPA to host physical address) a read+write
3023 	 * if the A/D bits have to be set.
3024 	 */
3025 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;
3026 
3027 	ept_access_test_setup();
3028 	ept_disable_ad_bits();
3029 
3030 	/* Can't update A bit, so all accesses fail. */
3031 	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
3032 	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
3033 	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
3034 	/* AD bits disabled, so only writes try to update the D bit. */
3035 	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ);
3036 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
3037 	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC);
3038 	/* Both A and D already set, so read-only is OK. */
3039 	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_READ);
3040 	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_WRITE);
3041 	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_EXEC);
3042 }
3043 
3044 static void ept_access_test_paddr_read_only_ad_enabled(void)
3045 {
3046 	/*
3047 	 * When EPT AD bits are enabled, all accesses to guest paging
3048 	 * structures are considered writes as far as EPT translation
3049 	 * is concerned.
3050 	 */
3051 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;
3052 
3053 	ept_access_test_setup();
3054 	ept_enable_ad_bits_or_skip_test();
3055 
3056 	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
3057 	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
3058 	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
3059 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ, qual);
3060 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
3061 	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC, qual);
3062 	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_READ, qual);
3063 	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_WRITE, qual);
3064 	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_EXEC, qual);
3065 }
3066 
3067 static void ept_access_test_paddr_read_write(void)
3068 {
3069 	ept_access_test_setup();
3070 	/* Read-write access to paging structure. */
3071 	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_READ);
3072 	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_WRITE);
3073 	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_EXEC);
3074 }
3075 
3076 static void ept_access_test_paddr_read_write_execute(void)
3077 {
3078 	ept_access_test_setup();
3079 	/* RWX access to paging structure. */
3080 	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_READ);
3081 	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_WRITE);
3082 	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_EXEC);
3083 }
3084 
3085 static void ept_access_test_paddr_read_execute_ad_disabled(void)
3086 {
3087   	/*
3088 	 * When EPT AD bits are disabled, all accesses to guest paging
3089 	 * structures are reported separately as a read and (after
3090 	 * translation of the GPA to host physical address) a read+write
3091 	 * if the A/D bits have to be set.
3092 	 */
3093 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX;
3094 
3095 	ept_access_test_setup();
3096 	ept_disable_ad_bits();
3097 
3098 	/* Can't update A bit, so all accesses fail. */
3099 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual);
3100 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual);
3101 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual);
3102 	/* AD bits disabled, so only writes try to update the D bit. */
3103 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ);
3104 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual);
3105 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC);
3106 	/* Both A and D already set, so read-only is OK. */
3107 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ);
3108 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE);
3109 	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC);
3110 }
3111 
3112 static void ept_access_test_paddr_read_execute_ad_enabled(void)
3113 {
3114 	/*
3115 	 * When EPT AD bits are enabled, all accesses to guest paging
3116 	 * structures are considered writes as far as EPT translation
3117 	 * is concerned.
3118 	 */
3119 	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX;
3120 
3121 	ept_access_test_setup();
3122 	ept_enable_ad_bits_or_skip_test();
3123 
3124 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual);
3125 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual);
3126 	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual);
3127 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ, qual);
3128 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual);
3129 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC, qual);
3130 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ, qual);
3131 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE, qual);
3132 	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC, qual);
3133 }
3134 
3135 static void ept_access_test_paddr_not_present_page_fault(void)
3136 {
3137 	ept_access_test_setup();
3138 	/*
3139 	 * TODO: test no EPT violation as long as guest PF occurs. e.g., GPA is
3140 	 * page is read-only in EPT but GVA is also mapped read only in PT.
3141 	 * Thus guest page fault before host takes EPT violation for trying to
3142 	 * update A bit.
3143 	 */
3144 }
3145 
3146 static void ept_access_test_force_2m_page(void)
3147 {
3148 	ept_access_test_setup();
3149 
3150 	TEST_ASSERT_EQ(ept_2m_supported(), true);
3151 	ept_allowed_at_level_mkhuge(true, 2, 0, 0, OP_READ);
3152 	ept_violation_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_RA, OP_WRITE,
3153 				      EPT_VLT_WR | EPT_VLT_PERM_RD |
3154 				      EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
3155 	ept_misconfig_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_WA);
3156 }
3157 
3158 static bool invvpid_valid(u64 type, u64 vpid, u64 gla)
3159 {
3160 	u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3161 
3162 	TEST_ASSERT(msr & VPID_CAP_INVVPID);
3163 
3164 	if (type < INVVPID_ADDR || type > INVVPID_CONTEXT_LOCAL)
3165 		return false;
3166 
3167 	if (!(msr & (1ull << (type + VPID_CAP_INVVPID_TYPES_SHIFT))))
3168 		return false;
3169 
3170 	if (vpid >> 16)
3171 		return false;
3172 
3173 	if (type != INVVPID_ALL && !vpid)
3174 		return false;
3175 
3176 	if (type == INVVPID_ADDR && !is_canonical(gla))
3177 		return false;
3178 
3179 	return true;
3180 }
3181 
3182 static void try_invvpid(u64 type, u64 vpid, u64 gla)
3183 {
3184 	int rc;
3185 	bool valid = invvpid_valid(type, vpid, gla);
3186 	u64 expected = valid ? VMXERR_UNSUPPORTED_VMCS_COMPONENT
3187 		: VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID;
3188 	/*
3189 	 * Set VMX_INST_ERROR to VMXERR_UNVALID_VMCS_COMPONENT, so
3190 	 * that we can tell if it is updated by INVVPID.
3191 	 */
3192 	vmcs_read(~0);
3193 	rc = invvpid(type, vpid, gla);
3194 	report(!rc == valid, "INVVPID type %ld VPID %lx GLA %lx %s", type,
3195 	       vpid, gla,
3196 	       valid ? "passes" : "fails");
3197 	report(vmcs_read(VMX_INST_ERROR) == expected,
3198 	       "After %s INVVPID, VMX_INST_ERR is %ld (actual %ld)",
3199 	       rc ? "failed" : "successful",
3200 	       expected, vmcs_read(VMX_INST_ERROR));
3201 }
3202 
3203 static void ds_invvpid(void *data)
3204 {
3205 	u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3206 	u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1;
3207 
3208 	TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL);
3209 	asm volatile("invvpid %0, %1"
3210 		     :
3211 		     : "m"(*(struct invvpid_operand *)data),
3212 		       "r"(type));
3213 }
3214 
3215 /*
3216  * The SS override is ignored in 64-bit mode, so we use an addressing
3217  * mode with %rsp as the base register to generate an implicit SS
3218  * reference.
3219  */
3220 static void ss_invvpid(void *data)
3221 {
3222 	u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3223 	u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1;
3224 
3225 	TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL);
3226 	asm volatile("sub %%rsp,%0; invvpid (%%rsp,%0,1), %1"
3227 		     : "+r"(data)
3228 		     : "r"(type));
3229 }
3230 
3231 static void invvpid_test_gp(void)
3232 {
3233 	bool fault;
3234 
3235 	fault = test_for_exception(GP_VECTOR, &ds_invvpid,
3236 				   (void *)NONCANONICAL);
3237 	report(fault, "INVVPID with non-canonical DS operand raises #GP");
3238 }
3239 
3240 static void invvpid_test_ss(void)
3241 {
3242 	bool fault;
3243 
3244 	fault = test_for_exception(SS_VECTOR, &ss_invvpid,
3245 				   (void *)NONCANONICAL);
3246 	report(fault, "INVVPID with non-canonical SS operand raises #SS");
3247 }
3248 
3249 static void invvpid_test_pf(void)
3250 {
3251 	void *vpage = alloc_vpage();
3252 	bool fault;
3253 
3254 	fault = test_for_exception(PF_VECTOR, &ds_invvpid, vpage);
3255 	report(fault, "INVVPID with unmapped operand raises #PF");
3256 }
3257 
3258 static void try_compat_invvpid(void *unused)
3259 {
3260 	struct far_pointer32 fp = {
3261 		.offset = (uintptr_t)&&invvpid,
3262 		.selector = KERNEL_CS32,
3263 	};
3264 	register uintptr_t rsp asm("rsp");
3265 
3266 	TEST_ASSERT_MSG(fp.offset == (uintptr_t)&&invvpid,
3267 			"Code address too high.");
3268 	TEST_ASSERT_MSG(rsp == (u32)rsp, "Stack address too high.");
3269 
3270 	asm goto ("lcall *%0" : : "m" (fp) : "rax" : invvpid);
3271 	return;
3272 invvpid:
3273 	asm volatile (".code32;"
3274 		      "invvpid (%eax), %eax;"
3275 		      "lret;"
3276 		      ".code64");
3277 	__builtin_unreachable();
3278 }
3279 
3280 static void invvpid_test_compatibility_mode(void)
3281 {
3282 	bool fault;
3283 
3284 	fault = test_for_exception(UD_VECTOR, &try_compat_invvpid, NULL);
3285 	report(fault, "Compatibility mode INVVPID raises #UD");
3286 }
3287 
3288 static void invvpid_test_not_in_vmx_operation(void)
3289 {
3290 	bool fault;
3291 
3292 	TEST_ASSERT(!vmx_off());
3293 	fault = test_for_exception(UD_VECTOR, &ds_invvpid, NULL);
3294 	report(fault, "INVVPID outside of VMX operation raises #UD");
3295 	TEST_ASSERT(!vmx_on());
3296 }
3297 
3298 /*
3299  * This does not test real-address mode, virtual-8086 mode, protected mode,
3300  * or CPL > 0.
3301  */
3302 static void invvpid_test_v2(void)
3303 {
3304 	u64 msr;
3305 	int i;
3306 	unsigned types = 0;
3307 	unsigned type;
3308 
3309 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
3310 	    !(ctrl_cpu_rev[1].clr & CPU_VPID))
3311 		test_skip("VPID not supported");
3312 
3313 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3314 
3315 	if (!(msr & VPID_CAP_INVVPID))
3316 		test_skip("INVVPID not supported.\n");
3317 
3318 	if (msr & VPID_CAP_INVVPID_ADDR)
3319 		types |= 1u << INVVPID_ADDR;
3320 	if (msr & VPID_CAP_INVVPID_CXTGLB)
3321 		types |= 1u << INVVPID_CONTEXT_GLOBAL;
3322 	if (msr & VPID_CAP_INVVPID_ALL)
3323 		types |= 1u << INVVPID_ALL;
3324 	if (msr & VPID_CAP_INVVPID_CXTLOC)
3325 		types |= 1u << INVVPID_CONTEXT_LOCAL;
3326 
3327 	if (!types)
3328 		test_skip("No INVVPID types supported.\n");
3329 
3330 	for (i = -127; i < 128; i++)
3331 		try_invvpid(i, 0xffff, 0);
3332 
3333 	/*
3334 	 * VPID must not be more than 16 bits.
3335 	 */
3336 	for (i = 0; i < 64; i++)
3337 		for (type = 0; type < 4; type++)
3338 			if (types & (1u << type))
3339 				try_invvpid(type, 1ul << i, 0);
3340 
3341 	/*
3342 	 * VPID must not be zero, except for "all contexts."
3343 	 */
3344 	for (type = 0; type < 4; type++)
3345 		if (types & (1u << type))
3346 			try_invvpid(type, 0, 0);
3347 
3348 	/*
3349 	 * The gla operand is only validated for single-address INVVPID.
3350 	 */
3351 	if (types & (1u << INVVPID_ADDR))
3352 		try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL);
3353 
3354 	invvpid_test_gp();
3355 	invvpid_test_ss();
3356 	invvpid_test_pf();
3357 	invvpid_test_compatibility_mode();
3358 	invvpid_test_not_in_vmx_operation();
3359 }
3360 
3361 /*
3362  * Test for early VMLAUNCH failure. Returns true if VMLAUNCH makes it
3363  * at least as far as the guest-state checks. Returns false if the
3364  * VMLAUNCH fails early and execution falls through to the next
3365  * instruction.
3366  */
3367 static bool vmlaunch_succeeds(void)
3368 {
3369 	u32 exit_reason;
3370 
3371 	/*
3372 	 * Indirectly set VMX_INST_ERR to 12 ("VMREAD/VMWRITE from/to
3373 	 * unsupported VMCS component"). The caller can then check
3374 	 * to see if a failed VM-entry sets VMX_INST_ERR as expected.
3375 	 */
3376 	vmcs_write(~0u, 0);
3377 
3378 	vmcs_write(HOST_RIP, (uintptr_t)&&success);
3379 	__asm__ __volatile__ goto ("vmwrite %%rsp, %0; vmlaunch"
3380 				   :
3381 				   : "r" ((u64)HOST_RSP)
3382 				   : "cc", "memory"
3383 				   : success);
3384 	return false;
3385 success:
3386 	exit_reason = vmcs_read(EXI_REASON);
3387 	TEST_ASSERT(exit_reason == (VMX_FAIL_STATE | VMX_ENTRY_FAILURE) ||
3388 		    exit_reason == (VMX_FAIL_MSR | VMX_ENTRY_FAILURE));
3389 	return true;
3390 }
3391 
3392 /*
3393  * Try to launch the current VMCS.
3394  */
3395 static void test_vmx_vmlaunch(u32 xerror)
3396 {
3397 	bool success = vmlaunch_succeeds();
3398 	u32 vmx_inst_err;
3399 
3400 	report(success == !xerror, "vmlaunch %s",
3401 	       !xerror ? "succeeds" : "fails");
3402 	if (!success && xerror) {
3403 		vmx_inst_err = vmcs_read(VMX_INST_ERROR);
3404 		report(vmx_inst_err == xerror,
3405 		       "VMX inst error is %d (actual %d)", xerror,
3406 		       vmx_inst_err);
3407 	}
3408 }
3409 
3410 static void test_vmx_invalid_controls(void)
3411 {
3412 	test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3413 }
3414 
3415 static void test_vmx_valid_controls(void)
3416 {
3417 	test_vmx_vmlaunch(0);
3418 }
3419 
3420 /*
3421  * Test a particular value of a VM-execution control bit, if the value
3422  * is required or if the value is zero.
3423  */
3424 static void test_rsvd_ctl_bit_value(const char *name, union vmx_ctrl_msr msr,
3425 				    enum Encoding encoding, unsigned bit,
3426 				    unsigned val)
3427 {
3428 	u32 mask = 1u << bit;
3429 	bool expected;
3430 	u32 controls;
3431 
3432 	if (msr.set & mask)
3433 		TEST_ASSERT(msr.clr & mask);
3434 
3435 	/*
3436 	 * We can't arbitrarily turn on a control bit, because it may
3437 	 * introduce dependencies on other VMCS fields. So, we only
3438 	 * test turning on bits that have a required setting.
3439 	 */
3440 	if (val && (msr.clr & mask) && !(msr.set & mask))
3441 		return;
3442 
3443 	report_prefix_pushf("%s %s bit %d",
3444 			    val ? "Set" : "Clear", name, bit);
3445 
3446 	controls = vmcs_read(encoding);
3447 	if (val) {
3448 		vmcs_write(encoding, msr.set | mask);
3449 		expected = (msr.clr & mask);
3450 	} else {
3451 		vmcs_write(encoding, msr.set & ~mask);
3452 		expected = !(msr.set & mask);
3453 	}
3454 	if (expected)
3455 		test_vmx_valid_controls();
3456 	else
3457 		test_vmx_invalid_controls();
3458 	vmcs_write(encoding, controls);
3459 	report_prefix_pop();
3460 }
3461 
3462 /*
3463  * Test reserved values of a VM-execution control bit, based on the
3464  * allowed bit settings from the corresponding VMX capability MSR.
3465  */
3466 static void test_rsvd_ctl_bit(const char *name, union vmx_ctrl_msr msr,
3467 			      enum Encoding encoding, unsigned bit)
3468 {
3469 	test_rsvd_ctl_bit_value(name, msr, encoding, bit, 0);
3470 	test_rsvd_ctl_bit_value(name, msr, encoding, bit, 1);
3471 }
3472 
3473 /*
3474  * Reserved bits in the pin-based VM-execution controls must be set
3475  * properly. Software may consult the VMX capability MSRs to determine
3476  * the proper settings.
3477  * [Intel SDM]
3478  */
3479 static void test_pin_based_ctls(void)
3480 {
3481 	unsigned bit;
3482 
3483 	printf("%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PIN" :
3484 	       "MSR_IA32_VMX_PINBASED_CTLS", ctrl_pin_rev.val);
3485 	for (bit = 0; bit < 32; bit++)
3486 		test_rsvd_ctl_bit("pin-based controls",
3487 				  ctrl_pin_rev, PIN_CONTROLS, bit);
3488 }
3489 
3490 /*
3491  * Reserved bits in the primary processor-based VM-execution controls
3492  * must be set properly. Software may consult the VMX capability MSRs
3493  * to determine the proper settings.
3494  * [Intel SDM]
3495  */
3496 static void test_primary_processor_based_ctls(void)
3497 {
3498 	unsigned bit;
3499 
3500 	printf("\n%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PROC" :
3501 	       "MSR_IA32_VMX_PROCBASED_CTLS", ctrl_cpu_rev[0].val);
3502 	for (bit = 0; bit < 32; bit++)
3503 		test_rsvd_ctl_bit("primary processor-based controls",
3504 				  ctrl_cpu_rev[0], CPU_EXEC_CTRL0, bit);
3505 }
3506 
3507 /*
3508  * If the "activate secondary controls" primary processor-based
3509  * VM-execution control is 1, reserved bits in the secondary
3510  * processor-based VM-execution controls must be cleared. Software may
3511  * consult the VMX capability MSRs to determine which bits are
3512  * reserved.
3513  * If the "activate secondary controls" primary processor-based
3514  * VM-execution control is 0 (or if the processor does not support the
3515  * 1-setting of that control), no checks are performed on the
3516  * secondary processor-based VM-execution controls.
3517  * [Intel SDM]
3518  */
3519 static void test_secondary_processor_based_ctls(void)
3520 {
3521 	u32 primary;
3522 	u32 secondary;
3523 	unsigned bit;
3524 
3525 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY))
3526 		return;
3527 
3528 	primary = vmcs_read(CPU_EXEC_CTRL0);
3529 	secondary = vmcs_read(CPU_EXEC_CTRL1);
3530 
3531 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY);
3532 	printf("\nMSR_IA32_VMX_PROCBASED_CTLS2: %lx\n", ctrl_cpu_rev[1].val);
3533 	for (bit = 0; bit < 32; bit++)
3534 		test_rsvd_ctl_bit("secondary processor-based controls",
3535 				  ctrl_cpu_rev[1], CPU_EXEC_CTRL1, bit);
3536 
3537 	/*
3538 	 * When the "activate secondary controls" VM-execution control
3539 	 * is clear, there are no checks on the secondary controls.
3540 	 */
3541 	vmcs_write(CPU_EXEC_CTRL0, primary & ~CPU_SECONDARY);
3542 	vmcs_write(CPU_EXEC_CTRL1, ~0);
3543 	report(vmlaunch_succeeds(),
3544 	       "Secondary processor-based controls ignored");
3545 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3546 	vmcs_write(CPU_EXEC_CTRL0, primary);
3547 }
3548 
3549 static void try_cr3_target_count(unsigned i, unsigned max)
3550 {
3551 	report_prefix_pushf("CR3 target count 0x%x", i);
3552 	vmcs_write(CR3_TARGET_COUNT, i);
3553 	if (i <= max)
3554 		test_vmx_valid_controls();
3555 	else
3556 		test_vmx_invalid_controls();
3557 	report_prefix_pop();
3558 }
3559 
3560 /*
3561  * The CR3-target count must not be greater than 4. Future processors
3562  * may support a different number of CR3-target values. Software
3563  * should read the VMX capability MSR IA32_VMX_MISC to determine the
3564  * number of values supported.
3565  * [Intel SDM]
3566  */
3567 static void test_cr3_targets(void)
3568 {
3569 	unsigned supported_targets = (rdmsr(MSR_IA32_VMX_MISC) >> 16) & 0x1ff;
3570 	u32 cr3_targets = vmcs_read(CR3_TARGET_COUNT);
3571 	unsigned i;
3572 
3573 	printf("\nSupported CR3 targets: %d\n", supported_targets);
3574 	TEST_ASSERT(supported_targets <= 256);
3575 
3576 	try_cr3_target_count(-1u, supported_targets);
3577 	try_cr3_target_count(0x80000000, supported_targets);
3578 	try_cr3_target_count(0x7fffffff, supported_targets);
3579 	for (i = 0; i <= supported_targets + 1; i++)
3580 		try_cr3_target_count(i, supported_targets);
3581 	vmcs_write(CR3_TARGET_COUNT, cr3_targets);
3582 }
3583 
3584 /*
3585  * Test a particular address setting in the VMCS
3586  */
3587 static void test_vmcs_addr(const char *name,
3588 			   enum Encoding encoding,
3589 			   u64 align,
3590 			   bool ignored,
3591 			   bool skip_beyond_mapped_ram,
3592 			   u64 addr)
3593 {
3594 	report_prefix_pushf("%s = %lx", name, addr);
3595 	vmcs_write(encoding, addr);
3596 	if (skip_beyond_mapped_ram &&
3597 	    addr > fwcfg_get_u64(FW_CFG_RAM_SIZE) - align &&
3598 	    addr < (1ul << cpuid_maxphyaddr()))
3599 		printf("Skipping physical address beyond mapped RAM\n");
3600 	else if (ignored || (IS_ALIGNED(addr, align) &&
3601 	    addr < (1ul << cpuid_maxphyaddr())))
3602 		test_vmx_valid_controls();
3603 	else
3604 		test_vmx_invalid_controls();
3605 	report_prefix_pop();
3606 }
3607 
3608 /*
3609  * Test interesting values for a VMCS address
3610  */
3611 static void test_vmcs_addr_values(const char *name,
3612 				  enum Encoding encoding,
3613 				  u64 align,
3614 				  bool ignored,
3615 				  bool skip_beyond_mapped_ram,
3616 				  u32 bit_start, u32 bit_end)
3617 {
3618 	unsigned i;
3619 	u64 orig_val = vmcs_read(encoding);
3620 
3621 	for (i = bit_start; i <= bit_end; i++)
3622 		test_vmcs_addr(name, encoding, align, ignored,
3623 			       skip_beyond_mapped_ram, 1ul << i);
3624 
3625 	test_vmcs_addr(name, encoding, align, ignored,
3626 		       skip_beyond_mapped_ram, PAGE_SIZE - 1);
3627 	test_vmcs_addr(name, encoding, align, ignored,
3628 		       skip_beyond_mapped_ram, PAGE_SIZE);
3629 	test_vmcs_addr(name, encoding, align, ignored,
3630 		       skip_beyond_mapped_ram,
3631 		      (1ul << cpuid_maxphyaddr()) - PAGE_SIZE);
3632 	test_vmcs_addr(name, encoding, align, ignored,
3633 		       skip_beyond_mapped_ram, -1ul);
3634 
3635 	vmcs_write(encoding, orig_val);
3636 }
3637 
3638 /*
3639  * Test a physical address reference in the VMCS, when the corresponding
3640  * feature is enabled and when the corresponding feature is disabled.
3641  */
3642 static void test_vmcs_addr_reference(u32 control_bit, enum Encoding field,
3643 				     const char *field_name,
3644 				     const char *control_name, u64 align,
3645 				     bool skip_beyond_mapped_ram,
3646 				     bool control_primary)
3647 {
3648 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
3649 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
3650 	u64 page_addr;
3651 
3652 	if (control_primary) {
3653 		if (!(ctrl_cpu_rev[0].clr & control_bit))
3654 			return;
3655 	} else {
3656 		if (!(ctrl_cpu_rev[1].clr & control_bit))
3657 			return;
3658 	}
3659 
3660 	page_addr = vmcs_read(field);
3661 
3662 	report_prefix_pushf("%s enabled", control_name);
3663 	if (control_primary) {
3664 		vmcs_write(CPU_EXEC_CTRL0, primary | control_bit);
3665 	} else {
3666 		vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY);
3667 		vmcs_write(CPU_EXEC_CTRL1, secondary | control_bit);
3668 	}
3669 
3670 	test_vmcs_addr_values(field_name, field, align, false,
3671 			      skip_beyond_mapped_ram, 0, 63);
3672 	report_prefix_pop();
3673 
3674 	report_prefix_pushf("%s disabled", control_name);
3675 	if (control_primary) {
3676 		vmcs_write(CPU_EXEC_CTRL0, primary & ~control_bit);
3677 	} else {
3678 		vmcs_write(CPU_EXEC_CTRL0, primary & ~CPU_SECONDARY);
3679 		vmcs_write(CPU_EXEC_CTRL1, secondary & ~control_bit);
3680 	}
3681 
3682 	test_vmcs_addr_values(field_name, field, align, true, false, 0, 63);
3683 	report_prefix_pop();
3684 
3685 	vmcs_write(field, page_addr);
3686 	vmcs_write(CPU_EXEC_CTRL0, primary);
3687 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3688 }
3689 
3690 /*
3691  * If the "use I/O bitmaps" VM-execution control is 1, bits 11:0 of
3692  * each I/O-bitmap address must be 0. Neither address should set any
3693  * bits beyond the processor's physical-address width.
3694  * [Intel SDM]
3695  */
3696 static void test_io_bitmaps(void)
3697 {
3698 	test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_A,
3699 				 "I/O bitmap A", "Use I/O bitmaps",
3700 				 PAGE_SIZE, false, true);
3701 	test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_B,
3702 				 "I/O bitmap B", "Use I/O bitmaps",
3703 				 PAGE_SIZE, false, true);
3704 }
3705 
3706 /*
3707  * If the "use MSR bitmaps" VM-execution control is 1, bits 11:0 of
3708  * the MSR-bitmap address must be 0. The address should not set any
3709  * bits beyond the processor's physical-address width.
3710  * [Intel SDM]
3711  */
3712 static void test_msr_bitmap(void)
3713 {
3714 	test_vmcs_addr_reference(CPU_MSR_BITMAP, MSR_BITMAP,
3715 				 "MSR bitmap", "Use MSR bitmaps",
3716 				 PAGE_SIZE, false, true);
3717 }
3718 
3719 /*
3720  * If the "use TPR shadow" VM-execution control is 1, the virtual-APIC
3721  * address must satisfy the following checks:
3722  * - Bits 11:0 of the address must be 0.
3723  * - The address should not set any bits beyond the processor's
3724  *   physical-address width.
3725  * [Intel SDM]
3726  */
3727 static void test_apic_virt_addr(void)
3728 {
3729 	/*
3730 	 * Ensure the processor will never use the virtual-APIC page, since
3731 	 * we will point it to invalid RAM.  Otherwise KVM is puzzled about
3732 	 * what we're trying to achieve and fails vmentry.
3733 	 */
3734 	u32 cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0);
3735 	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0 | CPU_CR8_LOAD | CPU_CR8_STORE);
3736 	test_vmcs_addr_reference(CPU_TPR_SHADOW, APIC_VIRT_ADDR,
3737 				 "virtual-APIC address", "Use TPR shadow",
3738 				 PAGE_SIZE, false, true);
3739 	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0);
3740 }
3741 
3742 /*
3743  * If the "virtualize APIC-accesses" VM-execution control is 1, the
3744  * APIC-access address must satisfy the following checks:
3745  *  - Bits 11:0 of the address must be 0.
3746  *  - The address should not set any bits beyond the processor's
3747  *    physical-address width.
3748  * [Intel SDM]
3749  */
3750 static void test_apic_access_addr(void)
3751 {
3752 	void *apic_access_page = alloc_page();
3753 
3754 	vmcs_write(APIC_ACCS_ADDR, virt_to_phys(apic_access_page));
3755 
3756 	test_vmcs_addr_reference(CPU_VIRT_APIC_ACCESSES, APIC_ACCS_ADDR,
3757 				 "APIC-access address",
3758 				 "virtualize APIC-accesses", PAGE_SIZE,
3759 				 true, false);
3760 }
3761 
3762 static bool set_bit_pattern(u8 mask, u32 *secondary)
3763 {
3764 	u8 i;
3765 	bool flag = false;
3766 	u32 test_bits[3] = {
3767 		CPU_VIRT_X2APIC,
3768 		CPU_APIC_REG_VIRT,
3769 		CPU_VINTD
3770 	};
3771 
3772         for (i = 0; i < ARRAY_SIZE(test_bits); i++) {
3773 		if ((mask & (1u << i)) &&
3774 		    (ctrl_cpu_rev[1].clr & test_bits[i])) {
3775 			*secondary |= test_bits[i];
3776 			flag = true;
3777 		}
3778 	}
3779 
3780 	return (flag);
3781 }
3782 
3783 /*
3784  * If the "use TPR shadow" VM-execution control is 0, the following
3785  * VM-execution controls must also be 0:
3786  * 	- virtualize x2APIC mode
3787  *	- APIC-register virtualization
3788  *	- virtual-interrupt delivery
3789  *    [Intel SDM]
3790  *
3791  * 2. If the "virtualize x2APIC mode" VM-execution control is 1, the
3792  *    "virtualize APIC accesses" VM-execution control must be 0.
3793  *    [Intel SDM]
3794  */
3795 static void test_apic_virtual_ctls(void)
3796 {
3797 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
3798 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
3799 	u32 primary = saved_primary;
3800 	u32 secondary = saved_secondary;
3801 	bool ctrl = false;
3802 	char str[10] = "disabled";
3803 	u8 i = 0, j;
3804 
3805 	/*
3806 	 * First test
3807 	 */
3808 	if (!((ctrl_cpu_rev[0].clr & (CPU_SECONDARY | CPU_TPR_SHADOW)) ==
3809 	    (CPU_SECONDARY | CPU_TPR_SHADOW)))
3810 		return;
3811 
3812 	primary |= CPU_SECONDARY;
3813 	primary &= ~CPU_TPR_SHADOW;
3814 	vmcs_write(CPU_EXEC_CTRL0, primary);
3815 
3816 	while (1) {
3817 		for (j = 1; j < 8; j++) {
3818 			secondary &= ~(CPU_VIRT_X2APIC | CPU_APIC_REG_VIRT | CPU_VINTD);
3819 			if (primary & CPU_TPR_SHADOW) {
3820 				ctrl = true;
3821 			} else {
3822 				if (! set_bit_pattern(j, &secondary))
3823 					ctrl = true;
3824 				else
3825 					ctrl = false;
3826 			}
3827 
3828 			vmcs_write(CPU_EXEC_CTRL1, secondary);
3829 			report_prefix_pushf("Use TPR shadow %s, virtualize x2APIC mode %s, APIC-register virtualization %s, virtual-interrupt delivery %s",
3830 				str, (secondary & CPU_VIRT_X2APIC) ? "enabled" : "disabled", (secondary & CPU_APIC_REG_VIRT) ? "enabled" : "disabled", (secondary & CPU_VINTD) ? "enabled" : "disabled");
3831 			if (ctrl)
3832 				test_vmx_valid_controls();
3833 			else
3834 				test_vmx_invalid_controls();
3835 			report_prefix_pop();
3836 		}
3837 
3838 		if (i == 1)
3839 			break;
3840 		i++;
3841 
3842 		primary |= CPU_TPR_SHADOW;
3843 		vmcs_write(CPU_EXEC_CTRL0, primary);
3844 		strcpy(str, "enabled");
3845 	}
3846 
3847 	/*
3848 	 * Second test
3849 	 */
3850 	u32 apic_virt_ctls = (CPU_VIRT_X2APIC | CPU_VIRT_APIC_ACCESSES);
3851 
3852 	primary = saved_primary;
3853 	secondary = saved_secondary;
3854 	if (!((ctrl_cpu_rev[1].clr & apic_virt_ctls) == apic_virt_ctls))
3855 		return;
3856 
3857 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY);
3858 	secondary &= ~CPU_VIRT_APIC_ACCESSES;
3859 	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VIRT_X2APIC);
3860 	report_prefix_pushf("Virtualize x2APIC mode disabled; virtualize APIC access disabled");
3861 	test_vmx_valid_controls();
3862 	report_prefix_pop();
3863 
3864 	vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VIRT_APIC_ACCESSES);
3865 	report_prefix_pushf("Virtualize x2APIC mode disabled; virtualize APIC access enabled");
3866 	test_vmx_valid_controls();
3867 	report_prefix_pop();
3868 
3869 	vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VIRT_X2APIC);
3870 	report_prefix_pushf("Virtualize x2APIC mode enabled; virtualize APIC access enabled");
3871 	test_vmx_invalid_controls();
3872 	report_prefix_pop();
3873 
3874 	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VIRT_APIC_ACCESSES);
3875 	report_prefix_pushf("Virtualize x2APIC mode enabled; virtualize APIC access disabled");
3876 	test_vmx_valid_controls();
3877 	report_prefix_pop();
3878 
3879 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
3880 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
3881 }
3882 
3883 /*
3884  * If the "virtual-interrupt delivery" VM-execution control is 1, the
3885  * "external-interrupt exiting" VM-execution control must be 1.
3886  * [Intel SDM]
3887  */
3888 static void test_virtual_intr_ctls(void)
3889 {
3890 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
3891 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
3892 	u32 saved_pin = vmcs_read(PIN_CONTROLS);
3893 	u32 primary = saved_primary;
3894 	u32 secondary = saved_secondary;
3895 	u32 pin = saved_pin;
3896 
3897 	if (!((ctrl_cpu_rev[1].clr & CPU_VINTD) &&
3898 	    (ctrl_pin_rev.clr & PIN_EXTINT)))
3899 		return;
3900 
3901 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW);
3902 	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VINTD);
3903 	vmcs_write(PIN_CONTROLS, pin & ~PIN_EXTINT);
3904 	report_prefix_pushf("Virtualize interrupt-delivery disabled; external-interrupt exiting disabled");
3905 	test_vmx_valid_controls();
3906 	report_prefix_pop();
3907 
3908 	vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VINTD);
3909 	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting disabled");
3910 	test_vmx_invalid_controls();
3911 	report_prefix_pop();
3912 
3913 	vmcs_write(PIN_CONTROLS, pin | PIN_EXTINT);
3914 	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting enabled");
3915 	test_vmx_valid_controls();
3916 	report_prefix_pop();
3917 
3918 	vmcs_write(PIN_CONTROLS, pin & ~PIN_EXTINT);
3919 	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting disabled");
3920 	test_vmx_invalid_controls();
3921 	report_prefix_pop();
3922 
3923 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
3924 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
3925 	vmcs_write(PIN_CONTROLS, saved_pin);
3926 }
3927 
3928 static void test_pi_desc_addr(u64 addr, bool ctrl)
3929 {
3930 	vmcs_write(POSTED_INTR_DESC_ADDR, addr);
3931 	report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-descriptor-address 0x%lx", addr);
3932 	if (ctrl)
3933 		test_vmx_valid_controls();
3934 	else
3935 		test_vmx_invalid_controls();
3936 	report_prefix_pop();
3937 }
3938 
3939 /*
3940  * If the “process posted interrupts†VM-execution control is 1, the
3941  * following must be true:
3942  *
3943  *	- The “virtual-interrupt delivery†VM-execution control is 1.
3944  *	- The “acknowledge interrupt on exit†VM-exit control is 1.
3945  *	- The posted-interrupt notification vector has a value in the
3946  *	- range 0–255 (bits 15:8 are all 0).
3947  *	- Bits 5:0 of the posted-interrupt descriptor address are all 0.
3948  *	- The posted-interrupt descriptor address does not set any bits
3949  *	  beyond the processor's physical-address width.
3950  * [Intel SDM]
3951  */
3952 static void test_posted_intr(void)
3953 {
3954 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
3955 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
3956 	u32 saved_pin = vmcs_read(PIN_CONTROLS);
3957 	u32 exit_ctl_saved = vmcs_read(EXI_CONTROLS);
3958 	u32 primary = saved_primary;
3959 	u32 secondary = saved_secondary;
3960 	u32 pin = saved_pin;
3961 	u32 exit_ctl = exit_ctl_saved;
3962 	u16 vec;
3963 	int i;
3964 
3965 	if (!((ctrl_pin_rev.clr & PIN_POST_INTR) &&
3966 	    (ctrl_cpu_rev[1].clr & CPU_VINTD) &&
3967 	    (ctrl_exit_rev.clr & EXI_INTA)))
3968 		return;
3969 
3970 	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW);
3971 
3972 	/*
3973 	 * Test virtual-interrupt-delivery and acknowledge-interrupt-on-exit
3974 	 */
3975 	pin |= PIN_POST_INTR;
3976 	vmcs_write(PIN_CONTROLS, pin);
3977 	secondary &= ~CPU_VINTD;
3978 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3979 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery disabled");
3980 	test_vmx_invalid_controls();
3981 	report_prefix_pop();
3982 
3983 	secondary |= CPU_VINTD;
3984 	vmcs_write(CPU_EXEC_CTRL1, secondary);
3985 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled");
3986 	test_vmx_invalid_controls();
3987 	report_prefix_pop();
3988 
3989 	exit_ctl &= ~EXI_INTA;
3990 	vmcs_write(EXI_CONTROLS, exit_ctl);
3991 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit disabled");
3992 	test_vmx_invalid_controls();
3993 	report_prefix_pop();
3994 
3995 	exit_ctl |= EXI_INTA;
3996 	vmcs_write(EXI_CONTROLS, exit_ctl);
3997 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit enabled");
3998 	test_vmx_valid_controls();
3999 	report_prefix_pop();
4000 
4001 	secondary &= ~CPU_VINTD;
4002 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4003 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery disabled; acknowledge-interrupt-on-exit enabled");
4004 	test_vmx_invalid_controls();
4005 	report_prefix_pop();
4006 
4007 	secondary |= CPU_VINTD;
4008 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4009 	report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit enabled");
4010 	test_vmx_valid_controls();
4011 	report_prefix_pop();
4012 
4013 	/*
4014 	 * Test posted-interrupt notification vector
4015 	 */
4016 	for (i = 0; i < 8; i++) {
4017 		vec = (1ul << i);
4018 		vmcs_write(PINV, vec);
4019 		report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec);
4020 		test_vmx_valid_controls();
4021 		report_prefix_pop();
4022 	}
4023 	for (i = 8; i < 16; i++) {
4024 		vec = (1ul << i);
4025 		vmcs_write(PINV, vec);
4026 		report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec);
4027 		test_vmx_invalid_controls();
4028 		report_prefix_pop();
4029 	}
4030 
4031 	vec &= ~(0xff << 8);
4032 	vmcs_write(PINV, vec);
4033 	report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec);
4034 	test_vmx_valid_controls();
4035 	report_prefix_pop();
4036 
4037 	/*
4038 	 * Test posted-interrupt descriptor addresss
4039 	 */
4040 	for (i = 0; i < 6; i++) {
4041 		test_pi_desc_addr(1ul << i, false);
4042 	}
4043 
4044 	test_pi_desc_addr(0xf0, false);
4045 	test_pi_desc_addr(0xff, false);
4046 	test_pi_desc_addr(0x0f, false);
4047 	test_pi_desc_addr(0x8000, true);
4048 	test_pi_desc_addr(0x00, true);
4049 	test_pi_desc_addr(0xc000, true);
4050 
4051 	test_vmcs_addr_values("process-posted interrupts",
4052 			       POSTED_INTR_DESC_ADDR, 64,
4053 			       false, false, 0, 63);
4054 
4055 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
4056 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
4057 	vmcs_write(PIN_CONTROLS, saved_pin);
4058 }
4059 
4060 static void test_apic_ctls(void)
4061 {
4062 	test_apic_virt_addr();
4063 	test_apic_access_addr();
4064 	test_apic_virtual_ctls();
4065 	test_virtual_intr_ctls();
4066 	test_posted_intr();
4067 }
4068 
4069 /*
4070  * If the “enable VPID†VM-execution control is 1, the value of the
4071  * of the VPID VM-execution control field must not be 0000H.
4072  * [Intel SDM]
4073  */
4074 static void test_vpid(void)
4075 {
4076 	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
4077 	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
4078 	u16 vpid = 0x0000;
4079 	int i;
4080 
4081 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4082 	    (ctrl_cpu_rev[1].clr & CPU_VPID))) {
4083 		printf("Secondary controls and/or VPID not supported\n");
4084 		return;
4085 	}
4086 
4087 	vmcs_write(CPU_EXEC_CTRL0, saved_primary | CPU_SECONDARY);
4088 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary & ~CPU_VPID);
4089 	vmcs_write(VPID, vpid);
4090 	report_prefix_pushf("VPID disabled; VPID value %x", vpid);
4091 	test_vmx_valid_controls();
4092 	report_prefix_pop();
4093 
4094 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary | CPU_VPID);
4095 	report_prefix_pushf("VPID enabled; VPID value %x", vpid);
4096 	test_vmx_invalid_controls();
4097 	report_prefix_pop();
4098 
4099 	for (i = 0; i < 16; i++) {
4100 		vpid = (short)1 << i;;
4101 		vmcs_write(VPID, vpid);
4102 		report_prefix_pushf("VPID enabled; VPID value %x", vpid);
4103 		test_vmx_valid_controls();
4104 		report_prefix_pop();
4105 	}
4106 
4107 	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
4108 	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
4109 }
4110 
4111 static void set_vtpr(unsigned vtpr)
4112 {
4113 	*(u32 *)phys_to_virt(vmcs_read(APIC_VIRT_ADDR) + APIC_TASKPRI) = vtpr;
4114 }
4115 
4116 static void try_tpr_threshold_and_vtpr(unsigned threshold, unsigned vtpr)
4117 {
4118 	bool valid = true;
4119 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
4120 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
4121 
4122 	if ((primary & CPU_TPR_SHADOW) &&
4123 	    (!(primary & CPU_SECONDARY) ||
4124 	     !(secondary & (CPU_VINTD | CPU_VIRT_APIC_ACCESSES))))
4125 		valid = (threshold & 0xf) <= ((vtpr >> 4) & 0xf);
4126 
4127 	set_vtpr(vtpr);
4128 	report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0x%x",
4129 	    threshold, (vtpr >> 4) & 0xf);
4130 	if (valid)
4131 		test_vmx_valid_controls();
4132 	else
4133 		test_vmx_invalid_controls();
4134 	report_prefix_pop();
4135 }
4136 
4137 static void test_invalid_event_injection(void)
4138 {
4139 	u32 ent_intr_info_save = vmcs_read(ENT_INTR_INFO);
4140 	u32 ent_intr_error_save = vmcs_read(ENT_INTR_ERROR);
4141 	u32 ent_inst_len_save = vmcs_read(ENT_INST_LEN);
4142 	u32 primary_save = vmcs_read(CPU_EXEC_CTRL0);
4143 	u32 secondary_save = vmcs_read(CPU_EXEC_CTRL1);
4144 	u64 guest_cr0_save = vmcs_read(GUEST_CR0);
4145 	u32 ent_intr_info_base = INTR_INFO_VALID_MASK;
4146 	u32 ent_intr_info, ent_intr_err, ent_intr_len;
4147 	u32 cnt;
4148 
4149 	/* Setup */
4150 	report_prefix_push("invalid event injection");
4151 	vmcs_write(ENT_INTR_ERROR, 0x00000000);
4152 	vmcs_write(ENT_INST_LEN, 0x00000001);
4153 
4154 	/* The field’s interruption type is not set to a reserved value. */
4155 	ent_intr_info = ent_intr_info_base | INTR_TYPE_RESERVED | DE_VECTOR;
4156 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4157 			    "RESERVED interruption type invalid [-]",
4158 			    ent_intr_info);
4159 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4160 	test_vmx_invalid_controls();
4161 	report_prefix_pop();
4162 
4163 	ent_intr_info = ent_intr_info_base | INTR_TYPE_EXT_INTR |
4164 			DE_VECTOR;
4165 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4166 			    "RESERVED interruption type invalid [+]",
4167 			    ent_intr_info);
4168 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4169 	test_vmx_valid_controls();
4170 	report_prefix_pop();
4171 
4172 	/* If the interruption type is other event, the vector is 0. */
4173 	ent_intr_info = ent_intr_info_base | INTR_TYPE_OTHER_EVENT | DB_VECTOR;
4174 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4175 			    "(OTHER EVENT && vector != 0) invalid [-]",
4176 			    ent_intr_info);
4177 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4178 	test_vmx_invalid_controls();
4179 	report_prefix_pop();
4180 
4181 	/* If the interruption type is NMI, the vector is 2 (negative case). */
4182 	ent_intr_info = ent_intr_info_base | INTR_TYPE_NMI_INTR | DE_VECTOR;
4183 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4184 			    "(NMI && vector != 2) invalid [-]", ent_intr_info);
4185 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4186 	test_vmx_invalid_controls();
4187 	report_prefix_pop();
4188 
4189 	/* If the interruption type is NMI, the vector is 2 (positive case). */
4190 	ent_intr_info = ent_intr_info_base | INTR_TYPE_NMI_INTR | NMI_VECTOR;
4191 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4192 			    "(NMI && vector == 2) valid [+]", ent_intr_info);
4193 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4194 	test_vmx_valid_controls();
4195 	report_prefix_pop();
4196 
4197 	/*
4198 	 * If the interruption type
4199 	 * is HW exception, the vector is at most 31.
4200 	 */
4201 	ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION | 0x20;
4202 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4203 			    "(HW exception && vector > 31) invalid [-]",
4204 			    ent_intr_info);
4205 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4206 	test_vmx_invalid_controls();
4207 	report_prefix_pop();
4208 
4209 	/*
4210 	 * deliver-error-code is 1 iff either
4211 	 * (a) the "unrestricted guest" VM-execution control is 0
4212 	 * (b) CR0.PE is set.
4213 	 */
4214 
4215 	/* Assert that unrestricted guest is disabled or unsupported */
4216 	assert(!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
4217 	       !(secondary_save & CPU_URG));
4218 
4219 	ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION |
4220 			GP_VECTOR;
4221 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4222 			    "error code <-> (!URG || prot_mode) [-]",
4223 			    ent_intr_info);
4224 	vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG);
4225 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4226 	test_vmx_invalid_controls();
4227 	report_prefix_pop();
4228 
4229 	ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK |
4230 			INTR_TYPE_HARD_EXCEPTION | GP_VECTOR;
4231 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4232 			    "error code <-> (!URG || prot_mode) [+]",
4233 			    ent_intr_info);
4234 	vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG);
4235 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4236 	test_vmx_valid_controls();
4237 	report_prefix_pop();
4238 
4239 	if (enable_unrestricted_guest())
4240 		goto skip_unrestricted_guest;
4241 
4242 	ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK |
4243 			INTR_TYPE_HARD_EXCEPTION | GP_VECTOR;
4244 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4245 			    "error code <-> (!URG || prot_mode) [-]",
4246 			    ent_intr_info);
4247 	vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG);
4248 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4249 	test_vmx_invalid_controls();
4250 	report_prefix_pop();
4251 
4252 	ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION |
4253 			GP_VECTOR;
4254 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4255 			    "error code <-> (!URG || prot_mode) [-]",
4256 			    ent_intr_info);
4257 	vmcs_write(GUEST_CR0, guest_cr0_save | X86_CR0_PE);
4258 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4259 	test_vmx_invalid_controls();
4260 	report_prefix_pop();
4261 
4262 	vmcs_write(CPU_EXEC_CTRL1, secondary_save);
4263 	vmcs_write(CPU_EXEC_CTRL0, primary_save);
4264 
4265 skip_unrestricted_guest:
4266 	vmcs_write(GUEST_CR0, guest_cr0_save);
4267 
4268 	/* deliver-error-code is 1 iff the interruption type is HW exception */
4269 	report_prefix_push("error code <-> HW exception");
4270 	for (cnt = 0; cnt < 8; cnt++) {
4271 		u32 exception_type_mask = cnt << 8;
4272 		u32 deliver_error_code_mask =
4273 			exception_type_mask != INTR_TYPE_HARD_EXCEPTION ?
4274 			INTR_INFO_DELIVER_CODE_MASK : 0;
4275 
4276 		ent_intr_info = ent_intr_info_base | deliver_error_code_mask |
4277 				exception_type_mask | GP_VECTOR;
4278 		report_prefix_pushf("VM-entry intr info=0x%x [-]",
4279 				    ent_intr_info);
4280 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4281 		test_vmx_invalid_controls();
4282 		report_prefix_pop();
4283 	}
4284 	report_prefix_pop();
4285 
4286 	/*
4287 	 * deliver-error-code is 1 iff the the vector
4288 	 * indicates an exception that would normally deliver an error code
4289 	 */
4290 	report_prefix_push("error code <-> vector delivers error code");
4291 	for (cnt = 0; cnt < 32; cnt++) {
4292 		bool has_error_code = false;
4293 		u32 deliver_error_code_mask;
4294 
4295 		switch (cnt) {
4296 		case DF_VECTOR:
4297 		case TS_VECTOR:
4298 		case NP_VECTOR:
4299 		case SS_VECTOR:
4300 		case GP_VECTOR:
4301 		case PF_VECTOR:
4302 		case AC_VECTOR:
4303 			has_error_code = true;
4304 		}
4305 
4306 		/* Negative case */
4307 		deliver_error_code_mask = has_error_code ?
4308 						0 :
4309 						INTR_INFO_DELIVER_CODE_MASK;
4310 		ent_intr_info = ent_intr_info_base | deliver_error_code_mask |
4311 				INTR_TYPE_HARD_EXCEPTION | cnt;
4312 		report_prefix_pushf("VM-entry intr info=0x%x [-]",
4313 				    ent_intr_info);
4314 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4315 		test_vmx_invalid_controls();
4316 		report_prefix_pop();
4317 
4318 		/* Positive case */
4319 		deliver_error_code_mask = has_error_code ?
4320 						INTR_INFO_DELIVER_CODE_MASK :
4321 						0;
4322 		ent_intr_info = ent_intr_info_base | deliver_error_code_mask |
4323 				INTR_TYPE_HARD_EXCEPTION | cnt;
4324 		report_prefix_pushf("VM-entry intr info=0x%x [+]",
4325 				    ent_intr_info);
4326 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4327 		test_vmx_valid_controls();
4328 		report_prefix_pop();
4329 	}
4330 	report_prefix_pop();
4331 
4332 	/* Reserved bits in the field (30:12) are 0. */
4333 	report_prefix_push("reserved bits clear");
4334 	for (cnt = 12; cnt <= 30; cnt++) {
4335 		ent_intr_info = ent_intr_info_base |
4336 				INTR_INFO_DELIVER_CODE_MASK |
4337 				INTR_TYPE_HARD_EXCEPTION | GP_VECTOR |
4338 				(1U << cnt);
4339 		report_prefix_pushf("VM-entry intr info=0x%x [-]",
4340 				    ent_intr_info);
4341 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4342 		test_vmx_invalid_controls();
4343 		report_prefix_pop();
4344 	}
4345 	report_prefix_pop();
4346 
4347 	/*
4348 	 * If deliver-error-code is 1
4349 	 * bits 31:16 of the VM-entry exception error-code field are 0.
4350 	 */
4351 	ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK |
4352 			INTR_TYPE_HARD_EXCEPTION | GP_VECTOR;
4353 	report_prefix_pushf("%s, VM-entry intr info=0x%x",
4354 			    "VM-entry exception error code[31:16] clear",
4355 			    ent_intr_info);
4356 	vmcs_write(ENT_INTR_INFO, ent_intr_info);
4357 	for (cnt = 16; cnt <= 31; cnt++) {
4358 		ent_intr_err = 1U << cnt;
4359 		report_prefix_pushf("VM-entry intr error=0x%x [-]",
4360 				    ent_intr_err);
4361 		vmcs_write(ENT_INTR_ERROR, ent_intr_err);
4362 		test_vmx_invalid_controls();
4363 		report_prefix_pop();
4364 	}
4365 	vmcs_write(ENT_INTR_ERROR, 0x00000000);
4366 	report_prefix_pop();
4367 
4368 	/*
4369 	 * If the interruption type is software interrupt, software exception,
4370 	 * or privileged software exception, the VM-entry instruction-length
4371 	 * field is in the range 0–15.
4372 	 */
4373 
4374 	for (cnt = 0; cnt < 3; cnt++) {
4375 		switch (cnt) {
4376 		case 0:
4377 			ent_intr_info = ent_intr_info_base |
4378 					INTR_TYPE_SOFT_INTR;
4379 			break;
4380 		case 1:
4381 			ent_intr_info = ent_intr_info_base |
4382 					INTR_TYPE_SOFT_EXCEPTION;
4383 			break;
4384 		case 2:
4385 			ent_intr_info = ent_intr_info_base |
4386 					INTR_TYPE_PRIV_SW_EXCEPTION;
4387 			break;
4388 		}
4389 		report_prefix_pushf("%s, VM-entry intr info=0x%x",
4390 				    "VM-entry instruction-length check",
4391 				    ent_intr_info);
4392 		vmcs_write(ENT_INTR_INFO, ent_intr_info);
4393 
4394 		/* Instruction length set to -1 (0xFFFFFFFF) should fail */
4395 		ent_intr_len = -1;
4396 		report_prefix_pushf("VM-entry intr length = 0x%x [-]",
4397 				    ent_intr_len);
4398 		vmcs_write(ENT_INST_LEN, ent_intr_len);
4399 		test_vmx_invalid_controls();
4400 		report_prefix_pop();
4401 
4402 		/* Instruction length set to 16 should fail */
4403 		ent_intr_len = 0x00000010;
4404 		report_prefix_pushf("VM-entry intr length = 0x%x [-]",
4405 				    ent_intr_len);
4406 		vmcs_write(ENT_INST_LEN, 0x00000010);
4407 		test_vmx_invalid_controls();
4408 		report_prefix_pop();
4409 
4410 		report_prefix_pop();
4411 	}
4412 
4413 	/* Cleanup */
4414 	vmcs_write(ENT_INTR_INFO, ent_intr_info_save);
4415 	vmcs_write(ENT_INTR_ERROR, ent_intr_error_save);
4416 	vmcs_write(ENT_INST_LEN, ent_inst_len_save);
4417 	vmcs_write(CPU_EXEC_CTRL0, primary_save);
4418 	vmcs_write(CPU_EXEC_CTRL1, secondary_save);
4419 	vmcs_write(GUEST_CR0, guest_cr0_save);
4420 	report_prefix_pop();
4421 }
4422 
4423 /*
4424  * Test interesting vTPR values for a given TPR threshold.
4425  */
4426 static void test_vtpr_values(unsigned threshold)
4427 {
4428 	try_tpr_threshold_and_vtpr(threshold, (threshold - 1) << 4);
4429 	try_tpr_threshold_and_vtpr(threshold, threshold << 4);
4430 	try_tpr_threshold_and_vtpr(threshold, (threshold + 1) << 4);
4431 }
4432 
4433 static void try_tpr_threshold(unsigned threshold)
4434 {
4435 	bool valid = true;
4436 
4437 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
4438 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
4439 
4440 	if ((primary & CPU_TPR_SHADOW) && !((primary & CPU_SECONDARY) &&
4441 	    (secondary & CPU_VINTD)))
4442 		valid = !(threshold >> 4);
4443 
4444 	set_vtpr(-1);
4445 	vmcs_write(TPR_THRESHOLD, threshold);
4446 	report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0xf", threshold);
4447 	if (valid)
4448 		test_vmx_valid_controls();
4449 	else
4450 		test_vmx_invalid_controls();
4451 	report_prefix_pop();
4452 
4453 	if (valid)
4454 		test_vtpr_values(threshold);
4455 }
4456 
4457 /*
4458  * Test interesting TPR threshold values.
4459  */
4460 static void test_tpr_threshold_values(void)
4461 {
4462 	unsigned i;
4463 
4464 	for (i = 0; i < 0x10; i++)
4465 		try_tpr_threshold(i);
4466 	for (i = 4; i < 32; i++)
4467 		try_tpr_threshold(1u << i);
4468 	try_tpr_threshold(-1u);
4469 	try_tpr_threshold(0x7fffffff);
4470 }
4471 
4472 /*
4473  * This test covers the following two VM entry checks:
4474  *
4475  *      i) If the "use TPR shadow" VM-execution control is 1 and the
4476  *         "virtual-interrupt delivery" VM-execution control is 0, bits
4477  *         31:4 of the TPR threshold VM-execution control field must
4478 	   be 0.
4479  *         [Intel SDM]
4480  *
4481  *      ii) If the "use TPR shadow" VM-execution control is 1, the
4482  *          "virtual-interrupt delivery" VM-execution control is 0
4483  *          and the "virtualize APIC accesses" VM-execution control
4484  *          is 0, the value of bits 3:0 of the TPR threshold VM-execution
4485  *          control field must not be greater than the value of bits
4486  *          7:4 of VTPR.
4487  *          [Intel SDM]
4488  */
4489 static void test_tpr_threshold(void)
4490 {
4491 	u32 primary = vmcs_read(CPU_EXEC_CTRL0);
4492 	u64 apic_virt_addr = vmcs_read(APIC_VIRT_ADDR);
4493 	u64 threshold = vmcs_read(TPR_THRESHOLD);
4494 	void *virtual_apic_page;
4495 
4496 	if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW))
4497 		return;
4498 
4499 	virtual_apic_page = alloc_page();
4500 	memset(virtual_apic_page, 0xff, PAGE_SIZE);
4501 	vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page));
4502 
4503 	vmcs_write(CPU_EXEC_CTRL0, primary & ~(CPU_TPR_SHADOW | CPU_SECONDARY));
4504 	report_prefix_pushf("Use TPR shadow disabled, secondary controls disabled");
4505 	test_tpr_threshold_values();
4506 	report_prefix_pop();
4507 	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_TPR_SHADOW);
4508 	report_prefix_pushf("Use TPR shadow enabled, secondary controls disabled");
4509 	test_tpr_threshold_values();
4510 	report_prefix_pop();
4511 
4512 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4513 	    (ctrl_cpu_rev[1].clr & (CPU_VINTD  | CPU_VIRT_APIC_ACCESSES))))
4514 		goto out;
4515 	u32 secondary = vmcs_read(CPU_EXEC_CTRL1);
4516 
4517 	if (ctrl_cpu_rev[1].clr & CPU_VINTD) {
4518 		vmcs_write(CPU_EXEC_CTRL1, CPU_VINTD);
4519 		report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled");
4520 		test_tpr_threshold_values();
4521 		report_prefix_pop();
4522 
4523 		vmcs_write(CPU_EXEC_CTRL0,
4524 			   vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
4525 		report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled");
4526 		test_tpr_threshold_values();
4527 		report_prefix_pop();
4528 	}
4529 
4530 	if (ctrl_cpu_rev[1].clr & CPU_VIRT_APIC_ACCESSES) {
4531 		vmcs_write(CPU_EXEC_CTRL0,
4532 			   vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY);
4533 		vmcs_write(CPU_EXEC_CTRL1, CPU_VIRT_APIC_ACCESSES);
4534 		report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4535 		test_tpr_threshold_values();
4536 		report_prefix_pop();
4537 
4538 		vmcs_write(CPU_EXEC_CTRL0,
4539 			   vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
4540 		report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4541 		test_tpr_threshold_values();
4542 		report_prefix_pop();
4543 	}
4544 
4545 	if ((ctrl_cpu_rev[1].clr &
4546 	     (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) ==
4547 	    (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) {
4548 		vmcs_write(CPU_EXEC_CTRL0,
4549 			   vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY);
4550 		vmcs_write(CPU_EXEC_CTRL1,
4551 			   CPU_VINTD | CPU_VIRT_APIC_ACCESSES);
4552 		report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4553 		test_tpr_threshold_values();
4554 		report_prefix_pop();
4555 
4556 		vmcs_write(CPU_EXEC_CTRL0,
4557 			   vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY);
4558 		report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled");
4559 		test_tpr_threshold_values();
4560 		report_prefix_pop();
4561 	}
4562 
4563 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4564 out:
4565 	vmcs_write(TPR_THRESHOLD, threshold);
4566 	vmcs_write(APIC_VIRT_ADDR, apic_virt_addr);
4567 	vmcs_write(CPU_EXEC_CTRL0, primary);
4568 }
4569 
4570 /*
4571  * This test verifies the following two vmentry checks:
4572  *
4573  *  If the "NMI exiting" VM-execution control is 0, "Virtual NMIs"
4574  *  VM-execution control must be 0.
4575  *  [Intel SDM]
4576  *
4577  *  If the “virtual NMIs” VM-execution control is 0, the “NMI-window
4578  *  exiting” VM-execution control must be 0.
4579  *  [Intel SDM]
4580  */
4581 static void test_nmi_ctrls(void)
4582 {
4583 	u32 pin_ctrls, cpu_ctrls0, test_pin_ctrls, test_cpu_ctrls0;
4584 
4585 	if ((ctrl_pin_rev.clr & (PIN_NMI | PIN_VIRT_NMI)) !=
4586 	    (PIN_NMI | PIN_VIRT_NMI)) {
4587 		printf("NMI exiting and Virtual NMIs are not supported !\n");
4588 		return;
4589 	}
4590 
4591 	/* Save the controls so that we can restore them after our tests */
4592 	pin_ctrls = vmcs_read(PIN_CONTROLS);
4593 	cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0);
4594 
4595 	test_pin_ctrls = pin_ctrls & ~(PIN_NMI | PIN_VIRT_NMI);
4596 	test_cpu_ctrls0 = cpu_ctrls0 & ~CPU_NMI_WINDOW;
4597 
4598 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
4599 	report_prefix_pushf("NMI-exiting disabled, virtual-NMIs disabled");
4600 	test_vmx_valid_controls();
4601 	report_prefix_pop();
4602 
4603 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_VIRT_NMI);
4604 	report_prefix_pushf("NMI-exiting disabled, virtual-NMIs enabled");
4605 	test_vmx_invalid_controls();
4606 	report_prefix_pop();
4607 
4608 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI));
4609 	report_prefix_pushf("NMI-exiting enabled, virtual-NMIs enabled");
4610 	test_vmx_valid_controls();
4611 	report_prefix_pop();
4612 
4613 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_NMI);
4614 	report_prefix_pushf("NMI-exiting enabled, virtual-NMIs disabled");
4615 	test_vmx_valid_controls();
4616 	report_prefix_pop();
4617 
4618 	if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) {
4619 		report_info("NMI-window exiting is not supported, skipping...");
4620 		goto done;
4621 	}
4622 
4623 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
4624 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW);
4625 	report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting enabled");
4626 	test_vmx_invalid_controls();
4627 	report_prefix_pop();
4628 
4629 	vmcs_write(PIN_CONTROLS, test_pin_ctrls);
4630 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0);
4631 	report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting disabled");
4632 	test_vmx_valid_controls();
4633 	report_prefix_pop();
4634 
4635 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI));
4636 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW);
4637 	report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting enabled");
4638 	test_vmx_valid_controls();
4639 	report_prefix_pop();
4640 
4641 	vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI));
4642 	vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0);
4643 	report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting disabled");
4644 	test_vmx_valid_controls();
4645 	report_prefix_pop();
4646 
4647 	/* Restore the controls to their original values */
4648 	vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0);
4649 done:
4650 	vmcs_write(PIN_CONTROLS, pin_ctrls);
4651 }
4652 
4653 static void test_eptp_ad_bit(u64 eptp, bool ctrl)
4654 {
4655 	vmcs_write(EPTP, eptp);
4656 	report_prefix_pushf("Enable-EPT enabled; EPT accessed and dirty flag %s",
4657 	    (eptp & EPTP_AD_FLAG) ? "1": "0");
4658 	if (ctrl)
4659 		test_vmx_valid_controls();
4660 	else
4661 		test_vmx_invalid_controls();
4662 	report_prefix_pop();
4663 
4664 }
4665 
4666 /*
4667  * 1. If the "enable EPT" VM-execution control is 1, the "EPTP VM-execution"
4668  *    control field must satisfy the following checks:
4669  *
4670  *     - The EPT memory type (bits 2:0) must be a value supported by the
4671  *	 processor as indicated in the IA32_VMX_EPT_VPID_CAP MSR.
4672  *     - Bits 5:3 (1 less than the EPT page-walk length) must be 3,
4673  *	 indicating an EPT page-walk length of 4.
4674  *     - Bit 6 (enable bit for accessed and dirty flags for EPT) must be
4675  *	 0 if bit 21 of the IA32_VMX_EPT_VPID_CAP MSR is read as 0,
4676  *	 indicating that the processor does not support accessed and dirty
4677  *	 dirty flags for EPT.
4678  *     - Reserved bits 11:7 and 63:N (where N is the processor's
4679  *	 physical-address width) must all be 0.
4680  *
4681  * 2. If the "unrestricted guest" VM-execution control is 1, the
4682  *    "enable EPT" VM-execution control must also be 1.
4683  */
4684 static void test_ept_eptp(void)
4685 {
4686 	u32 primary_saved = vmcs_read(CPU_EXEC_CTRL0);
4687 	u32 secondary_saved = vmcs_read(CPU_EXEC_CTRL1);
4688 	u64 eptp_saved = vmcs_read(EPTP);
4689 	u32 primary = primary_saved;
4690 	u32 secondary = secondary_saved;
4691 	u64 msr, eptp = eptp_saved;
4692 	bool un_cache = false;
4693 	bool wr_bk = false;
4694 	bool ctrl;
4695 	u32 i, maxphysaddr;
4696 	u64 j, resv_bits_mask = 0;
4697 
4698 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4699 	    (ctrl_cpu_rev[1].clr & CPU_EPT))) {
4700 		printf("\"CPU secondary\" and/or \"enable EPT\" execution controls are not supported !\n");
4701 		return;
4702 	}
4703 
4704 	/*
4705 	 * Memory type (bits 2:0)
4706 	 */
4707 	msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
4708 	if (msr & EPT_CAP_UC)
4709 		un_cache = true;
4710 	if (msr & EPT_CAP_WB)
4711 		wr_bk = true;
4712 
4713 	primary |= CPU_SECONDARY;
4714 	vmcs_write(CPU_EXEC_CTRL0, primary);
4715 	secondary |= CPU_EPT;
4716 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4717 	eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) |
4718 	    (3ul << EPTP_PG_WALK_LEN_SHIFT);
4719 	vmcs_write(EPTP, eptp);
4720 
4721 	for (i = 0; i < 8; i++) {
4722 		if (i == 0) {
4723 			if (un_cache) {
4724 				report_info("EPT paging structure memory-type is Un-cacheable\n");
4725 				ctrl = true;
4726 			} else {
4727 				ctrl = false;
4728 			}
4729 		} else if (i == 6) {
4730 			if (wr_bk) {
4731 				report_info("EPT paging structure memory-type is Write-back\n");
4732 				ctrl = true;
4733 			} else {
4734 				ctrl = false;
4735 			}
4736 		} else {
4737 			ctrl = false;
4738 		}
4739 
4740 		eptp = (eptp & ~EPT_MEM_TYPE_MASK) | i;
4741 		vmcs_write(EPTP, eptp);
4742 		report_prefix_pushf("Enable-EPT enabled; EPT memory type %lu",
4743 		    eptp & EPT_MEM_TYPE_MASK);
4744 		if (ctrl)
4745 			test_vmx_valid_controls();
4746 		else
4747 			test_vmx_invalid_controls();
4748 		report_prefix_pop();
4749 	}
4750 
4751 	eptp = (eptp & ~EPT_MEM_TYPE_MASK) | 6ul;
4752 
4753 	/*
4754 	 * Page walk length (bits 5:3)
4755 	 */
4756 	for (i = 0; i < 8; i++) {
4757 		eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) |
4758 		    (i << EPTP_PG_WALK_LEN_SHIFT);
4759 		if (i == 3)
4760 			ctrl = true;
4761 		else
4762 			ctrl = false;
4763 
4764 		vmcs_write(EPTP, eptp);
4765 		report_prefix_pushf("Enable-EPT enabled; EPT page walk length %lu",
4766 		    eptp & EPTP_PG_WALK_LEN_MASK);
4767 		if (ctrl)
4768 			test_vmx_valid_controls();
4769 		else
4770 			test_vmx_invalid_controls();
4771 		report_prefix_pop();
4772 	}
4773 
4774 	eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) |
4775 	    3ul << EPTP_PG_WALK_LEN_SHIFT;
4776 
4777 	/*
4778 	 * Accessed and dirty flag (bit 6)
4779 	 */
4780 	if (msr & EPT_CAP_AD_FLAG) {
4781 		report_info("Processor supports accessed and dirty flag");
4782 		eptp &= ~EPTP_AD_FLAG;
4783 		test_eptp_ad_bit(eptp, true);
4784 
4785 		eptp |= EPTP_AD_FLAG;
4786 		test_eptp_ad_bit(eptp, true);
4787 	} else {
4788 		report_info("Processor does not supports accessed and dirty flag");
4789 		eptp &= ~EPTP_AD_FLAG;
4790 		test_eptp_ad_bit(eptp, true);
4791 
4792 		eptp |= EPTP_AD_FLAG;
4793 		test_eptp_ad_bit(eptp, false);
4794 	}
4795 
4796 	/*
4797 	 * Reserved bits [11:7] and [63:N]
4798 	 */
4799 	for (i = 0; i < 32; i++) {
4800 		eptp = (eptp &
4801 		    ~(EPTP_RESERV_BITS_MASK << EPTP_RESERV_BITS_SHIFT)) |
4802 		    (i << EPTP_RESERV_BITS_SHIFT);
4803 		vmcs_write(EPTP, eptp);
4804 		report_prefix_pushf("Enable-EPT enabled; reserved bits [11:7] %lu",
4805 		    (eptp >> EPTP_RESERV_BITS_SHIFT) &
4806 		    EPTP_RESERV_BITS_MASK);
4807 		if (i == 0)
4808 			test_vmx_valid_controls();
4809 		else
4810 			test_vmx_invalid_controls();
4811 		report_prefix_pop();
4812 	}
4813 
4814 	eptp = (eptp & ~(EPTP_RESERV_BITS_MASK << EPTP_RESERV_BITS_SHIFT));
4815 
4816 	maxphysaddr = cpuid_maxphyaddr();
4817 	for (i = 0; i < (63 - maxphysaddr + 1); i++) {
4818 		resv_bits_mask |= 1ul << i;
4819 	}
4820 
4821 	for (j = maxphysaddr - 1; j <= 63; j++) {
4822 		eptp = (eptp & ~(resv_bits_mask << maxphysaddr)) |
4823 		    (j < maxphysaddr ? 0 : 1ul << j);
4824 		vmcs_write(EPTP, eptp);
4825 		report_prefix_pushf("Enable-EPT enabled; reserved bits [63:N] %lu",
4826 		    (eptp >> maxphysaddr) & resv_bits_mask);
4827 		if (j < maxphysaddr)
4828 			test_vmx_valid_controls();
4829 		else
4830 			test_vmx_invalid_controls();
4831 		report_prefix_pop();
4832 	}
4833 
4834 	secondary &= ~(CPU_EPT | CPU_URG);
4835 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4836 	report_prefix_pushf("Enable-EPT disabled, unrestricted-guest disabled");
4837 	test_vmx_valid_controls();
4838 	report_prefix_pop();
4839 
4840 	if (!(ctrl_cpu_rev[1].clr & CPU_URG))
4841 		goto skip_unrestricted_guest;
4842 
4843 	secondary |= CPU_URG;
4844 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4845 	report_prefix_pushf("Enable-EPT disabled, unrestricted-guest enabled");
4846 	test_vmx_invalid_controls();
4847 	report_prefix_pop();
4848 
4849 	secondary |= CPU_EPT;
4850 	setup_dummy_ept();
4851 	report_prefix_pushf("Enable-EPT enabled, unrestricted-guest enabled");
4852 	test_vmx_valid_controls();
4853 	report_prefix_pop();
4854 
4855 skip_unrestricted_guest:
4856 	secondary &= ~CPU_URG;
4857 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4858 	report_prefix_pushf("Enable-EPT enabled, unrestricted-guest disabled");
4859 	test_vmx_valid_controls();
4860 	report_prefix_pop();
4861 
4862 	vmcs_write(CPU_EXEC_CTRL0, primary_saved);
4863 	vmcs_write(CPU_EXEC_CTRL1, secondary_saved);
4864 	vmcs_write(EPTP, eptp_saved);
4865 }
4866 
4867 /*
4868  * If the 'enable PML' VM-execution control is 1, the 'enable EPT'
4869  * VM-execution control must also be 1. In addition, the PML address
4870  * must satisfy the following checks:
4871  *
4872  *    * Bits 11:0 of the address must be 0.
4873  *    * The address should not set any bits beyond the processor's
4874  *	physical-address width.
4875  *
4876  *  [Intel SDM]
4877  */
4878 static void test_pml(void)
4879 {
4880 	u32 primary_saved = vmcs_read(CPU_EXEC_CTRL0);
4881 	u32 secondary_saved = vmcs_read(CPU_EXEC_CTRL1);
4882 	u32 primary = primary_saved;
4883 	u32 secondary = secondary_saved;
4884 
4885 	if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
4886 	    (ctrl_cpu_rev[1].clr & CPU_EPT) && (ctrl_cpu_rev[1].clr & CPU_PML))) {
4887 		printf("\"Secondary execution\" control or \"enable EPT\" control or \"enable PML\" control is not supported !\n");
4888 		return;
4889 	}
4890 
4891 	primary |= CPU_SECONDARY;
4892 	vmcs_write(CPU_EXEC_CTRL0, primary);
4893 	secondary &= ~(CPU_PML | CPU_EPT);
4894 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4895 	report_prefix_pushf("enable-PML disabled, enable-EPT disabled");
4896 	test_vmx_valid_controls();
4897 	report_prefix_pop();
4898 
4899 	secondary |= CPU_PML;
4900 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4901 	report_prefix_pushf("enable-PML enabled, enable-EPT disabled");
4902 	test_vmx_invalid_controls();
4903 	report_prefix_pop();
4904 
4905 	secondary |= CPU_EPT;
4906 	setup_dummy_ept();
4907 	report_prefix_pushf("enable-PML enabled, enable-EPT enabled");
4908 	test_vmx_valid_controls();
4909 	report_prefix_pop();
4910 
4911 	secondary &= ~CPU_PML;
4912 	vmcs_write(CPU_EXEC_CTRL1, secondary);
4913 	report_prefix_pushf("enable-PML disabled, enable EPT enabled");
4914 	test_vmx_valid_controls();
4915 	report_prefix_pop();
4916 
4917 	test_vmcs_addr_reference(CPU_PML, PMLADDR, "PML address", "PML",
4918 				 PAGE_SIZE, false, false);
4919 
4920 	vmcs_write(CPU_EXEC_CTRL0, primary_saved);
4921 	vmcs_write(CPU_EXEC_CTRL1, secondary_saved);
4922 }
4923 
4924  /*
4925  * If the "activate VMX-preemption timer" VM-execution control is 0, the
4926  * the "save VMX-preemption timer value" VM-exit control must also be 0.
4927  *
4928  *  [Intel SDM]
4929  */
4930 static void test_vmx_preemption_timer(void)
4931 {
4932 	u32 saved_pin = vmcs_read(PIN_CONTROLS);
4933 	u32 saved_exit = vmcs_read(EXI_CONTROLS);
4934 	u32 pin = saved_pin;
4935 	u32 exit = saved_exit;
4936 
4937 	if (!((ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) ||
4938 	    (ctrl_pin_rev.clr & PIN_PREEMPT))) {
4939 		printf("\"Save-VMX-preemption-timer\" control and/or \"Enable-VMX-preemption-timer\" control is not supported\n");
4940 		return;
4941 	}
4942 
4943 	pin |= PIN_PREEMPT;
4944 	vmcs_write(PIN_CONTROLS, pin);
4945 	exit &= ~EXI_SAVE_PREEMPT;
4946 	vmcs_write(EXI_CONTROLS, exit);
4947 	report_prefix_pushf("enable-VMX-preemption-timer enabled, save-VMX-preemption-timer disabled");
4948 	test_vmx_valid_controls();
4949 	report_prefix_pop();
4950 
4951 	exit |= EXI_SAVE_PREEMPT;
4952 	vmcs_write(EXI_CONTROLS, exit);
4953 	report_prefix_pushf("enable-VMX-preemption-timer enabled, save-VMX-preemption-timer enabled");
4954 	test_vmx_valid_controls();
4955 	report_prefix_pop();
4956 
4957 	pin &= ~PIN_PREEMPT;
4958 	vmcs_write(PIN_CONTROLS, pin);
4959 	report_prefix_pushf("enable-VMX-preemption-timer disabled, save-VMX-preemption-timer enabled");
4960 	test_vmx_invalid_controls();
4961 	report_prefix_pop();
4962 
4963 	exit &= ~EXI_SAVE_PREEMPT;
4964 	vmcs_write(EXI_CONTROLS, exit);
4965 	report_prefix_pushf("enable-VMX-preemption-timer disabled, save-VMX-preemption-timer disabled");
4966 	test_vmx_valid_controls();
4967 	report_prefix_pop();
4968 
4969 	vmcs_write(PIN_CONTROLS, saved_pin);
4970 	vmcs_write(EXI_CONTROLS, saved_exit);
4971 }
4972 
4973 extern unsigned char test_mtf1;
4974 extern unsigned char test_mtf2;
4975 extern unsigned char test_mtf3;
4976 
4977 static void test_mtf_guest(void)
4978 {
4979 	asm ("vmcall;\n\t"
4980 	     "out %al, $0x80;\n\t"
4981 	     "test_mtf1:\n\t"
4982 	     "vmcall;\n\t"
4983 	     "out %al, $0x80;\n\t"
4984 	     "test_mtf2:\n\t"
4985 	     /*
4986 	      * Prepare for the 'MOV CR3' test. Attempt to induce a
4987 	      * general-protection fault by moving a non-canonical address into
4988 	      * CR3. The 'MOV CR3' instruction does not take an imm64 operand,
4989 	      * so we must MOV the desired value into a register first.
4990 	      *
4991 	      * MOV RAX is done before the VMCALL such that MTF is only enabled
4992 	      * for the instruction under test.
4993 	      */
4994 	     "mov $0x8000000000000000, %rax;\n\t"
4995 	     "vmcall;\n\t"
4996 	     "mov %rax, %cr3;\n\t"
4997 	     "test_mtf3:\n\t"
4998 	     "vmcall;\n\t"
4999 	     /*
5000 	      * ICEBP/INT1 instruction. Though the instruction is now
5001 	      * documented, don't rely on assemblers enumerating the
5002 	      * instruction. Resort to hand assembly.
5003 	      */
5004 	     ".byte 0xf1;\n\t");
5005 }
5006 
5007 static void test_mtf_gp_handler(struct ex_regs *regs)
5008 {
5009 	regs->rip = (unsigned long) &test_mtf3;
5010 }
5011 
5012 static void test_mtf_db_handler(struct ex_regs *regs)
5013 {
5014 }
5015 
5016 static void enable_mtf(void)
5017 {
5018 	u32 ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
5019 
5020 	vmcs_write(CPU_EXEC_CTRL0, ctrl0 | CPU_MTF);
5021 }
5022 
5023 static void disable_mtf(void)
5024 {
5025 	u32 ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
5026 
5027 	vmcs_write(CPU_EXEC_CTRL0, ctrl0 & ~CPU_MTF);
5028 }
5029 
5030 static void enable_tf(void)
5031 {
5032 	unsigned long rflags = vmcs_read(GUEST_RFLAGS);
5033 
5034 	vmcs_write(GUEST_RFLAGS, rflags | X86_EFLAGS_TF);
5035 }
5036 
5037 static void disable_tf(void)
5038 {
5039 	unsigned long rflags = vmcs_read(GUEST_RFLAGS);
5040 
5041 	vmcs_write(GUEST_RFLAGS, rflags & ~X86_EFLAGS_TF);
5042 }
5043 
5044 static void report_mtf(const char *insn_name, unsigned long exp_rip)
5045 {
5046 	unsigned long rip = vmcs_read(GUEST_RIP);
5047 
5048 	assert_exit_reason(VMX_MTF);
5049 	report(rip == exp_rip, "MTF VM-exit after %s instruction. RIP: 0x%lx (expected 0x%lx)",
5050 	       insn_name, rip, exp_rip);
5051 }
5052 
5053 static void vmx_mtf_test(void)
5054 {
5055 	unsigned long pending_dbg;
5056 	handler old_gp, old_db;
5057 
5058 	if (!(ctrl_cpu_rev[0].clr & CPU_MTF)) {
5059 		printf("CPU does not support the 'monitor trap flag' processor-based VM-execution control.\n");
5060 		return;
5061 	}
5062 
5063 	test_set_guest(test_mtf_guest);
5064 
5065 	/* Expect an MTF VM-exit after OUT instruction */
5066 	enter_guest();
5067 	skip_exit_vmcall();
5068 
5069 	enable_mtf();
5070 	enter_guest();
5071 	report_mtf("OUT", (unsigned long) &test_mtf1);
5072 	disable_mtf();
5073 
5074 	/*
5075 	 * Concurrent #DB trap and MTF on instruction boundary. Expect MTF
5076 	 * VM-exit with populated 'pending debug exceptions' VMCS field.
5077 	 */
5078 	enter_guest();
5079 	skip_exit_vmcall();
5080 
5081 	enable_mtf();
5082 	enable_tf();
5083 
5084 	enter_guest();
5085 	report_mtf("OUT", (unsigned long) &test_mtf2);
5086 	pending_dbg = vmcs_read(GUEST_PENDING_DEBUG);
5087 	report(pending_dbg & DR_STEP,
5088 	       "'pending debug exceptions' field after MTF VM-exit: 0x%lx (expected 0x%lx)",
5089 	       pending_dbg, (unsigned long) DR_STEP);
5090 
5091 	disable_mtf();
5092 	disable_tf();
5093 	vmcs_write(GUEST_PENDING_DEBUG, 0);
5094 
5095 	/*
5096 	 * #GP exception takes priority over MTF. Expect MTF VM-exit with RIP
5097 	 * advanced to first instruction of #GP handler.
5098 	 */
5099 	enter_guest();
5100 	skip_exit_vmcall();
5101 
5102 	old_gp = handle_exception(GP_VECTOR, test_mtf_gp_handler);
5103 
5104 	enable_mtf();
5105 	enter_guest();
5106 	report_mtf("MOV CR3", (unsigned long) get_idt_addr(&boot_idt[GP_VECTOR]));
5107 	disable_mtf();
5108 
5109 	/*
5110 	 * Concurrent MTF and privileged software exception (i.e. ICEBP/INT1).
5111 	 * MTF should follow the delivery of #DB trap, though the SDM doesn't
5112 	 * provide clear indication of the relative priority.
5113 	 */
5114 	enter_guest();
5115 	skip_exit_vmcall();
5116 
5117 	handle_exception(GP_VECTOR, old_gp);
5118 	old_db = handle_exception(DB_VECTOR, test_mtf_db_handler);
5119 
5120 	enable_mtf();
5121 	enter_guest();
5122 	report_mtf("INT1", (unsigned long) get_idt_addr(&boot_idt[DB_VECTOR]));
5123 	disable_mtf();
5124 
5125 	enter_guest();
5126 	handle_exception(DB_VECTOR, old_db);
5127 }
5128 
5129 /*
5130  * Tests for VM-execution control fields
5131  */
5132 static void test_vm_execution_ctls(void)
5133 {
5134 	test_pin_based_ctls();
5135 	test_primary_processor_based_ctls();
5136 	test_secondary_processor_based_ctls();
5137 	test_cr3_targets();
5138 	test_io_bitmaps();
5139 	test_msr_bitmap();
5140 	test_apic_ctls();
5141 	test_tpr_threshold();
5142 	test_nmi_ctrls();
5143 	test_pml();
5144 	test_vpid();
5145 	test_ept_eptp();
5146 	test_vmx_preemption_timer();
5147 }
5148 
5149  /*
5150   * The following checks are performed for the VM-entry MSR-load address if
5151   * the VM-entry MSR-load count field is non-zero:
5152   *
5153   *    - The lower 4 bits of the VM-entry MSR-load address must be 0.
5154   *      The address should not set any bits beyond the processor’s
5155   *      physical-address width.
5156   *
5157   *    - The address of the last byte in the VM-entry MSR-load area
5158   *      should not set any bits beyond the processor’s physical-address
5159   *      width. The address of this last byte is VM-entry MSR-load address
5160   *      + (MSR count * 16) - 1. (The arithmetic used for the computation
5161   *      uses more bits than the processor’s physical-address width.)
5162   *
5163   *
5164   *  [Intel SDM]
5165   */
5166 static void test_entry_msr_load(void)
5167 {
5168 	entry_msr_load = alloc_page();
5169 	u64 tmp;
5170 	u32 entry_msr_ld_cnt = 1;
5171 	int i;
5172 	u32 addr_len = 64;
5173 
5174 	vmcs_write(ENT_MSR_LD_CNT, entry_msr_ld_cnt);
5175 
5176 	/* Check first 4 bits of VM-entry MSR-load address */
5177 	for (i = 0; i < 4; i++) {
5178 		tmp = (u64)entry_msr_load | 1ull << i;
5179 		vmcs_write(ENTER_MSR_LD_ADDR, tmp);
5180 		report_prefix_pushf("VM-entry MSR-load addr [4:0] %lx",
5181 				    tmp & 0xf);
5182 		test_vmx_invalid_controls();
5183 		report_prefix_pop();
5184 	}
5185 
5186 	if (basic.val & (1ul << 48))
5187 		addr_len = 32;
5188 
5189 	test_vmcs_addr_values("VM-entry-MSR-load address",
5190 				ENTER_MSR_LD_ADDR, 16, false, false,
5191 				4, addr_len - 1);
5192 
5193 	/*
5194 	 * Check last byte of VM-entry MSR-load address
5195 	 */
5196 	entry_msr_load = (struct vmx_msr_entry *)((u64)entry_msr_load & ~0xf);
5197 
5198 	for (i = (addr_len == 64 ? cpuid_maxphyaddr(): addr_len);
5199 							i < 64; i++) {
5200 		tmp = ((u64)entry_msr_load + entry_msr_ld_cnt * 16 - 1) |
5201 			1ul << i;
5202 		vmcs_write(ENTER_MSR_LD_ADDR,
5203 			   tmp - (entry_msr_ld_cnt * 16 - 1));
5204 		test_vmx_invalid_controls();
5205 	}
5206 
5207 	vmcs_write(ENT_MSR_LD_CNT, 2);
5208 	vmcs_write(ENTER_MSR_LD_ADDR, (1ULL << cpuid_maxphyaddr()) - 16);
5209 	test_vmx_invalid_controls();
5210 	vmcs_write(ENTER_MSR_LD_ADDR, (1ULL << cpuid_maxphyaddr()) - 32);
5211 	test_vmx_valid_controls();
5212 	vmcs_write(ENTER_MSR_LD_ADDR, (1ULL << cpuid_maxphyaddr()) - 48);
5213 	test_vmx_valid_controls();
5214 }
5215 
5216 static struct vmx_state_area_test_data {
5217 	u32 msr;
5218 	u64 exp;
5219 	bool enabled;
5220 } vmx_state_area_test_data;
5221 
5222 static void guest_state_test_main(void)
5223 {
5224 	u64 obs;
5225 	struct vmx_state_area_test_data *data = &vmx_state_area_test_data;
5226 
5227 	while (1) {
5228 		if (vmx_get_test_stage() == 2)
5229 			break;
5230 
5231 		if (data->enabled) {
5232 			obs = rdmsr(data->msr);
5233 			report(data->exp == obs,
5234 			       "Guest state is 0x%lx (expected 0x%lx)",
5235 			       obs, data->exp);
5236 		}
5237 
5238 		vmcall();
5239 	}
5240 
5241 	asm volatile("fnop");
5242 }
5243 
5244 static void advance_guest_state_test(void)
5245 {
5246 	u32 reason = vmcs_read(EXI_REASON);
5247 	if (! (reason & 0x80000000)) {
5248 		u64 guest_rip = vmcs_read(GUEST_RIP);
5249 		u32 insn_len = vmcs_read(EXI_INST_LEN);
5250 		vmcs_write(GUEST_RIP, guest_rip + insn_len);
5251 	}
5252 }
5253 
5254 static void report_guest_state_test(const char *test, u32 xreason,
5255 				    u64 field, const char * field_name)
5256 {
5257 	u32 reason = vmcs_read(EXI_REASON);
5258 
5259 	report(reason == xreason, "%s, %s %lx", test, field_name, field);
5260 	advance_guest_state_test();
5261 }
5262 
5263 /*
5264  * Tests for VM-entry control fields
5265  */
5266 static void test_vm_entry_ctls(void)
5267 {
5268 	test_invalid_event_injection();
5269 	test_entry_msr_load();
5270 }
5271 
5272 /*
5273  * The following checks are performed for the VM-exit MSR-store address if
5274  * the VM-exit MSR-store count field is non-zero:
5275  *
5276  *    - The lower 4 bits of the VM-exit MSR-store address must be 0.
5277  *      The address should not set any bits beyond the processor’s
5278  *      physical-address width.
5279  *
5280  *    - The address of the last byte in the VM-exit MSR-store area
5281  *      should not set any bits beyond the processor’s physical-address
5282  *      width. The address of this last byte is VM-exit MSR-store address
5283  *      + (MSR count * 16) - 1. (The arithmetic used for the computation
5284  *      uses more bits than the processor’s physical-address width.)
5285  *
5286  * If IA32_VMX_BASIC[48] is read as 1, neither address should set any bits
5287  * in the range 63:32.
5288  *
5289  *  [Intel SDM]
5290  */
5291 static void test_exit_msr_store(void)
5292 {
5293 	exit_msr_store = alloc_page();
5294 	u64 tmp;
5295 	u32 exit_msr_st_cnt = 1;
5296 	int i;
5297 	u32 addr_len = 64;
5298 
5299 	vmcs_write(EXI_MSR_ST_CNT, exit_msr_st_cnt);
5300 
5301 	/* Check first 4 bits of VM-exit MSR-store address */
5302 	for (i = 0; i < 4; i++) {
5303 		tmp = (u64)exit_msr_store | 1ull << i;
5304 		vmcs_write(EXIT_MSR_ST_ADDR, tmp);
5305 		report_prefix_pushf("VM-exit MSR-store addr [4:0] %lx",
5306 				    tmp & 0xf);
5307 		test_vmx_invalid_controls();
5308 		report_prefix_pop();
5309 	}
5310 
5311 	if (basic.val & (1ul << 48))
5312 		addr_len = 32;
5313 
5314 	test_vmcs_addr_values("VM-exit-MSR-store address",
5315 				EXIT_MSR_ST_ADDR, 16, false, false,
5316 				4, addr_len - 1);
5317 
5318 	/*
5319 	 * Check last byte of VM-exit MSR-store address
5320 	 */
5321 	exit_msr_store = (struct vmx_msr_entry *)((u64)exit_msr_store & ~0xf);
5322 
5323 	for (i = (addr_len == 64 ? cpuid_maxphyaddr(): addr_len);
5324 							i < 64; i++) {
5325 		tmp = ((u64)exit_msr_store + exit_msr_st_cnt * 16 - 1) |
5326 			1ul << i;
5327 		vmcs_write(EXIT_MSR_ST_ADDR,
5328 			   tmp - (exit_msr_st_cnt * 16 - 1));
5329 		test_vmx_invalid_controls();
5330 	}
5331 
5332 	vmcs_write(EXI_MSR_ST_CNT, 2);
5333 	vmcs_write(EXIT_MSR_ST_ADDR, (1ULL << cpuid_maxphyaddr()) - 16);
5334 	test_vmx_invalid_controls();
5335 	vmcs_write(EXIT_MSR_ST_ADDR, (1ULL << cpuid_maxphyaddr()) - 32);
5336 	test_vmx_valid_controls();
5337 	vmcs_write(EXIT_MSR_ST_ADDR, (1ULL << cpuid_maxphyaddr()) - 48);
5338 	test_vmx_valid_controls();
5339 }
5340 
5341 /*
5342  * Tests for VM-exit controls
5343  */
5344 static void test_vm_exit_ctls(void)
5345 {
5346 	test_exit_msr_store();
5347 }
5348 
5349 /*
5350  * Check that the virtual CPU checks all of the VMX controls as
5351  * documented in the Intel SDM.
5352  */
5353 static void vmx_controls_test(void)
5354 {
5355 	/*
5356 	 * Bit 1 of the guest's RFLAGS must be 1, or VM-entry will
5357 	 * fail due to invalid guest state, should we make it that
5358 	 * far.
5359 	 */
5360 	vmcs_write(GUEST_RFLAGS, 0);
5361 
5362 	test_vm_execution_ctls();
5363 	test_vm_exit_ctls();
5364 	test_vm_entry_ctls();
5365 }
5366 
5367 struct apic_reg_virt_config {
5368 	bool apic_register_virtualization;
5369 	bool use_tpr_shadow;
5370 	bool virtualize_apic_accesses;
5371 	bool virtualize_x2apic_mode;
5372 	bool activate_secondary_controls;
5373 };
5374 
5375 struct apic_reg_test {
5376 	const char *name;
5377 	struct apic_reg_virt_config apic_reg_virt_config;
5378 };
5379 
5380 struct apic_reg_virt_expectation {
5381 	enum Reason rd_exit_reason;
5382 	enum Reason wr_exit_reason;
5383 	u32 val;
5384 	u32 (*virt_fn)(u32);
5385 
5386 	/*
5387 	 * If false, accessing the APIC access address from L2 is treated as a
5388 	 * normal memory operation, rather than triggering virtualization.
5389 	 */
5390 	bool virtualize_apic_accesses;
5391 };
5392 
5393 static u32 apic_virt_identity(u32 val)
5394 {
5395 	return val;
5396 }
5397 
5398 static u32 apic_virt_nibble1(u32 val)
5399 {
5400 	return val & 0xf0;
5401 }
5402 
5403 static u32 apic_virt_byte3(u32 val)
5404 {
5405 	return val & (0xff << 24);
5406 }
5407 
5408 static bool apic_reg_virt_exit_expectation(
5409 	u32 reg, struct apic_reg_virt_config *config,
5410 	struct apic_reg_virt_expectation *expectation)
5411 {
5412 	/* Good configs, where some L2 APIC accesses are virtualized. */
5413 	bool virtualize_apic_accesses_only =
5414 		config->virtualize_apic_accesses &&
5415 		!config->use_tpr_shadow &&
5416 		!config->apic_register_virtualization &&
5417 		!config->virtualize_x2apic_mode &&
5418 		config->activate_secondary_controls;
5419 	bool virtualize_apic_accesses_and_use_tpr_shadow =
5420 		config->virtualize_apic_accesses &&
5421 		config->use_tpr_shadow &&
5422 		!config->apic_register_virtualization &&
5423 		!config->virtualize_x2apic_mode &&
5424 		config->activate_secondary_controls;
5425 	bool apic_register_virtualization =
5426 		config->virtualize_apic_accesses &&
5427 		config->use_tpr_shadow &&
5428 		config->apic_register_virtualization &&
5429 		!config->virtualize_x2apic_mode &&
5430 		config->activate_secondary_controls;
5431 
5432 	expectation->val = MAGIC_VAL_1;
5433 	expectation->virt_fn = apic_virt_identity;
5434 	expectation->virtualize_apic_accesses =
5435 		config->virtualize_apic_accesses &&
5436 		config->activate_secondary_controls;
5437 	if (virtualize_apic_accesses_only) {
5438 		expectation->rd_exit_reason = VMX_APIC_ACCESS;
5439 		expectation->wr_exit_reason = VMX_APIC_ACCESS;
5440 	} else if (virtualize_apic_accesses_and_use_tpr_shadow) {
5441 		switch (reg) {
5442 		case APIC_TASKPRI:
5443 			expectation->rd_exit_reason = VMX_VMCALL;
5444 			expectation->wr_exit_reason = VMX_VMCALL;
5445 			expectation->virt_fn = apic_virt_nibble1;
5446 			break;
5447 		default:
5448 			expectation->rd_exit_reason = VMX_APIC_ACCESS;
5449 			expectation->wr_exit_reason = VMX_APIC_ACCESS;
5450 		}
5451 	} else if (apic_register_virtualization) {
5452 		expectation->rd_exit_reason = VMX_VMCALL;
5453 
5454 		switch (reg) {
5455 		case APIC_ID:
5456 		case APIC_EOI:
5457 		case APIC_LDR:
5458 		case APIC_DFR:
5459 		case APIC_SPIV:
5460 		case APIC_ESR:
5461 		case APIC_ICR:
5462 		case APIC_LVTT:
5463 		case APIC_LVTTHMR:
5464 		case APIC_LVTPC:
5465 		case APIC_LVT0:
5466 		case APIC_LVT1:
5467 		case APIC_LVTERR:
5468 		case APIC_TMICT:
5469 		case APIC_TDCR:
5470 			expectation->wr_exit_reason = VMX_APIC_WRITE;
5471 			break;
5472 		case APIC_LVR:
5473 		case APIC_ISR ... APIC_ISR + 0x70:
5474 		case APIC_TMR ... APIC_TMR + 0x70:
5475 		case APIC_IRR ... APIC_IRR + 0x70:
5476 			expectation->wr_exit_reason = VMX_APIC_ACCESS;
5477 			break;
5478 		case APIC_TASKPRI:
5479 			expectation->wr_exit_reason = VMX_VMCALL;
5480 			expectation->virt_fn = apic_virt_nibble1;
5481 			break;
5482 		case APIC_ICR2:
5483 			expectation->wr_exit_reason = VMX_VMCALL;
5484 			expectation->virt_fn = apic_virt_byte3;
5485 			break;
5486 		default:
5487 			expectation->rd_exit_reason = VMX_APIC_ACCESS;
5488 			expectation->wr_exit_reason = VMX_APIC_ACCESS;
5489 		}
5490 	} else if (!expectation->virtualize_apic_accesses) {
5491 		/*
5492 		 * No APIC registers are directly virtualized. This includes
5493 		 * VTPR, which can be virtualized through MOV to/from CR8 via
5494 		 * the use TPR shadow control, but not through directly
5495 		 * accessing VTPR.
5496 		 */
5497 		expectation->rd_exit_reason = VMX_VMCALL;
5498 		expectation->wr_exit_reason = VMX_VMCALL;
5499 	} else {
5500 		printf("Cannot parse APIC register virtualization config:\n"
5501 		       "\tvirtualize_apic_accesses: %d\n"
5502 		       "\tuse_tpr_shadow: %d\n"
5503 		       "\tapic_register_virtualization: %d\n"
5504 		       "\tvirtualize_x2apic_mode: %d\n"
5505 		       "\tactivate_secondary_controls: %d\n",
5506 		       config->virtualize_apic_accesses,
5507 		       config->use_tpr_shadow,
5508 		       config->apic_register_virtualization,
5509 		       config->virtualize_x2apic_mode,
5510 		       config->activate_secondary_controls);
5511 
5512 		return false;
5513 	}
5514 
5515 	return true;
5516 }
5517 
5518 struct apic_reg_test apic_reg_tests[] = {
5519 	/* Good configs, where some L2 APIC accesses are virtualized. */
5520 	{
5521 		.name = "Virtualize APIC accesses",
5522 		.apic_reg_virt_config = {
5523 			.virtualize_apic_accesses = true,
5524 			.use_tpr_shadow = false,
5525 			.apic_register_virtualization = false,
5526 			.virtualize_x2apic_mode = false,
5527 			.activate_secondary_controls = true,
5528 		},
5529 	},
5530 	{
5531 		.name = "Virtualize APIC accesses + Use TPR shadow",
5532 		.apic_reg_virt_config = {
5533 			.virtualize_apic_accesses = true,
5534 			.use_tpr_shadow = true,
5535 			.apic_register_virtualization = false,
5536 			.virtualize_x2apic_mode = false,
5537 			.activate_secondary_controls = true,
5538 		},
5539 	},
5540 	{
5541 		.name = "APIC-register virtualization",
5542 		.apic_reg_virt_config = {
5543 			.virtualize_apic_accesses = true,
5544 			.use_tpr_shadow = true,
5545 			.apic_register_virtualization = true,
5546 			.virtualize_x2apic_mode = false,
5547 			.activate_secondary_controls = true,
5548 		},
5549 	},
5550 
5551 	/*
5552 	 * Test that the secondary processor-based VM-execution controls are
5553 	 * correctly ignored when "activate secondary controls" is disabled.
5554 	 */
5555 	{
5556 		.name = "Activate secondary controls off",
5557 		.apic_reg_virt_config = {
5558 			.virtualize_apic_accesses = true,
5559 			.use_tpr_shadow = false,
5560 			.apic_register_virtualization = true,
5561 			.virtualize_x2apic_mode = true,
5562 			.activate_secondary_controls = false,
5563 		},
5564 	},
5565 	{
5566 		.name = "Activate secondary controls off + Use TPR shadow",
5567 		.apic_reg_virt_config = {
5568 			.virtualize_apic_accesses = true,
5569 			.use_tpr_shadow = true,
5570 			.apic_register_virtualization = true,
5571 			.virtualize_x2apic_mode = true,
5572 			.activate_secondary_controls = false,
5573 		},
5574 	},
5575 
5576 	/*
5577 	 * Test that the APIC access address is treated like an arbitrary memory
5578 	 * address when "virtualize APIC accesses" is disabled.
5579 	 */
5580 	{
5581 		.name = "Virtualize APIC accesses off + Use TPR shadow",
5582 		.apic_reg_virt_config = {
5583 			.virtualize_apic_accesses = false,
5584 			.use_tpr_shadow = true,
5585 			.apic_register_virtualization = true,
5586 			.virtualize_x2apic_mode = true,
5587 			.activate_secondary_controls = true,
5588 		},
5589 	},
5590 
5591 	/*
5592 	 * Test that VM entry fails due to invalid controls when
5593 	 * "APIC-register virtualization" is enabled while "use TPR shadow" is
5594 	 * disabled.
5595 	 */
5596 	{
5597 		.name = "APIC-register virtualization + Use TPR shadow off",
5598 		.apic_reg_virt_config = {
5599 			.virtualize_apic_accesses = true,
5600 			.use_tpr_shadow = false,
5601 			.apic_register_virtualization = true,
5602 			.virtualize_x2apic_mode = false,
5603 			.activate_secondary_controls = true,
5604 		},
5605 	},
5606 
5607 	/*
5608 	 * Test that VM entry fails due to invalid controls when
5609 	 * "Virtualize x2APIC mode" is enabled while "use TPR shadow" is
5610 	 * disabled.
5611 	 */
5612 	{
5613 		.name = "Virtualize x2APIC mode + Use TPR shadow off",
5614 		.apic_reg_virt_config = {
5615 			.virtualize_apic_accesses = false,
5616 			.use_tpr_shadow = false,
5617 			.apic_register_virtualization = false,
5618 			.virtualize_x2apic_mode = true,
5619 			.activate_secondary_controls = true,
5620 		},
5621 	},
5622 	{
5623 		.name = "Virtualize x2APIC mode + Use TPR shadow off v2",
5624 		.apic_reg_virt_config = {
5625 			.virtualize_apic_accesses = false,
5626 			.use_tpr_shadow = false,
5627 			.apic_register_virtualization = true,
5628 			.virtualize_x2apic_mode = true,
5629 			.activate_secondary_controls = true,
5630 		},
5631 	},
5632 
5633 	/*
5634 	 * Test that VM entry fails due to invalid controls when
5635 	 * "virtualize x2APIC mode" is enabled while "virtualize APIC accesses"
5636 	 * is enabled.
5637 	 */
5638 	{
5639 		.name = "Virtualize x2APIC mode + Virtualize APIC accesses",
5640 		.apic_reg_virt_config = {
5641 			.virtualize_apic_accesses = true,
5642 			.use_tpr_shadow = true,
5643 			.apic_register_virtualization = false,
5644 			.virtualize_x2apic_mode = true,
5645 			.activate_secondary_controls = true,
5646 		},
5647 	},
5648 	{
5649 		.name = "Virtualize x2APIC mode + Virtualize APIC accesses v2",
5650 		.apic_reg_virt_config = {
5651 			.virtualize_apic_accesses = true,
5652 			.use_tpr_shadow = true,
5653 			.apic_register_virtualization = true,
5654 			.virtualize_x2apic_mode = true,
5655 			.activate_secondary_controls = true,
5656 		},
5657 	},
5658 };
5659 
5660 enum Apic_op {
5661 	APIC_OP_XAPIC_RD,
5662 	APIC_OP_XAPIC_WR,
5663 	TERMINATE,
5664 };
5665 
5666 static u32 vmx_xapic_read(u32 *apic_access_address, u32 reg)
5667 {
5668 	return *(volatile u32 *)((uintptr_t)apic_access_address + reg);
5669 }
5670 
5671 static void vmx_xapic_write(u32 *apic_access_address, u32 reg, u32 val)
5672 {
5673 	*(volatile u32 *)((uintptr_t)apic_access_address + reg) = val;
5674 }
5675 
5676 struct apic_reg_virt_guest_args {
5677 	enum Apic_op op;
5678 	u32 *apic_access_address;
5679 	u32 reg;
5680 	u32 val;
5681 	bool check_rd;
5682 	u32 (*virt_fn)(u32);
5683 } apic_reg_virt_guest_args;
5684 
5685 static void apic_reg_virt_guest(void)
5686 {
5687 	volatile struct apic_reg_virt_guest_args *args =
5688 		&apic_reg_virt_guest_args;
5689 
5690 	for (;;) {
5691 		enum Apic_op op = args->op;
5692 		u32 *apic_access_address = args->apic_access_address;
5693 		u32 reg = args->reg;
5694 		u32 val = args->val;
5695 		bool check_rd = args->check_rd;
5696 		u32 (*virt_fn)(u32) = args->virt_fn;
5697 
5698 		if (op == TERMINATE)
5699 			break;
5700 
5701 		if (op == APIC_OP_XAPIC_RD) {
5702 			u32 ret = vmx_xapic_read(apic_access_address, reg);
5703 
5704 			if (check_rd) {
5705 				u32 want = virt_fn(val);
5706 				u32 got = virt_fn(ret);
5707 
5708 				report(got == want,
5709 				       "read 0x%x, expected 0x%x.", got, want);
5710 			}
5711 		} else if (op == APIC_OP_XAPIC_WR) {
5712 			vmx_xapic_write(apic_access_address, reg, val);
5713 		}
5714 
5715 		/*
5716 		 * The L1 should always execute a vmcall after it's done testing
5717 		 * an individual APIC operation. This helps to validate that the
5718 		 * L1 and L2 are in sync with each other, as expected.
5719 		 */
5720 		vmcall();
5721 	}
5722 }
5723 
5724 static void test_xapic_rd(
5725 	u32 reg, struct apic_reg_virt_expectation *expectation,
5726 	u32 *apic_access_address, u32 *virtual_apic_page)
5727 {
5728 	u32 val = expectation->val;
5729 	u32 exit_reason_want = expectation->rd_exit_reason;
5730 	struct apic_reg_virt_guest_args *args = &apic_reg_virt_guest_args;
5731 
5732 	report_prefix_pushf("xapic - reading 0x%03x", reg);
5733 
5734 	/* Configure guest to do an xapic read */
5735 	args->op = APIC_OP_XAPIC_RD;
5736 	args->apic_access_address = apic_access_address;
5737 	args->reg = reg;
5738 	args->val = val;
5739 	args->check_rd = exit_reason_want == VMX_VMCALL;
5740 	args->virt_fn = expectation->virt_fn;
5741 
5742 	/* Setup virtual APIC page */
5743 	if (!expectation->virtualize_apic_accesses) {
5744 		apic_access_address[apic_reg_index(reg)] = val;
5745 		virtual_apic_page[apic_reg_index(reg)] = 0;
5746 	} else if (exit_reason_want == VMX_VMCALL) {
5747 		apic_access_address[apic_reg_index(reg)] = 0;
5748 		virtual_apic_page[apic_reg_index(reg)] = val;
5749 	}
5750 
5751 	/* Enter guest */
5752 	enter_guest();
5753 
5754 	/*
5755 	 * Validate the behavior and
5756 	 * pass a magic value back to the guest.
5757 	 */
5758 	if (exit_reason_want == VMX_APIC_ACCESS) {
5759 		u32 apic_page_offset = vmcs_read(EXI_QUALIFICATION) & 0xfff;
5760 
5761 		assert_exit_reason(exit_reason_want);
5762 		report(apic_page_offset == reg,
5763 		       "got APIC access exit @ page offset 0x%03x, want 0x%03x",
5764 		       apic_page_offset, reg);
5765 		skip_exit_insn();
5766 
5767 		/* Reenter guest so it can consume/check rcx and exit again. */
5768 		enter_guest();
5769 	} else if (exit_reason_want != VMX_VMCALL) {
5770 		report(false, "Oops, bad exit expectation: %u.",
5771 		       exit_reason_want);
5772 	}
5773 
5774 	skip_exit_vmcall();
5775 	report_prefix_pop();
5776 }
5777 
5778 static void test_xapic_wr(
5779 	u32 reg, struct apic_reg_virt_expectation *expectation,
5780 	u32 *apic_access_address, u32 *virtual_apic_page)
5781 {
5782 	u32 val = expectation->val;
5783 	u32 exit_reason_want = expectation->wr_exit_reason;
5784 	struct apic_reg_virt_guest_args *args = &apic_reg_virt_guest_args;
5785 	bool virtualized =
5786 		expectation->virtualize_apic_accesses &&
5787 		(exit_reason_want == VMX_APIC_WRITE ||
5788 		 exit_reason_want == VMX_VMCALL);
5789 	bool checked = false;
5790 
5791 	report_prefix_pushf("xapic - writing 0x%x to 0x%03x", val, reg);
5792 
5793 	/* Configure guest to do an xapic read */
5794 	args->op = APIC_OP_XAPIC_WR;
5795 	args->apic_access_address = apic_access_address;
5796 	args->reg = reg;
5797 	args->val = val;
5798 
5799 	/* Setup virtual APIC page */
5800 	if (virtualized || !expectation->virtualize_apic_accesses) {
5801 		apic_access_address[apic_reg_index(reg)] = 0;
5802 		virtual_apic_page[apic_reg_index(reg)] = 0;
5803 	}
5804 
5805 	/* Enter guest */
5806 	enter_guest();
5807 
5808 	/*
5809 	 * Validate the behavior and
5810 	 * pass a magic value back to the guest.
5811 	 */
5812 	if (exit_reason_want == VMX_APIC_ACCESS) {
5813 		u32 apic_page_offset = vmcs_read(EXI_QUALIFICATION) & 0xfff;
5814 
5815 		assert_exit_reason(exit_reason_want);
5816 		report(apic_page_offset == reg,
5817 		       "got APIC access exit @ page offset 0x%03x, want 0x%03x",
5818 		       apic_page_offset, reg);
5819 		skip_exit_insn();
5820 
5821 		/* Reenter guest so it can consume/check rcx and exit again. */
5822 		enter_guest();
5823 	} else if (exit_reason_want == VMX_APIC_WRITE) {
5824 		assert_exit_reason(exit_reason_want);
5825 		report(virtual_apic_page[apic_reg_index(reg)] == val,
5826 		       "got APIC write exit @ page offset 0x%03x; val is 0x%x, want 0x%x",
5827 		       apic_reg_index(reg),
5828 		       virtual_apic_page[apic_reg_index(reg)], val);
5829 		checked = true;
5830 
5831 		/* Reenter guest so it can consume/check rcx and exit again. */
5832 		enter_guest();
5833 	} else if (exit_reason_want != VMX_VMCALL) {
5834 		report(false, "Oops, bad exit expectation: %u.",
5835 		       exit_reason_want);
5836 	}
5837 
5838 	assert_exit_reason(VMX_VMCALL);
5839 	if (virtualized && !checked) {
5840 		u32 want = expectation->virt_fn(val);
5841 		u32 got = virtual_apic_page[apic_reg_index(reg)];
5842 		got = expectation->virt_fn(got);
5843 
5844 		report(got == want, "exitless write; val is 0x%x, want 0x%x",
5845 		       got, want);
5846 	} else if (!expectation->virtualize_apic_accesses && !checked) {
5847 		u32 got = apic_access_address[apic_reg_index(reg)];
5848 
5849 		report(got == val,
5850 		       "non-virtualized write; val is 0x%x, want 0x%x", got,
5851 		       val);
5852 	} else if (!expectation->virtualize_apic_accesses && checked) {
5853 		report(false,
5854 		       "Non-virtualized write was prematurely checked!");
5855 	}
5856 
5857 	skip_exit_vmcall();
5858 	report_prefix_pop();
5859 }
5860 
5861 enum Config_type {
5862 	CONFIG_TYPE_GOOD,
5863 	CONFIG_TYPE_UNSUPPORTED,
5864 	CONFIG_TYPE_VMENTRY_FAILS_EARLY,
5865 };
5866 
5867 static enum Config_type configure_apic_reg_virt_test(
5868 	struct apic_reg_virt_config *apic_reg_virt_config)
5869 {
5870 	u32 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
5871 	u32 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
5872 	/* Configs where L2 entry fails early, due to invalid controls. */
5873 	bool use_tpr_shadow_incorrectly_off =
5874 		!apic_reg_virt_config->use_tpr_shadow &&
5875 		(apic_reg_virt_config->apic_register_virtualization ||
5876 		 apic_reg_virt_config->virtualize_x2apic_mode) &&
5877 		apic_reg_virt_config->activate_secondary_controls;
5878 	bool virtualize_apic_accesses_incorrectly_on =
5879 		apic_reg_virt_config->virtualize_apic_accesses &&
5880 		apic_reg_virt_config->virtualize_x2apic_mode &&
5881 		apic_reg_virt_config->activate_secondary_controls;
5882 	bool vmentry_fails_early =
5883 		use_tpr_shadow_incorrectly_off ||
5884 		virtualize_apic_accesses_incorrectly_on;
5885 
5886 	if (apic_reg_virt_config->activate_secondary_controls) {
5887 		if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) {
5888 			printf("VM-execution control \"activate secondary controls\" NOT supported.\n");
5889 			return CONFIG_TYPE_UNSUPPORTED;
5890 		}
5891 		cpu_exec_ctrl0 |= CPU_SECONDARY;
5892 	} else {
5893 		cpu_exec_ctrl0 &= ~CPU_SECONDARY;
5894 	}
5895 
5896 	if (apic_reg_virt_config->virtualize_apic_accesses) {
5897 		if (!(ctrl_cpu_rev[1].clr & CPU_VIRT_APIC_ACCESSES)) {
5898 			printf("VM-execution control \"virtualize APIC accesses\" NOT supported.\n");
5899 			return CONFIG_TYPE_UNSUPPORTED;
5900 		}
5901 		cpu_exec_ctrl1 |= CPU_VIRT_APIC_ACCESSES;
5902 	} else {
5903 		cpu_exec_ctrl1 &= ~CPU_VIRT_APIC_ACCESSES;
5904 	}
5905 
5906 	if (apic_reg_virt_config->use_tpr_shadow) {
5907 		if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) {
5908 			printf("VM-execution control \"use TPR shadow\" NOT supported.\n");
5909 			return CONFIG_TYPE_UNSUPPORTED;
5910 		}
5911 		cpu_exec_ctrl0 |= CPU_TPR_SHADOW;
5912 	} else {
5913 		cpu_exec_ctrl0 &= ~CPU_TPR_SHADOW;
5914 	}
5915 
5916 	if (apic_reg_virt_config->apic_register_virtualization) {
5917 		if (!(ctrl_cpu_rev[1].clr & CPU_APIC_REG_VIRT)) {
5918 			printf("VM-execution control \"APIC-register virtualization\" NOT supported.\n");
5919 			return CONFIG_TYPE_UNSUPPORTED;
5920 		}
5921 		cpu_exec_ctrl1 |= CPU_APIC_REG_VIRT;
5922 	} else {
5923 		cpu_exec_ctrl1 &= ~CPU_APIC_REG_VIRT;
5924 	}
5925 
5926 	if (apic_reg_virt_config->virtualize_x2apic_mode) {
5927 		if (!(ctrl_cpu_rev[1].clr & CPU_VIRT_X2APIC)) {
5928 			printf("VM-execution control \"virtualize x2APIC mode\" NOT supported.\n");
5929 			return CONFIG_TYPE_UNSUPPORTED;
5930 		}
5931 		cpu_exec_ctrl1 |= CPU_VIRT_X2APIC;
5932 	} else {
5933 		cpu_exec_ctrl1 &= ~CPU_VIRT_X2APIC;
5934 	}
5935 
5936 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
5937 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
5938 
5939 	if (vmentry_fails_early)
5940 		return CONFIG_TYPE_VMENTRY_FAILS_EARLY;
5941 
5942 	return CONFIG_TYPE_GOOD;
5943 }
5944 
5945 static bool cpu_has_apicv(void)
5946 {
5947 	return ((ctrl_cpu_rev[1].clr & CPU_APIC_REG_VIRT) &&
5948 		(ctrl_cpu_rev[1].clr & CPU_VINTD) &&
5949 		(ctrl_pin_rev.clr & PIN_POST_INTR));
5950 }
5951 
5952 /* Validates APIC register access across valid virtualization configurations. */
5953 static void apic_reg_virt_test(void)
5954 {
5955 	u32 *apic_access_address;
5956 	u32 *virtual_apic_page;
5957 	u64 control;
5958 	u64 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
5959 	u64 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
5960 	int i;
5961 	struct apic_reg_virt_guest_args *args = &apic_reg_virt_guest_args;
5962 
5963 	if (!cpu_has_apicv()) {
5964 		report_skip(__func__);
5965 		return;
5966 	}
5967 
5968 	control = cpu_exec_ctrl1;
5969 	control &= ~CPU_VINTD;
5970 	vmcs_write(CPU_EXEC_CTRL1, control);
5971 
5972 	test_set_guest(apic_reg_virt_guest);
5973 
5974 	/*
5975 	 * From the SDM: The 1-setting of the "virtualize APIC accesses"
5976 	 * VM-execution is guaranteed to apply only if translations to the
5977 	 * APIC-access address use a 4-KByte page.
5978 	 */
5979 	apic_access_address = alloc_page();
5980 	force_4k_page(apic_access_address);
5981 	vmcs_write(APIC_ACCS_ADDR, virt_to_phys(apic_access_address));
5982 
5983 	virtual_apic_page = alloc_page();
5984 	vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page));
5985 
5986 	for (i = 0; i < ARRAY_SIZE(apic_reg_tests); i++) {
5987 		struct apic_reg_test *apic_reg_test = &apic_reg_tests[i];
5988 		struct apic_reg_virt_config *apic_reg_virt_config =
5989 				&apic_reg_test->apic_reg_virt_config;
5990 		enum Config_type config_type;
5991 		u32 reg;
5992 
5993 		printf("--- %s test ---\n", apic_reg_test->name);
5994 		config_type =
5995 			configure_apic_reg_virt_test(apic_reg_virt_config);
5996 		if (config_type == CONFIG_TYPE_UNSUPPORTED) {
5997 			printf("Skip because of missing features.\n");
5998 			continue;
5999 		}
6000 
6001 		if (config_type == CONFIG_TYPE_VMENTRY_FAILS_EARLY) {
6002 			enter_guest_with_bad_controls();
6003 			continue;
6004 		}
6005 
6006 		for (reg = 0; reg < PAGE_SIZE / sizeof(u32); reg += 0x10) {
6007 			struct apic_reg_virt_expectation expectation = {};
6008 			bool ok;
6009 
6010 			ok = apic_reg_virt_exit_expectation(
6011 				reg, apic_reg_virt_config, &expectation);
6012 			if (!ok) {
6013 				report(false, "Malformed test.");
6014 				break;
6015 			}
6016 
6017 			test_xapic_rd(reg, &expectation, apic_access_address,
6018 				      virtual_apic_page);
6019 			test_xapic_wr(reg, &expectation, apic_access_address,
6020 				      virtual_apic_page);
6021 		}
6022 	}
6023 
6024 	/* Terminate the guest */
6025 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
6026 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
6027 	args->op = TERMINATE;
6028 	enter_guest();
6029 	assert_exit_reason(VMX_VMCALL);
6030 }
6031 
6032 struct virt_x2apic_mode_config {
6033 	struct apic_reg_virt_config apic_reg_virt_config;
6034 	bool virtual_interrupt_delivery;
6035 	bool use_msr_bitmaps;
6036 	bool disable_x2apic_msr_intercepts;
6037 	bool disable_x2apic;
6038 };
6039 
6040 struct virt_x2apic_mode_test_case {
6041 	const char *name;
6042 	struct virt_x2apic_mode_config virt_x2apic_mode_config;
6043 };
6044 
6045 enum Virt_x2apic_mode_behavior_type {
6046 	X2APIC_ACCESS_VIRTUALIZED,
6047 	X2APIC_ACCESS_PASSED_THROUGH,
6048 	X2APIC_ACCESS_TRIGGERS_GP,
6049 };
6050 
6051 struct virt_x2apic_mode_expectation {
6052 	enum Reason rd_exit_reason;
6053 	enum Reason wr_exit_reason;
6054 
6055 	/*
6056 	 * RDMSR and WRMSR handle 64-bit values. However, except for ICR, all of
6057 	 * the x2APIC registers are 32 bits. Notice:
6058 	 *   1. vmx_x2apic_read() clears the upper 32 bits for 32-bit registers.
6059 	 *   2. vmx_x2apic_write() expects the val arg to be well-formed.
6060 	 */
6061 	u64 rd_val;
6062 	u64 wr_val;
6063 
6064 	/*
6065 	 * Compares input to virtualized output;
6066 	 * 1st arg is pointer to return expected virtualization output.
6067 	 */
6068 	u64 (*virt_fn)(u64);
6069 
6070 	enum Virt_x2apic_mode_behavior_type rd_behavior;
6071 	enum Virt_x2apic_mode_behavior_type wr_behavior;
6072 	bool wr_only;
6073 };
6074 
6075 static u64 virt_x2apic_mode_identity(u64 val)
6076 {
6077 	return val;
6078 }
6079 
6080 static u64 virt_x2apic_mode_nibble1(u64 val)
6081 {
6082 	return val & 0xf0;
6083 }
6084 
6085 static void virt_x2apic_mode_rd_expectation(
6086 	u32 reg, bool virt_x2apic_mode_on, bool disable_x2apic,
6087 	bool apic_register_virtualization, bool virtual_interrupt_delivery,
6088 	struct virt_x2apic_mode_expectation *expectation)
6089 {
6090 	bool readable =
6091 		!x2apic_reg_reserved(reg) &&
6092 		reg != APIC_EOI;
6093 
6094 	expectation->rd_exit_reason = VMX_VMCALL;
6095 	expectation->virt_fn = virt_x2apic_mode_identity;
6096 	if (virt_x2apic_mode_on && apic_register_virtualization) {
6097 		expectation->rd_val = MAGIC_VAL_1;
6098 		if (reg == APIC_PROCPRI && virtual_interrupt_delivery)
6099 			expectation->virt_fn = virt_x2apic_mode_nibble1;
6100 		else if (reg == APIC_TASKPRI)
6101 			expectation->virt_fn = virt_x2apic_mode_nibble1;
6102 		expectation->rd_behavior = X2APIC_ACCESS_VIRTUALIZED;
6103 	} else if (virt_x2apic_mode_on && !apic_register_virtualization &&
6104 		   reg == APIC_TASKPRI) {
6105 		expectation->rd_val = MAGIC_VAL_1;
6106 		expectation->virt_fn = virt_x2apic_mode_nibble1;
6107 		expectation->rd_behavior = X2APIC_ACCESS_VIRTUALIZED;
6108 	} else if (!disable_x2apic && readable) {
6109 		expectation->rd_val = apic_read(reg);
6110 		expectation->rd_behavior = X2APIC_ACCESS_PASSED_THROUGH;
6111 	} else {
6112 		expectation->rd_behavior = X2APIC_ACCESS_TRIGGERS_GP;
6113 	}
6114 }
6115 
6116 /*
6117  * get_x2apic_wr_val() creates an innocuous write value for an x2APIC register.
6118  *
6119  * For writable registers, get_x2apic_wr_val() deposits the write value into the
6120  * val pointer arg and returns true. For non-writable registers, val is not
6121  * modified and get_x2apic_wr_val() returns false.
6122  */
6123 static bool get_x2apic_wr_val(u32 reg, u64 *val)
6124 {
6125 	switch (reg) {
6126 	case APIC_TASKPRI:
6127 		/* Bits 31:8 are reserved. */
6128 		*val &= 0xff;
6129 		break;
6130 	case APIC_EOI:
6131 	case APIC_ESR:
6132 	case APIC_TMICT:
6133 		/*
6134 		 * EOI, ESR: WRMSR of a non-zero value causes #GP(0).
6135 		 * TMICT: A write of 0 to the initial-count register effectively
6136 		 *        stops the local APIC timer, in both one-shot and
6137 		 *        periodic mode.
6138 		 */
6139 		*val = 0;
6140 		break;
6141 	case APIC_SPIV:
6142 	case APIC_LVTT:
6143 	case APIC_LVTTHMR:
6144 	case APIC_LVTPC:
6145 	case APIC_LVT0:
6146 	case APIC_LVT1:
6147 	case APIC_LVTERR:
6148 	case APIC_TDCR:
6149 		/*
6150 		 * To avoid writing a 1 to a reserved bit or causing some other
6151 		 * unintended side effect, read the current value and use it as
6152 		 * the write value.
6153 		 */
6154 		*val = apic_read(reg);
6155 		break;
6156 	case APIC_CMCI:
6157 		if (!apic_lvt_entry_supported(6))
6158 			return false;
6159 		*val = apic_read(reg);
6160 		break;
6161 	case APIC_ICR:
6162 		*val = 0x40000 | 0xf1;
6163 		break;
6164 	case APIC_SELF_IPI:
6165 		/*
6166 		 * With special processing (i.e., virtualize x2APIC mode +
6167 		 * virtual interrupt delivery), writing zero causes an
6168 		 * APIC-write VM exit. We plan to add a test for enabling
6169 		 * "virtual-interrupt delivery" in VMCS12, and that's where we
6170 		 * will test a self IPI with special processing.
6171 		 */
6172 		*val = 0x0;
6173 		break;
6174 	default:
6175 		return false;
6176 	}
6177 
6178 	return true;
6179 }
6180 
6181 static bool special_processing_applies(u32 reg, u64 *val,
6182 				       bool virt_int_delivery)
6183 {
6184 	bool special_processing =
6185 		(reg == APIC_TASKPRI) ||
6186 		(virt_int_delivery &&
6187 		 (reg == APIC_EOI || reg == APIC_SELF_IPI));
6188 
6189 	if (special_processing) {
6190 		TEST_ASSERT(get_x2apic_wr_val(reg, val));
6191 		return true;
6192 	}
6193 
6194 	return false;
6195 }
6196 
6197 static void virt_x2apic_mode_wr_expectation(
6198 	u32 reg, bool virt_x2apic_mode_on, bool disable_x2apic,
6199 	bool virt_int_delivery,
6200 	struct virt_x2apic_mode_expectation *expectation)
6201 {
6202 	expectation->wr_exit_reason = VMX_VMCALL;
6203 	expectation->wr_val = MAGIC_VAL_1;
6204 	expectation->wr_only = false;
6205 
6206 	if (virt_x2apic_mode_on &&
6207 	    special_processing_applies(reg, &expectation->wr_val,
6208 				       virt_int_delivery)) {
6209 		expectation->wr_behavior = X2APIC_ACCESS_VIRTUALIZED;
6210 		if (reg == APIC_SELF_IPI)
6211 			expectation->wr_exit_reason = VMX_APIC_WRITE;
6212 	} else if (!disable_x2apic &&
6213 		   get_x2apic_wr_val(reg, &expectation->wr_val)) {
6214 		expectation->wr_behavior = X2APIC_ACCESS_PASSED_THROUGH;
6215 		if (reg == APIC_EOI || reg == APIC_SELF_IPI)
6216 			expectation->wr_only = true;
6217 		if (reg == APIC_ICR)
6218 			expectation->wr_exit_reason = VMX_EXTINT;
6219 	} else {
6220 		expectation->wr_behavior = X2APIC_ACCESS_TRIGGERS_GP;
6221 		/*
6222 		 * Writing 1 to a reserved bit triggers a #GP.
6223 		 * Thus, set the write value to 0, which seems
6224 		 * the most likely to detect a missed #GP.
6225 		 */
6226 		expectation->wr_val = 0;
6227 	}
6228 }
6229 
6230 static void virt_x2apic_mode_exit_expectation(
6231 	u32 reg, struct virt_x2apic_mode_config *config,
6232 	struct virt_x2apic_mode_expectation *expectation)
6233 {
6234 	struct apic_reg_virt_config *base_config =
6235 		&config->apic_reg_virt_config;
6236 	bool virt_x2apic_mode_on =
6237 		base_config->virtualize_x2apic_mode &&
6238 		config->use_msr_bitmaps &&
6239 		config->disable_x2apic_msr_intercepts &&
6240 		base_config->activate_secondary_controls;
6241 
6242 	virt_x2apic_mode_wr_expectation(
6243 		reg, virt_x2apic_mode_on, config->disable_x2apic,
6244 		config->virtual_interrupt_delivery, expectation);
6245 	virt_x2apic_mode_rd_expectation(
6246 		reg, virt_x2apic_mode_on, config->disable_x2apic,
6247 		base_config->apic_register_virtualization,
6248 		config->virtual_interrupt_delivery, expectation);
6249 }
6250 
6251 struct virt_x2apic_mode_test_case virt_x2apic_mode_tests[] = {
6252 	/*
6253 	 * Baseline "virtualize x2APIC mode" configuration:
6254 	 *   - virtualize x2APIC mode
6255 	 *   - virtual-interrupt delivery
6256 	 *   - APIC-register virtualization
6257 	 *   - x2APIC MSR intercepts disabled
6258 	 *
6259 	 * Reads come from virtual APIC page, special processing applies to
6260 	 * VTPR, EOI, and SELF IPI, and all other writes pass through to L1
6261 	 * APIC.
6262 	 */
6263 	{
6264 		.name = "Baseline",
6265 		.virt_x2apic_mode_config = {
6266 			.virtual_interrupt_delivery = true,
6267 			.use_msr_bitmaps = true,
6268 			.disable_x2apic_msr_intercepts = true,
6269 			.disable_x2apic = false,
6270 			.apic_reg_virt_config = {
6271 				.apic_register_virtualization = true,
6272 				.use_tpr_shadow = true,
6273 				.virtualize_apic_accesses = false,
6274 				.virtualize_x2apic_mode = true,
6275 				.activate_secondary_controls = true,
6276 			},
6277 		},
6278 	},
6279 	{
6280 		.name = "Baseline w/ x2apic disabled",
6281 		.virt_x2apic_mode_config = {
6282 			.virtual_interrupt_delivery = true,
6283 			.use_msr_bitmaps = true,
6284 			.disable_x2apic_msr_intercepts = true,
6285 			.disable_x2apic = true,
6286 			.apic_reg_virt_config = {
6287 				.apic_register_virtualization = true,
6288 				.use_tpr_shadow = true,
6289 				.virtualize_apic_accesses = false,
6290 				.virtualize_x2apic_mode = true,
6291 				.activate_secondary_controls = true,
6292 			},
6293 		},
6294 	},
6295 
6296 	/*
6297 	 * Baseline, minus virtual-interrupt delivery. Reads come from virtual
6298 	 * APIC page, special processing applies to VTPR, and all other writes
6299 	 * pass through to L1 APIC.
6300 	 */
6301 	{
6302 		.name = "Baseline - virtual interrupt delivery",
6303 		.virt_x2apic_mode_config = {
6304 			.virtual_interrupt_delivery = false,
6305 			.use_msr_bitmaps = true,
6306 			.disable_x2apic_msr_intercepts = true,
6307 			.disable_x2apic = false,
6308 			.apic_reg_virt_config = {
6309 				.apic_register_virtualization = true,
6310 				.use_tpr_shadow = true,
6311 				.virtualize_apic_accesses = false,
6312 				.virtualize_x2apic_mode = true,
6313 				.activate_secondary_controls = true,
6314 			},
6315 		},
6316 	},
6317 
6318 	/*
6319 	 * Baseline, minus APIC-register virtualization. x2APIC reads pass
6320 	 * through to L1's APIC, unless reading VTPR
6321 	 */
6322 	{
6323 		.name = "Virtualize x2APIC mode, no APIC reg virt",
6324 		.virt_x2apic_mode_config = {
6325 			.virtual_interrupt_delivery = true,
6326 			.use_msr_bitmaps = true,
6327 			.disable_x2apic_msr_intercepts = true,
6328 			.disable_x2apic = false,
6329 			.apic_reg_virt_config = {
6330 				.apic_register_virtualization = false,
6331 				.use_tpr_shadow = true,
6332 				.virtualize_apic_accesses = false,
6333 				.virtualize_x2apic_mode = true,
6334 				.activate_secondary_controls = true,
6335 			},
6336 		},
6337 	},
6338 	{
6339 		.name = "Virtualize x2APIC mode, no APIC reg virt, x2APIC off",
6340 		.virt_x2apic_mode_config = {
6341 			.virtual_interrupt_delivery = true,
6342 			.use_msr_bitmaps = true,
6343 			.disable_x2apic_msr_intercepts = true,
6344 			.disable_x2apic = true,
6345 			.apic_reg_virt_config = {
6346 				.apic_register_virtualization = false,
6347 				.use_tpr_shadow = true,
6348 				.virtualize_apic_accesses = false,
6349 				.virtualize_x2apic_mode = true,
6350 				.activate_secondary_controls = true,
6351 			},
6352 		},
6353 	},
6354 
6355 	/*
6356 	 * Enable "virtualize x2APIC mode" and "APIC-register virtualization",
6357 	 * and disable intercepts for the x2APIC MSRs, but fail to enable
6358 	 * "activate secondary controls" (i.e. L2 gets access to L1's x2APIC
6359 	 * MSRs).
6360 	 */
6361 	{
6362 		.name = "Fail to enable activate secondary controls",
6363 		.virt_x2apic_mode_config = {
6364 			.virtual_interrupt_delivery = true,
6365 			.use_msr_bitmaps = true,
6366 			.disable_x2apic_msr_intercepts = true,
6367 			.disable_x2apic = false,
6368 			.apic_reg_virt_config = {
6369 				.apic_register_virtualization = true,
6370 				.use_tpr_shadow = true,
6371 				.virtualize_apic_accesses = false,
6372 				.virtualize_x2apic_mode = true,
6373 				.activate_secondary_controls = false,
6374 			},
6375 		},
6376 	},
6377 
6378 	/*
6379 	 * Enable "APIC-register virtualization" and enable "activate secondary
6380 	 * controls" and disable intercepts for the x2APIC MSRs, but do not
6381 	 * enable the "virtualize x2APIC mode" VM-execution control (i.e. L2
6382 	 * gets access to L1's x2APIC MSRs).
6383 	 */
6384 	{
6385 		.name = "Fail to enable virtualize x2APIC mode",
6386 		.virt_x2apic_mode_config = {
6387 			.virtual_interrupt_delivery = true,
6388 			.use_msr_bitmaps = true,
6389 			.disable_x2apic_msr_intercepts = true,
6390 			.disable_x2apic = false,
6391 			.apic_reg_virt_config = {
6392 				.apic_register_virtualization = true,
6393 				.use_tpr_shadow = true,
6394 				.virtualize_apic_accesses = false,
6395 				.virtualize_x2apic_mode = false,
6396 				.activate_secondary_controls = true,
6397 			},
6398 		},
6399 	},
6400 
6401 	/*
6402 	 * Disable "Virtualize x2APIC mode", disable x2APIC MSR intercepts, and
6403 	 * enable "APIC-register virtualization" --> L2 gets L1's x2APIC MSRs.
6404 	 */
6405 	{
6406 		.name = "Baseline",
6407 		.virt_x2apic_mode_config = {
6408 			.virtual_interrupt_delivery = true,
6409 			.use_msr_bitmaps = true,
6410 			.disable_x2apic_msr_intercepts = true,
6411 			.disable_x2apic = false,
6412 			.apic_reg_virt_config = {
6413 				.apic_register_virtualization = true,
6414 				.use_tpr_shadow = true,
6415 				.virtualize_apic_accesses = false,
6416 				.virtualize_x2apic_mode = false,
6417 				.activate_secondary_controls = true,
6418 			},
6419 		},
6420 	},
6421 };
6422 
6423 enum X2apic_op {
6424 	X2APIC_OP_RD,
6425 	X2APIC_OP_WR,
6426 	X2APIC_TERMINATE,
6427 };
6428 
6429 static u64 vmx_x2apic_read(u32 reg)
6430 {
6431 	u32 msr_addr = x2apic_msr(reg);
6432 	u64 val;
6433 
6434 	val = rdmsr(msr_addr);
6435 
6436 	return val;
6437 }
6438 
6439 static void vmx_x2apic_write(u32 reg, u64 val)
6440 {
6441 	u32 msr_addr = x2apic_msr(reg);
6442 
6443 	wrmsr(msr_addr, val);
6444 }
6445 
6446 struct virt_x2apic_mode_guest_args {
6447 	enum X2apic_op op;
6448 	u32 reg;
6449 	u64 val;
6450 	bool should_gp;
6451 	u64 (*virt_fn)(u64);
6452 } virt_x2apic_mode_guest_args;
6453 
6454 static volatile bool handle_x2apic_gp_ran;
6455 static volatile u32 handle_x2apic_gp_insn_len;
6456 static void handle_x2apic_gp(struct ex_regs *regs)
6457 {
6458 	handle_x2apic_gp_ran = true;
6459 	regs->rip += handle_x2apic_gp_insn_len;
6460 }
6461 
6462 static handler setup_x2apic_gp_handler(void)
6463 {
6464 	handler old_handler;
6465 
6466 	old_handler = handle_exception(GP_VECTOR, handle_x2apic_gp);
6467 	/* RDMSR and WRMSR are both 2 bytes, assuming no prefixes. */
6468 	handle_x2apic_gp_insn_len = 2;
6469 
6470 	return old_handler;
6471 }
6472 
6473 static void teardown_x2apic_gp_handler(handler old_handler)
6474 {
6475 	handle_exception(GP_VECTOR, old_handler);
6476 
6477 	/*
6478 	 * Defensively reset instruction length, so that if the handler is
6479 	 * incorrectly used, it will loop infinitely, rather than run off into
6480 	 * la la land.
6481 	 */
6482 	handle_x2apic_gp_insn_len = 0;
6483 	handle_x2apic_gp_ran = false;
6484 }
6485 
6486 static void virt_x2apic_mode_guest(void)
6487 {
6488 	volatile struct virt_x2apic_mode_guest_args *args =
6489 		&virt_x2apic_mode_guest_args;
6490 
6491 	for (;;) {
6492 		enum X2apic_op op = args->op;
6493 		u32 reg = args->reg;
6494 		u64 val = args->val;
6495 		bool should_gp = args->should_gp;
6496 		u64 (*virt_fn)(u64) = args->virt_fn;
6497 		handler old_handler;
6498 
6499 		if (op == X2APIC_TERMINATE)
6500 			break;
6501 
6502 		if (should_gp) {
6503 			TEST_ASSERT(!handle_x2apic_gp_ran);
6504 			old_handler = setup_x2apic_gp_handler();
6505 		}
6506 
6507 		if (op == X2APIC_OP_RD) {
6508 			u64 ret = vmx_x2apic_read(reg);
6509 
6510 			if (!should_gp) {
6511 				u64 want = virt_fn(val);
6512 				u64 got = virt_fn(ret);
6513 
6514 				report(got == want,
6515 				       "APIC read; got 0x%lx, want 0x%lx.",
6516 				       got, want);
6517 			}
6518 		} else if (op == X2APIC_OP_WR) {
6519 			vmx_x2apic_write(reg, val);
6520 		}
6521 
6522 		if (should_gp) {
6523 			report(handle_x2apic_gp_ran,
6524 			       "x2APIC op triggered GP.");
6525 			teardown_x2apic_gp_handler(old_handler);
6526 		}
6527 
6528 		/*
6529 		 * The L1 should always execute a vmcall after it's done testing
6530 		 * an individual APIC operation. This helps to validate that the
6531 		 * L1 and L2 are in sync with each other, as expected.
6532 		 */
6533 		vmcall();
6534 	}
6535 }
6536 
6537 static void test_x2apic_rd(
6538 	u32 reg, struct virt_x2apic_mode_expectation *expectation,
6539 	u32 *virtual_apic_page)
6540 {
6541 	u64 val = expectation->rd_val;
6542 	u32 exit_reason_want = expectation->rd_exit_reason;
6543 	struct virt_x2apic_mode_guest_args *args = &virt_x2apic_mode_guest_args;
6544 
6545 	report_prefix_pushf("x2apic - reading 0x%03x", reg);
6546 
6547 	/* Configure guest to do an x2apic read */
6548 	args->op = X2APIC_OP_RD;
6549 	args->reg = reg;
6550 	args->val = val;
6551 	args->should_gp = expectation->rd_behavior == X2APIC_ACCESS_TRIGGERS_GP;
6552 	args->virt_fn = expectation->virt_fn;
6553 
6554 	/* Setup virtual APIC page */
6555 	if (expectation->rd_behavior == X2APIC_ACCESS_VIRTUALIZED)
6556 		virtual_apic_page[apic_reg_index(reg)] = (u32)val;
6557 
6558 	/* Enter guest */
6559 	enter_guest();
6560 
6561 	if (exit_reason_want != VMX_VMCALL) {
6562 		report(false, "Oops, bad exit expectation: %u.",
6563 		       exit_reason_want);
6564 	}
6565 
6566 	skip_exit_vmcall();
6567 	report_prefix_pop();
6568 }
6569 
6570 static volatile bool handle_x2apic_ipi_ran;
6571 static void handle_x2apic_ipi(isr_regs_t *regs)
6572 {
6573 	handle_x2apic_ipi_ran = true;
6574 	eoi();
6575 }
6576 
6577 static void test_x2apic_wr(
6578 	u32 reg, struct virt_x2apic_mode_expectation *expectation,
6579 	u32 *virtual_apic_page)
6580 {
6581 	u64 val = expectation->wr_val;
6582 	u32 exit_reason_want = expectation->wr_exit_reason;
6583 	struct virt_x2apic_mode_guest_args *args = &virt_x2apic_mode_guest_args;
6584 	int ipi_vector = 0xf1;
6585 	u32 restore_val = 0;
6586 
6587 	report_prefix_pushf("x2apic - writing 0x%lx to 0x%03x", val, reg);
6588 
6589 	/* Configure guest to do an x2apic read */
6590 	args->op = X2APIC_OP_WR;
6591 	args->reg = reg;
6592 	args->val = val;
6593 	args->should_gp = expectation->wr_behavior == X2APIC_ACCESS_TRIGGERS_GP;
6594 
6595 	/* Setup virtual APIC page */
6596 	if (expectation->wr_behavior == X2APIC_ACCESS_VIRTUALIZED)
6597 		virtual_apic_page[apic_reg_index(reg)] = 0;
6598 	if (expectation->wr_behavior == X2APIC_ACCESS_PASSED_THROUGH && !expectation->wr_only)
6599 		restore_val = apic_read(reg);
6600 
6601 	/* Setup IPI handler */
6602 	handle_x2apic_ipi_ran = false;
6603 	handle_irq(ipi_vector, handle_x2apic_ipi);
6604 
6605 	/* Enter guest */
6606 	enter_guest();
6607 
6608 	/*
6609 	 * Validate the behavior and
6610 	 * pass a magic value back to the guest.
6611 	 */
6612 	if (exit_reason_want == VMX_EXTINT) {
6613 		assert_exit_reason(exit_reason_want);
6614 
6615 		/* Clear the external interrupt. */
6616 		irq_enable();
6617 		asm volatile ("nop");
6618 		irq_disable();
6619 		report(handle_x2apic_ipi_ran,
6620 		       "Got pending interrupt after IRQ enabled.");
6621 
6622 		enter_guest();
6623 	} else if (exit_reason_want == VMX_APIC_WRITE) {
6624 		assert_exit_reason(exit_reason_want);
6625 		report(virtual_apic_page[apic_reg_index(reg)] == val,
6626 		       "got APIC write exit @ page offset 0x%03x; val is 0x%x, want 0x%lx",
6627 		       apic_reg_index(reg),
6628 		       virtual_apic_page[apic_reg_index(reg)], val);
6629 
6630 		/* Reenter guest so it can consume/check rcx and exit again. */
6631 		enter_guest();
6632 	} else if (exit_reason_want != VMX_VMCALL) {
6633 		report(false, "Oops, bad exit expectation: %u.",
6634 		       exit_reason_want);
6635 	}
6636 
6637 	assert_exit_reason(VMX_VMCALL);
6638 	if (expectation->wr_behavior == X2APIC_ACCESS_VIRTUALIZED) {
6639 		u64 want = val;
6640 		u32 got = virtual_apic_page[apic_reg_index(reg)];
6641 
6642 		report(got == want, "x2APIC write; got 0x%x, want 0x%lx", got,
6643 		       want);
6644 	} else if (expectation->wr_behavior == X2APIC_ACCESS_PASSED_THROUGH) {
6645 		if (!expectation->wr_only) {
6646 			u32 got = apic_read(reg);
6647 			bool ok;
6648 
6649 			/*
6650 			 * When L1's TPR is passed through to L2, the lower
6651 			 * nibble can be lost. For example, if L2 executes
6652 			 * WRMSR(0x808, 0x78), then, L1 might read 0x70.
6653 			 *
6654 			 * Here's how the lower nibble can get lost:
6655 			 *   1. L2 executes WRMSR(0x808, 0x78).
6656 			 *   2. L2 exits to L0 with a WRMSR exit.
6657 			 *   3. L0 emulates WRMSR, by writing L1's TPR.
6658 			 *   4. L0 re-enters L2.
6659 			 *   5. L2 exits to L0 (reason doesn't matter).
6660 			 *   6. L0 reflects L2's exit to L1.
6661 			 *   7. Before entering L1, L0 exits to user-space
6662 			 *      (e.g., to satisfy TPR access reporting).
6663 			 *   8. User-space executes KVM_SET_REGS ioctl, which
6664 			 *      clears the lower nibble of L1's TPR.
6665 			 */
6666 			if (reg == APIC_TASKPRI) {
6667 				got = apic_virt_nibble1(got);
6668 				val = apic_virt_nibble1(val);
6669 			}
6670 
6671 			ok = got == val;
6672 			report(ok,
6673 			       "non-virtualized write; val is 0x%x, want 0x%lx",
6674 			       got, val);
6675 			apic_write(reg, restore_val);
6676 		} else {
6677 			report(true, "non-virtualized and write-only OK");
6678 		}
6679 	}
6680 	skip_exit_insn();
6681 
6682 	report_prefix_pop();
6683 }
6684 
6685 static enum Config_type configure_virt_x2apic_mode_test(
6686 	struct virt_x2apic_mode_config *virt_x2apic_mode_config,
6687 	u8 *msr_bitmap_page)
6688 {
6689 	int msr;
6690 	u32 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
6691 	u64 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
6692 
6693 	/* x2apic-specific VMCS config */
6694 	if (virt_x2apic_mode_config->use_msr_bitmaps) {
6695 		/* virt_x2apic_mode_test() checks for MSR bitmaps support */
6696 		cpu_exec_ctrl0 |= CPU_MSR_BITMAP;
6697 	} else {
6698 		cpu_exec_ctrl0 &= ~CPU_MSR_BITMAP;
6699 	}
6700 
6701 	if (virt_x2apic_mode_config->virtual_interrupt_delivery) {
6702 		if (!(ctrl_cpu_rev[1].clr & CPU_VINTD)) {
6703 			report_skip("VM-execution control \"virtual-interrupt delivery\" NOT supported.\n");
6704 			return CONFIG_TYPE_UNSUPPORTED;
6705 		}
6706 		cpu_exec_ctrl1 |= CPU_VINTD;
6707 	} else {
6708 		cpu_exec_ctrl1 &= ~CPU_VINTD;
6709 	}
6710 
6711 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
6712 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
6713 
6714 	/* x2APIC MSR intercepts are usually off for "Virtualize x2APIC mode" */
6715 	for (msr = 0x800; msr <= 0x8ff; msr++) {
6716 		if (virt_x2apic_mode_config->disable_x2apic_msr_intercepts) {
6717 			clear_bit(msr, msr_bitmap_page + 0x000);
6718 			clear_bit(msr, msr_bitmap_page + 0x800);
6719 		} else {
6720 			set_bit(msr, msr_bitmap_page + 0x000);
6721 			set_bit(msr, msr_bitmap_page + 0x800);
6722 		}
6723 	}
6724 
6725 	/* x2APIC mode can impact virtualization */
6726 	reset_apic();
6727 	if (!virt_x2apic_mode_config->disable_x2apic)
6728 		enable_x2apic();
6729 
6730 	return configure_apic_reg_virt_test(
6731 		&virt_x2apic_mode_config->apic_reg_virt_config);
6732 }
6733 
6734 static void virt_x2apic_mode_test(void)
6735 {
6736 	u32 *virtual_apic_page;
6737 	u8 *msr_bitmap_page;
6738 	u64 cpu_exec_ctrl0 = vmcs_read(CPU_EXEC_CTRL0);
6739 	u64 cpu_exec_ctrl1 = vmcs_read(CPU_EXEC_CTRL1);
6740 	int i;
6741 	struct virt_x2apic_mode_guest_args *args = &virt_x2apic_mode_guest_args;
6742 
6743 	if (!cpu_has_apicv()) {
6744 		report_skip(__func__);
6745 		return;
6746 	}
6747 
6748 	/*
6749 	 * This is to exercise an issue in KVM's logic to merge L0's and L1's
6750 	 * MSR bitmaps. Previously, an L1 could get at L0's x2APIC MSRs by
6751 	 * writing the IA32_SPEC_CTRL MSR or the IA32_PRED_CMD MSRs. KVM would
6752 	 * then proceed to manipulate the MSR bitmaps, as if VMCS12 had the
6753 	 * "Virtualize x2APIC mod" control set, even when it didn't.
6754 	 */
6755 	if (has_spec_ctrl())
6756 		wrmsr(MSR_IA32_SPEC_CTRL, 1);
6757 
6758 	/*
6759 	 * Check that VMCS12 supports:
6760 	 *   - "Virtual-APIC address", indicated by "use TPR shadow"
6761 	 *   - "MSR-bitmap address", indicated by "use MSR bitmaps"
6762 	 */
6763 	if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) {
6764 		report_skip("VM-execution control \"use TPR shadow\" NOT supported.\n");
6765 		return;
6766 	} else if (!(ctrl_cpu_rev[0].clr & CPU_MSR_BITMAP)) {
6767 		report_skip("VM-execution control \"use MSR bitmaps\" NOT supported.\n");
6768 		return;
6769 	}
6770 
6771 	test_set_guest(virt_x2apic_mode_guest);
6772 
6773 	virtual_apic_page = alloc_page();
6774 	vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page));
6775 
6776 	msr_bitmap_page = alloc_page();
6777 	memset(msr_bitmap_page, 0xff, PAGE_SIZE);
6778 	vmcs_write(MSR_BITMAP, virt_to_phys(msr_bitmap_page));
6779 
6780 	for (i = 0; i < ARRAY_SIZE(virt_x2apic_mode_tests); i++) {
6781 		struct virt_x2apic_mode_test_case *virt_x2apic_mode_test_case =
6782 			&virt_x2apic_mode_tests[i];
6783 		struct virt_x2apic_mode_config *virt_x2apic_mode_config =
6784 			&virt_x2apic_mode_test_case->virt_x2apic_mode_config;
6785 		enum Config_type config_type;
6786 		u32 reg;
6787 
6788 		printf("--- %s test ---\n", virt_x2apic_mode_test_case->name);
6789 		config_type =
6790 			configure_virt_x2apic_mode_test(virt_x2apic_mode_config,
6791 							msr_bitmap_page);
6792 		if (config_type == CONFIG_TYPE_UNSUPPORTED) {
6793 			report_skip("Skip because of missing features.\n");
6794 			continue;
6795 		} else if (config_type == CONFIG_TYPE_VMENTRY_FAILS_EARLY) {
6796 			enter_guest_with_bad_controls();
6797 			continue;
6798 		}
6799 
6800 		for (reg = 0; reg < PAGE_SIZE / sizeof(u32); reg += 0x10) {
6801 			struct virt_x2apic_mode_expectation expectation;
6802 
6803 			virt_x2apic_mode_exit_expectation(
6804 				reg, virt_x2apic_mode_config, &expectation);
6805 
6806 			test_x2apic_rd(reg, &expectation, virtual_apic_page);
6807 			test_x2apic_wr(reg, &expectation, virtual_apic_page);
6808 		}
6809 	}
6810 
6811 
6812 	/* Terminate the guest */
6813 	vmcs_write(CPU_EXEC_CTRL0, cpu_exec_ctrl0);
6814 	vmcs_write(CPU_EXEC_CTRL1, cpu_exec_ctrl1);
6815 	args->op = X2APIC_TERMINATE;
6816 	enter_guest();
6817 	assert_exit_reason(VMX_VMCALL);
6818 }
6819 
6820 static void test_ctl_reg(const char *cr_name, u64 cr, u64 fixed0, u64 fixed1)
6821 {
6822 	u64 val;
6823 	u64 cr_saved = vmcs_read(cr);
6824 	int i;
6825 
6826 	val = fixed0 & fixed1;
6827 	if (cr == HOST_CR4)
6828 		vmcs_write(cr, val | X86_CR4_PAE);
6829 	else
6830 		vmcs_write(cr, val);
6831 	report_prefix_pushf("%s %lx", cr_name, val);
6832 	if (val == fixed0)
6833 		test_vmx_vmlaunch(0);
6834 	else
6835 		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
6836 	report_prefix_pop();
6837 
6838 	for (i = 0; i < 64; i++) {
6839 
6840 		/* Set a bit when the corresponding bit in fixed1 is 0 */
6841 		if ((fixed1 & (1ull << i)) == 0) {
6842 			if (cr == HOST_CR4 && ((1ull << i) & X86_CR4_SMEP ||
6843 					       (1ull << i) & X86_CR4_SMAP))
6844 				continue;
6845 
6846 			vmcs_write(cr, cr_saved | (1ull << i));
6847 			report_prefix_pushf("%s %llx", cr_name,
6848 						cr_saved | (1ull << i));
6849 			test_vmx_vmlaunch(
6850 				VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
6851 			report_prefix_pop();
6852 		}
6853 
6854 		/* Unset a bit when the corresponding bit in fixed0 is 1 */
6855 		if (fixed0 & (1ull << i)) {
6856 			vmcs_write(cr, cr_saved & ~(1ull << i));
6857 			report_prefix_pushf("%s %llx", cr_name,
6858 						cr_saved & ~(1ull << i));
6859 			test_vmx_vmlaunch(
6860 				VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
6861 			report_prefix_pop();
6862 		}
6863 	}
6864 
6865 	vmcs_write(cr, cr_saved);
6866 }
6867 
6868 /*
6869  * 1. The CR0 field must not set any bit to a value not supported in VMX
6870  *    operation.
6871  * 2. The CR4 field must not set any bit to a value not supported in VMX
6872  *    operation.
6873  * 3. On processors that support Intel 64 architecture, the CR3 field must
6874  *    be such that bits 63:52 and bits in the range 51:32 beyond the
6875  *    processor’s physical-address width must be 0.
6876  *
6877  *  [Intel SDM]
6878  */
6879 static void test_host_ctl_regs(void)
6880 {
6881 	u64 fixed0, fixed1, cr3, cr3_saved;
6882 	int i;
6883 
6884 	/* Test CR0 */
6885 	fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0);
6886 	fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1);
6887 	test_ctl_reg("HOST_CR0", HOST_CR0, fixed0, fixed1);
6888 
6889 	/* Test CR4 */
6890 	fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
6891 	fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1) &
6892 		 ~(X86_CR4_SMEP | X86_CR4_SMAP);
6893 	test_ctl_reg("HOST_CR4", HOST_CR4, fixed0, fixed1);
6894 
6895 	/* Test CR3 */
6896 	cr3_saved = vmcs_read(HOST_CR3);
6897 	for (i = cpuid_maxphyaddr(); i < 64; i++) {
6898 		cr3 = cr3_saved | (1ul << i);
6899 		vmcs_write(HOST_CR3, cr3);
6900 		report_prefix_pushf("HOST_CR3 %lx", cr3);
6901 		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
6902 		report_prefix_pop();
6903 	}
6904 
6905 	vmcs_write(HOST_CR3, cr3_saved);
6906 }
6907 
6908 static void test_efer_vmlaunch(u32 fld, bool ok)
6909 {
6910 	if (fld == HOST_EFER) {
6911 		if (ok)
6912 			test_vmx_vmlaunch(0);
6913 		else
6914 			test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
6915 	} else {
6916 		if (ok) {
6917 			enter_guest();
6918 			report(vmcs_read(EXI_REASON) == VMX_VMCALL,
6919 			       "vmlaunch succeeds");
6920 		} else {
6921 			enter_guest_with_invalid_guest_state();
6922 			report(vmcs_read(EXI_REASON) == (VMX_ENTRY_FAILURE | VMX_FAIL_STATE),
6923 			       "vmlaunch fails");
6924 		}
6925 		advance_guest_state_test();
6926 	}
6927 }
6928 
6929 static void test_efer_one(u32 fld, const char * fld_name, u64 efer,
6930 			  u32 ctrl_fld, u64 ctrl,
6931 			  int i, const char *efer_bit_name)
6932 {
6933 	bool ok;
6934 
6935 	ok = true;
6936 	if (ctrl_fld == EXI_CONTROLS && (ctrl & EXI_LOAD_EFER)) {
6937 		if (!!(efer & EFER_LMA) != !!(ctrl & EXI_HOST_64))
6938 			ok = false;
6939 		if (!!(efer & EFER_LME) != !!(ctrl & EXI_HOST_64))
6940 			ok = false;
6941 	}
6942 	if (ctrl_fld == ENT_CONTROLS && (ctrl & ENT_LOAD_EFER)) {
6943 		/* Check LMA too since CR0.PG is set.  */
6944 		if (!!(efer & EFER_LMA) != !!(ctrl & ENT_GUEST_64))
6945 			ok = false;
6946 		if (!!(efer & EFER_LME) != !!(ctrl & ENT_GUEST_64))
6947 			ok = false;
6948 	}
6949 
6950 	/*
6951 	 * Skip the test if it would enter the guest in 32-bit mode.
6952 	 * Perhaps write the test in assembly and make sure it
6953 	 * can be run in either mode?
6954 	 */
6955 	if (fld == GUEST_EFER && ok && !(ctrl & ENT_GUEST_64))
6956 		return;
6957 
6958 	vmcs_write(ctrl_fld, ctrl);
6959 	vmcs_write(fld, efer);
6960 	report_prefix_pushf("%s %s bit turned %s, controls %s",
6961 			    fld_name, efer_bit_name,
6962 			    (i & 1) ? "on" : "off",
6963 			    (i & 2) ? "on" : "off");
6964 
6965 	test_efer_vmlaunch(fld, ok);
6966 	report_prefix_pop();
6967 }
6968 
6969 static void test_efer_bit(u32 fld, const char * fld_name,
6970 			  u32 ctrl_fld, u64 ctrl_bit, u64 efer_bit,
6971 			  const char *efer_bit_name)
6972 {
6973 	u64 efer_saved = vmcs_read(fld);
6974 	u32 ctrl_saved = vmcs_read(ctrl_fld);
6975 	int i;
6976 
6977 	for (i = 0; i < 4; i++) {
6978 		u64 efer = efer_saved & ~efer_bit;
6979 		u64 ctrl = ctrl_saved & ~ctrl_bit;
6980 
6981 		if (i & 1)
6982 			efer |= efer_bit;
6983 		if (i & 2)
6984 			ctrl |= ctrl_bit;
6985 
6986 		test_efer_one(fld, fld_name, efer, ctrl_fld, ctrl,
6987 			      i, efer_bit_name);
6988 	}
6989 
6990 	vmcs_write(ctrl_fld, ctrl_saved);
6991 	vmcs_write(fld, efer_saved);
6992 }
6993 
6994 static void test_efer(u32 fld, const char * fld_name, u32 ctrl_fld,
6995 		      u64 ctrl_bit1, u64 ctrl_bit2)
6996 {
6997 	u64 efer_saved = vmcs_read(fld);
6998 	u32 ctrl_saved = vmcs_read(ctrl_fld);
6999 	u64 efer_reserved_bits =  ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
7000 	u64 i;
7001 	u64 efer;
7002 
7003 	if (cpu_has_efer_nx())
7004 		efer_reserved_bits &= ~EFER_NX;
7005 
7006 	if (!ctrl_bit1) {
7007 		printf("\"Load-IA32-EFER\" exit control not supported\n");
7008 		goto test_entry_exit_mode;
7009 	}
7010 
7011 	report_prefix_pushf("%s %lx", fld_name, efer_saved);
7012 	test_efer_vmlaunch(fld, true);
7013 	report_prefix_pop();
7014 
7015 	/*
7016 	 * Check reserved bits
7017 	 */
7018 	vmcs_write(ctrl_fld, ctrl_saved & ~ctrl_bit1);
7019 	for (i = 0; i < 64; i++) {
7020 		if ((1ull << i) & efer_reserved_bits) {
7021 			efer = efer_saved | (1ull << i);
7022 			vmcs_write(fld, efer);
7023 			report_prefix_pushf("%s %lx", fld_name, efer);
7024 			test_efer_vmlaunch(fld, true);
7025 			report_prefix_pop();
7026 		}
7027 	}
7028 
7029 	vmcs_write(ctrl_fld, ctrl_saved | ctrl_bit1);
7030 	for (i = 0; i < 64; i++) {
7031 		if ((1ull << i) & efer_reserved_bits) {
7032 			efer = efer_saved | (1ull << i);
7033 			vmcs_write(fld, efer);
7034 			report_prefix_pushf("%s %lx", fld_name, efer);
7035 			test_efer_vmlaunch(fld, false);
7036 			report_prefix_pop();
7037 		}
7038 	}
7039 
7040 	vmcs_write(ctrl_fld, ctrl_saved);
7041 	vmcs_write(fld, efer_saved);
7042 
7043 	/*
7044 	 * Check LMA and LME bits
7045 	 */
7046 	test_efer_bit(fld, fld_name,
7047 		      ctrl_fld, ctrl_bit1,
7048 		      EFER_LMA,
7049 		      "EFER_LMA");
7050 	test_efer_bit(fld, fld_name,
7051 		      ctrl_fld, ctrl_bit1,
7052 		      EFER_LME,
7053 		      "EFER_LME");
7054 
7055 test_entry_exit_mode:
7056 	test_efer_bit(fld, fld_name,
7057 		      ctrl_fld, ctrl_bit2,
7058 		      EFER_LMA,
7059 		      "EFER_LMA");
7060 	test_efer_bit(fld, fld_name,
7061 		      ctrl_fld, ctrl_bit2,
7062 		      EFER_LME,
7063 		      "EFER_LME");
7064 }
7065 
7066 /*
7067  * If the 'load IA32_EFER' VM-exit control is 1, bits reserved in the
7068  * IA32_EFER MSR must be 0 in the field for that register. In addition,
7069  * the values of the LMA and LME bits in the field must each be that of
7070  * the 'host address-space size' VM-exit control.
7071  *
7072  *  [Intel SDM]
7073  */
7074 static void test_host_efer(void)
7075 {
7076 	test_efer(HOST_EFER, "HOST_EFER", EXI_CONTROLS,
7077 		  ctrl_exit_rev.clr & EXI_LOAD_EFER,
7078 		  EXI_HOST_64);
7079 }
7080 
7081 /*
7082  * If the 'load IA32_EFER' VM-enter control is 1, bits reserved in the
7083  * IA32_EFER MSR must be 0 in the field for that register. In addition,
7084  * the values of the LMA and LME bits in the field must each be that of
7085  * the 'IA32e-mode guest' VM-exit control.
7086  */
7087 static void test_guest_efer(void)
7088 {
7089 	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER)) {
7090 		printf("\"Load-IA32-EFER\" entry control not supported\n");
7091 		return;
7092 	}
7093 
7094 	vmcs_write(GUEST_EFER, rdmsr(MSR_EFER));
7095 	test_efer(GUEST_EFER, "GUEST_EFER", ENT_CONTROLS,
7096 		  ctrl_enter_rev.clr & ENT_LOAD_EFER,
7097 		  ENT_GUEST_64);
7098 }
7099 
7100 /*
7101  * PAT values higher than 8 are uninteresting since they're likely lumped
7102  * in with "8". We only test values above 8 one bit at a time,
7103  * in order to reduce the number of VM-Entries and keep the runtime reasonable.
7104  */
7105 #define	PAT_VAL_LIMIT	8
7106 
7107 static void test_pat(u32 field, const char * field_name, u32 ctrl_field,
7108 		     u64 ctrl_bit)
7109 {
7110 	u32 ctrl_saved = vmcs_read(ctrl_field);
7111 	u64 pat_saved = vmcs_read(field);
7112 	u64 i, val;
7113 	u32 j;
7114 	int error;
7115 
7116 	vmcs_clear_bits(ctrl_field, ctrl_bit);
7117 
7118 	for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) {
7119 		/* Test PAT0..PAT7 fields */
7120 		for (j = 0; j < (i ? 8 : 1); j++) {
7121 			val = i << j * 8;
7122 			vmcs_write(field, val);
7123 			if (field == HOST_PAT) {
7124 				report_prefix_pushf("%s %lx", field_name, val);
7125 				test_vmx_vmlaunch(0);
7126 				report_prefix_pop();
7127 
7128 			} else {	// GUEST_PAT
7129 				enter_guest();
7130 				report_guest_state_test("ENT_LOAD_PAT enabled",
7131 							VMX_VMCALL, val,
7132 							"GUEST_PAT");
7133 			}
7134 		}
7135 	}
7136 
7137 	vmcs_set_bits(ctrl_field, ctrl_bit);
7138 	for (i = 0; i < 256; i = (i < PAT_VAL_LIMIT) ? i + 1 : i * 2) {
7139 		/* Test PAT0..PAT7 fields */
7140 		for (j = 0; j < (i ? 8 : 1); j++) {
7141 			val = i << j * 8;
7142 			vmcs_write(field, val);
7143 
7144 			if (field == HOST_PAT) {
7145 				report_prefix_pushf("%s %lx", field_name, val);
7146 				if (i == 0x2 || i == 0x3 || i >= 0x8)
7147 					error =
7148 					VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
7149 				else
7150 					error = 0;
7151 
7152 				test_vmx_vmlaunch(error);
7153 				report_prefix_pop();
7154 
7155 			} else {	// GUEST_PAT
7156 				if (i == 0x2 || i == 0x3 || i >= 0x8) {
7157 					enter_guest_with_invalid_guest_state();
7158 					report_guest_state_test("ENT_LOAD_PAT "
7159 							        "enabled",
7160 							        VMX_FAIL_STATE | VMX_ENTRY_FAILURE,
7161 							        val,
7162 							        "GUEST_PAT");
7163 				} else {
7164 					enter_guest();
7165 					report_guest_state_test("ENT_LOAD_PAT "
7166 							        "enabled",
7167 							        VMX_VMCALL,
7168 							        val,
7169 							        "GUEST_PAT");
7170 				}
7171 			}
7172 
7173 		}
7174 	}
7175 
7176 	vmcs_write(ctrl_field, ctrl_saved);
7177 	vmcs_write(field, pat_saved);
7178 }
7179 
7180 /*
7181  *  If the "load IA32_PAT" VM-exit control is 1, the value of the field
7182  *  for the IA32_PAT MSR must be one that could be written by WRMSR
7183  *  without fault at CPL 0. Specifically, each of the 8 bytes in the
7184  *  field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
7185  *  6 (WB), or 7 (UC-).
7186  *
7187  *  [Intel SDM]
7188  */
7189 static void test_load_host_pat(void)
7190 {
7191 	/*
7192 	 * "load IA32_PAT" VM-exit control
7193 	 */
7194 	if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT)) {
7195 		printf("\"Load-IA32-PAT\" exit control not supported\n");
7196 		return;
7197 	}
7198 
7199 	test_pat(HOST_PAT, "HOST_PAT", EXI_CONTROLS, EXI_LOAD_PAT);
7200 }
7201 
7202 union cpuidA_eax {
7203 	struct {
7204 		unsigned int version_id:8;
7205 		unsigned int num_counters_gp:8;
7206 		unsigned int bit_width:8;
7207 		unsigned int mask_length:8;
7208 	} split;
7209 	unsigned int full;
7210 };
7211 
7212 union cpuidA_edx {
7213 	struct {
7214 		unsigned int num_counters_fixed:5;
7215 		unsigned int bit_width_fixed:8;
7216 		unsigned int reserved:9;
7217 	} split;
7218 	unsigned int full;
7219 };
7220 
7221 static bool valid_pgc(u64 val)
7222 {
7223 	struct cpuid id;
7224 	union cpuidA_eax eax;
7225 	union cpuidA_edx edx;
7226 	u64 mask;
7227 
7228 	id = cpuid(0xA);
7229 	eax.full = id.a;
7230 	edx.full = id.d;
7231 	mask = ~(((1ull << eax.split.num_counters_gp) - 1) |
7232 		 (((1ull << edx.split.num_counters_fixed) - 1) << 32));
7233 
7234 	return !(val & mask);
7235 }
7236 
7237 static void test_pgc_vmlaunch(u32 xerror, u32 xreason, bool xfail, bool host)
7238 {
7239 	u32 inst_err;
7240 	u64 obs;
7241 	bool success;
7242 	struct vmx_state_area_test_data *data = &vmx_state_area_test_data;
7243 
7244 	if (host) {
7245 		success = vmlaunch_succeeds();
7246 		obs = rdmsr(data->msr);
7247 		if (!success) {
7248 			inst_err = vmcs_read(VMX_INST_ERROR);
7249 			report(xerror == inst_err, "vmlaunch failed, "
7250 			       "VMX Inst Error is %d (expected %d)",
7251 			       inst_err, xerror);
7252 		} else {
7253 			report(!data->enabled || data->exp == obs,
7254 			       "Host state is 0x%lx (expected 0x%lx)",
7255 			       obs, data->exp);
7256 			report(success != xfail, "vmlaunch succeeded");
7257 		}
7258 	} else {
7259 		if (xfail) {
7260 			enter_guest_with_invalid_guest_state();
7261 		} else {
7262 			enter_guest();
7263 		}
7264 		report_guest_state_test("load GUEST_PERF_GLOBAL_CTRL",
7265 					xreason, GUEST_PERF_GLOBAL_CTRL,
7266 					"GUEST_PERF_GLOBAL_CTRL");
7267 	}
7268 }
7269 
7270 /*
7271  * test_load_perf_global_ctrl is a generic function for testing the
7272  * "load IA32_PERF_GLOBAL_CTRL" VM-{Entry,Exit} controls. This test function
7273  * tests the provided ctrl_val when disabled and enabled.
7274  *
7275  * @nr: VMCS field number corresponding to the host/guest state field
7276  * @name: Name of the above VMCS field for printing in test report
7277  * @ctrl_nr: VMCS field number corresponding to the VM-{Entry,Exit} control
7278  * @ctrl_val: Bit to set on the ctrl_field
7279  */
7280 static void test_perf_global_ctrl(u32 nr, const char *name, u32 ctrl_nr,
7281 				  const char *ctrl_name, u64 ctrl_val)
7282 {
7283 	u64 ctrl_saved = vmcs_read(ctrl_nr);
7284 	u64 pgc_saved = vmcs_read(nr);
7285 	u64 i, val;
7286 	bool host = nr == HOST_PERF_GLOBAL_CTRL;
7287 	struct vmx_state_area_test_data *data = &vmx_state_area_test_data;
7288 
7289 	data->msr = MSR_CORE_PERF_GLOBAL_CTRL;
7290 	msr_bmp_init();
7291 	vmcs_write(ctrl_nr, ctrl_saved & ~ctrl_val);
7292 	data->enabled = false;
7293 	report_prefix_pushf("\"load IA32_PERF_GLOBAL_CTRL\"=0 on %s",
7294 			    ctrl_name);
7295 
7296 	for (i = 0; i < 64; i++) {
7297 		val = 1ull << i;
7298 		vmcs_write(nr, val);
7299 		report_prefix_pushf("%s = 0x%lx", name, val);
7300 		test_pgc_vmlaunch(0, VMX_VMCALL, false, host);
7301 		report_prefix_pop();
7302 	}
7303 	report_prefix_pop();
7304 
7305 	vmcs_write(ctrl_nr, ctrl_saved | ctrl_val);
7306 	data->enabled = true;
7307 	report_prefix_pushf("\"load IA32_PERF_GLOBAL_CTRL\"=1 on %s",
7308 			    ctrl_name);
7309 	for (i = 0; i < 64; i++) {
7310 		val = 1ull << i;
7311 		data->exp = val;
7312 		vmcs_write(nr, val);
7313 		report_prefix_pushf("%s = 0x%lx", name, val);
7314 		if (valid_pgc(val)) {
7315 			test_pgc_vmlaunch(0, VMX_VMCALL, false, host);
7316 		} else {
7317 			if (host)
7318 				test_pgc_vmlaunch(
7319 					VMXERR_ENTRY_INVALID_HOST_STATE_FIELD,
7320 					0,
7321 					true,
7322 					host);
7323 			else
7324 				test_pgc_vmlaunch(
7325 					0,
7326 					VMX_ENTRY_FAILURE | VMX_FAIL_STATE,
7327 					true,
7328 					host);
7329 		}
7330 		report_prefix_pop();
7331 	}
7332 
7333 	data->enabled = false;
7334 	report_prefix_pop();
7335 	vmcs_write(ctrl_nr, ctrl_saved);
7336 	vmcs_write(nr, pgc_saved);
7337 }
7338 
7339 static void test_load_host_perf_global_ctrl(void)
7340 {
7341 	if (!(ctrl_exit_rev.clr & EXI_LOAD_PERF)) {
7342 		printf("\"load IA32_PERF_GLOBAL_CTRL\" exit control not supported\n");
7343 		return;
7344 	}
7345 
7346 	test_perf_global_ctrl(HOST_PERF_GLOBAL_CTRL, "HOST_PERF_GLOBAL_CTRL",
7347 				   EXI_CONTROLS, "EXI_CONTROLS", EXI_LOAD_PERF);
7348 }
7349 
7350 
7351 static void test_load_guest_perf_global_ctrl(void)
7352 {
7353 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PERF)) {
7354 		printf("\"load IA32_PERF_GLOBAL_CTRL\" entry control not supported\n");
7355 		return;
7356 	}
7357 
7358 	test_perf_global_ctrl(GUEST_PERF_GLOBAL_CTRL, "GUEST_PERF_GLOBAL_CTRL",
7359 				   ENT_CONTROLS, "ENT_CONTROLS", ENT_LOAD_PERF);
7360 }
7361 
7362 
7363 /*
7364  * test_vmcs_field - test a value for the given VMCS field
7365  * @field: VMCS field
7366  * @field_name: string name of VMCS field
7367  * @bit_start: starting bit
7368  * @bit_end: ending bit
7369  * @val: value that the bit range must or must not contain
7370  * @valid_val: whether value given in 'val' must be valid or not
7371  * @error: expected VMCS error when vmentry fails for an invalid value
7372  */
7373 static void test_vmcs_field(u64 field, const char *field_name, u32 bit_start,
7374 			    u32 bit_end, u64 val, bool valid_val, u32 error)
7375 {
7376 	u64 field_saved = vmcs_read(field);
7377 	u32 i;
7378 	u64 tmp;
7379 	u32 bit_on;
7380 	u64 mask = ~0ull;
7381 
7382 	mask = (mask >> bit_end) << bit_end;
7383 	mask = mask | ((1 << bit_start) - 1);
7384 	tmp = (field_saved & mask) | (val << bit_start);
7385 
7386 	vmcs_write(field, tmp);
7387 	report_prefix_pushf("%s %lx", field_name, tmp);
7388 	if (valid_val)
7389 		test_vmx_vmlaunch(0);
7390 	else
7391 		test_vmx_vmlaunch(error);
7392 	report_prefix_pop();
7393 
7394 	for (i = bit_start; i <= bit_end; i = i + 2) {
7395 		bit_on = ((1ull < i) & (val << bit_start)) ? 0 : 1;
7396 		if (bit_on)
7397 			tmp = field_saved | (1ull << i);
7398 		else
7399 			tmp = field_saved & ~(1ull << i);
7400 		vmcs_write(field, tmp);
7401 		report_prefix_pushf("%s %lx", field_name, tmp);
7402 		if (valid_val)
7403 			test_vmx_vmlaunch(error);
7404 		else
7405 			test_vmx_vmlaunch(0);
7406 		report_prefix_pop();
7407 	}
7408 
7409 	vmcs_write(field, field_saved);
7410 }
7411 
7412 static void test_canonical(u64 field, const char * field_name, bool host)
7413 {
7414 	u64 addr_saved = vmcs_read(field);
7415 
7416 	if (is_canonical(addr_saved)) {
7417 		if (host) {
7418 			report_prefix_pushf("%s %lx", field_name, addr_saved);
7419 			test_vmx_vmlaunch(0);
7420 			report_prefix_pop();
7421 		} else {
7422 			enter_guest();
7423 			report_guest_state_test("Test canonical address",
7424 						VMX_VMCALL, addr_saved,
7425 						field_name);
7426 		}
7427 
7428 		vmcs_write(field, NONCANONICAL);
7429 
7430 		if (host) {
7431 			report_prefix_pushf("%s %llx", field_name, NONCANONICAL);
7432 			test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7433 			report_prefix_pop();
7434 		} else {
7435 			enter_guest_with_invalid_guest_state();
7436 			report_guest_state_test("Test canonical address",
7437 					        VMX_FAIL_STATE | VMX_ENTRY_FAILURE,
7438 					        NONCANONICAL, field_name);
7439 		}
7440 
7441 		vmcs_write(field, addr_saved);
7442 	} else {
7443 		if (host) {
7444 			report_prefix_pushf("%s %llx", field_name, NONCANONICAL);
7445 			test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7446 			report_prefix_pop();
7447 		} else {
7448 			enter_guest_with_invalid_guest_state();
7449 			report_guest_state_test("Test canonical address",
7450 					        VMX_FAIL_STATE | VMX_ENTRY_FAILURE,
7451 					        NONCANONICAL, field_name);
7452 		}
7453 	}
7454 }
7455 
7456 #define TEST_RPL_TI_FLAGS(reg, name)				\
7457 	test_vmcs_field(reg, name, 0, 2, 0x0, true,		\
7458 			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7459 
7460 #define TEST_CS_TR_FLAGS(reg, name)				\
7461 	test_vmcs_field(reg, name, 3, 15, 0x0000, false,	\
7462 			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7463 
7464 /*
7465  * 1. In the selector field for each of CS, SS, DS, ES, FS, GS and TR, the
7466  *    RPL (bits 1:0) and the TI flag (bit 2) must be 0.
7467  * 2. The selector fields for CS and TR cannot be 0000H.
7468  * 3. The selector field for SS cannot be 0000H if the "host address-space
7469  *    size" VM-exit control is 0.
7470  * 4. On processors that support Intel 64 architecture, the base-address
7471  *    fields for FS, GS and TR must contain canonical addresses.
7472  */
7473 static void test_host_segment_regs(void)
7474 {
7475 	u16 selector_saved;
7476 
7477 	/*
7478 	 * Test RPL and TI flags
7479 	 */
7480 	TEST_RPL_TI_FLAGS(HOST_SEL_CS, "HOST_SEL_CS");
7481 	TEST_RPL_TI_FLAGS(HOST_SEL_SS, "HOST_SEL_SS");
7482 	TEST_RPL_TI_FLAGS(HOST_SEL_DS, "HOST_SEL_DS");
7483 	TEST_RPL_TI_FLAGS(HOST_SEL_ES, "HOST_SEL_ES");
7484 	TEST_RPL_TI_FLAGS(HOST_SEL_FS, "HOST_SEL_FS");
7485 	TEST_RPL_TI_FLAGS(HOST_SEL_GS, "HOST_SEL_GS");
7486 	TEST_RPL_TI_FLAGS(HOST_SEL_TR, "HOST_SEL_TR");
7487 
7488 	/*
7489 	 * Test that CS and TR fields can not be 0x0000
7490 	 */
7491 	TEST_CS_TR_FLAGS(HOST_SEL_CS, "HOST_SEL_CS");
7492 	TEST_CS_TR_FLAGS(HOST_SEL_TR, "HOST_SEL_TR");
7493 
7494 	/*
7495 	 * SS field can not be 0x0000 if "host address-space size" VM-exit
7496 	 * control is 0
7497 	 */
7498 	selector_saved = vmcs_read(HOST_SEL_SS);
7499 	vmcs_write(HOST_SEL_SS, 0);
7500 	report_prefix_pushf("HOST_SEL_SS 0");
7501 	if (vmcs_read(EXI_CONTROLS) & EXI_HOST_64) {
7502 		test_vmx_vmlaunch(0);
7503 	} else {
7504 		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7505 	}
7506 	report_prefix_pop();
7507 
7508 	vmcs_write(HOST_SEL_SS, selector_saved);
7509 
7510 #ifdef __x86_64__
7511 	/*
7512 	 * Base address for FS, GS and TR must be canonical
7513 	 */
7514 	test_canonical(HOST_BASE_FS, "HOST_BASE_FS", true);
7515 	test_canonical(HOST_BASE_GS, "HOST_BASE_GS", true);
7516 	test_canonical(HOST_BASE_TR, "HOST_BASE_TR", true);
7517 #endif
7518 }
7519 
7520 /*
7521  *  On processors that support Intel 64 architecture, the base-address
7522  *  fields for GDTR and IDTR must contain canonical addresses.
7523  */
7524 static void test_host_desc_tables(void)
7525 {
7526 #ifdef __x86_64__
7527 	test_canonical(HOST_BASE_GDTR, "HOST_BASE_GDTR", true);
7528 	test_canonical(HOST_BASE_IDTR, "HOST_BASE_IDTR", true);
7529 #endif
7530 }
7531 
7532 /*
7533  * If the "host address-space size" VM-exit control is 0, the following must
7534  * hold:
7535  *    - The "IA-32e mode guest" VM-entry control is 0.
7536  *    - Bit 17 of the CR4 field (corresponding to CR4.PCIDE) is 0.
7537  *    - Bits 63:32 in the RIP field are 0.
7538  *
7539  * If the "host address-space size" VM-exit control is 1, the following must
7540  * hold:
7541  *    - Bit 5 of the CR4 field (corresponding to CR4.PAE) is 1.
7542  *    - The RIP field contains a canonical address.
7543  *
7544  */
7545 static void test_host_addr_size(void)
7546 {
7547 	u64 cr4_saved = vmcs_read(HOST_CR4);
7548 	u64 rip_saved = vmcs_read(HOST_RIP);
7549 	u64 entry_ctrl_saved = vmcs_read(ENT_CONTROLS);
7550 	int i;
7551 	u64 tmp;
7552 
7553 	if (vmcs_read(EXI_CONTROLS) & EXI_HOST_64) {
7554 		vmcs_write(ENT_CONTROLS, entry_ctrl_saved | ENT_GUEST_64);
7555 		report_prefix_pushf("\"IA-32e mode guest\" enabled");
7556 		test_vmx_vmlaunch(0);
7557 		report_prefix_pop();
7558 
7559 		vmcs_write(HOST_CR4, cr4_saved | X86_CR4_PCIDE);
7560 		report_prefix_pushf("\"CR4.PCIDE\" set");
7561 		test_vmx_vmlaunch(0);
7562 		report_prefix_pop();
7563 
7564 		for (i = 32; i <= 63; i = i + 4) {
7565 			tmp = rip_saved | 1ull << i;
7566 			vmcs_write(HOST_RIP, tmp);
7567 			report_prefix_pushf("HOST_RIP %lx", tmp);
7568 			test_vmx_vmlaunch(0);
7569 			report_prefix_pop();
7570 		}
7571 
7572 		if (cr4_saved & X86_CR4_PAE) {
7573 			vmcs_write(HOST_CR4, cr4_saved  & ~X86_CR4_PAE);
7574 			report_prefix_pushf("\"CR4.PAE\" unset");
7575 			test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7576 		} else {
7577 			report_prefix_pushf("\"CR4.PAE\" set");
7578 			test_vmx_vmlaunch(0);
7579 		}
7580 		report_prefix_pop();
7581 
7582 		vmcs_write(HOST_RIP, NONCANONICAL);
7583 		report_prefix_pushf("HOST_RIP %llx", NONCANONICAL);
7584 		test_vmx_vmlaunch(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7585 		report_prefix_pop();
7586 
7587 		vmcs_write(ENT_CONTROLS, entry_ctrl_saved | ENT_GUEST_64);
7588 		vmcs_write(HOST_RIP, rip_saved);
7589 		vmcs_write(HOST_CR4, cr4_saved);
7590 	}
7591 }
7592 
7593 /*
7594  * Check that the virtual CPU checks the VMX Host State Area as
7595  * documented in the Intel SDM.
7596  */
7597 static void vmx_host_state_area_test(void)
7598 {
7599 	/*
7600 	 * Bit 1 of the guest's RFLAGS must be 1, or VM-entry will
7601 	 * fail due to invalid guest state, should we make it that
7602 	 * far.
7603 	 */
7604 	vmcs_write(GUEST_RFLAGS, 0);
7605 
7606 	test_host_ctl_regs();
7607 
7608 	test_canonical(HOST_SYSENTER_ESP, "HOST_SYSENTER_ESP", true);
7609 	test_canonical(HOST_SYSENTER_EIP, "HOST_SYSENTER_EIP", true);
7610 
7611 	test_host_efer();
7612 	test_load_host_pat();
7613 	test_host_segment_regs();
7614 	test_host_desc_tables();
7615 	test_host_addr_size();
7616 	test_load_host_perf_global_ctrl();
7617 }
7618 
7619 /*
7620  * If the "load debug controls" VM-entry control is 1, bits 63:32 in
7621  * the DR7 field must be 0.
7622  *
7623  * [Intel SDM]
7624  */
7625 static void test_guest_dr7(void)
7626 {
7627 	u32 ent_saved = vmcs_read(ENT_CONTROLS);
7628 	u64 dr7_saved = vmcs_read(GUEST_DR7);
7629 	u64 val;
7630 	int i;
7631 
7632 	if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS) {
7633 		vmcs_clear_bits(ENT_CONTROLS, ENT_LOAD_DBGCTLS);
7634 		for (i = 0; i < 64; i++) {
7635 			val = 1ull << i;
7636 			vmcs_write(GUEST_DR7, val);
7637 			enter_guest();
7638 			report_guest_state_test("ENT_LOAD_DBGCTLS disabled",
7639 						VMX_VMCALL, val, "GUEST_DR7");
7640 		}
7641 	}
7642 	if (ctrl_enter_rev.clr & ENT_LOAD_DBGCTLS) {
7643 		vmcs_set_bits(ENT_CONTROLS, ENT_LOAD_DBGCTLS);
7644 		for (i = 0; i < 64; i++) {
7645 			val = 1ull << i;
7646 			vmcs_write(GUEST_DR7, val);
7647 			if (i < 32)
7648 				enter_guest();
7649 			else
7650 				enter_guest_with_invalid_guest_state();
7651 			report_guest_state_test("ENT_LOAD_DBGCTLS enabled",
7652 						i < 32 ? VMX_VMCALL :
7653 						VMX_ENTRY_FAILURE |
7654 						VMX_FAIL_STATE,
7655 						val, "GUEST_DR7");
7656 		}
7657 	}
7658 	vmcs_write(GUEST_DR7, dr7_saved);
7659 	vmcs_write(ENT_CONTROLS, ent_saved);
7660 }
7661 
7662 /*
7663  *  If the "load IA32_PAT" VM-entry control is 1, the value of the field
7664  *  for the IA32_PAT MSR must be one that could be written by WRMSR
7665  *  without fault at CPL 0. Specifically, each of the 8 bytes in the
7666  *  field must have one of the values 0 (UC), 1 (WC), 4 (WT), 5 (WP),
7667  *  6 (WB), or 7 (UC-).
7668  *
7669  *  [Intel SDM]
7670  */
7671 static void test_load_guest_pat(void)
7672 {
7673 	/*
7674 	 * "load IA32_PAT" VM-entry control
7675 	 */
7676 	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
7677 		printf("\"Load-IA32-PAT\" entry control not supported\n");
7678 		return;
7679 	}
7680 
7681 	test_pat(GUEST_PAT, "GUEST_PAT", ENT_CONTROLS, ENT_LOAD_PAT);
7682 }
7683 
7684 /*
7685  * Check that the virtual CPU checks the VMX Guest State Area as
7686  * documented in the Intel SDM.
7687  */
7688 static void vmx_guest_state_area_test(void)
7689 {
7690 	vmx_set_test_stage(1);
7691 	test_set_guest(guest_state_test_main);
7692 
7693 	/*
7694 	 * The IA32_SYSENTER_ESP field and the IA32_SYSENTER_EIP field
7695 	 * must each contain a canonical address.
7696 	 */
7697 	test_canonical(GUEST_SYSENTER_ESP, "GUEST_SYSENTER_ESP", false);
7698 	test_canonical(GUEST_SYSENTER_EIP, "GUEST_SYSENTER_EIP", false);
7699 
7700 	test_guest_dr7();
7701 	test_load_guest_pat();
7702 	test_guest_efer();
7703 	test_load_guest_perf_global_ctrl();
7704 
7705 	/*
7706 	 * Let the guest finish execution
7707 	 */
7708 	vmx_set_test_stage(2);
7709 	enter_guest();
7710 }
7711 
7712 static bool valid_vmcs_for_vmentry(void)
7713 {
7714 	struct vmcs *current_vmcs = NULL;
7715 
7716 	if (vmcs_save(&current_vmcs))
7717 		return false;
7718 
7719 	return current_vmcs && !current_vmcs->hdr.shadow_vmcs;
7720 }
7721 
7722 static void try_vmentry_in_movss_shadow(void)
7723 {
7724 	u32 vm_inst_err;
7725 	u32 flags;
7726 	bool early_failure = false;
7727 	u32 expected_flags = X86_EFLAGS_FIXED;
7728 	bool valid_vmcs = valid_vmcs_for_vmentry();
7729 
7730 	expected_flags |= valid_vmcs ? X86_EFLAGS_ZF : X86_EFLAGS_CF;
7731 
7732 	/*
7733 	 * Indirectly set VM_INST_ERR to 12 ("VMREAD/VMWRITE from/to
7734 	 * unsupported VMCS component").
7735 	 */
7736 	vmcs_write(~0u, 0);
7737 
7738 	__asm__ __volatile__ ("mov %[host_rsp], %%edx;"
7739 			      "vmwrite %%rsp, %%rdx;"
7740 			      "mov 0f, %%rax;"
7741 			      "mov %[host_rip], %%edx;"
7742 			      "vmwrite %%rax, %%rdx;"
7743 			      "mov $-1, %%ah;"
7744 			      "sahf;"
7745 			      "mov %%ss, %%ax;"
7746 			      "mov %%ax, %%ss;"
7747 			      "vmlaunch;"
7748 			      "mov $1, %[early_failure];"
7749 			      "0: lahf;"
7750 			      "movzbl %%ah, %[flags]"
7751 			      : [early_failure] "+r" (early_failure),
7752 				[flags] "=&a" (flags)
7753 			      : [host_rsp] "i" (HOST_RSP),
7754 				[host_rip] "i" (HOST_RIP)
7755 			      : "rdx", "cc", "memory");
7756 	vm_inst_err = vmcs_read(VMX_INST_ERROR);
7757 
7758 	report(early_failure, "Early VM-entry failure");
7759 	report(flags == expected_flags, "RFLAGS[8:0] is %x (actual %x)",
7760 	       expected_flags, flags);
7761 	if (valid_vmcs)
7762 		report(vm_inst_err == VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS,
7763 		       "VM-instruction error is %d (actual %d)",
7764 		       VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, vm_inst_err);
7765 }
7766 
7767 static void vmentry_movss_shadow_test(void)
7768 {
7769 	struct vmcs *orig_vmcs;
7770 
7771 	TEST_ASSERT(!vmcs_save(&orig_vmcs));
7772 
7773 	/*
7774 	 * Set the launched flag on the current VMCS to verify the correct
7775 	 * error priority, below.
7776 	 */
7777 	test_set_guest(v2_null_test_guest);
7778 	enter_guest();
7779 
7780 	/*
7781 	 * With bit 1 of the guest's RFLAGS clear, VM-entry should
7782 	 * fail due to invalid guest state (if we make it that far).
7783 	 */
7784 	vmcs_write(GUEST_RFLAGS, 0);
7785 
7786 	/*
7787 	 * "VM entry with events blocked by MOV SS" takes precedence over
7788 	 * "VMLAUNCH with non-clear VMCS."
7789 	 */
7790 	report_prefix_push("valid current-VMCS");
7791 	try_vmentry_in_movss_shadow();
7792 	report_prefix_pop();
7793 
7794 	/*
7795 	 * VMfailInvalid takes precedence over "VM entry with events
7796 	 * blocked by MOV SS."
7797 	 */
7798 	TEST_ASSERT(!vmcs_clear(orig_vmcs));
7799 	report_prefix_push("no current-VMCS");
7800 	try_vmentry_in_movss_shadow();
7801 	report_prefix_pop();
7802 
7803 	TEST_ASSERT(!make_vmcs_current(orig_vmcs));
7804 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
7805 }
7806 
7807 static void vmx_cr_load_test(void)
7808 {
7809 	unsigned long cr3, cr4, orig_cr3, orig_cr4;
7810 
7811 	orig_cr4 = read_cr4();
7812 	orig_cr3 = read_cr3();
7813 
7814 	if (!this_cpu_has(X86_FEATURE_PCID)) {
7815 		report_skip("PCID not detected");
7816 		return;
7817 	}
7818 	if (!this_cpu_has(X86_FEATURE_MCE)) {
7819 		report_skip("MCE not detected");
7820 		return;
7821 	}
7822 
7823 	TEST_ASSERT(!(orig_cr3 & X86_CR3_PCID_MASK));
7824 
7825 	/* Enable PCID for L1. */
7826 	cr4 = orig_cr4 | X86_CR4_PCIDE;
7827 	cr3 = orig_cr3 | 0x1;
7828 	TEST_ASSERT(!write_cr4_checking(cr4));
7829 	write_cr3(cr3);
7830 
7831 	test_set_guest(v2_null_test_guest);
7832 	vmcs_write(HOST_CR4, cr4);
7833 	vmcs_write(HOST_CR3, cr3);
7834 	enter_guest();
7835 
7836 	/*
7837 	 * No exception is expected.
7838 	 *
7839 	 * NB. KVM loads the last guest write to CR4 into CR4 read
7840 	 *     shadow. In order to trigger an exit to KVM, we can toggle a
7841 	 *     bit that is owned by KVM. We use CR4.MCE, which shall
7842 	 *     have no side effect because normally no guest MCE (e.g., as the
7843 	 *     result of bad memory) would happen during this test.
7844 	 */
7845 	TEST_ASSERT(!write_cr4_checking(cr4 ^ X86_CR4_MCE));
7846 
7847 	/* Cleanup L1 state. */
7848 	write_cr3(orig_cr3);
7849 	TEST_ASSERT(!write_cr4_checking(orig_cr4));
7850 }
7851 
7852 static void vmx_nm_test_guest(void)
7853 {
7854 	write_cr0(read_cr0() | X86_CR0_TS);
7855 	asm volatile("fnop");
7856 }
7857 
7858 static void check_nm_exit(const char *test)
7859 {
7860 	u32 reason = vmcs_read(EXI_REASON);
7861 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
7862 	const u32 expected = INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
7863 		NM_VECTOR;
7864 
7865 	report(reason == VMX_EXC_NMI && intr_info == expected, "%s", test);
7866 }
7867 
7868 /*
7869  * This test checks that:
7870  *
7871  * (a) If L2 launches with CR0.TS clear, but later sets CR0.TS, then
7872  *     a subsequent #NM VM-exit is reflected to L1.
7873  *
7874  * (b) If L2 launches with CR0.TS clear and CR0.EM set, then a
7875  *     subsequent #NM VM-exit is reflected to L1.
7876  */
7877 static void vmx_nm_test(void)
7878 {
7879 	unsigned long cr0 = read_cr0();
7880 
7881 	test_set_guest(vmx_nm_test_guest);
7882 
7883 	/*
7884 	 * L1 wants to intercept #NM exceptions encountered in L2.
7885 	 */
7886 	vmcs_write(EXC_BITMAP, 1 << NM_VECTOR);
7887 
7888 	/*
7889 	 * Launch L2 with CR0.TS clear, but don't claim host ownership of
7890 	 * any CR0 bits. L2 will set CR0.TS and then try to execute fnop,
7891 	 * which will raise #NM. L0 should reflect the #NM VM-exit to L1.
7892 	 */
7893 	vmcs_write(CR0_MASK, 0);
7894 	vmcs_write(GUEST_CR0, cr0 & ~X86_CR0_TS);
7895 	enter_guest();
7896 	check_nm_exit("fnop with CR0.TS set in L2 triggers #NM VM-exit to L1");
7897 
7898 	/*
7899 	 * Re-enter L2 at the fnop instruction, with CR0.TS clear but
7900 	 * CR0.EM set. The fnop will still raise #NM, and L0 should
7901 	 * reflect the #NM VM-exit to L1.
7902 	 */
7903 	vmcs_write(GUEST_CR0, (cr0 & ~X86_CR0_TS) | X86_CR0_EM);
7904 	enter_guest();
7905 	check_nm_exit("fnop with CR0.EM set in L2 triggers #NM VM-exit to L1");
7906 
7907 	/*
7908 	 * Re-enter L2 at the fnop instruction, with both CR0.TS and
7909 	 * CR0.EM clear. There will be no #NM, and the L2 guest should
7910 	 * exit normally.
7911 	 */
7912 	vmcs_write(GUEST_CR0, cr0 & ~(X86_CR0_TS | X86_CR0_EM));
7913 	enter_guest();
7914 }
7915 
7916 bool vmx_pending_event_ipi_fired;
7917 static void vmx_pending_event_ipi_isr(isr_regs_t *regs)
7918 {
7919 	vmx_pending_event_ipi_fired = true;
7920 	eoi();
7921 }
7922 
7923 bool vmx_pending_event_guest_run;
7924 static void vmx_pending_event_guest(void)
7925 {
7926 	vmcall();
7927 	vmx_pending_event_guest_run = true;
7928 }
7929 
7930 static void vmx_pending_event_test_core(bool guest_hlt)
7931 {
7932 	int ipi_vector = 0xf1;
7933 
7934 	vmx_pending_event_ipi_fired = false;
7935 	handle_irq(ipi_vector, vmx_pending_event_ipi_isr);
7936 
7937 	vmx_pending_event_guest_run = false;
7938 	test_set_guest(vmx_pending_event_guest);
7939 
7940 	vmcs_set_bits(PIN_CONTROLS, PIN_EXTINT);
7941 
7942 	enter_guest();
7943 	skip_exit_vmcall();
7944 
7945 	if (guest_hlt)
7946 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
7947 
7948 	irq_disable();
7949 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
7950 				   APIC_DM_FIXED | ipi_vector,
7951 				   0);
7952 
7953 	enter_guest();
7954 
7955 	assert_exit_reason(VMX_EXTINT);
7956 	report(!vmx_pending_event_guest_run,
7957 	       "Guest did not run before host received IPI");
7958 
7959 	irq_enable();
7960 	asm volatile ("nop");
7961 	irq_disable();
7962 	report(vmx_pending_event_ipi_fired,
7963 	       "Got pending interrupt after IRQ enabled");
7964 
7965 	if (guest_hlt)
7966 		vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
7967 
7968 	enter_guest();
7969 	report(vmx_pending_event_guest_run,
7970 	       "Guest finished running when no interrupt");
7971 }
7972 
7973 static void vmx_pending_event_test(void)
7974 {
7975 	vmx_pending_event_test_core(false);
7976 }
7977 
7978 static void vmx_pending_event_hlt_test(void)
7979 {
7980 	vmx_pending_event_test_core(true);
7981 }
7982 
7983 static int vmx_window_test_db_count;
7984 
7985 static void vmx_window_test_db_handler(struct ex_regs *regs)
7986 {
7987 	vmx_window_test_db_count++;
7988 }
7989 
7990 static void vmx_nmi_window_test_guest(void)
7991 {
7992 	handle_exception(DB_VECTOR, vmx_window_test_db_handler);
7993 
7994 	asm volatile("vmcall\n\t"
7995 		     "nop\n\t");
7996 
7997 	handle_exception(DB_VECTOR, NULL);
7998 }
7999 
8000 static void verify_nmi_window_exit(u64 rip)
8001 {
8002 	u32 exit_reason = vmcs_read(EXI_REASON);
8003 
8004 	report(exit_reason == VMX_NMI_WINDOW,
8005 	       "Exit reason (%d) is 'NMI window'", exit_reason);
8006 	report(vmcs_read(GUEST_RIP) == rip, "RIP (%#lx) is %#lx",
8007 	       vmcs_read(GUEST_RIP), rip);
8008 	vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
8009 }
8010 
8011 static void vmx_nmi_window_test(void)
8012 {
8013 	u64 nop_addr;
8014 	void *db_fault_addr = get_idt_addr(&boot_idt[DB_VECTOR]);
8015 
8016 	if (!(ctrl_pin_rev.clr & PIN_VIRT_NMI)) {
8017 		report_skip("CPU does not support the \"Virtual NMIs\" VM-execution control.");
8018 		return;
8019 	}
8020 
8021 	if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) {
8022 		report_skip("CPU does not support the \"NMI-window exiting\" VM-execution control.");
8023 		return;
8024 	}
8025 
8026 	vmx_window_test_db_count = 0;
8027 
8028 	report_prefix_push("NMI-window");
8029 	test_set_guest(vmx_nmi_window_test_guest);
8030 	vmcs_set_bits(PIN_CONTROLS, PIN_VIRT_NMI);
8031 	enter_guest();
8032 	skip_exit_vmcall();
8033 	nop_addr = vmcs_read(GUEST_RIP);
8034 
8035 	/*
8036 	 * Ask for "NMI-window exiting," and expect an immediate VM-exit.
8037 	 * RIP will not advance.
8038 	 */
8039 	report_prefix_push("active, no blocking");
8040 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW);
8041 	enter_guest();
8042 	verify_nmi_window_exit(nop_addr);
8043 	report_prefix_pop();
8044 
8045 	/*
8046 	 * Ask for "NMI-window exiting" in a MOV-SS shadow, and expect
8047 	 * a VM-exit on the next instruction after the nop. (The nop
8048 	 * is one byte.)
8049 	 */
8050 	report_prefix_push("active, blocking by MOV-SS");
8051 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS);
8052 	enter_guest();
8053 	verify_nmi_window_exit(nop_addr + 1);
8054 	report_prefix_pop();
8055 
8056 	/*
8057 	 * Ask for "NMI-window exiting" (with event injection), and
8058 	 * expect a VM-exit after the event is injected. (RIP should
8059 	 * be at the address specified in the IDT entry for #DB.)
8060 	 */
8061 	report_prefix_push("active, no blocking, injecting #DB");
8062 	vmcs_write(ENT_INTR_INFO,
8063 		   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | DB_VECTOR);
8064 	enter_guest();
8065 	verify_nmi_window_exit((u64)db_fault_addr);
8066 	report_prefix_pop();
8067 
8068 	/*
8069 	 * Ask for "NMI-window exiting" with NMI blocking, and expect
8070 	 * a VM-exit after the next IRET (i.e. after the #DB handler
8071 	 * returns). So, RIP should be back at one byte past the nop.
8072 	 */
8073 	report_prefix_push("active, blocking by NMI");
8074 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_NMI);
8075 	enter_guest();
8076 	verify_nmi_window_exit(nop_addr + 1);
8077 	report(vmx_window_test_db_count == 1,
8078 	       "#DB handler executed once (actual %d times)",
8079 	       vmx_window_test_db_count);
8080 	report_prefix_pop();
8081 
8082 	if (!(rdmsr(MSR_IA32_VMX_MISC) & (1 << 6))) {
8083 		report_skip("CPU does not support activity state HLT.");
8084 	} else {
8085 		/*
8086 		 * Ask for "NMI-window exiting" when entering activity
8087 		 * state HLT, and expect an immediate VM-exit. RIP is
8088 		 * still one byte past the nop.
8089 		 */
8090 		report_prefix_push("halted, no blocking");
8091 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
8092 		enter_guest();
8093 		verify_nmi_window_exit(nop_addr + 1);
8094 		report_prefix_pop();
8095 
8096 		/*
8097 		 * Ask for "NMI-window exiting" when entering activity
8098 		 * state HLT (with event injection), and expect a
8099 		 * VM-exit after the event is injected. (RIP should be
8100 		 * at the address specified in the IDT entry for #DB.)
8101 		 */
8102 		report_prefix_push("halted, no blocking, injecting #DB");
8103 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
8104 		vmcs_write(ENT_INTR_INFO,
8105 			   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
8106 			   DB_VECTOR);
8107 		enter_guest();
8108 		verify_nmi_window_exit((u64)db_fault_addr);
8109 		report_prefix_pop();
8110 	}
8111 
8112 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW);
8113 	enter_guest();
8114 	report_prefix_pop();
8115 }
8116 
8117 static void vmx_intr_window_test_guest(void)
8118 {
8119 	handle_exception(DB_VECTOR, vmx_window_test_db_handler);
8120 
8121 	/*
8122 	 * The two consecutive STIs are to ensure that only the first
8123 	 * one has a shadow. Note that NOP and STI are one byte
8124 	 * instructions.
8125 	 */
8126 	asm volatile("vmcall\n\t"
8127 		     "nop\n\t"
8128 		     "sti\n\t"
8129 		     "sti\n\t");
8130 
8131 	handle_exception(DB_VECTOR, NULL);
8132 }
8133 
8134 static void verify_intr_window_exit(u64 rip)
8135 {
8136 	u32 exit_reason = vmcs_read(EXI_REASON);
8137 
8138 	report(exit_reason == VMX_INTR_WINDOW,
8139 	       "Exit reason (%d) is 'interrupt window'", exit_reason);
8140 	report(vmcs_read(GUEST_RIP) == rip, "RIP (%#lx) is %#lx",
8141 	       vmcs_read(GUEST_RIP), rip);
8142 	vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
8143 }
8144 
8145 static void vmx_intr_window_test(void)
8146 {
8147 	u64 vmcall_addr;
8148 	u64 nop_addr;
8149 	unsigned int orig_db_gate_type;
8150 	void *db_fault_addr = get_idt_addr(&boot_idt[DB_VECTOR]);
8151 
8152 	if (!(ctrl_cpu_rev[0].clr & CPU_INTR_WINDOW)) {
8153 		report_skip("CPU does not support the \"interrupt-window exiting\" VM-execution control.");
8154 		return;
8155 	}
8156 
8157 	/*
8158 	 * Change the IDT entry for #DB from interrupt gate to trap gate,
8159 	 * so that it won't clear RFLAGS.IF. We don't want interrupts to
8160 	 * be disabled after vectoring a #DB.
8161 	 */
8162 	orig_db_gate_type = boot_idt[DB_VECTOR].type;
8163 	boot_idt[DB_VECTOR].type = 15;
8164 
8165 	report_prefix_push("interrupt-window");
8166 	test_set_guest(vmx_intr_window_test_guest);
8167 	enter_guest();
8168 	assert_exit_reason(VMX_VMCALL);
8169 	vmcall_addr = vmcs_read(GUEST_RIP);
8170 
8171 	/*
8172 	 * Ask for "interrupt-window exiting" with RFLAGS.IF set and
8173 	 * no blocking; expect an immediate VM-exit. Note that we have
8174 	 * not advanced past the vmcall instruction yet, so RIP should
8175 	 * point to the vmcall instruction.
8176 	 */
8177 	report_prefix_push("active, no blocking, RFLAGS.IF=1");
8178 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
8179 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_IF);
8180 	enter_guest();
8181 	verify_intr_window_exit(vmcall_addr);
8182 	report_prefix_pop();
8183 
8184 	/*
8185 	 * Ask for "interrupt-window exiting" (with event injection)
8186 	 * with RFLAGS.IF set and no blocking; expect a VM-exit after
8187 	 * the event is injected. That is, RIP should should be at the
8188 	 * address specified in the IDT entry for #DB.
8189 	 */
8190 	report_prefix_push("active, no blocking, RFLAGS.IF=1, injecting #DB");
8191 	vmcs_write(ENT_INTR_INFO,
8192 		   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | DB_VECTOR);
8193 	vmcall_addr = vmcs_read(GUEST_RIP);
8194 	enter_guest();
8195 	verify_intr_window_exit((u64)db_fault_addr);
8196 	report_prefix_pop();
8197 
8198 	/*
8199 	 * Let the L2 guest run through the IRET, back to the VMCALL.
8200 	 * We have to clear the "interrupt-window exiting"
8201 	 * VM-execution control, or it would just keep causing
8202 	 * VM-exits. Then, advance past the VMCALL and set the
8203 	 * "interrupt-window exiting" VM-execution control again.
8204 	 */
8205 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
8206 	enter_guest();
8207 	skip_exit_vmcall();
8208 	nop_addr = vmcs_read(GUEST_RIP);
8209 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
8210 
8211 	/*
8212 	 * Ask for "interrupt-window exiting" in a MOV-SS shadow with
8213 	 * RFLAGS.IF set, and expect a VM-exit on the next
8214 	 * instruction. (NOP is one byte.)
8215 	 */
8216 	report_prefix_push("active, blocking by MOV-SS, RFLAGS.IF=1");
8217 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS);
8218 	enter_guest();
8219 	verify_intr_window_exit(nop_addr + 1);
8220 	report_prefix_pop();
8221 
8222 	/*
8223 	 * Back up to the NOP and ask for "interrupt-window exiting"
8224 	 * in an STI shadow with RFLAGS.IF set, and expect a VM-exit
8225 	 * on the next instruction. (NOP is one byte.)
8226 	 */
8227 	report_prefix_push("active, blocking by STI, RFLAGS.IF=1");
8228 	vmcs_write(GUEST_RIP, nop_addr);
8229 	vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_STI);
8230 	enter_guest();
8231 	verify_intr_window_exit(nop_addr + 1);
8232 	report_prefix_pop();
8233 
8234 	/*
8235 	 * Ask for "interrupt-window exiting" with RFLAGS.IF clear,
8236 	 * and expect a VM-exit on the instruction following the STI
8237 	 * shadow. Only the first STI (which is one byte past the NOP)
8238 	 * should have a shadow. The second STI (which is two bytes
8239 	 * past the NOP) has no shadow. Therefore, the interrupt
8240 	 * window opens at three bytes past the NOP.
8241 	 */
8242 	report_prefix_push("active, RFLAGS.IF = 0");
8243 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
8244 	enter_guest();
8245 	verify_intr_window_exit(nop_addr + 3);
8246 	report_prefix_pop();
8247 
8248 	if (!(rdmsr(MSR_IA32_VMX_MISC) & (1 << 6))) {
8249 		report_skip("CPU does not support activity state HLT.");
8250 	} else {
8251 		/*
8252 		 * Ask for "interrupt-window exiting" when entering
8253 		 * activity state HLT, and expect an immediate
8254 		 * VM-exit. RIP is still three bytes past the nop.
8255 		 */
8256 		report_prefix_push("halted, no blocking");
8257 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
8258 		enter_guest();
8259 		verify_intr_window_exit(nop_addr + 3);
8260 		report_prefix_pop();
8261 
8262 		/*
8263 		 * Ask for "interrupt-window exiting" when entering
8264 		 * activity state HLT (with event injection), and
8265 		 * expect a VM-exit after the event is injected. That
8266 		 * is, RIP should should be at the address specified
8267 		 * in the IDT entry for #DB.
8268 		 */
8269 		report_prefix_push("halted, no blocking, injecting #DB");
8270 		vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
8271 		vmcs_write(ENT_INTR_INFO,
8272 			   INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
8273 			   DB_VECTOR);
8274 		enter_guest();
8275 		verify_intr_window_exit((u64)db_fault_addr);
8276 		report_prefix_pop();
8277 	}
8278 
8279 	boot_idt[DB_VECTOR].type = orig_db_gate_type;
8280 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW);
8281 	enter_guest();
8282 	report_prefix_pop();
8283 }
8284 
8285 #define GUEST_TSC_OFFSET (1u << 30)
8286 
8287 static u64 guest_tsc;
8288 
8289 static void vmx_store_tsc_test_guest(void)
8290 {
8291 	guest_tsc = rdtsc();
8292 }
8293 
8294 /*
8295  * This test ensures that when IA32_TSC is in the VM-exit MSR-store
8296  * list, the value saved is not subject to the TSC offset that is
8297  * applied to RDTSC/RDTSCP/RDMSR(IA32_TSC) in guest execution.
8298  */
8299 static void vmx_store_tsc_test(void)
8300 {
8301 	struct vmx_msr_entry msr_entry = { .index = MSR_IA32_TSC };
8302 	u64 low, high;
8303 
8304 	if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET)) {
8305 		report_skip("'Use TSC offsetting' not supported");
8306 		return;
8307 	}
8308 
8309 	test_set_guest(vmx_store_tsc_test_guest);
8310 
8311 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_USE_TSC_OFFSET);
8312 	vmcs_write(EXI_MSR_ST_CNT, 1);
8313 	vmcs_write(EXIT_MSR_ST_ADDR, virt_to_phys(&msr_entry));
8314 	vmcs_write(TSC_OFFSET, GUEST_TSC_OFFSET);
8315 
8316 	low = rdtsc();
8317 	enter_guest();
8318 	high = rdtsc();
8319 
8320 	report(low + GUEST_TSC_OFFSET <= guest_tsc &&
8321 	       guest_tsc <= high + GUEST_TSC_OFFSET,
8322 	       "RDTSC value in the guest (%lu) is in range [%lu, %lu]",
8323 	       guest_tsc, low + GUEST_TSC_OFFSET, high + GUEST_TSC_OFFSET);
8324 	report(low <= msr_entry.value && msr_entry.value <= high,
8325 	       "IA32_TSC value saved in the VM-exit MSR-store list (%lu) is in range [%lu, %lu]",
8326 	       msr_entry.value, low, high);
8327 }
8328 
8329 static void vmx_db_test_guest(void)
8330 {
8331 	/*
8332 	 * For a hardware generated single-step #DB.
8333 	 */
8334 	asm volatile("vmcall;"
8335 		     "nop;"
8336 		     ".Lpost_nop:");
8337 	/*
8338 	 * ...in a MOVSS shadow, with pending debug exceptions.
8339 	 */
8340 	asm volatile("vmcall;"
8341 		     "nop;"
8342 		     ".Lpost_movss_nop:");
8343 	/*
8344 	 * For an L0 synthesized single-step #DB. (L0 intercepts WBINVD and
8345 	 * emulates it in software.)
8346 	 */
8347 	asm volatile("vmcall;"
8348 		     "wbinvd;"
8349 		     ".Lpost_wbinvd:");
8350 	/*
8351 	 * ...in a MOVSS shadow, with pending debug exceptions.
8352 	 */
8353 	asm volatile("vmcall;"
8354 		     "wbinvd;"
8355 		     ".Lpost_movss_wbinvd:");
8356 	/*
8357 	 * For a hardware generated single-step #DB in a transactional region.
8358 	 */
8359 	asm volatile("vmcall;"
8360 		     ".Lxbegin: xbegin .Lskip_rtm;"
8361 		     "xend;"
8362 		     ".Lskip_rtm:");
8363 }
8364 
8365 /*
8366  * Clear the pending debug exceptions and RFLAGS.TF and re-enter
8367  * L2. No #DB is delivered and L2 continues to the next point of
8368  * interest.
8369  */
8370 static void dismiss_db(void)
8371 {
8372 	vmcs_write(GUEST_PENDING_DEBUG, 0);
8373 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
8374 	enter_guest();
8375 }
8376 
8377 /*
8378  * Check a variety of VMCS fields relevant to an intercepted #DB exception.
8379  * Then throw away the #DB exception and resume L2.
8380  */
8381 static void check_db_exit(bool xfail_qual, bool xfail_dr6, bool xfail_pdbg,
8382 			  void *expected_rip, u64 expected_exit_qual,
8383 			  u64 expected_dr6)
8384 {
8385 	u32 reason = vmcs_read(EXI_REASON);
8386 	u32 intr_info = vmcs_read(EXI_INTR_INFO);
8387 	u64 exit_qual = vmcs_read(EXI_QUALIFICATION);
8388 	u64 guest_rip = vmcs_read(GUEST_RIP);
8389 	u64 guest_pending_dbg = vmcs_read(GUEST_PENDING_DEBUG);
8390 	u64 dr6 = read_dr6();
8391 	const u32 expected_intr_info = INTR_INFO_VALID_MASK |
8392 		INTR_TYPE_HARD_EXCEPTION | DB_VECTOR;
8393 
8394 	report(reason == VMX_EXC_NMI && intr_info == expected_intr_info,
8395 	       "Expected #DB VM-exit");
8396 	report((u64)expected_rip == guest_rip, "Expected RIP %p (actual %lx)",
8397 	       expected_rip, guest_rip);
8398 	report_xfail(xfail_pdbg, 0 == guest_pending_dbg,
8399 		     "Expected pending debug exceptions 0 (actual %lx)",
8400 		     guest_pending_dbg);
8401 	report_xfail(xfail_qual, expected_exit_qual == exit_qual,
8402 		     "Expected exit qualification %lx (actual %lx)",
8403 		     expected_exit_qual, exit_qual);
8404 	report_xfail(xfail_dr6, expected_dr6 == dr6,
8405 		     "Expected DR6 %lx (actual %lx)", expected_dr6, dr6);
8406 	dismiss_db();
8407 }
8408 
8409 /*
8410  * Assuming the guest has just exited on a VMCALL instruction, skip
8411  * over the vmcall, and set the guest's RFLAGS.TF in the VMCS. If
8412  * pending debug exceptions are non-zero, set the VMCS up as if the
8413  * previous instruction was a MOVSS that generated the indicated
8414  * pending debug exceptions. Then enter L2.
8415  */
8416 static void single_step_guest(const char *test_name, u64 starting_dr6,
8417 			      u64 pending_debug_exceptions)
8418 {
8419 	printf("\n%s\n", test_name);
8420 	skip_exit_vmcall();
8421 	write_dr6(starting_dr6);
8422 	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_TF);
8423 	if (pending_debug_exceptions) {
8424 		vmcs_write(GUEST_PENDING_DEBUG, pending_debug_exceptions);
8425 		vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS);
8426 	}
8427 	enter_guest();
8428 }
8429 
8430 /*
8431  * When L1 intercepts #DB, verify that a single-step trap clears
8432  * pending debug exceptions, populates the exit qualification field
8433  * properly, and that DR6 is not prematurely clobbered. In a
8434  * (simulated) MOVSS shadow, make sure that the pending debug
8435  * exception bits are properly accumulated into the exit qualification
8436  * field.
8437  */
8438 static void vmx_db_test(void)
8439 {
8440 	/*
8441 	 * We are going to set a few arbitrary bits in DR6 to verify that
8442 	 * (a) DR6 is not modified by an intercepted #DB, and
8443 	 * (b) stale bits in DR6 (DR6.BD, in particular) don't leak into
8444          *     the exit qualification field for a subsequent #DB exception.
8445 	 */
8446 	const u64 starting_dr6 = DR6_RESERVED | BIT(13) | DR_TRAP3 | DR_TRAP1;
8447 	extern char post_nop asm(".Lpost_nop");
8448 	extern char post_movss_nop asm(".Lpost_movss_nop");
8449 	extern char post_wbinvd asm(".Lpost_wbinvd");
8450 	extern char post_movss_wbinvd asm(".Lpost_movss_wbinvd");
8451 	extern char xbegin asm(".Lxbegin");
8452 	extern char skip_rtm asm(".Lskip_rtm");
8453 
8454 	/*
8455 	 * L1 wants to intercept #DB exceptions encountered in L2.
8456 	 */
8457 	vmcs_write(EXC_BITMAP, BIT(DB_VECTOR));
8458 
8459 	/*
8460 	 * Start L2 and run it up to the first point of interest.
8461 	 */
8462 	test_set_guest(vmx_db_test_guest);
8463 	enter_guest();
8464 
8465 	/*
8466 	 * Hardware-delivered #DB trap for single-step sets the
8467 	 * standard that L0 has to follow for emulated instructions.
8468 	 */
8469 	single_step_guest("Hardware delivered single-step", starting_dr6, 0);
8470 	check_db_exit(false, false, false, &post_nop, DR_STEP, starting_dr6);
8471 
8472 	/*
8473 	 * Hardware-delivered #DB trap for single-step in MOVSS shadow
8474 	 * also sets the standard that L0 has to follow for emulated
8475 	 * instructions. Here, we establish the VMCS pending debug
8476 	 * exceptions to indicate that the simulated MOVSS triggered a
8477 	 * data breakpoint as well as the single-step trap.
8478 	 */
8479 	single_step_guest("Hardware delivered single-step in MOVSS shadow",
8480 			  starting_dr6, BIT(12) | DR_STEP | DR_TRAP0 );
8481 	check_db_exit(false, false, false, &post_movss_nop, DR_STEP | DR_TRAP0,
8482 		      starting_dr6);
8483 
8484 	/*
8485 	 * L0 synthesized #DB trap for single-step is buggy, because
8486 	 * kvm (a) clobbers DR6 too early, and (b) tries its best to
8487 	 * reconstitute the exit qualification from the prematurely
8488 	 * modified DR6, but fails miserably.
8489 	 */
8490 	single_step_guest("Software synthesized single-step", starting_dr6, 0);
8491 	check_db_exit(false, false, false, &post_wbinvd, DR_STEP, starting_dr6);
8492 
8493 	/*
8494 	 * L0 synthesized #DB trap for single-step in MOVSS shadow is
8495 	 * even worse, because L0 also leaves the pending debug
8496 	 * exceptions in the VMCS instead of accumulating them into
8497 	 * the exit qualification field for the #DB exception.
8498 	 */
8499 	single_step_guest("Software synthesized single-step in MOVSS shadow",
8500 			  starting_dr6, BIT(12) | DR_STEP | DR_TRAP0);
8501 	check_db_exit(true, false, true, &post_movss_wbinvd, DR_STEP | DR_TRAP0,
8502 		      starting_dr6);
8503 
8504 	/*
8505 	 * Optional RTM test for hardware that supports RTM, to
8506 	 * demonstrate that the current volume 3 of the SDM
8507 	 * (325384-067US), table 27-1 is incorrect. Bit 16 of the exit
8508 	 * qualification for debug exceptions is not reserved. It is
8509 	 * set to 1 if a debug exception (#DB) or a breakpoint
8510 	 * exception (#BP) occurs inside an RTM region while advanced
8511 	 * debugging of RTM transactional regions is enabled.
8512 	 */
8513 	if (this_cpu_has(X86_FEATURE_RTM)) {
8514 		vmcs_write(ENT_CONTROLS,
8515 			   vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
8516 		/*
8517 		 * Set DR7.RTM[bit 11] and IA32_DEBUGCTL.RTM[bit 15]
8518 		 * in the guest to enable advanced debugging of RTM
8519 		 * transactional regions.
8520 		 */
8521 		vmcs_write(GUEST_DR7, BIT(11));
8522 		vmcs_write(GUEST_DEBUGCTL, BIT(15));
8523 		single_step_guest("Hardware delivered single-step in "
8524 				  "transactional region", starting_dr6, 0);
8525 		check_db_exit(false, false, false, &xbegin, BIT(16),
8526 			      starting_dr6);
8527 	} else {
8528 		vmcs_write(GUEST_RIP, (u64)&skip_rtm);
8529 		enter_guest();
8530 	}
8531 }
8532 
8533 static void enable_vid(void)
8534 {
8535 	void *virtual_apic_page;
8536 
8537 	assert(cpu_has_apicv());
8538 
8539 	disable_intercept_for_x2apic_msrs();
8540 
8541 	virtual_apic_page = alloc_page();
8542 	vmcs_write(APIC_VIRT_ADDR, (u64)virtual_apic_page);
8543 
8544 	vmcs_set_bits(PIN_CONTROLS, PIN_EXTINT);
8545 
8546 	vmcs_write(EOI_EXIT_BITMAP0, 0x0);
8547 	vmcs_write(EOI_EXIT_BITMAP1, 0x0);
8548 	vmcs_write(EOI_EXIT_BITMAP2, 0x0);
8549 	vmcs_write(EOI_EXIT_BITMAP3, 0x0);
8550 
8551 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY | CPU_TPR_SHADOW);
8552 	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_VINTD | CPU_VIRT_X2APIC);
8553 }
8554 
8555 static void trigger_ioapic_scan_thread(void *data)
8556 {
8557 	/* Wait until other CPU entered L2 */
8558 	while (vmx_get_test_stage() != 1)
8559 		;
8560 
8561 	/* Trigger ioapic scan */
8562 	ioapic_set_redir(0xf, 0x79, TRIGGER_LEVEL);
8563 	vmx_set_test_stage(2);
8564 }
8565 
8566 static void irq_79_handler_guest(isr_regs_t *regs)
8567 {
8568 	eoi();
8569 
8570 	/* L1 expects vmexit on VMX_VMCALL and not VMX_EOI_INDUCED */
8571 	vmcall();
8572 }
8573 
8574 /*
8575  * Constant for num of busy-loop iterations after which
8576  * a timer interrupt should have happened in host
8577  */
8578 #define TIMER_INTERRUPT_DELAY 100000000
8579 
8580 static void vmx_eoi_bitmap_ioapic_scan_test_guest(void)
8581 {
8582 	handle_irq(0x79, irq_79_handler_guest);
8583 	irq_enable();
8584 
8585 	/* Signal to L1 CPU to trigger ioapic scan */
8586 	vmx_set_test_stage(1);
8587 	/* Wait until L1 CPU to trigger ioapic scan */
8588 	while (vmx_get_test_stage() != 2)
8589 		;
8590 
8591 	/*
8592 	 * Wait for L0 timer interrupt to be raised while we run in L2
8593 	 * such that L0 will process the IOAPIC scan request before
8594 	 * resuming L2
8595 	 */
8596 	delay(TIMER_INTERRUPT_DELAY);
8597 
8598 	asm volatile ("int $0x79");
8599 }
8600 
8601 static void vmx_eoi_bitmap_ioapic_scan_test(void)
8602 {
8603 	if (!cpu_has_apicv() || (cpu_count() < 2)) {
8604 		report_skip(__func__);
8605 		return;
8606 	}
8607 
8608 	enable_vid();
8609 
8610 	on_cpu_async(1, trigger_ioapic_scan_thread, NULL);
8611 	test_set_guest(vmx_eoi_bitmap_ioapic_scan_test_guest);
8612 
8613 	/*
8614 	 * Launch L2.
8615 	 * We expect the exit reason to be VMX_VMCALL (and not EOI INDUCED).
8616 	 * In case the reason isn't VMX_VMCALL, the asserion inside
8617 	 * skip_exit_vmcall() will fail.
8618 	 */
8619 	enter_guest();
8620 	skip_exit_vmcall();
8621 
8622 	/* Let L2 finish */
8623 	enter_guest();
8624 	report(1, __func__);
8625 }
8626 
8627 #define HLT_WITH_RVI_VECTOR		(0xf1)
8628 
8629 bool vmx_hlt_with_rvi_guest_isr_fired;
8630 static void vmx_hlt_with_rvi_guest_isr(isr_regs_t *regs)
8631 {
8632 	vmx_hlt_with_rvi_guest_isr_fired = true;
8633 	eoi();
8634 }
8635 
8636 static void vmx_hlt_with_rvi_guest(void)
8637 {
8638 	handle_irq(HLT_WITH_RVI_VECTOR, vmx_hlt_with_rvi_guest_isr);
8639 
8640 	irq_enable();
8641 	asm volatile ("nop");
8642 
8643 	vmcall();
8644 }
8645 
8646 static void vmx_hlt_with_rvi_test(void)
8647 {
8648 	if (!cpu_has_apicv()) {
8649 		report_skip(__func__);
8650 		return;
8651 	}
8652 
8653 	enable_vid();
8654 
8655 	vmx_hlt_with_rvi_guest_isr_fired = false;
8656 	test_set_guest(vmx_hlt_with_rvi_guest);
8657 
8658 	enter_guest();
8659 	skip_exit_vmcall();
8660 
8661 	vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
8662 	vmcs_write(GUEST_INT_STATUS, HLT_WITH_RVI_VECTOR);
8663 	enter_guest();
8664 
8665 	report(vmx_hlt_with_rvi_guest_isr_fired, "Interrupt raised in guest");
8666 }
8667 
8668 static void set_irq_line_thread(void *data)
8669 {
8670 	/* Wait until other CPU entered L2 */
8671 	while (vmx_get_test_stage() != 1)
8672 		;
8673 
8674 	/* Set irq-line 0xf to raise vector 0x78 for vCPU 0 */
8675 	ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL);
8676 	vmx_set_test_stage(2);
8677 }
8678 
8679 static bool irq_78_handler_vmcall_before_eoi;
8680 static void irq_78_handler_guest(isr_regs_t *regs)
8681 {
8682 	set_irq_line(0xf, 0);
8683 	if (irq_78_handler_vmcall_before_eoi)
8684 		vmcall();
8685 	eoi();
8686 	vmcall();
8687 }
8688 
8689 static void vmx_apic_passthrough_guest(void)
8690 {
8691 	handle_irq(0x78, irq_78_handler_guest);
8692 	irq_enable();
8693 
8694 	/* If requested, wait for other CPU to trigger ioapic scan */
8695 	if (vmx_get_test_stage() < 1) {
8696 		vmx_set_test_stage(1);
8697 		while (vmx_get_test_stage() != 2)
8698 			;
8699 	}
8700 
8701 	set_irq_line(0xf, 1);
8702 }
8703 
8704 static void vmx_apic_passthrough(bool set_irq_line_from_thread)
8705 {
8706 	if (set_irq_line_from_thread && (cpu_count() < 2)) {
8707 		report_skip(__func__);
8708 		return;
8709 	}
8710 
8711 	/* Test device is required for generating IRQs */
8712 	if (!test_device_enabled()) {
8713 		report_skip(__func__);
8714 		return;
8715 	}
8716 	u64 cpu_ctrl_0 = CPU_SECONDARY;
8717 	u64 cpu_ctrl_1 = 0;
8718 
8719 	disable_intercept_for_x2apic_msrs();
8720 
8721 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
8722 
8723 	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | cpu_ctrl_0);
8724 	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | cpu_ctrl_1);
8725 
8726 	if (set_irq_line_from_thread) {
8727 		irq_78_handler_vmcall_before_eoi = false;
8728 		on_cpu_async(1, set_irq_line_thread, NULL);
8729 	} else {
8730 		irq_78_handler_vmcall_before_eoi = true;
8731 		ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL);
8732 		vmx_set_test_stage(2);
8733 	}
8734 	test_set_guest(vmx_apic_passthrough_guest);
8735 
8736 	if (irq_78_handler_vmcall_before_eoi) {
8737 		/* Before EOI remote_irr should still be set */
8738 		enter_guest();
8739 		skip_exit_vmcall();
8740 		TEST_ASSERT_EQ_MSG(1, (int)ioapic_read_redir(0xf).remote_irr,
8741 			"IOAPIC pass-through: remote_irr=1 before EOI");
8742 	}
8743 
8744 	/* After EOI remote_irr should be cleared */
8745 	enter_guest();
8746 	skip_exit_vmcall();
8747 	TEST_ASSERT_EQ_MSG(0, (int)ioapic_read_redir(0xf).remote_irr,
8748 		"IOAPIC pass-through: remote_irr=0 after EOI");
8749 
8750 	/* Let L2 finish */
8751 	enter_guest();
8752 	report(1, __func__);
8753 }
8754 
8755 static void vmx_apic_passthrough_test(void)
8756 {
8757 	vmx_apic_passthrough(false);
8758 }
8759 
8760 static void vmx_apic_passthrough_thread_test(void)
8761 {
8762 	vmx_apic_passthrough(true);
8763 }
8764 
8765 static void vmx_apic_passthrough_tpr_threshold_guest(void)
8766 {
8767 	cli();
8768 	apic_set_tpr(0);
8769 }
8770 
8771 static bool vmx_apic_passthrough_tpr_threshold_ipi_isr_fired;
8772 static void vmx_apic_passthrough_tpr_threshold_ipi_isr(isr_regs_t *regs)
8773 {
8774 	vmx_apic_passthrough_tpr_threshold_ipi_isr_fired = true;
8775 	eoi();
8776 }
8777 
8778 static void vmx_apic_passthrough_tpr_threshold_test(void)
8779 {
8780 	int ipi_vector = 0xe1;
8781 
8782 	disable_intercept_for_x2apic_msrs();
8783 	vmcs_clear_bits(PIN_CONTROLS, PIN_EXTINT);
8784 
8785 	/* Raise L0 TPR-threshold by queueing vector in LAPIC IRR */
8786 	cli();
8787 	apic_set_tpr((ipi_vector >> 4) + 1);
8788 	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
8789 			APIC_DM_FIXED | ipi_vector,
8790 			0);
8791 
8792 	test_set_guest(vmx_apic_passthrough_tpr_threshold_guest);
8793 	enter_guest();
8794 
8795 	report(apic_get_tpr() == 0, "TPR was zero by guest");
8796 
8797 	/* Clean pending self-IPI */
8798 	vmx_apic_passthrough_tpr_threshold_ipi_isr_fired = false;
8799 	handle_irq(ipi_vector, vmx_apic_passthrough_tpr_threshold_ipi_isr);
8800 	sti();
8801 	asm volatile ("nop");
8802 	report(vmx_apic_passthrough_tpr_threshold_ipi_isr_fired, "self-IPI fired");
8803 
8804 	report(1, __func__);
8805 }
8806 
8807 static u64 init_signal_test_exit_reason;
8808 static bool init_signal_test_thread_continued;
8809 
8810 static void init_signal_test_thread(void *data)
8811 {
8812 	struct vmcs *test_vmcs = data;
8813 
8814 	/* Enter VMX operation (i.e. exec VMXON) */
8815 	u64 *ap_vmxon_region = alloc_page();
8816 	enable_vmx();
8817 	init_vmx(ap_vmxon_region);
8818 	_vmx_on(ap_vmxon_region);
8819 
8820 	/* Signal CPU have entered VMX operation */
8821 	vmx_set_test_stage(1);
8822 
8823 	/* Wait for BSP CPU to send INIT signal */
8824 	while (vmx_get_test_stage() != 2)
8825 		;
8826 
8827 	/*
8828 	 * Signal that we continue as usual as INIT signal
8829 	 * should be blocked while CPU is in VMX operation
8830 	 */
8831 	vmx_set_test_stage(3);
8832 
8833 	/* Wait for signal to enter VMX non-root mode */
8834 	while (vmx_get_test_stage() != 4)
8835 		;
8836 
8837 	/* Enter VMX non-root mode */
8838 	test_set_guest(v2_null_test_guest);
8839 	make_vmcs_current(test_vmcs);
8840 	enter_guest();
8841 	/* Save exit reason for BSP CPU to compare to expected result */
8842 	init_signal_test_exit_reason = vmcs_read(EXI_REASON);
8843 	/* VMCLEAR test-vmcs so it could be loaded by BSP CPU */
8844 	vmcs_clear(test_vmcs);
8845 	launched = false;
8846 	/* Signal that CPU exited to VMX root mode */
8847 	vmx_set_test_stage(5);
8848 
8849 	/* Wait for BSP CPU to signal to exit VMX operation */
8850 	while (vmx_get_test_stage() != 6)
8851 		;
8852 
8853 	/* Exit VMX operation (i.e. exec VMXOFF) */
8854 	vmx_off();
8855 
8856 	/*
8857 	 * Signal to BSP CPU that we continue as usual as INIT signal
8858 	 * should have been consumed by VMX_INIT exit from guest
8859 	 */
8860 	vmx_set_test_stage(7);
8861 
8862 	/* Wait for BSP CPU to signal to enter VMX operation */
8863 	while (vmx_get_test_stage() != 8)
8864 		;
8865 	/* Enter VMX operation (i.e. exec VMXON) */
8866 	_vmx_on(ap_vmxon_region);
8867 	/* Signal to BSP we are in VMX operation */
8868 	vmx_set_test_stage(9);
8869 
8870 	/* Wait for BSP CPU to send INIT signal */
8871 	while (vmx_get_test_stage() != 10)
8872 		;
8873 
8874 	/* Exit VMX operation (i.e. exec VMXOFF) */
8875 	vmx_off();
8876 
8877 	/*
8878 	 * Exiting VMX operation should result in latched
8879 	 * INIT signal being processed. Therefore, we should
8880 	 * never reach the below code. Thus, signal to BSP
8881 	 * CPU if we have reached here so it is able to
8882 	 * report an issue if it happens.
8883 	 */
8884 	init_signal_test_thread_continued = true;
8885 }
8886 
8887 #define INIT_SIGNAL_TEST_DELAY	100000000ULL
8888 
8889 static void vmx_init_signal_test(void)
8890 {
8891 	struct vmcs *test_vmcs;
8892 
8893 	if (cpu_count() < 2) {
8894 		report_skip(__func__);
8895 		return;
8896 	}
8897 
8898 	/* VMCLEAR test-vmcs so it could be loaded by other CPU */
8899 	vmcs_save(&test_vmcs);
8900 	vmcs_clear(test_vmcs);
8901 
8902 	vmx_set_test_stage(0);
8903 	on_cpu_async(1, init_signal_test_thread, test_vmcs);
8904 
8905 	/* Wait for other CPU to enter VMX operation */
8906 	while (vmx_get_test_stage() != 1)
8907 		;
8908 
8909 	/* Send INIT signal to other CPU */
8910 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT,
8911 				   id_map[1]);
8912 	/* Signal other CPU we have sent INIT signal */
8913 	vmx_set_test_stage(2);
8914 
8915 	/*
8916 	 * Wait reasonable amount of time for INIT signal to
8917 	 * be received on other CPU and verify that other CPU
8918 	 * have proceed as usual to next test stage as INIT
8919 	 * signal should be blocked while other CPU in
8920 	 * VMX operation
8921 	 */
8922 	delay(INIT_SIGNAL_TEST_DELAY);
8923 	report(vmx_get_test_stage() == 3,
8924 	       "INIT signal blocked when CPU in VMX operation");
8925 	/* No point to continue if we failed at this point */
8926 	if (vmx_get_test_stage() != 3)
8927 		return;
8928 
8929 	/* Signal other CPU to enter VMX non-root mode */
8930 	init_signal_test_exit_reason = -1ull;
8931 	vmx_set_test_stage(4);
8932 	/*
8933 	 * Wait reasonable amont of time for other CPU
8934 	 * to exit to VMX root mode
8935 	 */
8936 	delay(INIT_SIGNAL_TEST_DELAY);
8937 	if (vmx_get_test_stage() != 5) {
8938 		report(false, "Pending INIT signal didn't result in VMX exit");
8939 		return;
8940 	}
8941 	report(init_signal_test_exit_reason == VMX_INIT,
8942 			"INIT signal during VMX non-root mode result in exit-reason %s (%lu)",
8943 			exit_reason_description(init_signal_test_exit_reason),
8944 			init_signal_test_exit_reason);
8945 
8946 	/* Run guest to completion */
8947 	make_vmcs_current(test_vmcs);
8948 	enter_guest();
8949 
8950 	/* Signal other CPU to exit VMX operation */
8951 	init_signal_test_thread_continued = false;
8952 	vmx_set_test_stage(6);
8953 
8954 	/* Wait reasonable amount of time for other CPU to exit VMX operation */
8955 	delay(INIT_SIGNAL_TEST_DELAY);
8956 	report(vmx_get_test_stage() == 7,
8957 	       "INIT signal consumed on VMX_INIT exit");
8958 	/* No point to continue if we failed at this point */
8959 	if (vmx_get_test_stage() != 7)
8960 		return;
8961 
8962 	/* Signal other CPU to enter VMX operation */
8963 	vmx_set_test_stage(8);
8964 	/* Wait for other CPU to enter VMX operation */
8965 	while (vmx_get_test_stage() != 9)
8966 		;
8967 
8968 	/* Send INIT signal to other CPU */
8969 	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT,
8970 				   id_map[1]);
8971 	/* Signal other CPU we have sent INIT signal */
8972 	vmx_set_test_stage(10);
8973 
8974 	/*
8975 	 * Wait reasonable amount of time for other CPU
8976 	 * to exit VMX operation and process INIT signal
8977 	 */
8978 	delay(INIT_SIGNAL_TEST_DELAY);
8979 	report(!init_signal_test_thread_continued,
8980 	       "INIT signal processed after exit VMX operation");
8981 
8982 	/*
8983 	 * TODO: Send SIPI to other CPU to sipi_entry (See x86/cstart64.S)
8984 	 * to re-init it to kvm-unit-tests standard environment.
8985 	 * Somehow (?) verify that SIPI was indeed received.
8986 	 */
8987 }
8988 
8989 enum vmcs_access {
8990 	ACCESS_VMREAD,
8991 	ACCESS_VMWRITE,
8992 	ACCESS_NONE,
8993 };
8994 
8995 struct vmcs_shadow_test_common {
8996 	enum vmcs_access op;
8997 	enum Reason reason;
8998 	u64 field;
8999 	u64 value;
9000 	u64 flags;
9001 	u64 time;
9002 } l1_l2_common;
9003 
9004 static inline u64 vmread_flags(u64 field, u64 *val)
9005 {
9006 	u64 flags;
9007 
9008 	asm volatile ("vmread %2, %1; pushf; pop %0"
9009 		      : "=r" (flags), "=rm" (*val) : "r" (field) : "cc");
9010 	return flags & X86_EFLAGS_ALU;
9011 }
9012 
9013 static inline u64 vmwrite_flags(u64 field, u64 val)
9014 {
9015 	u64 flags;
9016 
9017 	asm volatile ("vmwrite %1, %2; pushf; pop %0"
9018 		      : "=r"(flags) : "rm" (val), "r" (field) : "cc");
9019 	return flags & X86_EFLAGS_ALU;
9020 }
9021 
9022 static void vmx_vmcs_shadow_test_guest(void)
9023 {
9024 	struct vmcs_shadow_test_common *c = &l1_l2_common;
9025 	u64 start;
9026 
9027 	while (c->op != ACCESS_NONE) {
9028 		start = rdtsc();
9029 		switch (c->op) {
9030 		default:
9031 			c->flags = -1ull;
9032 			break;
9033 		case ACCESS_VMREAD:
9034 			c->flags = vmread_flags(c->field, &c->value);
9035 			break;
9036 		case ACCESS_VMWRITE:
9037 			c->flags = vmwrite_flags(c->field, 0);
9038 			break;
9039 		}
9040 		c->time = rdtsc() - start;
9041 		vmcall();
9042 	}
9043 }
9044 
9045 static u64 vmread_from_shadow(u64 field)
9046 {
9047 	struct vmcs *primary;
9048 	struct vmcs *shadow;
9049 	u64 value;
9050 
9051 	TEST_ASSERT(!vmcs_save(&primary));
9052 	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
9053 	TEST_ASSERT(!make_vmcs_current(shadow));
9054 	value = vmcs_read(field);
9055 	TEST_ASSERT(!make_vmcs_current(primary));
9056 	return value;
9057 }
9058 
9059 static u64 vmwrite_to_shadow(u64 field, u64 value)
9060 {
9061 	struct vmcs *primary;
9062 	struct vmcs *shadow;
9063 
9064 	TEST_ASSERT(!vmcs_save(&primary));
9065 	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
9066 	TEST_ASSERT(!make_vmcs_current(shadow));
9067 	vmcs_write(field, value);
9068 	value = vmcs_read(field);
9069 	TEST_ASSERT(!make_vmcs_current(primary));
9070 	return value;
9071 }
9072 
9073 static void vmcs_shadow_test_access(u8 *bitmap[2], enum vmcs_access access)
9074 {
9075 	struct vmcs_shadow_test_common *c = &l1_l2_common;
9076 
9077 	c->op = access;
9078 	vmcs_write(VMX_INST_ERROR, 0);
9079 	enter_guest();
9080 	c->reason = vmcs_read(EXI_REASON) & 0xffff;
9081 	if (c->reason != VMX_VMCALL) {
9082 		skip_exit_insn();
9083 		enter_guest();
9084 	}
9085 	skip_exit_vmcall();
9086 }
9087 
9088 static void vmcs_shadow_test_field(u8 *bitmap[2], u64 field)
9089 {
9090 	struct vmcs_shadow_test_common *c = &l1_l2_common;
9091 	struct vmcs *shadow;
9092 	u64 value;
9093 	uintptr_t flags[2];
9094 	bool good_shadow;
9095 	u32 vmx_inst_error;
9096 
9097 	report_prefix_pushf("field %lx", field);
9098 	c->field = field;
9099 
9100 	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
9101 	if (shadow != (struct vmcs *)-1ull) {
9102 		flags[ACCESS_VMREAD] = vmread_flags(field, &value);
9103 		flags[ACCESS_VMWRITE] = vmwrite_flags(field, value);
9104 		good_shadow = !flags[ACCESS_VMREAD] && !flags[ACCESS_VMWRITE];
9105 	} else {
9106 		/*
9107 		 * When VMCS link pointer is -1ull, VMWRITE/VMREAD on
9108 		 * shadowed-fields should fail with setting RFLAGS.CF.
9109 		 */
9110 		flags[ACCESS_VMREAD] = X86_EFLAGS_CF;
9111 		flags[ACCESS_VMWRITE] = X86_EFLAGS_CF;
9112 		good_shadow = false;
9113 	}
9114 
9115 	/* Intercept both VMREAD and VMWRITE. */
9116 	report_prefix_push("no VMREAD/VMWRITE permission");
9117 	/* VMWRITE/VMREAD done on reserved-bit should always intercept */
9118 	if (!(field >> VMCS_FIELD_RESERVED_SHIFT)) {
9119 		set_bit(field, bitmap[ACCESS_VMREAD]);
9120 		set_bit(field, bitmap[ACCESS_VMWRITE]);
9121 	}
9122 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
9123 	report(c->reason == VMX_VMWRITE, "not shadowed for VMWRITE");
9124 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
9125 	report(c->reason == VMX_VMREAD, "not shadowed for VMREAD");
9126 	report_prefix_pop();
9127 
9128 	if (field >> VMCS_FIELD_RESERVED_SHIFT)
9129 		goto out;
9130 
9131 	/* Permit shadowed VMREAD. */
9132 	report_prefix_push("VMREAD permission only");
9133 	clear_bit(field, bitmap[ACCESS_VMREAD]);
9134 	set_bit(field, bitmap[ACCESS_VMWRITE]);
9135 	if (good_shadow)
9136 		value = vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
9137 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
9138 	report(c->reason == VMX_VMWRITE, "not shadowed for VMWRITE");
9139 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
9140 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
9141 	report(c->reason == VMX_VMCALL, "shadowed for VMREAD (in %ld cycles)",
9142 	       c->time);
9143 	report(c->flags == flags[ACCESS_VMREAD],
9144 	       "ALU flags after VMREAD (%lx) are as expected (%lx)",
9145 	       c->flags, flags[ACCESS_VMREAD]);
9146 	if (good_shadow)
9147 		report(c->value == value,
9148 		       "value read from shadow (%lx) is as expected (%lx)",
9149 		       c->value, value);
9150 	else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMREAD])
9151 		report(vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
9152 		       "VMX_INST_ERROR (%d) is as expected (%d)",
9153 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
9154 	report_prefix_pop();
9155 
9156 	/* Permit shadowed VMWRITE. */
9157 	report_prefix_push("VMWRITE permission only");
9158 	set_bit(field, bitmap[ACCESS_VMREAD]);
9159 	clear_bit(field, bitmap[ACCESS_VMWRITE]);
9160 	if (good_shadow)
9161 		vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
9162 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
9163 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
9164 	report(c->reason == VMX_VMCALL,
9165 		"shadowed for VMWRITE (in %ld cycles)",
9166 		c->time);
9167 	report(c->flags == flags[ACCESS_VMREAD],
9168 	       "ALU flags after VMWRITE (%lx) are as expected (%lx)",
9169 	       c->flags, flags[ACCESS_VMREAD]);
9170 	if (good_shadow) {
9171 		value = vmread_from_shadow(field);
9172 		report(value == 0,
9173 		       "shadow VMCS value (%lx) is as expected (%lx)", value,
9174 		       0ul);
9175 	} else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMWRITE]) {
9176 		report(vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
9177 		       "VMX_INST_ERROR (%d) is as expected (%d)",
9178 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
9179 	}
9180 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
9181 	report(c->reason == VMX_VMREAD, "not shadowed for VMREAD");
9182 	report_prefix_pop();
9183 
9184 	/* Permit shadowed VMREAD and VMWRITE. */
9185 	report_prefix_push("VMREAD and VMWRITE permission");
9186 	clear_bit(field, bitmap[ACCESS_VMREAD]);
9187 	clear_bit(field, bitmap[ACCESS_VMWRITE]);
9188 	if (good_shadow)
9189 		vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
9190 	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
9191 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
9192 	report(c->reason == VMX_VMCALL,
9193 		"shadowed for VMWRITE (in %ld cycles)",
9194 		c->time);
9195 	report(c->flags == flags[ACCESS_VMREAD],
9196 	       "ALU flags after VMWRITE (%lx) are as expected (%lx)",
9197 	       c->flags, flags[ACCESS_VMREAD]);
9198 	if (good_shadow) {
9199 		value = vmread_from_shadow(field);
9200 		report(value == 0,
9201 		       "shadow VMCS value (%lx) is as expected (%lx)", value,
9202 		       0ul);
9203 	} else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMWRITE]) {
9204 		report(vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
9205 		       "VMX_INST_ERROR (%d) is as expected (%d)",
9206 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
9207 	}
9208 	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
9209 	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
9210 	report(c->reason == VMX_VMCALL, "shadowed for VMREAD (in %ld cycles)",
9211 	       c->time);
9212 	report(c->flags == flags[ACCESS_VMREAD],
9213 	       "ALU flags after VMREAD (%lx) are as expected (%lx)",
9214 	       c->flags, flags[ACCESS_VMREAD]);
9215 	if (good_shadow)
9216 		report(c->value == 0,
9217 		       "value read from shadow (%lx) is as expected (%lx)",
9218 		       c->value, 0ul);
9219 	else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMREAD])
9220 		report(vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
9221 		       "VMX_INST_ERROR (%d) is as expected (%d)",
9222 		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
9223 	report_prefix_pop();
9224 
9225 out:
9226 	report_prefix_pop();
9227 }
9228 
9229 static void vmx_vmcs_shadow_test_body(u8 *bitmap[2])
9230 {
9231 	unsigned base;
9232 	unsigned index;
9233 	unsigned bit;
9234 	unsigned highest_index = rdmsr(MSR_IA32_VMX_VMCS_ENUM);
9235 
9236 	/* Run test on all possible valid VMCS fields */
9237 	for (base = 0;
9238 	     base < (1 << VMCS_FIELD_RESERVED_SHIFT);
9239 	     base += (1 << VMCS_FIELD_TYPE_SHIFT))
9240 		for (index = 0; index <= highest_index; index++)
9241 			vmcs_shadow_test_field(bitmap, base + index);
9242 
9243 	/*
9244 	 * Run tests on some invalid VMCS fields
9245 	 * (Have reserved bit set).
9246 	 */
9247 	for (bit = VMCS_FIELD_RESERVED_SHIFT; bit < VMCS_FIELD_BIT_SIZE; bit++)
9248 		vmcs_shadow_test_field(bitmap, (1ull << bit));
9249 }
9250 
9251 static void vmx_vmcs_shadow_test(void)
9252 {
9253 	u8 *bitmap[2];
9254 	struct vmcs *shadow;
9255 
9256 	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) {
9257 		printf("\t'Activate secondary controls' not supported.\n");
9258 		return;
9259 	}
9260 
9261 	if (!(ctrl_cpu_rev[1].clr & CPU_SHADOW_VMCS)) {
9262 		printf("\t'VMCS shadowing' not supported.\n");
9263 		return;
9264 	}
9265 
9266 	if (!(rdmsr(MSR_IA32_VMX_MISC) &
9267 	      MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) {
9268 		printf("\tVMWRITE can't modify VM-exit information fields.\n");
9269 		return;
9270 	}
9271 
9272 	test_set_guest(vmx_vmcs_shadow_test_guest);
9273 
9274 	bitmap[ACCESS_VMREAD] = alloc_page();
9275 	bitmap[ACCESS_VMWRITE] = alloc_page();
9276 
9277 	vmcs_write(VMREAD_BITMAP, virt_to_phys(bitmap[ACCESS_VMREAD]));
9278 	vmcs_write(VMWRITE_BITMAP, virt_to_phys(bitmap[ACCESS_VMWRITE]));
9279 
9280 	shadow = alloc_page();
9281 	shadow->hdr.revision_id = basic.revision;
9282 	shadow->hdr.shadow_vmcs = 1;
9283 	TEST_ASSERT(!vmcs_clear(shadow));
9284 
9285 	vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_RDTSC);
9286 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY);
9287 	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_SHADOW_VMCS);
9288 
9289 	vmcs_write(VMCS_LINK_PTR, virt_to_phys(shadow));
9290 	report_prefix_push("valid link pointer");
9291 	vmx_vmcs_shadow_test_body(bitmap);
9292 	report_prefix_pop();
9293 
9294 	vmcs_write(VMCS_LINK_PTR, -1ull);
9295 	report_prefix_push("invalid link pointer");
9296 	vmx_vmcs_shadow_test_body(bitmap);
9297 	report_prefix_pop();
9298 
9299 	l1_l2_common.op = ACCESS_NONE;
9300 	enter_guest();
9301 }
9302 
9303 /*
9304  * This test monitors the difference between a guest RDTSC instruction
9305  * and the IA32_TIME_STAMP_COUNTER MSR value stored in the VMCS12
9306  * VM-exit MSR-store list when taking a VM-exit on the instruction
9307  * following RDTSC.
9308  */
9309 #define RDTSC_DIFF_ITERS 100000
9310 #define RDTSC_DIFF_FAILS 100
9311 #define HOST_CAPTURED_GUEST_TSC_DIFF_THRESHOLD 750
9312 
9313 /*
9314  * Set 'use TSC offsetting' and set the guest offset to the
9315  * inverse of the host's current TSC value, so that the guest starts running
9316  * with an effective TSC value of 0.
9317  */
9318 static void reset_guest_tsc_to_zero(void)
9319 {
9320 	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_USE_TSC_OFFSET);
9321 	vmcs_write(TSC_OFFSET, -rdtsc());
9322 }
9323 
9324 static void rdtsc_vmexit_diff_test_guest(void)
9325 {
9326 	int i;
9327 
9328 	for (i = 0; i < RDTSC_DIFF_ITERS; i++)
9329 		/* Ensure rdtsc is the last instruction before the vmcall. */
9330 		asm volatile("rdtsc; vmcall" : : : "eax", "edx");
9331 }
9332 
9333 /*
9334  * This function only considers the "use TSC offsetting" VM-execution
9335  * control.  It does not handle "use TSC scaling" (because the latter
9336  * isn't available to the host today.)
9337  */
9338 static unsigned long long host_time_to_guest_time(unsigned long long t)
9339 {
9340 	TEST_ASSERT(!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
9341 		    !(vmcs_read(CPU_EXEC_CTRL1) & CPU_USE_TSC_SCALING));
9342 
9343 	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_USE_TSC_OFFSET)
9344 		t += vmcs_read(TSC_OFFSET);
9345 
9346 	return t;
9347 }
9348 
9349 static unsigned long long rdtsc_vmexit_diff_test_iteration(void)
9350 {
9351 	unsigned long long guest_tsc, host_to_guest_tsc;
9352 
9353 	enter_guest();
9354 	skip_exit_vmcall();
9355 	guest_tsc = (u32) regs.rax + (regs.rdx << 32);
9356 	host_to_guest_tsc = host_time_to_guest_time(exit_msr_store[0].value);
9357 
9358 	return host_to_guest_tsc - guest_tsc;
9359 }
9360 
9361 static void rdtsc_vmexit_diff_test(void)
9362 {
9363 	int fail = 0;
9364 	int i;
9365 
9366 	if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET))
9367 		test_skip("CPU doesn't support the 'use TSC offsetting' processor-based VM-execution control.\n");
9368 
9369 	test_set_guest(rdtsc_vmexit_diff_test_guest);
9370 
9371 	reset_guest_tsc_to_zero();
9372 
9373 	/*
9374 	 * Set up the VMCS12 VM-exit MSR-store list to store just one
9375 	 * MSR: IA32_TIME_STAMP_COUNTER. Note that the value stored is
9376 	 * in the host time domain (i.e., it is not adjusted according
9377 	 * to the TSC multiplier and TSC offset fields in the VMCS12,
9378 	 * as a guest RDTSC would be.)
9379 	 */
9380 	exit_msr_store = alloc_page();
9381 	exit_msr_store[0].index = MSR_IA32_TSC;
9382 	vmcs_write(EXI_MSR_ST_CNT, 1);
9383 	vmcs_write(EXIT_MSR_ST_ADDR, virt_to_phys(exit_msr_store));
9384 
9385 	for (i = 0; i < RDTSC_DIFF_ITERS; i++) {
9386 		if (rdtsc_vmexit_diff_test_iteration() >=
9387 		    HOST_CAPTURED_GUEST_TSC_DIFF_THRESHOLD)
9388 			fail++;
9389 	}
9390 
9391 	enter_guest();
9392 
9393 	report(fail < RDTSC_DIFF_FAILS,
9394 	       "RDTSC to VM-exit delta too high in %d of %d iterations",
9395 	       fail, RDTSC_DIFF_ITERS);
9396 }
9397 
9398 static int invalid_msr_init(struct vmcs *vmcs)
9399 {
9400 	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
9401 		printf("\tPreemption timer is not supported\n");
9402 		return VMX_TEST_EXIT;
9403 	}
9404 	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
9405 	preempt_val = 10000000;
9406 	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
9407 	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
9408 
9409 	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
9410 		printf("\tSave preemption value is not supported\n");
9411 
9412 	vmcs_write(ENT_MSR_LD_CNT, 1);
9413 	vmcs_write(ENTER_MSR_LD_ADDR, (u64)0x13370000);
9414 
9415 	return VMX_TEST_START;
9416 }
9417 
9418 
9419 static void invalid_msr_main(void)
9420 {
9421 	report(0, "Invalid MSR load");
9422 }
9423 
9424 static int invalid_msr_exit_handler(void)
9425 {
9426 	report(0, "Invalid MSR load");
9427 	print_vmexit_info();
9428 	return VMX_TEST_EXIT;
9429 }
9430 
9431 static int invalid_msr_entry_failure(struct vmentry_failure *failure)
9432 {
9433 	ulong reason;
9434 
9435 	reason = vmcs_read(EXI_REASON);
9436 	report(reason == (0x80000000u | VMX_FAIL_MSR), "Invalid MSR load");
9437 	return VMX_TEST_VMEXIT;
9438 }
9439 
9440 /*
9441  * The max number of MSRs in an atomic switch MSR list is:
9442  * (111B + 1) * 512 = 4096
9443  *
9444  * Each list entry consumes:
9445  * 4-byte MSR index + 4 bytes reserved + 8-byte data = 16 bytes
9446  *
9447  * Allocate 128 kB to cover max_msr_list_size (i.e., 64 kB) and then some.
9448  */
9449 static const u32 msr_list_page_order = 5;
9450 
9451 static void atomic_switch_msr_limit_test_guest(void)
9452 {
9453 	vmcall();
9454 }
9455 
9456 static void populate_msr_list(struct vmx_msr_entry *msr_list,
9457 			      size_t byte_capacity, int count)
9458 {
9459 	int i;
9460 
9461 	for (i = 0; i < count; i++) {
9462 		msr_list[i].index = MSR_IA32_TSC;
9463 		msr_list[i].reserved = 0;
9464 		msr_list[i].value = 0x1234567890abcdef;
9465 	}
9466 
9467 	memset(msr_list + count, 0xff,
9468 	       byte_capacity - count * sizeof(*msr_list));
9469 }
9470 
9471 static int max_msr_list_size(void)
9472 {
9473 	u32 vmx_misc = rdmsr(MSR_IA32_VMX_MISC);
9474 	u32 factor = ((vmx_misc & GENMASK(27, 25)) >> 25) + 1;
9475 
9476 	return factor * 512;
9477 }
9478 
9479 static void atomic_switch_msrs_test(int count)
9480 {
9481 	struct vmx_msr_entry *vm_enter_load;
9482         struct vmx_msr_entry *vm_exit_load;
9483         struct vmx_msr_entry *vm_exit_store;
9484 	int max_allowed = max_msr_list_size();
9485 	int byte_capacity = 1ul << (msr_list_page_order + PAGE_SHIFT);
9486 	/* Exceeding the max MSR list size at exit trigers KVM to abort. */
9487 	int exit_count = count > max_allowed ? max_allowed : count;
9488 	int cleanup_count = count > max_allowed ? 2 : 1;
9489 	int i;
9490 
9491 	/*
9492 	 * Check for the IA32_TSC MSR,
9493 	 * available with the "TSC flag" and used to populate the MSR lists.
9494 	 */
9495 	if (!(cpuid(1).d & (1 << 4))) {
9496 		report_skip(__func__);
9497 		return;
9498 	}
9499 
9500 	/* Set L2 guest. */
9501 	test_set_guest(atomic_switch_msr_limit_test_guest);
9502 
9503 	/* Setup atomic MSR switch lists. */
9504 	vm_enter_load = alloc_pages(msr_list_page_order);
9505 	vm_exit_load = alloc_pages(msr_list_page_order);
9506 	vm_exit_store = alloc_pages(msr_list_page_order);
9507 
9508 	vmcs_write(ENTER_MSR_LD_ADDR, (u64)vm_enter_load);
9509 	vmcs_write(EXIT_MSR_LD_ADDR, (u64)vm_exit_load);
9510 	vmcs_write(EXIT_MSR_ST_ADDR, (u64)vm_exit_store);
9511 
9512 	/*
9513 	 * VM-Enter should succeed up to the max number of MSRs per list, and
9514 	 * should not consume junk beyond the last entry.
9515 	 */
9516 	populate_msr_list(vm_enter_load, byte_capacity, count);
9517 	populate_msr_list(vm_exit_load, byte_capacity, exit_count);
9518 	populate_msr_list(vm_exit_store, byte_capacity, exit_count);
9519 
9520 	vmcs_write(ENT_MSR_LD_CNT, count);
9521 	vmcs_write(EXI_MSR_LD_CNT, exit_count);
9522 	vmcs_write(EXI_MSR_ST_CNT, exit_count);
9523 
9524 	if (count <= max_allowed) {
9525 		enter_guest();
9526 		assert_exit_reason(VMX_VMCALL);
9527 		skip_exit_vmcall();
9528 	} else {
9529 		u32 exit_reason;
9530 		u32 exit_reason_want;
9531 		u32 exit_qual;
9532 
9533 		enter_guest_with_invalid_guest_state();
9534 
9535 		exit_reason = vmcs_read(EXI_REASON);
9536 		exit_reason_want = VMX_FAIL_MSR | VMX_ENTRY_FAILURE;
9537 		report(exit_reason == exit_reason_want,
9538 		       "exit_reason, %u, is %u.", exit_reason,
9539 		       exit_reason_want);
9540 
9541 		exit_qual = vmcs_read(EXI_QUALIFICATION);
9542 		report(exit_qual == max_allowed + 1, "exit_qual, %u, is %u.",
9543 		       exit_qual, max_allowed + 1);
9544 	}
9545 
9546 	/* Cleanup. */
9547 	vmcs_write(ENT_MSR_LD_CNT, 0);
9548 	vmcs_write(EXI_MSR_LD_CNT, 0);
9549 	vmcs_write(EXI_MSR_ST_CNT, 0);
9550 	for (i = 0; i < cleanup_count; i++) {
9551 		enter_guest();
9552 		skip_exit_vmcall();
9553 	}
9554 	free_pages_by_order(vm_enter_load, msr_list_page_order);
9555 	free_pages_by_order(vm_exit_load, msr_list_page_order);
9556 	free_pages_by_order(vm_exit_store, msr_list_page_order);
9557 }
9558 
9559 static void atomic_switch_max_msrs_test(void)
9560 {
9561 	atomic_switch_msrs_test(max_msr_list_size());
9562 }
9563 
9564 static void atomic_switch_overflow_msrs_test(void)
9565 {
9566 	atomic_switch_msrs_test(max_msr_list_size() + 1);
9567 }
9568 
9569 #define TEST(name) { #name, .v2 = name }
9570 
9571 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
9572 struct vmx_test vmx_tests[] = {
9573 	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
9574 	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
9575 	{ "preemption timer", preemption_timer_init, preemption_timer_main,
9576 		preemption_timer_exit_handler, NULL, {0} },
9577 	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
9578 		test_ctrl_pat_exit_handler, NULL, {0} },
9579 	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
9580 		test_ctrl_efer_exit_handler, NULL, {0} },
9581 	{ "CR shadowing", NULL, cr_shadowing_main,
9582 		cr_shadowing_exit_handler, NULL, {0} },
9583 	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
9584 		NULL, {0} },
9585 	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
9586 		insn_intercept_exit_handler, NULL, {0} },
9587 	{ "EPT A/D disabled", ept_init, ept_main, ept_exit_handler, NULL, {0} },
9588 	{ "EPT A/D enabled", eptad_init, eptad_main, eptad_exit_handler, NULL, {0} },
9589 	{ "PML", pml_init, pml_main, pml_exit_handler, NULL, {0} },
9590 	{ "VPID", vpid_init, vpid_main, vpid_exit_handler, NULL, {0} },
9591 	{ "interrupt", interrupt_init, interrupt_main,
9592 		interrupt_exit_handler, NULL, {0} },
9593 	{ "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler,
9594 		NULL, {0} },
9595 	{ "MSR switch", msr_switch_init, msr_switch_main,
9596 		msr_switch_exit_handler, NULL, {0}, msr_switch_entry_failure },
9597 	{ "vmmcall", vmmcall_init, vmmcall_main, vmmcall_exit_handler, NULL, {0} },
9598 	{ "disable RDTSCP", disable_rdtscp_init, disable_rdtscp_main,
9599 		disable_rdtscp_exit_handler, NULL, {0} },
9600 	{ "int3", int3_init, int3_guest_main, int3_exit_handler, NULL, {0} },
9601 	{ "into", into_init, into_guest_main, into_exit_handler, NULL, {0} },
9602 	{ "exit_monitor_from_l2_test", NULL, exit_monitor_from_l2_main,
9603 		exit_monitor_from_l2_handler, NULL, {0} },
9604 	{ "invalid_msr", invalid_msr_init, invalid_msr_main,
9605 		invalid_msr_exit_handler, NULL, {0}, invalid_msr_entry_failure},
9606 	/* Basic V2 tests. */
9607 	TEST(v2_null_test),
9608 	TEST(v2_multiple_entries_test),
9609 	TEST(fixture_test_case1),
9610 	TEST(fixture_test_case2),
9611 	/* Opcode tests. */
9612 	TEST(invvpid_test_v2),
9613 	/* VM-entry tests */
9614 	TEST(vmx_controls_test),
9615 	TEST(vmx_host_state_area_test),
9616 	TEST(vmx_guest_state_area_test),
9617 	TEST(vmentry_movss_shadow_test),
9618 	/* APICv tests */
9619 	TEST(vmx_eoi_bitmap_ioapic_scan_test),
9620 	TEST(vmx_hlt_with_rvi_test),
9621 	TEST(apic_reg_virt_test),
9622 	TEST(virt_x2apic_mode_test),
9623 	/* APIC pass-through tests */
9624 	TEST(vmx_apic_passthrough_test),
9625 	TEST(vmx_apic_passthrough_thread_test),
9626 	TEST(vmx_apic_passthrough_tpr_threshold_test),
9627 	TEST(vmx_init_signal_test),
9628 	/* VMCS Shadowing tests */
9629 	TEST(vmx_vmcs_shadow_test),
9630 	/* Regression tests */
9631 	TEST(vmx_cr_load_test),
9632 	TEST(vmx_nm_test),
9633 	TEST(vmx_db_test),
9634 	TEST(vmx_nmi_window_test),
9635 	TEST(vmx_intr_window_test),
9636 	TEST(vmx_pending_event_test),
9637 	TEST(vmx_pending_event_hlt_test),
9638 	TEST(vmx_store_tsc_test),
9639 	/* EPT access tests. */
9640 	TEST(ept_access_test_not_present),
9641 	TEST(ept_access_test_read_only),
9642 	TEST(ept_access_test_write_only),
9643 	TEST(ept_access_test_read_write),
9644 	TEST(ept_access_test_execute_only),
9645 	TEST(ept_access_test_read_execute),
9646 	TEST(ept_access_test_write_execute),
9647 	TEST(ept_access_test_read_write_execute),
9648 	TEST(ept_access_test_reserved_bits),
9649 	TEST(ept_access_test_ignored_bits),
9650 	TEST(ept_access_test_paddr_not_present_ad_disabled),
9651 	TEST(ept_access_test_paddr_not_present_ad_enabled),
9652 	TEST(ept_access_test_paddr_read_only_ad_disabled),
9653 	TEST(ept_access_test_paddr_read_only_ad_enabled),
9654 	TEST(ept_access_test_paddr_read_write),
9655 	TEST(ept_access_test_paddr_read_write_execute),
9656 	TEST(ept_access_test_paddr_read_execute_ad_disabled),
9657 	TEST(ept_access_test_paddr_read_execute_ad_enabled),
9658 	TEST(ept_access_test_paddr_not_present_page_fault),
9659 	TEST(ept_access_test_force_2m_page),
9660 	/* Atomic MSR switch tests. */
9661 	TEST(atomic_switch_max_msrs_test),
9662 	TEST(atomic_switch_overflow_msrs_test),
9663 	TEST(rdtsc_vmexit_diff_test),
9664 	TEST(vmx_mtf_test),
9665 	{ NULL, NULL, NULL, NULL, NULL, {0} },
9666 };
9667