xref: /kvm-unit-tests/x86/svm.c (revision 70cea146df56b4711cd2f6ee5adfd118b55ce86a)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 #include "alloc_page.h"
10 #include "isr.h"
11 #include "apic.h"
12 
13 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
14 
15 /* for the nested page table */
16 u64 *pml4e;
17 u64 *pdpe;
18 u64 *pde[4];
19 u64 *pte[2048];
20 void *scratch_page;
21 
22 #define LATENCY_RUNS 1000000
23 
24 u64 tsc_start;
25 u64 tsc_end;
26 
27 u64 vmrun_sum, vmexit_sum;
28 u64 vmsave_sum, vmload_sum;
29 u64 stgi_sum, clgi_sum;
30 u64 latvmrun_max;
31 u64 latvmrun_min;
32 u64 latvmexit_max;
33 u64 latvmexit_min;
34 u64 latvmload_max;
35 u64 latvmload_min;
36 u64 latvmsave_max;
37 u64 latvmsave_min;
38 u64 latstgi_max;
39 u64 latstgi_min;
40 u64 latclgi_max;
41 u64 latclgi_min;
42 u64 runs;
43 
44 u8 *io_bitmap;
45 u8 io_bitmap_area[16384];
46 
47 u8 set_host_if;
48 
49 #define MSR_BITMAP_SIZE 8192
50 
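/*
 * Note: per the APM, the MSR permission map is 8 KiB and covers three MSR
 * ranges (0000_0000h-0000_1FFFh, C000_0000h-C000_1FFFh and
 * C001_0000h-C001_1FFFh) with two bits (read/write) per MSR; the gaps
 * skipped in test_msr_intercept() below mirror those ranges.
 */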
51 u8 *msr_bitmap;
52 u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
53 
54 static bool npt_supported(void)
55 {
56 	return this_cpu_has(X86_FEATURE_NPT);
57 }
58 
59 static void setup_svm(void)
60 {
61     void *hsave = alloc_page();
62     u64 *page, address;
63     int i,j;
64 
65     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
66     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
67     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
68 
69     scratch_page = alloc_page();
70 
71     io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);
72 
73     msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
74 
75     if (!npt_supported())
76         return;
77 
78     printf("NPT detected - running all tests with NPT enabled\n");
79 
80     /*
81      * Nested paging supported - build a nested page table.
82      * Build the page table bottom-up and map everything with 4k pages
83      * to get enough granularity for the NPT unit tests.
84      */
85 
86     address = 0;
87 
88     /* PTE level */
89     for (i = 0; i < 2048; ++i) {
90         page = alloc_page();
91 
92         for (j = 0; j < 512; ++j, address += 4096)
93             page[j] = address | 0x067ULL;
94 
95         pte[i] = page;
96     }
97 
98     /* PDE level */
99     for (i = 0; i < 4; ++i) {
100         page = alloc_page();
101 
102         for (j = 0; j < 512; ++j)
103             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
104 
105         pde[i] = page;
106     }
107 
108     /* PDPe level */
109     pdpe   = alloc_page();
110     for (i = 0; i < 4; ++i)
111        pdpe[i] = ((u64)(pde[i])) | 0x27;
112 
113     /* PML4e level */
114     pml4e    = alloc_page();
115     pml4e[0] = ((u64)pdpe) | 0x27;
116 }
117 
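/*
 * Helpers to look up entries in the flat pde[]/pte[] arrays built by
 * setup_svm(): pde[] holds 4 pages of 512 PDEs (1 GiB per page) and pte[]
 * holds 2048 pages of 512 PTEs (2 MiB per page), so both cover guest
 * physical addresses below 4 GiB.
 */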
118 static u64 *npt_get_pde(u64 address)
119 {
120     int i1, i2;
121 
122     address >>= 21;
123     i1 = (address >> 9) & 0x3;
124     i2 = address & 0x1ff;
125 
126     return &pde[i1][i2];
127 }
128 
129 static u64 *npt_get_pte(u64 address)
130 {
131     int i1, i2;
132 
133     address >>= 12;
134     i1 = (address >> 9) & 0x7ff;
135     i2 = address & 0x1ff;
136 
137     return &pte[i1][i2];
138 }
139 
140 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
141                          u64 base, u32 limit, u32 attr)
142 {
143     seg->selector = selector;
144     seg->attrib = attr;
145     seg->limit = limit;
146     seg->base = base;
147 }
148 
149 static void vmcb_ident(struct vmcb *vmcb)
150 {
151     u64 vmcb_phys = virt_to_phys(vmcb);
152     struct vmcb_save_area *save = &vmcb->save;
153     struct vmcb_control_area *ctrl = &vmcb->control;
154     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
155         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
156     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
157         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
158     struct descriptor_table_ptr desc_table_ptr;
159 
160     memset(vmcb, 0, sizeof(*vmcb));
161     asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
162     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
163     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
164     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
165     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
166     sgdt(&desc_table_ptr);
167     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
168     sidt(&desc_table_ptr);
169     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
170     ctrl->asid = 1;
171     save->cpl = 0;
172     save->efer = rdmsr(MSR_EFER);
173     save->cr4 = read_cr4();
174     save->cr3 = read_cr3();
175     save->cr0 = read_cr0();
176     save->dr7 = read_dr7();
177     save->dr6 = read_dr6();
178     save->cr2 = read_cr2();
179     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
180     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
181     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
182     ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
183     ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);
184 
185     if (npt_supported()) {
186         ctrl->nested_ctl = 1;
187         ctrl->nested_cr3 = (u64)pml4e;
188     }
189 }
190 
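/*
 * A test provides a prepare() hook that runs in the host to set up the
 * VMCB, a guest_func() that runs inside the guest via test_thunk(), a
 * finished() hook called after every #VMEXIT to decide whether to re-enter
 * the guest, and a succeeded() hook that evaluates the final result.
 */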
191 struct test {
192     const char *name;
193     bool (*supported)(void);
194     void (*prepare)(struct test *test);
195     void (*guest_func)(struct test *test);
196     bool (*finished)(struct test *test);
197     bool (*succeeded)(struct test *test);
198     struct vmcb *vmcb;
199     int exits;
200     ulong scratch;
201 };
202 
203 static inline void vmmcall(void)
204 {
205     asm volatile ("vmmcall" : : : "memory");
206 }
207 
208 static void test_thunk(struct test *test)
209 {
210     test->guest_func(test);
211     vmmcall();
212 }
213 
214 struct regs {
215         u64 rax;
216         u64 rbx;
217         u64 rcx;
218         u64 rdx;
219         u64 cr2;
220         u64 rbp;
221         u64 rsi;
222         u64 rdi;
223         u64 r8;
224         u64 r9;
225         u64 r10;
226         u64 r11;
227         u64 r12;
228         u64 r13;
229         u64 r14;
230         u64 r15;
231         u64 rflags;
232 };
233 
234 struct regs regs;
235 
236 // rax handled specially below
237 
238 #define SAVE_GPR_C                              \
239         "xchg %%rbx, regs+0x8\n\t"              \
240         "xchg %%rcx, regs+0x10\n\t"             \
241         "xchg %%rdx, regs+0x18\n\t"             \
242         "xchg %%rbp, regs+0x28\n\t"             \
243         "xchg %%rsi, regs+0x30\n\t"             \
244         "xchg %%rdi, regs+0x38\n\t"             \
245         "xchg %%r8, regs+0x40\n\t"              \
246         "xchg %%r9, regs+0x48\n\t"              \
247         "xchg %%r10, regs+0x50\n\t"             \
248         "xchg %%r11, regs+0x58\n\t"             \
249         "xchg %%r12, regs+0x60\n\t"             \
250         "xchg %%r13, regs+0x68\n\t"             \
251         "xchg %%r14, regs+0x70\n\t"             \
252         "xchg %%r15, regs+0x78\n\t"
253 
254 #define LOAD_GPR_C      SAVE_GPR_C
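/*
 * LOAD_GPR_C is intentionally the same sequence as SAVE_GPR_C: each xchg
 * swaps a register with its slot in `regs`, so the same macro installs the
 * guest GPRs before VMRUN and stashes them (restoring the host GPRs) after
 * #VMEXIT.  The offsets match the layout of struct regs above.
 */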
255 
256 static void test_run(struct test *test, struct vmcb *vmcb)
257 {
258     u64 vmcb_phys = virt_to_phys(vmcb);
259     u64 guest_stack[10000];
260 
261     irq_disable();
262     test->vmcb = vmcb;
263     set_host_if = 1;
264     test->prepare(test);
265     vmcb->save.rip = (ulong)test_thunk;
266     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
267     regs.rdi = (ulong)test;
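    /*
     * World switch: CLGI blocks physical interrupts across the switch,
     * set_host_if selects whether the host side runs with RFLAGS.IF set
     * while GIF is clear, VMLOAD/VMSAVE transfer the extra state VMRUN
     * does not handle, and the GPR macros swap host and guest registers
     * through `regs`.  Guest RAX and RFLAGS live in the VMCB save area.
     */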
268     do {
269         tsc_start = rdtsc();
270         asm volatile (
271             "clgi;\n\t" // semi-colon needed for LLVM compatibility
272             "cmpb $0, set_host_if\n\t"
273             "jz 1f\n\t"
274             "sti \n\t"
275             "1: \n\t"
276             "vmload \n\t"
277             "vmload %0\n\t"
278             "mov regs+0x80, %%r15\n\t"  // rflags
279             "mov %%r15, 0x170(%0)\n\t"
280             "mov regs, %%r15\n\t"       // rax
281             "mov %%r15, 0x1f8(%0)\n\t"
282             LOAD_GPR_C
283             "vmrun %0\n\t"
284             SAVE_GPR_C
285             "mov 0x170(%0), %%r15\n\t"  // rflags
286             "mov %%r15, regs+0x80\n\t"
287             "mov 0x1f8(%0), %%r15\n\t"  // rax
288             "mov %%r15, regs\n\t"
289             "vmsave %0\n\t"
290             "cli \n\t"
291             "stgi"
292             : : "a"(vmcb_phys)
293             : "rbx", "rcx", "rdx", "rsi",
294               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
295               "memory");
296         tsc_end = rdtsc();
297         ++test->exits;
298     } while (!test->finished(test));
299     irq_enable();
300 
301     report(test->succeeded(test), "%s", test->name);
302 }
303 
304 static bool smp_supported(void)
305 {
306 	return cpu_count() > 1;
307 }
308 
309 static bool default_supported(void)
310 {
311     return true;
312 }
313 
314 static void default_prepare(struct test *test)
315 {
316     vmcb_ident(test->vmcb);
317 }
318 
319 static bool default_finished(struct test *test)
320 {
321     return true; /* one vmexit */
322 }
323 
324 static void null_test(struct test *test)
325 {
326 }
327 
328 static bool null_check(struct test *test)
329 {
330     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
331 }
332 
333 static void prepare_no_vmrun_int(struct test *test)
334 {
335     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
336 }
337 
338 static bool check_no_vmrun_int(struct test *test)
339 {
340     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
341 }
342 
343 static void test_vmrun(struct test *test)
344 {
345     asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
346 }
347 
348 static bool check_vmrun(struct test *test)
349 {
350     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
351 }
352 
353 static void prepare_cr3_intercept(struct test *test)
354 {
355     default_prepare(test);
356     test->vmcb->control.intercept_cr_read |= 1 << 3;
357 }
358 
359 static void test_cr3_intercept(struct test *test)
360 {
361     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
362 }
363 
364 static bool check_cr3_intercept(struct test *test)
365 {
366     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
367 }
368 
369 static bool check_cr3_nointercept(struct test *test)
370 {
371     return null_check(test) && test->scratch == read_cr3();
372 }
373 
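/*
 * Helper for the "cr3 read intercept emulate" test: CPU 1 waits until the
 * guest on CPU 0 reaches its MMIO access, then patches the instruction at
 * mmio_insn into "mov %cr3, %rax; nop".  The expectation is that when the
 * host re-fetches the instruction to emulate the MMIO exit, it still
 * honors the CR3 read intercept instead of emulating the patched MOV.
 */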
374 static void corrupt_cr3_intercept_bypass(void *_test)
375 {
376     struct test *test = _test;
377     extern volatile u32 mmio_insn;
378 
379     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
380         pause();
381     pause();
382     pause();
383     pause();
384     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
385 }
386 
387 static void prepare_cr3_intercept_bypass(struct test *test)
388 {
389     default_prepare(test);
390     test->vmcb->control.intercept_cr_read |= 1 << 3;
391     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
392 }
393 
394 static void test_cr3_intercept_bypass(struct test *test)
395 {
396     ulong a = 0xa0000;
397 
398     test->scratch = 1;
399     while (test->scratch != 2)
400         barrier();
401 
402     asm volatile ("mmio_insn: mov %0, (%0); nop"
403                   : "+a"(a) : : "memory");
404     test->scratch = a;
405 }
406 
407 static void prepare_dr_intercept(struct test *test)
408 {
409     default_prepare(test);
410     test->vmcb->control.intercept_dr_read = 0xff;
411     test->vmcb->control.intercept_dr_write = 0xff;
412 }
413 
414 static void test_dr_intercept(struct test *test)
415 {
416     unsigned int i, failcnt = 0;
417 
418     /* Loop testing debug register reads */
419     for (i = 0; i < 8; i++) {
420 
421         switch (i) {
422         case 0:
423             asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
424             break;
425         case 1:
426             asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
427             break;
428         case 2:
429             asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
430             break;
431         case 3:
432             asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
433             break;
434         case 4:
435             asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
436             break;
437         case 5:
438             asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
439             break;
440         case 6:
441             asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
442             break;
443         case 7:
444             asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
445             break;
446         }
447 
448         if (test->scratch != i) {
449             report(false, "dr%u read intercept", i);
450             failcnt++;
451         }
452     }
453 
454     /* Loop testing debug register writes */
455     for (i = 0; i < 8; i++) {
456 
457         switch (i) {
458         case 0:
459             asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
460             break;
461         case 1:
462             asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
463             break;
464         case 2:
465             asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
466             break;
467         case 3:
468             asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
469             break;
470         case 4:
471             asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
472             break;
473         case 5:
474             asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
475             break;
476         case 6:
477             asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
478             break;
479         case 7:
480             asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
481             break;
482         }
483 
484         if (test->scratch != i) {
485             report(false, "dr%u write intercept", i);
486             failcnt++;
487         }
488     }
489 
490     test->scratch = failcnt;
491 }
492 
493 static bool dr_intercept_finished(struct test *test)
494 {
495     ulong n = (test->vmcb->control.exit_code - SVM_EXIT_READ_DR0);
496 
497     /* Only expect DR intercepts */
498     if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
499         return true;
500 
501     /*
502      * Compute debug register number.
503      * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
504      * Programmer's Manual Volume 2 - System Programming:
505      * http://support.amd.com/TechDocs/24593.pdf
506      * there are 16 VMEXIT codes each for DR read and write.
507      */
508     test->scratch = (n % 16);
509 
510     /* Jump over MOV instruction */
511     test->vmcb->save.rip += 3;
512 
513     return false;
514 }
515 
516 static bool check_dr_intercept(struct test *test)
517 {
518     return !test->scratch;
519 }
520 
521 static bool next_rip_supported(void)
522 {
523     return this_cpu_has(X86_FEATURE_NRIPS);
524 }
525 
526 static void prepare_next_rip(struct test *test)
527 {
528     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
529 }
530 
531 
532 static void test_next_rip(struct test *test)
533 {
534     asm volatile ("rdtsc\n\t"
535                   ".globl exp_next_rip\n\t"
536                   "exp_next_rip:\n\t" ::: "eax", "edx");
537 }
538 
539 static bool check_next_rip(struct test *test)
540 {
541     extern char exp_next_rip;
542     unsigned long address = (unsigned long)&exp_next_rip;
543 
544     return address == test->vmcb->control.next_rip;
545 }
546 
547 static void prepare_msr_intercept(struct test *test)
548 {
549     default_prepare(test);
550     test->vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
551     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
552     memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
553 }
554 
555 static void test_msr_intercept(struct test *test)
556 {
557     unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
558     unsigned long msr_index;
559 
560     for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
561         if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
562             /*
563              * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
564              * Programmer's Manual volume 2 - System Programming:
565              * http://support.amd.com/TechDocs/24593.pdf
566              * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
567              */
568             continue;
569         }
570 
571         /* Skips gaps between supported MSR ranges */
572         if (msr_index == 0x2000)
573             msr_index = 0xc0000000;
574         else if (msr_index == 0xc0002000)
575             msr_index = 0xc0010000;
576 
577         test->scratch = -1;
578 
579         rdmsr(msr_index);
580 
581         /* Check that a read intercept occurred for MSR at msr_index */
582         if (test->scratch != msr_index)
583             report(false, "MSR 0x%lx read intercept", msr_index);
584 
585         /*
586          * Poor man's approach to generating a value that
587          * seems arbitrary each time around the loop.
588          */
589         msr_value += (msr_value << 1);
590 
591         wrmsr(msr_index, msr_value);
592 
593         /* Check that a write intercept occurred for MSR with msr_value */
594         if (test->scratch != msr_value)
595             report(false, "MSR 0x%lx write intercept", msr_index);
596     }
597 
598     test->scratch = -2;
599 }
600 
601 static bool msr_intercept_finished(struct test *test)
602 {
603     u32 exit_code = test->vmcb->control.exit_code;
604     u64 exit_info_1;
605     u8 *opcode;
606 
607     if (exit_code == SVM_EXIT_MSR) {
608         exit_info_1 = test->vmcb->control.exit_info_1;
609     } else {
610         /*
611          * If #GP exception occurs instead, check that it was
612          * for RDMSR/WRMSR and set exit_info_1 accordingly.
613          */
614 
615         if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
616             return true;
617 
618         opcode = (u8 *)test->vmcb->save.rip;
619         if (opcode[0] != 0x0f)
620             return true;
621 
622         switch (opcode[1]) {
623         case 0x30: /* WRMSR */
624             exit_info_1 = 1;
625             break;
626         case 0x32: /* RDMSR */
627             exit_info_1 = 0;
628             break;
629         default:
630             return true;
631         }
632 
633         /*
634          * Warn that a #GP exception occurred instead.
635          * RCX holds the MSR index.
636          */
637         printf("%s 0x%lx #GP exception\n",
638             exit_info_1 ? "WRMSR" : "RDMSR", regs.rcx);
639     }
640 
641     /* Jump over RDMSR/WRMSR instruction */
642     test->vmcb->save.rip += 2;
643 
644     /*
645      * Test whether the intercept was for RDMSR/WRMSR.
646      * For RDMSR, test->scratch is set to the MSR index;
647      *      RCX holds the MSR index.
648      * For WRMSR, test->scratch is set to the MSR value;
649      *      RDX holds the upper 32 bits of the MSR value,
650      *      while RAX holds its lower 32 bits.
651      */
652     if (exit_info_1)
653         test->scratch =
654             ((regs.rdx << 32) | (test->vmcb->save.rax & 0xffffffff));
655     else
656         test->scratch = regs.rcx;
657 
658     return false;
659 }
660 
661 static bool check_msr_intercept(struct test *test)
662 {
663     memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
664     return (test->scratch == -2);
665 }
666 
667 static void prepare_mode_switch(struct test *test)
668 {
669     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
670                                              |  (1ULL << UD_VECTOR)
671                                              |  (1ULL << DF_VECTOR)
672                                              |  (1ULL << PF_VECTOR);
673     test->scratch = 0;
674 }
675 
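/*
 * The guest steps down from 64-bit long mode to 32-bit protected mode,
 * 16-bit protected mode and finally real mode (first VMMCALL), then climbs
 * back up to long mode (second VMMCALL).  mode_switch_finished() sanity
 * checks CR0, CR4 and EFER at each VMMCALL exit.
 */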
676 static void test_mode_switch(struct test *test)
677 {
678     asm volatile("	cli\n"
679 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
680 		 "1:\n"
681 		 "	.long 2f\n"
682 		 "	.long " xstr(KERNEL_CS32) "\n"
683 		 ".code32\n"
684 		 "2:\n"
685 		 "	movl %%cr0, %%eax\n"
686 		 "	btcl  $31, %%eax\n" /* clear PG */
687 		 "	movl %%eax, %%cr0\n"
688 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
689 		 "	rdmsr\n"
690 		 "	btcl $8, %%eax\n" /* clear LME */
691 		 "	wrmsr\n"
692 		 "	movl %%cr4, %%eax\n"
693 		 "	btcl $5, %%eax\n" /* clear PAE */
694 		 "	movl %%eax, %%cr4\n"
695 		 "	movw %[ds16], %%ax\n"
696 		 "	movw %%ax, %%ds\n"
697 		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
698 		 ".code16\n"
699 		 "3:\n"
700 		 "	movl %%cr0, %%eax\n"
701 		 "	btcl $0, %%eax\n" /* clear PE  */
702 		 "	movl %%eax, %%cr0\n"
703 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
704 		 "4:\n"
705 		 "	vmmcall\n"
706 		 "	movl %%cr0, %%eax\n"
707 		 "	btsl $0, %%eax\n" /* set PE  */
708 		 "	movl %%eax, %%cr0\n"
709 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
710 		 ".code32\n"
711 		 "5:\n"
712 		 "	movl %%cr4, %%eax\n"
713 		 "	btsl $5, %%eax\n" /* set PAE */
714 		 "	movl %%eax, %%cr4\n"
715 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
716 		 "	rdmsr\n"
717 		 "	btsl $8, %%eax\n" /* set LME */
718 		 "	wrmsr\n"
719 		 "	movl %%cr0, %%eax\n"
720 		 "	btsl  $31, %%eax\n" /* set PG */
721 		 "	movl %%eax, %%cr0\n"
722 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
723 		 ".code64\n\t"
724 		 "6:\n"
725 		 "	vmmcall\n"
726 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
727 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
728 		 : "rax", "rbx", "rcx", "rdx", "memory");
729 }
730 
731 static bool mode_switch_finished(struct test *test)
732 {
733     u64 cr0, cr4, efer;
734 
735     cr0  = test->vmcb->save.cr0;
736     cr4  = test->vmcb->save.cr4;
737     efer = test->vmcb->save.efer;
738 
739     /* Only expect VMMCALL intercepts */
740     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
741 	    return true;
742 
743     /* Jump over VMMCALL instruction */
744     test->vmcb->save.rip += 3;
745 
746     /* Do sanity checks */
747     switch (test->scratch) {
748     case 0:
749         /* Test should be in real mode now - check for this */
750         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
751             (cr4  & 0x00000020) || /* CR4.PAE */
752             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
753                 return true;
754         break;
755     case 1:
756         /* Test should be back in long-mode now - check for this */
757         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
758             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
759             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
760 		    return true;
761 	break;
762     }
763 
764     /* one step forward */
765     test->scratch += 1;
766 
767     return test->scratch == 2;
768 }
769 
770 static bool check_mode_switch(struct test *test)
771 {
772 	return test->scratch == 2;
773 }
774 
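/*
 * One bit per I/O port: ports 0000h-FFFFh occupy the first 8 KiB of the
 * I/O permission map, and the byte at io_bitmap[8192] covers accesses that
 * extend past port 0xFFFF (e.g. the inl(0xFFFF) stages in test_ioio()).
 */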
775 static void prepare_ioio(struct test *test)
776 {
777     test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
778     test->scratch = 0;
779     memset(io_bitmap, 0, 8192);
780     io_bitmap[8192] = 0xFF;
781 }
782 
783 static int get_test_stage(struct test *test)
784 {
785     barrier();
786     return test->scratch;
787 }
788 
789 static void set_test_stage(struct test *test, int s)
790 {
791     barrier();
792     test->scratch = s;
793     barrier();
794 }
795 
796 static void inc_test_stage(struct test *test)
797 {
798     barrier();
799     test->scratch++;
800     barrier();
801 }
802 
803 static void test_ioio(struct test *test)
804 {
805     // stage 0, test IO pass
806     inb(0x5000);
807     outb(0x0, 0x5000);
808     if (get_test_stage(test) != 0)
809         goto fail;
810 
811     // test IO width, in/out
812     io_bitmap[0] = 0xFF;
813     inc_test_stage(test);
814     inb(0x0);
815     if (get_test_stage(test) != 2)
816         goto fail;
817 
818     outw(0x0, 0x0);
819     if (get_test_stage(test) != 3)
820         goto fail;
821 
822     inl(0x0);
823     if (get_test_stage(test) != 4)
824         goto fail;
825 
826     // test low/high IO port
827     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
828     inb(0x5000);
829     if (get_test_stage(test) != 5)
830         goto fail;
831 
832     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
833     inw(0x9000);
834     if (get_test_stage(test) != 6)
835         goto fail;
836 
837     // test partial pass
838     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
839     inl(0x4FFF);
840     if (get_test_stage(test) != 7)
841         goto fail;
842 
843     // test across pages
844     inc_test_stage(test);
845     inl(0x7FFF);
846     if (get_test_stage(test) != 8)
847         goto fail;
848 
849     inc_test_stage(test);
850     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
851     inl(0x7FFF);
852     if (get_test_stage(test) != 10)
853         goto fail;
854 
855     io_bitmap[0] = 0;
856     inl(0xFFFF);
857     if (get_test_stage(test) != 11)
858         goto fail;
859 
860     io_bitmap[0] = 0xFF;
861     io_bitmap[8192] = 0;
862     inl(0xFFFF);
863     inc_test_stage(test);
864     if (get_test_stage(test) != 12)
865         goto fail;
866 
867     return;
868 
869 fail:
870     report(false, "stage %d", get_test_stage(test));
871     test->scratch = -1;
872 }
873 
874 static bool ioio_finished(struct test *test)
875 {
876     unsigned port, size;
877 
878     /* Only expect IOIO intercepts */
879     if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
880         return true;
881 
882     if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
883         return true;
884 
885     /* one step forward */
886     test->scratch += 1;
887 
888     port = test->vmcb->control.exit_info_1 >> 16;
889     size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
890 
891     while (size--) {
892         io_bitmap[port / 8] &= ~(1 << (port & 7));
893         port++;
894     }
895 
896     return false;
897 }
898 
899 static bool check_ioio(struct test *test)
900 {
901     memset(io_bitmap, 0, 8193);
902     return test->scratch != -1;
903 }
904 
905 static void prepare_asid_zero(struct test *test)
906 {
907     test->vmcb->control.asid = 0;
908 }
909 
910 static void test_asid_zero(struct test *test)
911 {
912     asm volatile ("vmmcall\n\t");
913 }
914 
915 static bool check_asid_zero(struct test *test)
916 {
917     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
918 }
919 
920 static void sel_cr0_bug_prepare(struct test *test)
921 {
922     vmcb_ident(test->vmcb);
923     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
924 }
925 
926 static bool sel_cr0_bug_finished(struct test *test)
927 {
928 	return true;
929 }
930 
931 static void sel_cr0_bug_test(struct test *test)
932 {
933     unsigned long cr0;
934 
935     /* read CR0, set CD, and write it back */
936     cr0  = read_cr0();
937     cr0 |= (1UL << 30);
938     write_cr0(cr0);
939 
940     /*
941      * If we are here the test failed: we are no longer in guest mode, so
942      * we cannot trigger an intercept to report the failure.  Report it and
943      * exit instead.
944      */
945     report(false, "sel_cr0 test. Cannot recover from this - exiting");
946     exit(report_summary());
947 }
948 
949 static bool sel_cr0_bug_check(struct test *test)
950 {
951     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
952 }
953 
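/*
 * Per the APM, exit_info_1 of a nested page fault holds a #PF-style error
 * code in its low 32 bits; bit 32 is set when the fault happened on the
 * final guest physical access and bit 33 when it happened while walking
 * the guest's own page tables.  exit_info_2 holds the faulting guest
 * physical address.  The checks below match against these encodings.
 */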
954 static void npt_nx_prepare(struct test *test)
955 {
956 
957     u64 *pte;
958 
959     vmcb_ident(test->vmcb);
960     pte = npt_get_pte((u64)null_test);
961 
962     *pte |= (1ULL << 63);
963 }
964 
965 static bool npt_nx_check(struct test *test)
966 {
967     u64 *pte = npt_get_pte((u64)null_test);
968 
969     *pte &= ~(1ULL << 63);
970 
971     test->vmcb->save.efer |= (1 << 11);
972 
973     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
974            && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
975 }
976 
977 static void npt_us_prepare(struct test *test)
978 {
979     u64 *pte;
980 
981     vmcb_ident(test->vmcb);
982     pte = npt_get_pte((u64)scratch_page);
983 
984     *pte &= ~(1ULL << 2);
985 }
986 
987 static void npt_us_test(struct test *test)
988 {
989     (void) *(volatile u64 *)scratch_page;
990 }
991 
992 static bool npt_us_check(struct test *test)
993 {
994     u64 *pte = npt_get_pte((u64)scratch_page);
995 
996     *pte |= (1ULL << 2);
997 
998     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
999            && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
1000 }
1001 
1002 u64 save_pde;
1003 
1004 static void npt_rsvd_prepare(struct test *test)
1005 {
1006     u64 *pde;
1007 
1008     vmcb_ident(test->vmcb);
1009     pde = npt_get_pde((u64) null_test);
1010 
1011     save_pde = *pde;
1012     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
1013 }
1014 
1015 static bool npt_rsvd_check(struct test *test)
1016 {
1017     u64 *pde = npt_get_pde((u64) null_test);
1018 
1019     *pde = save_pde;
1020 
1021     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1022             && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
1023 }
1024 
1025 static void npt_rw_prepare(struct test *test)
1026 {
1027 
1028     u64 *pte;
1029 
1030     vmcb_ident(test->vmcb);
1031     pte = npt_get_pte(0x80000);
1032 
1033     *pte &= ~(1ULL << 1);
1034 }
1035 
1036 static void npt_rw_test(struct test *test)
1037 {
1038     u64 *data = (void*)(0x80000);
1039 
1040     *data = 0;
1041 }
1042 
1043 static bool npt_rw_check(struct test *test)
1044 {
1045     u64 *pte = npt_get_pte(0x80000);
1046 
1047     *pte |= (1ULL << 1);
1048 
1049     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1050            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
1051 }
1052 
1053 static void npt_rw_pfwalk_prepare(struct test *test)
1054 {
1055 
1056     u64 *pte;
1057 
1058     vmcb_ident(test->vmcb);
1059     pte = npt_get_pte(read_cr3());
1060 
1061     *pte &= ~(1ULL << 1);
1062 }
1063 
1064 static bool npt_rw_pfwalk_check(struct test *test)
1065 {
1066     u64 *pte = npt_get_pte(read_cr3());
1067 
1068     *pte |= (1ULL << 1);
1069 
1070     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1071            && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
1072 	   && (test->vmcb->control.exit_info_2 == read_cr3());
1073 }
1074 
1075 static void npt_rsvd_pfwalk_prepare(struct test *test)
1076 {
1077 
1078     vmcb_ident(test->vmcb);
1079 
1080     pdpe[0] |= (1ULL << 8);
1081 }
1082 
1083 static bool npt_rsvd_pfwalk_check(struct test *test)
1084 {
1085     pdpe[0] &= ~(1ULL << 8);
1086 
1087     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1088             && (test->vmcb->control.exit_info_1 == 0x20000000eULL);
1089 }
1090 
1091 static void npt_l1mmio_prepare(struct test *test)
1092 {
1093     vmcb_ident(test->vmcb);
1094 }
1095 
1096 u32 nested_apic_version1;
1097 u32 nested_apic_version2;
1098 
1099 static void npt_l1mmio_test(struct test *test)
1100 {
1101     volatile u32 *data = (volatile void*)(0xfee00030UL);
1102 
1103     nested_apic_version1 = *data;
1104     nested_apic_version2 = *data;
1105 }
1106 
1107 static bool npt_l1mmio_check(struct test *test)
1108 {
1109     volatile u32 *data = (volatile void*)(0xfee00030);
1110     u32 lvr = *data;
1111 
1112     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
1113 }
1114 
1115 static void npt_rw_l1mmio_prepare(struct test *test)
1116 {
1117 
1118     u64 *pte;
1119 
1120     vmcb_ident(test->vmcb);
1121     pte = npt_get_pte(0xfee00080);
1122 
1123     *pte &= ~(1ULL << 1);
1124 }
1125 
1126 static void npt_rw_l1mmio_test(struct test *test)
1127 {
1128     volatile u32 *data = (volatile void*)(0xfee00080);
1129 
1130     *data = *data;
1131 }
1132 
1133 static bool npt_rw_l1mmio_check(struct test *test)
1134 {
1135     u64 *pte = npt_get_pte(0xfee00080);
1136 
1137     *pte |= (1ULL << 1);
1138 
1139     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1140            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
1141 }
1142 
1143 #define TSC_ADJUST_VALUE    (1ll << 32)
1144 #define TSC_OFFSET_VALUE    (-1ll << 48)
1145 static bool ok;
1146 
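/*
 * IA32_TSC_ADJUST tracks direct writes to the TSC: writing IA32_TSC by a
 * delta X also moves TSC_ADJUST by X, and writing TSC_ADJUST moves the
 * TSC.  The test verifies the guest observes these semantics while running
 * with a negative VMCB TSC offset applied.
 */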
1147 static void tsc_adjust_prepare(struct test *test)
1148 {
1149     default_prepare(test);
1150     test->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
1151 
1152     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
1153     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1154     ok = adjust == -TSC_ADJUST_VALUE;
1155 }
1156 
1157 static void tsc_adjust_test(struct test *test)
1158 {
1159     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1160     ok &= adjust == -TSC_ADJUST_VALUE;
1161 
1162     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
1163     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
1164 
1165     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1166     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
1167 
1168     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
1169     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
1170 
1171     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
1172     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
1173 }
1174 
1175 static bool tsc_adjust_check(struct test *test)
1176 {
1177     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1178 
1179     wrmsr(MSR_IA32_TSC_ADJUST, 0);
1180     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
1181 }
1182 
1183 static void latency_prepare(struct test *test)
1184 {
1185     default_prepare(test);
1186     runs = LATENCY_RUNS;
1187     latvmrun_min = latvmexit_min = -1ULL;
1188     latvmrun_max = latvmexit_max = 0;
1189     vmrun_sum = vmexit_sum = 0;
1190 }
1191 
1192 static void latency_test(struct test *test)
1193 {
1194     u64 cycles;
1195 
1196 start:
1197     tsc_end = rdtsc();
1198 
1199     cycles = tsc_end - tsc_start;
1200 
1201     if (cycles > latvmrun_max)
1202         latvmrun_max = cycles;
1203 
1204     if (cycles < latvmrun_min)
1205         latvmrun_min = cycles;
1206 
1207     vmrun_sum += cycles;
1208 
1209     tsc_start = rdtsc();
1210 
1211     asm volatile ("vmmcall" : : : "memory");
1212     goto start;
1213 }
1214 
1215 static bool latency_finished(struct test *test)
1216 {
1217     u64 cycles;
1218 
1219     tsc_end = rdtsc();
1220 
1221     cycles = tsc_end - tsc_start;
1222 
1223     if (cycles > latvmexit_max)
1224         latvmexit_max = cycles;
1225 
1226     if (cycles < latvmexit_min)
1227         latvmexit_min = cycles;
1228 
1229     vmexit_sum += cycles;
1230 
1231     test->vmcb->save.rip += 3;
1232 
1233     runs -= 1;
1234 
1235     return runs == 0;
1236 }
1237 
1238 static bool latency_check(struct test *test)
1239 {
1240     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
1241             latvmrun_min, vmrun_sum / LATENCY_RUNS);
1242     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
1243             latvmexit_min, vmexit_sum / LATENCY_RUNS);
1244     return true;
1245 }
1246 
1247 static void lat_svm_insn_prepare(struct test *test)
1248 {
1249     default_prepare(test);
1250     runs = LATENCY_RUNS;
1251     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
1252     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
1253     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
1254 }
1255 
1256 static bool lat_svm_insn_finished(struct test *test)
1257 {
1258     u64 vmcb_phys = virt_to_phys(test->vmcb);
1259     u64 cycles;
1260 
1261     for ( ; runs != 0; runs--) {
1262         tsc_start = rdtsc();
1263         asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
1264         cycles = rdtsc() - tsc_start;
1265         if (cycles > latvmload_max)
1266             latvmload_max = cycles;
1267         if (cycles < latvmload_min)
1268             latvmload_min = cycles;
1269         vmload_sum += cycles;
1270 
1271         tsc_start = rdtsc();
1272         asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
1273         cycles = rdtsc() - tsc_start;
1274         if (cycles > latvmsave_max)
1275             latvmsave_max = cycles;
1276         if (cycles < latvmsave_min)
1277             latvmsave_min = cycles;
1278         vmsave_sum += cycles;
1279 
1280         tsc_start = rdtsc();
1281         asm volatile("stgi\n\t");
1282         cycles = rdtsc() - tsc_start;
1283         if (cycles > latstgi_max)
1284             latstgi_max = cycles;
1285         if (cycles < latstgi_min)
1286             latstgi_min = cycles;
1287         stgi_sum += cycles;
1288 
1289         tsc_start = rdtsc();
1290         asm volatile("clgi\n\t");
1291         cycles = rdtsc() - tsc_start;
1292         if (cycles > latclgi_max)
1293             latclgi_max = cycles;
1294         if (cycles < latclgi_min)
1295             latclgi_min = cycles;
1296         clgi_sum += cycles;
1297     }
1298 
1299     return true;
1300 }
1301 
1302 static bool lat_svm_insn_check(struct test *test)
1303 {
1304     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1305             latvmload_min, vmload_sum / LATENCY_RUNS);
1306     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1307             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1308     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1309             latstgi_min, stgi_sum / LATENCY_RUNS);
1310     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1311             latclgi_min, clgi_sum / LATENCY_RUNS);
1312     return true;
1313 }
1314 
1315 bool pending_event_ipi_fired;
1316 bool pending_event_guest_run;
1317 
1318 static void pending_event_ipi_isr(isr_regs_t *regs)
1319 {
1320     pending_event_ipi_fired = true;
1321     eoi();
1322 }
1323 
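/*
 * With INTERCEPT_INTR and V_INTR_MASKING set, a physical interrupt that is
 * pending at VMRUN (the self-IPI sent from prepare) should cause an
 * immediate SVM_EXIT_INTR before the guest runs; the host then takes the
 * interrupt once it re-enables RFLAGS.IF in pending_event_finished().
 */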
1324 static void pending_event_prepare(struct test *test)
1325 {
1326     int ipi_vector = 0xf1;
1327 
1328     default_prepare(test);
1329 
1330     pending_event_ipi_fired = false;
1331 
1332     handle_irq(ipi_vector, pending_event_ipi_isr);
1333 
1334     pending_event_guest_run = false;
1335 
1336     test->vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1337     test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1338 
1339     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1340                   APIC_DM_FIXED | ipi_vector, 0);
1341 
1342     set_test_stage(test, 0);
1343 }
1344 
1345 static void pending_event_test(struct test *test)
1346 {
1347     pending_event_guest_run = true;
1348 }
1349 
1350 static bool pending_event_finished(struct test *test)
1351 {
1352     switch (get_test_stage(test)) {
1353     case 0:
1354         if (test->vmcb->control.exit_code != SVM_EXIT_INTR) {
1355             report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
1356                    test->vmcb->control.exit_code);
1357             return true;
1358         }
1359 
1360         test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1361         test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1362 
1363         if (pending_event_guest_run) {
1364             report(false, "Guest ran before host received IPI");
1365             return true;
1366         }
1367 
1368         irq_enable();
1369         asm volatile ("nop");
1370         irq_disable();
1371 
1372         if (!pending_event_ipi_fired) {
1373             report(false, "Pending interrupt not dispatched after IRQ enabled");
1374             return true;
1375         }
1376         break;
1377 
1378     case 1:
1379         if (!pending_event_guest_run) {
1380             report(false, "Guest did not resume when no interrupt was pending");
1381             return true;
1382         }
1383         break;
1384     }
1385 
1386     inc_test_stage(test);
1387 
1388     return get_test_stage(test) == 2;
1389 }
1390 
1391 static bool pending_event_check(struct test *test)
1392 {
1393     return get_test_stage(test) == 2;
1394 }
1395 
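/*
 * Interrupt-masking variant: set_host_if = 0, so the host side of the
 * world switch keeps RFLAGS.IF clear while GIF is clear.  Stage 0 checks
 * that with V_INTR_MASKING clear the guest's own RFLAGS.IF gates delivery
 * of the pending self-IPI to the guest; stage 1 sets V_INTR_MASKING so the
 * next IPI stays pending until the host re-enables interrupts.
 */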
1396 static void pending_event_prepare_vmask(struct test *test)
1397 {
1398     default_prepare(test);
1399 
1400     pending_event_ipi_fired = false;
1401 
1402     set_host_if = 0;
1403 
1404     handle_irq(0xf1, pending_event_ipi_isr);
1405 
1406     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1407               APIC_DM_FIXED | 0xf1, 0);
1408 
1409     set_test_stage(test, 0);
1410 }
1411 
1412 static void pending_event_test_vmask(struct test *test)
1413 {
1414     if (pending_event_ipi_fired == true) {
1415         set_test_stage(test, -1);
1416         report(false, "Interrupt preceded guest");
1417         vmmcall();
1418     }
1419 
1420     irq_enable();
1421     asm volatile ("nop");
1422     irq_disable();
1423 
1424     if (pending_event_ipi_fired != true) {
1425         set_test_stage(test, -1);
1426         report(false, "Interrupt not triggered by guest");
1427     }
1428 
1429     vmmcall();
1430 
1431     irq_enable();
1432     asm volatile ("nop");
1433     irq_disable();
1434 }
1435 
1436 static bool pending_event_finished_vmask(struct test *test)
1437 {
1438     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1439         report(false, "VMEXIT not due to VMMCALL. Exit reason 0x%x",
1440                test->vmcb->control.exit_code);
1441         return true;
1442     }
1443 
1444     switch (get_test_stage(test)) {
1445     case 0:
1446         test->vmcb->save.rip += 3;
1447 
1448         pending_event_ipi_fired = false;
1449 
1450         test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1451 
1452         apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1453               APIC_DM_FIXED | 0xf1, 0);
1454 
1455         break;
1456 
1457     case 1:
1458         if (pending_event_ipi_fired == true) {
1459             report(false, "Interrupt triggered by guest");
1460             return true;
1461         }
1462 
1463         irq_enable();
1464         asm volatile ("nop");
1465         irq_disable();
1466 
1467         if (pending_event_ipi_fired != true) {
1468             report(false, "Interrupt not triggered by host");
1469             return true;
1470         }
1471 
1472         break;
1473 
1474     default:
1475         return true;
1476     }
1477 
1478     inc_test_stage(test);
1479 
1480     return get_test_stage(test) == 2;
1481 }
1482 
1483 static bool pending_event_check_vmask(struct test *test)
1484 {
1485     return get_test_stage(test) == 2;
1486 }
1487 
1488 static struct test tests[] = {
1489     { "null", default_supported, default_prepare, null_test,
1490       default_finished, null_check },
1491     { "vmrun", default_supported, default_prepare, test_vmrun,
1492        default_finished, check_vmrun },
1493     { "ioio", default_supported, prepare_ioio, test_ioio,
1494        ioio_finished, check_ioio },
1495     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
1496       null_test, default_finished, check_no_vmrun_int },
1497     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
1498       test_cr3_intercept, default_finished, check_cr3_intercept },
1499     { "cr3 read nointercept", default_supported, default_prepare,
1500       test_cr3_intercept, default_finished, check_cr3_nointercept },
1501     { "cr3 read intercept emulate", smp_supported,
1502       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
1503       default_finished, check_cr3_intercept },
1504     { "dr intercept check", default_supported, prepare_dr_intercept,
1505       test_dr_intercept, dr_intercept_finished, check_dr_intercept },
1506     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
1507       default_finished, check_next_rip },
1508     { "msr intercept check", default_supported, prepare_msr_intercept,
1509        test_msr_intercept, msr_intercept_finished, check_msr_intercept },
1510     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
1511        mode_switch_finished, check_mode_switch },
1512     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
1513        default_finished, check_asid_zero },
1514     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
1515        sel_cr0_bug_finished, sel_cr0_bug_check },
1516     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
1517 	    default_finished, npt_nx_check },
1518     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
1519 	    default_finished, npt_us_check },
1520     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
1521 	    default_finished, npt_rsvd_check },
1522     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
1523 	    default_finished, npt_rw_check },
1524     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
1525 	    default_finished, npt_rsvd_pfwalk_check },
1526     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
1527 	    default_finished, npt_rw_pfwalk_check },
1528     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
1529 	    default_finished, npt_l1mmio_check },
1530     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
1531 	    default_finished, npt_rw_l1mmio_check },
1532     { "tsc_adjust", default_supported, tsc_adjust_prepare, tsc_adjust_test,
1533        default_finished, tsc_adjust_check },
1534     { "latency_run_exit", default_supported, latency_prepare, latency_test,
1535       latency_finished, latency_check },
1536     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
1537       lat_svm_insn_finished, lat_svm_insn_check },
1538     { "pending_event", default_supported, pending_event_prepare,
1539       pending_event_test, pending_event_finished, pending_event_check },
1540     { "pending_event_vmask", default_supported, pending_event_prepare_vmask,
1541       pending_event_test_vmask, pending_event_finished_vmask,
1542       pending_event_check_vmask },
1543 };
1544 
1545 int main(int ac, char **av)
1546 {
1547     int i, nr;
1548     struct vmcb *vmcb;
1549 
1550     setup_vm();
1551     smp_init();
1552 
1553     if (!this_cpu_has(X86_FEATURE_SVM)) {
1554         printf("SVM not available\n");
1555         return report_summary();
1556     }
1557 
1558     setup_svm();
1559 
1560     vmcb = alloc_page();
1561 
1562     nr = ARRAY_SIZE(tests);
1563     for (i = 0; i < nr; ++i) {
1564         if (!tests[i].supported())
1565             continue;
1566         test_run(&tests[i], vmcb);
1567     }
1568 
1569     return report_summary();
1570 }
1571