xref: /kvm-unit-tests/x86/svm.c (revision 48a0145f4ebc1d6d1c045f13fe2b9d5f8600120f)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 #include "alloc_page.h"
10 #include "isr.h"
11 #include "apic.h"
12 
13 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
14 
15 /* for the nested page table */
16 u64 *pml4e;
17 u64 *pdpe;
18 u64 *pde[4];
19 u64 *pte[2048];
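/* 2048 page-table pages x 512 entries x 4 KiB cover the first 4 GiB of guest-physical memory */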
20 void *scratch_page;
21 
22 #define LATENCY_RUNS 1000000
23 
24 u64 tsc_start;
25 u64 tsc_end;
26 
27 u64 vmrun_sum, vmexit_sum;
28 u64 vmsave_sum, vmload_sum;
29 u64 stgi_sum, clgi_sum;
30 u64 latvmrun_max;
31 u64 latvmrun_min;
32 u64 latvmexit_max;
33 u64 latvmexit_min;
34 u64 latvmload_max;
35 u64 latvmload_min;
36 u64 latvmsave_max;
37 u64 latvmsave_min;
38 u64 latstgi_max;
39 u64 latstgi_min;
40 u64 latclgi_max;
41 u64 latclgi_min;
42 u64 runs;
43 
44 u8 *io_bitmap;
45 u8 io_bitmap_area[16384];
46 
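/* when non-zero, test_run() executes STI before VMRUN so the host IF is set while GIF is clear */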
47 u8 set_host_if;
48 
49 #define MSR_BITMAP_SIZE 8192
50 
51 u8 *msr_bitmap;
52 u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
53 
54 static bool npt_supported(void)
55 {
56 	return this_cpu_has(X86_FEATURE_NPT);
57 }
58 
59 static void setup_svm(void)
60 {
61     void *hsave = alloc_page();
62     u64 *page, address;
63     int i,j;
64 
65     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
66     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
67     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
68 
69     scratch_page = alloc_page();
70 
71     io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);
72 
73     msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
74 
75     if (!npt_supported())
76         return;
77 
78     printf("NPT detected - running all tests with NPT enabled\n");
79 
80     /*
81      * Nested paging supported - Build a nested page table
82      * Build the page-table bottom-up and map everything with 4k pages
83      * to get enough granularity for the NPT unit-tests.
84      */
85 
86     address = 0;
87 
88     /* PTE level */
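    /* 0x067 = Present | Writable | User | Accessed | Dirty */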
89     for (i = 0; i < 2048; ++i) {
90         page = alloc_page();
91 
92         for (j = 0; j < 512; ++j, address += 4096)
93             page[j] = address | 0x067ULL;
94 
95         pte[i] = page;
96     }
97 
98     /* PDE level */
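    /* 0x027 = Present | Writable | User | Accessed (Dirty only matters in the leaf PTEs) */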
99     for (i = 0; i < 4; ++i) {
100         page = alloc_page();
101 
102         for (j = 0; j < 512; ++j)
103             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
104 
105         pde[i] = page;
106     }
107 
108     /* PDPe level */
109     pdpe   = alloc_page();
110     for (i = 0; i < 4; ++i)
111        pdpe[i] = ((u64)(pde[i])) | 0x27;
112 
113     /* PML4e level */
114     pml4e    = alloc_page();
115     pml4e[0] = ((u64)pdpe) | 0x27;
116 }
117 
118 static u64 *npt_get_pde(u64 address)
119 {
120     int i1, i2;
121 
122     address >>= 21;
123     i1 = (address >> 9) & 0x3;
124     i2 = address & 0x1ff;
125 
126     return &pde[i1][i2];
127 }
128 
129 static u64 *npt_get_pte(u64 address)
130 {
131     int i1, i2;
132 
133     address >>= 12;
134     i1 = (address >> 9) & 0x7ff;
135     i2 = address & 0x1ff;
136 
137     return &pte[i1][i2];
138 }
139 
140 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
141                          u64 base, u32 limit, u32 attr)
142 {
143     seg->selector = selector;
144     seg->attrib = attr;
145     seg->limit = limit;
146     seg->base = base;
147 }
148 
149 static void vmcb_ident(struct vmcb *vmcb)
150 {
151     u64 vmcb_phys = virt_to_phys(vmcb);
152     struct vmcb_save_area *save = &vmcb->save;
153     struct vmcb_control_area *ctrl = &vmcb->control;
154     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
155         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
156     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
157         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
158     struct descriptor_table_ptr desc_table_ptr;
159 
160     memset(vmcb, 0, sizeof(*vmcb));
161     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
162     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
163     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
164     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
165     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
166     sgdt(&desc_table_ptr);
167     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
168     sidt(&desc_table_ptr);
169     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
170     ctrl->asid = 1;
171     save->cpl = 0;
172     save->efer = rdmsr(MSR_EFER);
173     save->cr4 = read_cr4();
174     save->cr3 = read_cr3();
175     save->cr0 = read_cr0();
176     save->dr7 = read_dr7();
177     save->dr6 = read_dr6();
178     save->cr2 = read_cr2();
179     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
180     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
181     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
182     ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
183     ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);
184 
185     if (npt_supported()) {
186         ctrl->nested_ctl = 1;
187         ctrl->nested_cr3 = (u64)pml4e;
188     }
189 }
190 
191 struct test {
192     const char *name;
193     bool (*supported)(void);
194     void (*prepare)(struct test *test);
195     void (*guest_func)(struct test *test);
196     bool (*finished)(struct test *test);
197     bool (*succeeded)(struct test *test);
198     struct vmcb *vmcb;
199     int exits;
200     ulong scratch;
201 };
202 
203 static inline void vmmcall(void)
204 {
205     asm volatile ("vmmcall" : : : "memory");
206 }
207 
208 static void test_thunk(struct test *test)
209 {
210     test->guest_func(test);
211     vmmcall();
212 }
213 
214 struct regs {
215         u64 rax;
216         u64 rbx;
217         u64 rcx;
218         u64 rdx;
219         u64 cr2;
220         u64 rbp;
221         u64 rsi;
222         u64 rdi;
223         u64 r8;
224         u64 r9;
225         u64 r10;
226         u64 r11;
227         u64 r12;
228         u64 r13;
229         u64 r14;
230         u64 r15;
231         u64 rflags;
232 };
233 
234 struct regs regs;
235 
236 // rax handled specially below
237 
238 #define SAVE_GPR_C                              \
239         "xchg %%rbx, regs+0x8\n\t"              \
240         "xchg %%rcx, regs+0x10\n\t"             \
241         "xchg %%rdx, regs+0x18\n\t"             \
242         "xchg %%rbp, regs+0x28\n\t"             \
243         "xchg %%rsi, regs+0x30\n\t"             \
244         "xchg %%rdi, regs+0x38\n\t"             \
245         "xchg %%r8, regs+0x40\n\t"              \
246         "xchg %%r9, regs+0x48\n\t"              \
247         "xchg %%r10, regs+0x50\n\t"             \
248         "xchg %%r11, regs+0x58\n\t"             \
249         "xchg %%r12, regs+0x60\n\t"             \
250         "xchg %%r13, regs+0x68\n\t"             \
251         "xchg %%r14, regs+0x70\n\t"             \
252         "xchg %%r15, regs+0x78\n\t"
253 
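/* xchg swaps host and guest values in a single pass, so the same macro serves for save and load */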
254 #define LOAD_GPR_C      SAVE_GPR_C
255 
256 static void test_run(struct test *test, struct vmcb *vmcb)
257 {
258     u64 vmcb_phys = virt_to_phys(vmcb);
259     u64 guest_stack[10000];
260 
261     irq_disable();
262     test->vmcb = vmcb;
263     set_host_if = 1;
264     test->prepare(test);
265     vmcb->save.rip = (ulong)test_thunk;
266     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
267     regs.rdi = (ulong)test;
268     do {
269         tsc_start = rdtsc();
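        /*
         * World switch: GIF is cleared with CLGI, STI is executed only when
         * set_host_if is non-zero, and VMLOAD pulls the extra guest state
         * from the VMCB.  Guest RFLAGS and RAX are copied between the regs
         * struct and the VMCB by hand because the GPR xchg macros do not
         * cover them; after #VMEXIT the copies are reversed, the guest state
         * is written back with VMSAVE and GIF is set again with STGI.
         */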
270         asm volatile (
271             "clgi \n\t"
272             "cmpb $0, set_host_if\n\t"
273             "jz 1f\n\t"
274             "sti \n\t"
275             "1: \n\t"
276             "vmload \n\t"
277             "mov regs+0x80, %%r15\n\t"  // rflags
278             "mov %%r15, 0x170(%0)\n\t"
279             "mov regs, %%r15\n\t"       // rax
280             "mov %%r15, 0x1f8(%0)\n\t"
281             LOAD_GPR_C
282             "vmrun \n\t"
283             SAVE_GPR_C
284             "mov 0x170(%0), %%r15\n\t"  // rflags
285             "mov %%r15, regs+0x80\n\t"
286             "mov 0x1f8(%0), %%r15\n\t"  // rax
287             "mov %%r15, regs\n\t"
288             "vmsave \n\t"
289             "cli \n\t"
290             "stgi"
291             : : "a"(vmcb_phys)
292             : "rbx", "rcx", "rdx", "rsi",
293               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
294               "memory");
295         tsc_end = rdtsc();
296         ++test->exits;
297     } while (!test->finished(test));
298     irq_enable();
299 
300     report(test->succeeded(test), "%s", test->name);
301 }
302 
303 static bool smp_supported(void)
304 {
305 	return cpu_count() > 1;
306 }
307 
308 static bool default_supported(void)
309 {
310     return true;
311 }
312 
313 static void default_prepare(struct test *test)
314 {
315     vmcb_ident(test->vmcb);
316 }
317 
318 static bool default_finished(struct test *test)
319 {
320     return true; /* one vmexit */
321 }
322 
323 static void null_test(struct test *test)
324 {
325 }
326 
327 static bool null_check(struct test *test)
328 {
329     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
330 }
331 
332 static void prepare_no_vmrun_int(struct test *test)
333 {
334     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
335 }
336 
337 static bool check_no_vmrun_int(struct test *test)
338 {
339     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
340 }
341 
342 static void test_vmrun(struct test *test)
343 {
344     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
345 }
346 
347 static bool check_vmrun(struct test *test)
348 {
349     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
350 }
351 
352 static void prepare_cr3_intercept(struct test *test)
353 {
354     default_prepare(test);
355     test->vmcb->control.intercept_cr_read |= 1 << 3;
356 }
357 
358 static void test_cr3_intercept(struct test *test)
359 {
360     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
361 }
362 
363 static bool check_cr3_intercept(struct test *test)
364 {
365     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
366 }
367 
368 static bool check_cr3_nointercept(struct test *test)
369 {
370     return null_check(test) && test->scratch == read_cr3();
371 }
372 
373 static void corrupt_cr3_intercept_bypass(void *_test)
374 {
375     struct test *test = _test;
376     extern volatile u32 mmio_insn;
377 
378     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
379         pause();
380     pause();
381     pause();
382     pause();
383     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
384 }
385 
386 static void prepare_cr3_intercept_bypass(struct test *test)
387 {
388     default_prepare(test);
389     test->vmcb->control.intercept_cr_read |= 1 << 3;
390     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
391 }
392 
393 static void test_cr3_intercept_bypass(struct test *test)
394 {
395     ulong a = 0xa0000;
396 
397     test->scratch = 1;
398     while (test->scratch != 2)
399         barrier();
400 
401     asm volatile ("mmio_insn: mov %0, (%0); nop"
402                   : "+a"(a) : : "memory");
403     test->scratch = a;
404 }
405 
406 static void prepare_dr_intercept(struct test *test)
407 {
408     default_prepare(test);
409     test->vmcb->control.intercept_dr_read = 0xff;
410     test->vmcb->control.intercept_dr_write = 0xff;
411 }
412 
413 static void test_dr_intercept(struct test *test)
414 {
415     unsigned int i, failcnt = 0;
416 
417     /* Loop testing debug register reads */
418     for (i = 0; i < 8; i++) {
419 
420         switch (i) {
421         case 0:
422             asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
423             break;
424         case 1:
425             asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
426             break;
427         case 2:
428             asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
429             break;
430         case 3:
431             asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
432             break;
433         case 4:
434             asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
435             break;
436         case 5:
437             asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
438             break;
439         case 6:
440             asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
441             break;
442         case 7:
443             asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
444             break;
445         }
446 
447         if (test->scratch != i) {
448             report(false, "dr%u read intercept", i);
449             failcnt++;
450         }
451     }
452 
453     /* Loop testing debug register writes */
454     for (i = 0; i < 8; i++) {
455 
456         switch (i) {
457         case 0:
458             asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
459             break;
460         case 1:
461             asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
462             break;
463         case 2:
464             asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
465             break;
466         case 3:
467             asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
468             break;
469         case 4:
470             asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
471             break;
472         case 5:
473             asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
474             break;
475         case 6:
476             asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
477             break;
478         case 7:
479             asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
480             break;
481         }
482 
483         if (test->scratch != i) {
484             report(false, "dr%u write intercept", i);
485             failcnt++;
486         }
487     }
488 
489     test->scratch = failcnt;
490 }
491 
492 static bool dr_intercept_finished(struct test *test)
493 {
494     ulong n = (test->vmcb->control.exit_code - SVM_EXIT_READ_DR0);
495 
496     /* Only expect DR intercepts */
497     if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
498         return true;
499 
500     /*
501      * Compute debug register number.
502      * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
503      * Programmer's Manual Volume 2 - System Programming:
504      * http://support.amd.com/TechDocs/24593.pdf
505      * there are 16 VMEXIT codes each for DR read and write.
506      */
507     test->scratch = (n % 16);
508 
509     /* Jump over MOV instruction */
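    /* MOV to/from DRn encodes as 0F 21/23 /r, i.e. a 3-byte instruction */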
510     test->vmcb->save.rip += 3;
511 
512     return false;
513 }
514 
515 static bool check_dr_intercept(struct test *test)
516 {
517     return !test->scratch;
518 }
519 
520 static bool next_rip_supported(void)
521 {
522     return this_cpu_has(X86_FEATURE_NRIPS);
523 }
524 
525 static void prepare_next_rip(struct test *test)
526 {
527     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
528 }
529 
530 
531 static void test_next_rip(struct test *test)
532 {
533     asm volatile ("rdtsc\n\t"
534                   ".globl exp_next_rip\n\t"
535                   "exp_next_rip:\n\t" ::: "eax", "edx");
536 }
537 
538 static bool check_next_rip(struct test *test)
539 {
540     extern char exp_next_rip;
541     unsigned long address = (unsigned long)&exp_next_rip;
542 
543     return address == test->vmcb->control.next_rip;
544 }
545 
546 static void prepare_msr_intercept(struct test *test)
547 {
548     default_prepare(test);
549     test->vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
550     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
551     memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
552 }
553 
554 static void test_msr_intercept(struct test *test)
555 {
556     unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
557     unsigned long msr_index;
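    /*
     * The MSR permission map covers three MSR ranges:
     * 0x00000000-0x00001fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.
     */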
558 
559     for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
560         if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
561             /*
562              * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
563              * Programmer's Manual volume 2 - System Programming:
564              * http://support.amd.com/TechDocs/24593.pdf
565              * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
566              */
567             continue;
568         }
569 
570         /* Skip the gaps between the MSR ranges covered by the MSR permission map */
571         if (msr_index == 0x2000)
572             msr_index = 0xc0000000;
573         else if (msr_index == 0xc0002000)
574             msr_index = 0xc0010000;
575 
576         test->scratch = -1;
577 
578         rdmsr(msr_index);
579 
580         /* Check that a read intercept occurred for MSR at msr_index */
581         if (test->scratch != msr_index)
582             report(false, "MSR 0x%lx read intercept", msr_index);
583 
584         /*
585          * Poor man's approach to generate a value that
586          * seems arbitrary each time around the loop.
587          */
588         msr_value += (msr_value << 1);
589 
590         wrmsr(msr_index, msr_value);
591 
592         /* Check that a write intercept occurred for MSR with msr_value */
593         if (test->scratch != msr_value)
594             report(false, "MSR 0x%lx write intercept", msr_index);
595     }
596 
597     test->scratch = -2;
598 }
599 
600 static bool msr_intercept_finished(struct test *test)
601 {
602     u32 exit_code = test->vmcb->control.exit_code;
603     u64 exit_info_1;
604     u8 *opcode;
605 
606     if (exit_code == SVM_EXIT_MSR) {
607         exit_info_1 = test->vmcb->control.exit_info_1;
608     } else {
609         /*
610          * If #GP exception occurs instead, check that it was
611          * for RDMSR/WRMSR and set exit_info_1 accordingly.
612          */
613 
614         if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
615             return true;
616 
617         opcode = (u8 *)test->vmcb->save.rip;
618         if (opcode[0] != 0x0f)
619             return true;
620 
621         switch (opcode[1]) {
622         case 0x30: /* WRMSR */
623             exit_info_1 = 1;
624             break;
625         case 0x32: /* RDMSR */
626             exit_info_1 = 0;
627             break;
628         default:
629             return true;
630         }
631 
632         /*
633          * Warn that a #GP exception occurred instead.
634          * RCX holds the MSR index.
635          */
636         printf("%s 0x%lx #GP exception\n",
637             exit_info_1 ? "WRMSR" : "RDMSR", regs.rcx);
638     }
639 
640     /* Jump over RDMSR/WRMSR instruction */
641     test->vmcb->save.rip += 2;
642 
643     /*
644      * Test whether the intercept was for RDMSR/WRMSR.
645      * For RDMSR, test->scratch is set to the MSR index;
646      *      RCX holds the MSR index.
647      * For WRMSR, test->scratch is set to the MSR value;
648      *      RDX holds the upper 32 bits of the MSR value,
649      *      while RAX holds its lower 32 bits.
650      */
651     if (exit_info_1)
652         test->scratch =
653             ((regs.rdx << 32) | (test->vmcb->save.rax & 0xffffffff));
654     else
655         test->scratch = regs.rcx;
656 
657     return false;
658 }
659 
660 static bool check_msr_intercept(struct test *test)
661 {
662     memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
663     return (test->scratch == -2);
664 }
665 
666 static void prepare_mode_switch(struct test *test)
667 {
668     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
669                                              |  (1ULL << UD_VECTOR)
670                                              |  (1ULL << DF_VECTOR)
671                                              |  (1ULL << PF_VECTOR);
672     test->scratch = 0;
673 }
674 
675 static void test_mode_switch(struct test *test)
676 {
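    /*
     * Drop from long mode to 32-bit protected mode, then 16-bit protected
     * mode, then real mode; VMMCALL there, climb back up to long mode and
     * VMMCALL again.
     */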
677     asm volatile("	cli\n"
678 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
679 		 "1:\n"
680 		 "	.long 2f\n"
681 		 "	.long " xstr(KERNEL_CS32) "\n"
682 		 ".code32\n"
683 		 "2:\n"
684 		 "	movl %%cr0, %%eax\n"
685 		 "	btcl  $31, %%eax\n" /* clear PG */
686 		 "	movl %%eax, %%cr0\n"
687 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
688 		 "	rdmsr\n"
689 		 "	btcl $8, %%eax\n" /* clear LME */
690 		 "	wrmsr\n"
691 		 "	movl %%cr4, %%eax\n"
692 		 "	btcl $5, %%eax\n" /* clear PAE */
693 		 "	movl %%eax, %%cr4\n"
694 		 "	movw %[ds16], %%ax\n"
695 		 "	movw %%ax, %%ds\n"
696 		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
697 		 ".code16\n"
698 		 "3:\n"
699 		 "	movl %%cr0, %%eax\n"
700 		 "	btcl $0, %%eax\n" /* clear PE  */
701 		 "	movl %%eax, %%cr0\n"
702 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
703 		 "4:\n"
704 		 "	vmmcall\n"
705 		 "	movl %%cr0, %%eax\n"
706 		 "	btsl $0, %%eax\n" /* set PE  */
707 		 "	movl %%eax, %%cr0\n"
708 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
709 		 ".code32\n"
710 		 "5:\n"
711 		 "	movl %%cr4, %%eax\n"
712 		 "	btsl $5, %%eax\n" /* set PAE */
713 		 "	movl %%eax, %%cr4\n"
714 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
715 		 "	rdmsr\n"
716 		 "	btsl $8, %%eax\n" /* set LME */
717 		 "	wrmsr\n"
718 		 "	movl %%cr0, %%eax\n"
719 		 "	btsl  $31, %%eax\n" /* set PG */
720 		 "	movl %%eax, %%cr0\n"
721 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
722 		 ".code64\n\t"
723 		 "6:\n"
724 		 "	vmmcall\n"
725 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
726 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
727 		 : "rax", "rbx", "rcx", "rdx", "memory");
728 }
729 
730 static bool mode_switch_finished(struct test *test)
731 {
732     u64 cr0, cr4, efer;
733 
734     cr0  = test->vmcb->save.cr0;
735     cr4  = test->vmcb->save.cr4;
736     efer = test->vmcb->save.efer;
737 
738     /* Only expect VMMCALL intercepts */
739     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
740 	    return true;
741 
742     /* Jump over VMMCALL instruction */
743     test->vmcb->save.rip += 3;
744 
745     /* Do sanity checks */
746     switch (test->scratch) {
747     case 0:
748         /* Test should be in real mode now - check for this */
749         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
750             (cr4  & 0x00000020) || /* CR4.PAE */
751             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
752                 return true;
753         break;
754     case 2:
755         /* Test should be back in long-mode now - check for this */
756         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
757             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
758             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
759 		    return true;
760 	break;
761     }
762 
763     /* one step forward */
764     test->scratch += 1;
765 
766     return test->scratch == 2;
767 }
768 
769 static bool check_mode_switch(struct test *test)
770 {
771 	return test->scratch == 2;
772 }
773 
774 static void prepare_ioio(struct test *test)
775 {
776     test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
777     test->scratch = 0;
778     memset(io_bitmap, 0, 8192);
779     io_bitmap[8192] = 0xFF;
780 }
781 
782 static int get_test_stage(struct test *test)
783 {
784     barrier();
785     return test->scratch;
786 }
787 
788 static void set_test_stage(struct test *test, int s)
789 {
790     barrier();
791     test->scratch = s;
792     barrier();
793 }
794 
795 static void inc_test_stage(struct test *test)
796 {
797     barrier();
798     test->scratch++;
799     barrier();
800 }
801 
802 static void test_ioio(struct test *test)
803 {
804     // stage 0, test IO pass
805     inb(0x5000);
806     outb(0x0, 0x5000);
807     if (get_test_stage(test) != 0)
808         goto fail;
809 
810     // test IO width, in/out
811     io_bitmap[0] = 0xFF;
812     inc_test_stage(test);
813     inb(0x0);
814     if (get_test_stage(test) != 2)
815         goto fail;
816 
817     outw(0x0, 0x0);
818     if (get_test_stage(test) != 3)
819         goto fail;
820 
821     inl(0x0);
822     if (get_test_stage(test) != 4)
823         goto fail;
824 
825     // test low/high IO port
826     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
827     inb(0x5000);
828     if (get_test_stage(test) != 5)
829         goto fail;
830 
831     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
832     inw(0x9000);
833     if (get_test_stage(test) != 6)
834         goto fail;
835 
836     // test partial pass
837     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
838     inl(0x4FFF);
839     if (get_test_stage(test) != 7)
840         goto fail;
841 
842     // test across pages
843     inc_test_stage(test);
844     inl(0x7FFF);
845     if (get_test_stage(test) != 8)
846         goto fail;
847 
848     inc_test_stage(test);
849     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
850     inl(0x7FFF);
851     if (get_test_stage(test) != 10)
852         goto fail;
853 
854     io_bitmap[0] = 0;
855     inl(0xFFFF);
856     if (get_test_stage(test) != 11)
857         goto fail;
858 
859     io_bitmap[0] = 0xFF;
860     io_bitmap[8192] = 0;
861     inl(0xFFFF);
862     inc_test_stage(test);
863     if (get_test_stage(test) != 12)
864         goto fail;
865 
866     return;
867 
868 fail:
869     report(false, "stage %d", get_test_stage(test));
870     test->scratch = -1;
871 }
872 
873 static bool ioio_finished(struct test *test)
874 {
875     unsigned port, size;
876 
877     /* Only expect IOIO intercepts */
878     if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
879         return true;
880 
881     if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
882         return true;
883 
884     /* one step forward */
885     test->scratch += 1;
886 
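    /*
     * For IOIO intercepts, EXITINFO1 carries the port number in bits 31:16
     * and a one-hot access size (1, 2 or 4 bytes) in the SZ bits.
     */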
887     port = test->vmcb->control.exit_info_1 >> 16;
888     size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
889 
890     while (size--) {
891         io_bitmap[port / 8] &= ~(1 << (port & 7));
892         port++;
893     }
894 
895     return false;
896 }
897 
898 static bool check_ioio(struct test *test)
899 {
900     memset(io_bitmap, 0, 8193);
901     return test->scratch != -1;
902 }
903 
904 static void prepare_asid_zero(struct test *test)
905 {
906     test->vmcb->control.asid = 0;
907 }
908 
909 static void test_asid_zero(struct test *test)
910 {
911     asm volatile ("vmmcall\n\t");
912 }
913 
914 static bool check_asid_zero(struct test *test)
915 {
916     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
917 }
918 
919 static void sel_cr0_bug_prepare(struct test *test)
920 {
921     vmcb_ident(test->vmcb);
922     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
923 }
924 
925 static bool sel_cr0_bug_finished(struct test *test)
926 {
927 	return true;
928 }
929 
930 static void sel_cr0_bug_test(struct test *test)
931 {
932     unsigned long cr0;
933 
934     /* read cr0, set CD, and write back */
935     cr0  = read_cr0();
936     cr0 |= (1UL << 30);
937     write_cr0(cr0);
938 
939     /*
940      * If we are here the test failed: we are no longer in guest mode, so
941      * we cannot trigger another intercept to recover. Report the failure
942      * and exit.
943      */
944     report(false, "sel_cr0 test. Cannot recover from this - exiting");
945     exit(report_summary());
946 }
947 
948 static bool sel_cr0_bug_check(struct test *test)
949 {
950     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
951 }
952 
953 static void npt_nx_prepare(struct test *test)
954 {
955 
956     u64 *pte;
957 
958     vmcb_ident(test->vmcb);
959     pte = npt_get_pte((u64)null_test);
960 
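    /* set the NX bit (bit 63) in the nested PTE that maps null_test */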
961     *pte |= (1ULL << 63);
962 }
963 
964 static bool npt_nx_check(struct test *test)
965 {
966     u64 *pte = npt_get_pte((u64)null_test);
967 
968     *pte &= ~(1ULL << 63);
969 
970     test->vmcb->save.efer |= (1 << 11);
971 
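    /*
     * EXITINFO1 of a #NPF holds a #PF-style error code; bit 32 indicates the
     * fault happened on the final guest-physical translation rather than
     * during the guest page-table walk (bit 33).
     */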
972     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
973            && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
974 }
975 
976 static void npt_us_prepare(struct test *test)
977 {
978     u64 *pte;
979 
980     vmcb_ident(test->vmcb);
981     pte = npt_get_pte((u64)scratch_page);
982 
983     *pte &= ~(1ULL << 2);
984 }
985 
986 static void npt_us_test(struct test *test)
987 {
988     (void) *(volatile u64 *)scratch_page;
989 }
990 
991 static bool npt_us_check(struct test *test)
992 {
993     u64 *pte = npt_get_pte((u64)scratch_page);
994 
995     *pte |= (1ULL << 2);
996 
997     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
998            && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
999 }
1000 
1001 u64 save_pde;
1002 
1003 static void npt_rsvd_prepare(struct test *test)
1004 {
1005     u64 *pde;
1006 
1007     vmcb_ident(test->vmcb);
1008     pde = npt_get_pde((u64) null_test);
1009 
1010     save_pde = *pde;
1011     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
1012 }
1013 
1014 static bool npt_rsvd_check(struct test *test)
1015 {
1016     u64 *pde = npt_get_pde((u64) null_test);
1017 
1018     *pde = save_pde;
1019 
1020     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1021             && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
1022 }
1023 
1024 static void npt_rw_prepare(struct test *test)
1025 {
1026 
1027     u64 *pte;
1028 
1029     vmcb_ident(test->vmcb);
1030     pte = npt_get_pte(0x80000);
1031 
1032     *pte &= ~(1ULL << 1);
1033 }
1034 
1035 static void npt_rw_test(struct test *test)
1036 {
1037     u64 *data = (void*)(0x80000);
1038 
1039     *data = 0;
1040 }
1041 
1042 static bool npt_rw_check(struct test *test)
1043 {
1044     u64 *pte = npt_get_pte(0x80000);
1045 
1046     *pte |= (1ULL << 1);
1047 
1048     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1049            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
1050 }
1051 
1052 static void npt_rw_pfwalk_prepare(struct test *test)
1053 {
1054 
1055     u64 *pte;
1056 
1057     vmcb_ident(test->vmcb);
1058     pte = npt_get_pte(read_cr3());
1059 
1060     *pte &= ~(1ULL << 1);
1061 }
1062 
1063 static bool npt_rw_pfwalk_check(struct test *test)
1064 {
1065     u64 *pte = npt_get_pte(read_cr3());
1066 
1067     *pte |= (1ULL << 1);
1068 
1069     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1070            && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
1071 	   && (test->vmcb->control.exit_info_2 == read_cr3());
1072 }
1073 
1074 static void npt_rsvd_pfwalk_prepare(struct test *test)
1075 {
1076 
1077     vmcb_ident(test->vmcb);
1078 
1079     pdpe[0] |= (1ULL << 8);
1080 }
1081 
1082 static bool npt_rsvd_pfwalk_check(struct test *test)
1083 {
1084     pdpe[0] &= ~(1ULL << 8);
1085 
1086     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1087             && (test->vmcb->control.exit_info_1 == 0x20000000eULL);
1088 }
1089 
1090 static void npt_l1mmio_prepare(struct test *test)
1091 {
1092     vmcb_ident(test->vmcb);
1093 }
1094 
1095 u32 nested_apic_version1;
1096 u32 nested_apic_version2;
1097 
1098 static void npt_l1mmio_test(struct test *test)
1099 {
1100     volatile u32 *data = (volatile void*)(0xfee00030UL);
1101 
1102     nested_apic_version1 = *data;
1103     nested_apic_version2 = *data;
1104 }
1105 
1106 static bool npt_l1mmio_check(struct test *test)
1107 {
1108     volatile u32 *data = (volatile void*)(0xfee00030);
1109     u32 lvr = *data;
1110 
1111     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
1112 }
1113 
1114 static void npt_rw_l1mmio_prepare(struct test *test)
1115 {
1116 
1117     u64 *pte;
1118 
1119     vmcb_ident(test->vmcb);
1120     pte = npt_get_pte(0xfee00080);
1121 
1122     *pte &= ~(1ULL << 1);
1123 }
1124 
1125 static void npt_rw_l1mmio_test(struct test *test)
1126 {
1127     volatile u32 *data = (volatile void*)(0xfee00080);
1128 
1129     *data = *data;
1130 }
1131 
1132 static bool npt_rw_l1mmio_check(struct test *test)
1133 {
1134     u64 *pte = npt_get_pte(0xfee00080);
1135 
1136     *pte |= (1ULL << 1);
1137 
1138     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1139            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
1140 }
1141 
1142 #define TSC_ADJUST_VALUE    (1ll << 32)
1143 #define TSC_OFFSET_VALUE    (-1ll << 48)
1144 static bool ok;
1145 
1146 static void tsc_adjust_prepare(struct test *test)
1147 {
1148     default_prepare(test);
1149     test->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
1150 
1151     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
1152     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1153     ok = adjust == -TSC_ADJUST_VALUE;
1154 }
1155 
1156 static void tsc_adjust_test(struct test *test)
1157 {
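    /*
     * Writing IA32_TSC moves IA32_TSC_ADJUST by the same delta, and guest
     * RDTSC returns the host TSC plus the VMCB tsc_offset; both properties
     * are relied on below.
     */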
1158     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1159     ok &= adjust == -TSC_ADJUST_VALUE;
1160 
1161     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
1162     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
1163 
1164     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1165     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
1166 
1167     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
1168     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
1169 
1170     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
1171     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
1172 }
1173 
1174 static bool tsc_adjust_check(struct test *test)
1175 {
1176     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1177 
1178     wrmsr(MSR_IA32_TSC_ADJUST, 0);
1179     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
1180 }
1181 
1182 static void latency_prepare(struct test *test)
1183 {
1184     default_prepare(test);
1185     runs = LATENCY_RUNS;
1186     latvmrun_min = latvmexit_min = -1ULL;
1187     latvmrun_max = latvmexit_max = 0;
1188     vmrun_sum = vmexit_sum = 0;
1189 }
1190 
1191 static void latency_test(struct test *test)
1192 {
1193     u64 cycles;
1194 
1195 start:
1196     tsc_end = rdtsc();
1197 
1198     cycles = tsc_end - tsc_start;
1199 
1200     if (cycles > latvmrun_max)
1201         latvmrun_max = cycles;
1202 
1203     if (cycles < latvmrun_min)
1204         latvmrun_min = cycles;
1205 
1206     vmrun_sum += cycles;
1207 
1208     tsc_start = rdtsc();
1209 
1210     asm volatile ("vmmcall" : : : "memory");
1211     goto start;
1212 }
1213 
1214 static bool latency_finished(struct test *test)
1215 {
1216     u64 cycles;
1217 
1218     tsc_end = rdtsc();
1219 
1220     cycles = tsc_end - tsc_start;
1221 
1222     if (cycles > latvmexit_max)
1223         latvmexit_max = cycles;
1224 
1225     if (cycles < latvmexit_min)
1226         latvmexit_min = cycles;
1227 
1228     vmexit_sum += cycles;
1229 
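    /* skip the guest's 3-byte VMMCALL so it re-enters its measurement loop */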
1230     test->vmcb->save.rip += 3;
1231 
1232     runs -= 1;
1233 
1234     return runs == 0;
1235 }
1236 
1237 static bool latency_check(struct test *test)
1238 {
1239     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
1240             latvmrun_min, vmrun_sum / LATENCY_RUNS);
1241     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
1242             latvmexit_min, vmexit_sum / LATENCY_RUNS);
1243     return true;
1244 }
1245 
1246 static void lat_svm_insn_prepare(struct test *test)
1247 {
1248     default_prepare(test);
1249     runs = LATENCY_RUNS;
1250     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
1251     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
1252     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
1253 }
1254 
1255 static bool lat_svm_insn_finished(struct test *test)
1256 {
1257     u64 vmcb_phys = virt_to_phys(test->vmcb);
1258     u64 cycles;
1259 
1260     for ( ; runs != 0; runs--) {
1261         tsc_start = rdtsc();
1262         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
1263         cycles = rdtsc() - tsc_start;
1264         if (cycles > latvmload_max)
1265             latvmload_max = cycles;
1266         if (cycles < latvmload_min)
1267             latvmload_min = cycles;
1268         vmload_sum += cycles;
1269 
1270         tsc_start = rdtsc();
1271         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
1272         cycles = rdtsc() - tsc_start;
1273         if (cycles > latvmsave_max)
1274             latvmsave_max = cycles;
1275         if (cycles < latvmsave_min)
1276             latvmsave_min = cycles;
1277         vmsave_sum += cycles;
1278 
1279         tsc_start = rdtsc();
1280         asm volatile("stgi\n\t");
1281         cycles = rdtsc() - tsc_start;
1282         if (cycles > latstgi_max)
1283             latstgi_max = cycles;
1284         if (cycles < latstgi_min)
1285             latstgi_min = cycles;
1286         stgi_sum += cycles;
1287 
1288         tsc_start = rdtsc();
1289         asm volatile("clgi\n\t");
1290         cycles = rdtsc() - tsc_start;
1291         if (cycles > latclgi_max)
1292             latclgi_max = cycles;
1293         if (cycles < latclgi_min)
1294             latclgi_min = cycles;
1295         clgi_sum += cycles;
1296     }
1297 
1298     return true;
1299 }
1300 
1301 static bool lat_svm_insn_check(struct test *test)
1302 {
1303     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1304             latvmload_min, vmload_sum / LATENCY_RUNS);
1305     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1306             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1307     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1308             latstgi_min, stgi_sum / LATENCY_RUNS);
1309     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1310             latclgi_min, clgi_sum / LATENCY_RUNS);
1311     return true;
1312 }
1313 
1314 bool pending_event_ipi_fired;
1315 bool pending_event_guest_run;
1316 
1317 static void pending_event_ipi_isr(isr_regs_t *regs)
1318 {
1319     pending_event_ipi_fired = true;
1320     eoi();
1321 }
1322 
1323 static void pending_event_prepare(struct test *test)
1324 {
1325     int ipi_vector = 0xf1;
1326 
1327     default_prepare(test);
1328 
1329     pending_event_ipi_fired = false;
1330 
1331     handle_irq(ipi_vector, pending_event_ipi_isr);
1332 
1333     pending_event_guest_run = false;
1334 
1335     test->vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1336     test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1337 
1338     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1339                   APIC_DM_FIXED | ipi_vector, 0);
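
    /*
     * IRQs are disabled here, so the self-IPI stays pending; with INTR
     * intercepted and V_INTR_MASKING set, VMRUN is expected to exit with
     * SVM_EXIT_INTR before the guest runs.
     */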
1340 
1341     set_test_stage(test, 0);
1342 }
1343 
1344 static void pending_event_test(struct test *test)
1345 {
1346     pending_event_guest_run = true;
1347 }
1348 
1349 static bool pending_event_finished(struct test *test)
1350 {
1351     switch (get_test_stage(test)) {
1352     case 0:
1353         if (test->vmcb->control.exit_code != SVM_EXIT_INTR) {
1354             report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
1355                    test->vmcb->control.exit_code);
1356             return true;
1357         }
1358 
1359         test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1360         test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1361 
1362         if (pending_event_guest_run) {
1363             report(false, "Guest ran before host received IPI");
1364             return true;
1365         }
1366 
1367         irq_enable();
1368         asm volatile ("nop");
1369         irq_disable();
1370 
1371         if (!pending_event_ipi_fired) {
1372             report(false, "Pending interrupt not dispatched after IRQ enabled");
1373             return true;
1374         }
1375         break;
1376 
1377     case 1:
1378         if (!pending_event_guest_run) {
1379             report(false, "Guest did not resume when no interrupt was pending");
1380             return true;
1381         }
1382         break;
1383     }
1384 
1385     inc_test_stage(test);
1386 
1387     return get_test_stage(test) == 2;
1388 }
1389 
1390 static bool pending_event_check(struct test *test)
1391 {
1392     return get_test_stage(test) == 2;
1393 }
1394 
1395 static void pending_event_prepare_vmask(struct test *test)
1396 {
1397     default_prepare(test);
1398 
1399     pending_event_ipi_fired = false;
1400 
1401     set_host_if = 0;
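    /* keep the host IF clear around VMRUN; without V_INTR_MASKING the guest's own RFLAGS.IF gates interrupt delivery */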
1402 
1403     handle_irq(0xf1, pending_event_ipi_isr);
1404 
1405     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1406               APIC_DM_FIXED | 0xf1, 0);
1407 
1408     set_test_stage(test, 0);
1409 }
1410 
1411 static void pending_event_test_vmask(struct test *test)
1412 {
1413     if (pending_event_ipi_fired == true) {
1414         set_test_stage(test, -1);
1415         report(false, "Interrupt preceded guest");
1416         vmmcall();
1417     }
1418 
1419     irq_enable();
1420     asm volatile ("nop");
1421     irq_disable();
1422 
1423     if (pending_event_ipi_fired != true) {
1424         set_test_stage(test, -1);
1425         report(false, "Interrupt not triggered by guest");
1426     }
1427 
1428     vmmcall();
1429 
1430     irq_enable();
1431     asm volatile ("nop");
1432     irq_disable();
1433 }
1434 
1435 static bool pending_event_finished_vmask(struct test *test)
1436 {
1437     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1438         report(false, "VMEXIT not due to VMMCALL. Exit reason 0x%x",
1439                test->vmcb->control.exit_code);
1440         return true;
1441     }
1442 
1443     switch (get_test_stage(test)) {
1444     case 0:
1445         test->vmcb->save.rip += 3;
1446 
1447         pending_event_ipi_fired = false;
1448 
1449         test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1450 
1451         apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1452               APIC_DM_FIXED | 0xf1, 0);
1453 
1454         break;
1455 
1456     case 1:
1457         if (pending_event_ipi_fired == true) {
1458             report(false, "Interrupt triggered by guest");
1459             return true;
1460         }
1461 
1462         irq_enable();
1463         asm volatile ("nop");
1464         irq_disable();
1465 
1466         if (pending_event_ipi_fired != true) {
1467             report(false, "Interrupt not triggered by host");
1468             return true;
1469         }
1470 
1471         break;
1472 
1473     default:
1474         return true;
1475     }
1476 
1477     inc_test_stage(test);
1478 
1479     return get_test_stage(test) == 2;
1480 }
1481 
1482 static bool pending_event_check_vmask(struct test *test)
1483 {
1484     return get_test_stage(test) == 2;
1485 }
1486 
1487 static struct test tests[] = {
1488     { "null", default_supported, default_prepare, null_test,
1489       default_finished, null_check },
1490     { "vmrun", default_supported, default_prepare, test_vmrun,
1491        default_finished, check_vmrun },
1492     { "ioio", default_supported, prepare_ioio, test_ioio,
1493        ioio_finished, check_ioio },
1494     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
1495       null_test, default_finished, check_no_vmrun_int },
1496     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
1497       test_cr3_intercept, default_finished, check_cr3_intercept },
1498     { "cr3 read nointercept", default_supported, default_prepare,
1499       test_cr3_intercept, default_finished, check_cr3_nointercept },
1500     { "cr3 read intercept emulate", smp_supported,
1501       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
1502       default_finished, check_cr3_intercept },
1503     { "dr intercept check", default_supported, prepare_dr_intercept,
1504       test_dr_intercept, dr_intercept_finished, check_dr_intercept },
1505     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
1506       default_finished, check_next_rip },
1507     { "msr intercept check", default_supported, prepare_msr_intercept,
1508        test_msr_intercept, msr_intercept_finished, check_msr_intercept },
1509     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
1510        mode_switch_finished, check_mode_switch },
1511     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
1512        default_finished, check_asid_zero },
1513     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
1514        sel_cr0_bug_finished, sel_cr0_bug_check },
1515     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
1516 	    default_finished, npt_nx_check },
1517     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
1518 	    default_finished, npt_us_check },
1519     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
1520 	    default_finished, npt_rsvd_check },
1521     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
1522 	    default_finished, npt_rw_check },
1523     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
1524 	    default_finished, npt_rsvd_pfwalk_check },
1525     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
1526 	    default_finished, npt_rw_pfwalk_check },
1527     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
1528 	    default_finished, npt_l1mmio_check },
1529     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
1530 	    default_finished, npt_rw_l1mmio_check },
1531     { "tsc_adjust", default_supported, tsc_adjust_prepare, tsc_adjust_test,
1532        default_finished, tsc_adjust_check },
1533     { "latency_run_exit", default_supported, latency_prepare, latency_test,
1534       latency_finished, latency_check },
1535     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
1536       lat_svm_insn_finished, lat_svm_insn_check },
1537     { "pending_event", default_supported, pending_event_prepare,
1538       pending_event_test, pending_event_finished, pending_event_check },
1539     { "pending_event_vmask", default_supported, pending_event_prepare_vmask,
1540       pending_event_test_vmask, pending_event_finished_vmask,
1541       pending_event_check_vmask },
1542 };
1543 
1544 int main(int ac, char **av)
1545 {
1546     int i, nr;
1547     struct vmcb *vmcb;
1548 
1549     setup_vm();
1550     smp_init();
1551 
1552     if (!this_cpu_has(X86_FEATURE_SVM)) {
1553         printf("SVM not available\n");
1554         return report_summary();
1555     }
1556 
1557     setup_svm();
1558 
1559     vmcb = alloc_page();
1560 
1561     nr = ARRAY_SIZE(tests);
1562     for (i = 0; i < nr; ++i) {
1563         if (!tests[i].supported())
1564             continue;
1565         test_run(&tests[i], vmcb);
1566     }
1567 
1568     return report_summary();
1569 }
1570