xref: /kvm-unit-tests/x86/svm.c (revision 2b934609604d16126499c8acef2808d84990968f)
1 #include "svm.h"
2 #include "libcflat.h"
3 #include "processor.h"
4 #include "desc.h"
5 #include "msr.h"
6 #include "vm.h"
7 #include "smp.h"
8 #include "types.h"
9 #include "alloc_page.h"
10 #include "isr.h"
11 #include "apic.h"
12 
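/*
 * Highest DR-intercept exit code: per the APM, DR0-15 read exits occupy
 * 0x20-0x2f and DR0-15 write exits 0x30-0x3f, so 0x3f (write of DR15) is
 * the last one this test harness expects to see.
 */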
13 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
14 
15 /* for the nested page table */
16 u64 *pml4e;
17 u64 *pdpe;
18 u64 *pde[4];
19 u64 *pte[2048];
20 void *scratch_page;
21 
22 #define LATENCY_RUNS 1000000
23 
24 u64 tsc_start;
25 u64 tsc_end;
26 
27 u64 vmrun_sum, vmexit_sum;
28 u64 vmsave_sum, vmload_sum;
29 u64 stgi_sum, clgi_sum;
30 u64 latvmrun_max;
31 u64 latvmrun_min;
32 u64 latvmexit_max;
33 u64 latvmexit_min;
34 u64 latvmload_max;
35 u64 latvmload_min;
36 u64 latvmsave_max;
37 u64 latvmsave_min;
38 u64 latstgi_max;
39 u64 latstgi_min;
40 u64 latclgi_max;
41 u64 latclgi_min;
42 u64 runs;
43 
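/*
 * The I/O permission map and MSR permission map handed to the VMCB are kept
 * page aligned, so both static areas are over-allocated and the working
 * pointers below are rounded up to the next page boundary in setup_svm().
 */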
44 u8 *io_bitmap;
45 u8 io_bitmap_area[16384];
46 
47 #define MSR_BITMAP_SIZE 8192
48 
49 u8 *msr_bitmap;
50 u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];
51 
52 static bool npt_supported(void)
53 {
54 	return this_cpu_has(X86_FEATURE_NPT);
55 }
56 
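/*
 * Enable SVM: point MSR_VM_HSAVE_PA at a host-save page and set EFER.SVME.
 * When NPT is available, also build a nested page table that identity-maps
 * the first 4 GiB with 4 KiB granularity, so the NPT tests can remap or
 * change the protection of individual guest-physical pages.
 */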
57 static void setup_svm(void)
58 {
59     void *hsave = alloc_page();
60     u64 *page, address;
61     int i,j;
62 
63     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
64     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
65     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
66 
67     scratch_page = alloc_page();
68 
69     io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);
70 
71     msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
72 
73     if (!npt_supported())
74         return;
75 
76     printf("NPT detected - running all tests with NPT enabled\n");
77 
78     /*
79      * Nested paging supported - Build a nested page table
80      * Build the page-table bottom-up and map everything with 4k pages
81      * to get enough granularity for the NPT unit-tests.
82      */
83 
84     address = 0;
85 
86     /* PTE level */
87     for (i = 0; i < 2048; ++i) {
88         page = alloc_page();
89 
90         for (j = 0; j < 512; ++j, address += 4096)
91             page[j] = address | 0x067ULL;
92 
93         pte[i] = page;
94     }
95 
96     /* PDE level */
97     for (i = 0; i < 4; ++i) {
98         page = alloc_page();
99 
100         for (j = 0; j < 512; ++j)
101             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
102 
103         pde[i] = page;
104     }
105 
106     /* PDPe level */
107     pdpe   = alloc_page();
108     for (i = 0; i < 4; ++i)
109        pdpe[i] = ((u64)(pde[i])) | 0x27;
110 
111     /* PML4e level */
112     pml4e    = alloc_page();
113     pml4e[0] = ((u64)pdpe) | 0x27;
114 }
115 
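/*
 * Look up the nested page table entry that maps a given guest-physical
 * address, using the flat pde[]/pte[] arrays built in setup_svm(): 4 PDE
 * pages and 2048 PTE pages of 512 entries each cover the identity-mapped
 * first 4 GiB.
 */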
116 static u64 *npt_get_pde(u64 address)
117 {
118     int i1, i2;
119 
120     address >>= 21;
121     i1 = (address >> 9) & 0x3;
122     i2 = address & 0x1ff;
123 
124     return &pde[i1][i2];
125 }
126 
127 static u64 *npt_get_pte(u64 address)
128 {
129     int i1, i2;
130 
131     address >>= 12;
132     i1 = (address >> 9) & 0x7ff;
133     i2 = address & 0x1ff;
134 
135     return &pte[i1][i2];
136 }
137 
138 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
139                          u64 base, u32 limit, u32 attr)
140 {
141     seg->selector = selector;
142     seg->attrib = attr;
143     seg->limit = limit;
144     seg->base = base;
145 }
146 
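/*
 * Initialize a VMCB so the guest starts out as a near copy of the host:
 * flat 64-bit segments, the host's control registers, EFER and PAT, ASID 1,
 * and intercepts for VMRUN (architecturally required) and VMMCALL (used by
 * the guests to exit on demand). With NPT available, nested paging is
 * enabled using the identity-mapped table from setup_svm().
 */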
147 static void vmcb_ident(struct vmcb *vmcb)
148 {
149     u64 vmcb_phys = virt_to_phys(vmcb);
150     struct vmcb_save_area *save = &vmcb->save;
151     struct vmcb_control_area *ctrl = &vmcb->control;
152     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
153         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
154     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
155         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
156     struct descriptor_table_ptr desc_table_ptr;
157 
158     memset(vmcb, 0, sizeof(*vmcb));
159     asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
160     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
161     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
162     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
163     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
164     sgdt(&desc_table_ptr);
165     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
166     sidt(&desc_table_ptr);
167     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
168     ctrl->asid = 1;
169     save->cpl = 0;
170     save->efer = rdmsr(MSR_EFER);
171     save->cr4 = read_cr4();
172     save->cr3 = read_cr3();
173     save->cr0 = read_cr0();
174     save->dr7 = read_dr7();
175     save->dr6 = read_dr6();
176     save->cr2 = read_cr2();
177     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
178     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
179     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
180     ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
181     ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);
182 
183     if (npt_supported()) {
184         ctrl->nested_ctl = 1;
185         ctrl->nested_cr3 = (u64)pml4e;
186     }
187 }
188 
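/*
 * One test case: prepare() runs in the host before the first VMRUN,
 * prepare_gif_clear() runs with GIF clear right before each guest entry,
 * guest_func() is the guest body, finished() is called after every exit and
 * returns true to stop the run loop, and succeeded() gives the final verdict.
 */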
189 struct test {
190     const char *name;
191     bool (*supported)(void);
192     void (*prepare)(struct test *test);
193     void (*prepare_gif_clear)(struct test *test);
194     void (*guest_func)(struct test *test);
195     bool (*finished)(struct test *test);
196     bool (*succeeded)(struct test *test);
197     struct vmcb *vmcb;
198     int exits;
199     ulong scratch;
200 };
201 
202 static inline void vmmcall(void)
203 {
204     asm volatile ("vmmcall" : : : "memory");
205 }
206 
207 static void test_thunk(struct test *test)
208 {
209     test->guest_func(test);
210     vmmcall();
211 }
212 
213 struct regs {
214         u64 rax;
215         u64 rbx;
216         u64 rcx;
217         u64 rdx;
218         u64 cr2;
219         u64 rbp;
220         u64 rsi;
221         u64 rdi;
222         u64 r8;
223         u64 r9;
224         u64 r10;
225         u64 r11;
226         u64 r12;
227         u64 r13;
228         u64 r14;
229         u64 r15;
230         u64 rflags;
231 };
232 
233 struct regs regs;
234 
235 // rax handled specially below
236 
237 #define SAVE_GPR_C                              \
238         "xchg %%rbx, regs+0x8\n\t"              \
239         "xchg %%rcx, regs+0x10\n\t"             \
240         "xchg %%rdx, regs+0x18\n\t"             \
241         "xchg %%rbp, regs+0x28\n\t"             \
242         "xchg %%rsi, regs+0x30\n\t"             \
243         "xchg %%rdi, regs+0x38\n\t"             \
244         "xchg %%r8, regs+0x40\n\t"              \
245         "xchg %%r9, regs+0x48\n\t"              \
246         "xchg %%r10, regs+0x50\n\t"             \
247         "xchg %%r11, regs+0x58\n\t"             \
248         "xchg %%r12, regs+0x60\n\t"             \
249         "xchg %%r13, regs+0x68\n\t"             \
250         "xchg %%r14, regs+0x70\n\t"             \
251         "xchg %%r15, regs+0x78\n\t"
252 
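/*
 * The xchg sequence swaps the CPU registers with the in-memory regs image,
 * so the same macro both saves the current GPRs and loads the previously
 * saved ones; hence LOAD_GPR_C is simply SAVE_GPR_C again.
 */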
253 #define LOAD_GPR_C      SAVE_GPR_C
254 
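/*
 * Run one test: call its prepare() hook, point the guest at test_thunk()
 * with a private stack, then loop on VMRUN until finished() says the test
 * is done. GIF is cleared around each guest entry so the host cannot take
 * interrupts while the guest register image is being swapped in and out.
 */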
255 static void test_run(struct test *test, struct vmcb *vmcb)
256 {
257     u64 vmcb_phys = virt_to_phys(vmcb);
258     u64 guest_stack[10000];
259 
260     irq_disable();
261     test->vmcb = vmcb;
262     test->prepare(test);
263     vmcb->save.rip = (ulong)test_thunk;
264     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
265     regs.rdi = (ulong)test;
266     do {
267         struct test *the_test = test;
268         u64 the_vmcb = vmcb_phys;
269         tsc_start = rdtsc();
270         asm volatile (
271             "clgi;\n\t" // semi-colon needed for LLVM compatibility
272             "sti \n\t"
273             "call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
274             "mov %[vmcb_phys], %%rax \n\t"
275             "vmload %%rax\n\t"
276             "mov regs+0x80, %%r15\n\t"  // rflags
277             "mov %%r15, 0x170(%%rax)\n\t"
278             "mov regs, %%r15\n\t"       // rax
279             "mov %%r15, 0x1f8(%%rax)\n\t"
280             LOAD_GPR_C
281             "vmrun %%rax\n\t"
282             SAVE_GPR_C
283             "mov 0x170(%%rax), %%r15\n\t"  // rflags
284             "mov %%r15, regs+0x80\n\t"
285             "mov 0x1f8(%%rax), %%r15\n\t"  // rax
286             "mov %%r15, regs\n\t"
287             "vmsave %%rax\n\t"
288             "cli \n\t"
289             "stgi"
290             : // inputs clobbered by the guest:
291 	      "=D" (the_test),            // first argument register
292 	      "=b" (the_vmcb)             // callee save register!
293             : [test] "0" (the_test),
294 	      [vmcb_phys] "1"(the_vmcb),
295 	      [PREPARE_GIF_CLEAR] "i" (offsetof(struct test, prepare_gif_clear))
296             : "rax", "rcx", "rdx", "rsi",
297               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
298               "memory");
299 	tsc_end = rdtsc();
300         ++test->exits;
301     } while (!test->finished(test));
302     irq_enable();
303 
304     report(test->succeeded(test), "%s", test->name);
305 }
306 
307 static bool smp_supported(void)
308 {
309 	return cpu_count() > 1;
310 }
311 
312 static bool default_supported(void)
313 {
314     return true;
315 }
316 
317 static void default_prepare(struct test *test)
318 {
319     vmcb_ident(test->vmcb);
320 }
321 
322 static void default_prepare_gif_clear(struct test *test)
323 {
324 }
325 
326 static bool default_finished(struct test *test)
327 {
328     return true; /* one vmexit */
329 }
330 
331 static void null_test(struct test *test)
332 {
333 }
334 
335 static bool null_check(struct test *test)
336 {
337     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
338 }
339 
340 static void prepare_no_vmrun_int(struct test *test)
341 {
342     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
343 }
344 
345 static bool check_no_vmrun_int(struct test *test)
346 {
347     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
348 }
349 
350 static void test_vmrun(struct test *test)
351 {
352     asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
353 }
354 
355 static bool check_vmrun(struct test *test)
356 {
357     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
358 }
359 
360 static void prepare_cr3_intercept(struct test *test)
361 {
362     default_prepare(test);
363     test->vmcb->control.intercept_cr_read |= 1 << 3;
364 }
365 
366 static void test_cr3_intercept(struct test *test)
367 {
368     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
369 }
370 
371 static bool check_cr3_intercept(struct test *test)
372 {
373     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
374 }
375 
376 static bool check_cr3_nointercept(struct test *test)
377 {
378     return null_check(test) && test->scratch == read_cr3();
379 }
380 
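/*
 * Helper for the "cr3 read intercept emulate" test, run on CPU 1: once the
 * guest on CPU 0 signals readiness via test->scratch, rewrite the guest's
 * mmio_insn so the emulated MMIO access becomes "mov %cr3, %rax; nop". The
 * idea is that the rewritten instruction must still hit the CR3 read
 * intercept, i.e. a stale, previously fetched copy must not bypass it.
 */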
381 static void corrupt_cr3_intercept_bypass(void *_test)
382 {
383     struct test *test = _test;
384     extern volatile u32 mmio_insn;
385 
386     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
387         pause();
388     pause();
389     pause();
390     pause();
391     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
392 }
393 
394 static void prepare_cr3_intercept_bypass(struct test *test)
395 {
396     default_prepare(test);
397     test->vmcb->control.intercept_cr_read |= 1 << 3;
398     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
399 }
400 
401 static void test_cr3_intercept_bypass(struct test *test)
402 {
403     ulong a = 0xa0000;
404 
405     test->scratch = 1;
406     while (test->scratch != 2)
407         barrier();
408 
409     asm volatile ("mmio_insn: mov %0, (%0); nop"
410                   : "+a"(a) : : "memory");
411     test->scratch = a;
412 }
413 
414 static void prepare_dr_intercept(struct test *test)
415 {
416     default_prepare(test);
417     test->vmcb->control.intercept_dr_read = 0xff;
418     test->vmcb->control.intercept_dr_write = 0xff;
419 }
420 
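/*
 * Every DR0-DR7 access below should be intercepted. dr_intercept_finished()
 * stores the register number in test->scratch and skips the MOV, so if
 * test->scratch still differs from i the intercept did not fire.
 */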
421 static void test_dr_intercept(struct test *test)
422 {
423     unsigned int i, failcnt = 0;
424 
425     /* Loop testing debug register reads */
426     for (i = 0; i < 8; i++) {
427 
428         switch (i) {
429         case 0:
430             asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
431             break;
432         case 1:
433             asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
434             break;
435         case 2:
436             asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
437             break;
438         case 3:
439             asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
440             break;
441         case 4:
442             asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
443             break;
444         case 5:
445             asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
446             break;
447         case 6:
448             asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
449             break;
450         case 7:
451             asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
452             break;
453         }
454 
455         if (test->scratch != i) {
456             report(false, "dr%u read intercept", i);
457             failcnt++;
458         }
459     }
460 
461     /* Loop testing debug register writes */
462     for (i = 0; i < 8; i++) {
463 
464         switch (i) {
465         case 0:
466             asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
467             break;
468         case 1:
469             asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
470             break;
471         case 2:
472             asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
473             break;
474         case 3:
475             asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
476             break;
477         case 4:
478             asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
479             break;
480         case 5:
481             asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
482             break;
483         case 6:
484             asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
485             break;
486         case 7:
487             asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
488             break;
489         }
490 
491         if (test->scratch != i) {
492             report(false, "dr%u write intercept", i);
493             failcnt++;
494         }
495     }
496 
497     test->scratch = failcnt;
498 }
499 
500 static bool dr_intercept_finished(struct test *test)
501 {
502     ulong n = (test->vmcb->control.exit_code - SVM_EXIT_READ_DR0);
503 
504     /* Only expect DR intercepts */
505     if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
506         return true;
507 
508     /*
509      * Compute debug register number.
510      * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
511      * Programmer's Manual Volume 2 - System Programming:
512      * http://support.amd.com/TechDocs/24593.pdf
513      * there are 16 VMEXIT codes each for DR read and write.
514      */
515     test->scratch = (n % 16);
516 
517     /* Jump over MOV instruction */
518     test->vmcb->save.rip += 3;
519 
520     return false;
521 }
522 
523 static bool check_dr_intercept(struct test *test)
524 {
525     return !test->scratch;
526 }
527 
528 static bool next_rip_supported(void)
529 {
530     return this_cpu_has(X86_FEATURE_NRIPS);
531 }
532 
533 static void prepare_next_rip(struct test *test)
534 {
535     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
536 }
537 
538 
539 static void test_next_rip(struct test *test)
540 {
541     asm volatile ("rdtsc\n\t"
542                   ".globl exp_next_rip\n\t"
543                   "exp_next_rip:\n\t" ::: "eax", "edx");
544 }
545 
546 static bool check_next_rip(struct test *test)
547 {
548     extern char exp_next_rip;
549     unsigned long address = (unsigned long)&exp_next_rip;
550 
551     return address == test->vmcb->control.next_rip;
552 }
553 
554 static void prepare_msr_intercept(struct test *test)
555 {
556     default_prepare(test);
557     test->vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
558     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
559     memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
560 }
561 
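/*
 * With every bit in the MSR permission map set, each RDMSR/WRMSR below
 * should exit. The guest walks the three MSR ranges the map covers
 * (0x0-0x1fff, 0xc0000000-0xc0001fff, 0xc0010000-0xc0011fff);
 * msr_intercept_finished() records the intercepted index or value in
 * test->scratch so the guest can verify the exit really happened.
 */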
562 static void test_msr_intercept(struct test *test)
563 {
564     unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
565     unsigned long msr_index;
566 
567     for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
568         if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
569             /*
570              * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
571              * Programmer's Manual volume 2 - System Programming:
572              * http://support.amd.com/TechDocs/24593.pdf
573              * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
574              */
575             continue;
576         }
577 
578         /* Skips gaps between supported MSR ranges */
579         if (msr_index == 0x2000)
580             msr_index = 0xc0000000;
581         else if (msr_index == 0xc0002000)
582             msr_index = 0xc0010000;
583 
584         test->scratch = -1;
585 
586         rdmsr(msr_index);
587 
588         /* Check that a read intercept occurred for MSR at msr_index */
589         if (test->scratch != msr_index)
590             report(false, "MSR 0x%lx read intercept", msr_index);
591 
592         /*
593          * Poor man's approach to generate a value that
594          * seems arbitrary each time around the loop.
595          */
596         msr_value += (msr_value << 1);
597 
598         wrmsr(msr_index, msr_value);
599 
600         /* Check that a write intercept occurred for MSR with msr_value */
601         if (test->scratch != msr_value)
602             report(false, "MSR 0x%lx write intercept", msr_index);
603     }
604 
605     test->scratch = -2;
606 }
607 
608 static bool msr_intercept_finished(struct test *test)
609 {
610     u32 exit_code = test->vmcb->control.exit_code;
611     u64 exit_info_1;
612     u8 *opcode;
613 
614     if (exit_code == SVM_EXIT_MSR) {
615         exit_info_1 = test->vmcb->control.exit_info_1;
616     } else {
617         /*
618          * If #GP exception occurs instead, check that it was
619          * for RDMSR/WRMSR and set exit_info_1 accordingly.
620          */
621 
622         if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
623             return true;
624 
625         opcode = (u8 *)test->vmcb->save.rip;
626         if (opcode[0] != 0x0f)
627             return true;
628 
629         switch (opcode[1]) {
630         case 0x30: /* WRMSR */
631             exit_info_1 = 1;
632             break;
633         case 0x32: /* RDMSR */
634             exit_info_1 = 0;
635             break;
636         default:
637             return true;
638         }
639 
640         /*
641          * Warn that #GP exception occurred instead.
642          * RCX holds the MSR index.
643          */
644         printf("%s 0x%lx #GP exception\n",
645             exit_info_1 ? "WRMSR" : "RDMSR", regs.rcx);
646     }
647 
648     /* Jump over RDMSR/WRMSR instruction */
649     test->vmcb->save.rip += 2;
650 
651     /*
652      * Test whether the intercept was for RDMSR/WRMSR.
653      * For RDMSR, test->scratch is set to the MSR index;
654      *      RCX holds the MSR index.
655      * For WRMSR, test->scratch is set to the MSR value;
656      *      RDX holds the upper 32 bits of the MSR value,
657      *      while RAX holds its lower 32 bits.
658      */
659     if (exit_info_1)
660         test->scratch =
661             ((regs.rdx << 32) | (test->vmcb->save.rax & 0xffffffff));
662     else
663         test->scratch = regs.rcx;
664 
665     return false;
666 }
667 
668 static bool check_msr_intercept(struct test *test)
669 {
670     memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
671     return (test->scratch == -2);
672 }
673 
674 static void prepare_mode_switch(struct test *test)
675 {
676     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
677                                              |  (1ULL << UD_VECTOR)
678                                              |  (1ULL << DF_VECTOR)
679                                              |  (1ULL << PF_VECTOR);
680     test->scratch = 0;
681 }
682 
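/*
 * Guest code that switches all the way down from 64-bit long mode to 32-bit
 * protected mode, 16-bit protected mode and finally real mode, issues a
 * VMMCALL there, then climbs back up to long mode and issues a second
 * VMMCALL. mode_switch_finished() checks CR0/CR4/EFER at both exits.
 */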
683 static void test_mode_switch(struct test *test)
684 {
685     asm volatile("	cli\n"
686 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
687 		 "1:\n"
688 		 "	.long 2f\n"
689 		 "	.long " xstr(KERNEL_CS32) "\n"
690 		 ".code32\n"
691 		 "2:\n"
692 		 "	movl %%cr0, %%eax\n"
693 		 "	btcl  $31, %%eax\n" /* clear PG */
694 		 "	movl %%eax, %%cr0\n"
695 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
696 		 "	rdmsr\n"
697 		 "	btcl $8, %%eax\n" /* clear LME */
698 		 "	wrmsr\n"
699 		 "	movl %%cr4, %%eax\n"
700 		 "	btcl $5, %%eax\n" /* clear PAE */
701 		 "	movl %%eax, %%cr4\n"
702 		 "	movw %[ds16], %%ax\n"
703 		 "	movw %%ax, %%ds\n"
704 		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
705 		 ".code16\n"
706 		 "3:\n"
707 		 "	movl %%cr0, %%eax\n"
708 		 "	btcl $0, %%eax\n" /* clear PE  */
709 		 "	movl %%eax, %%cr0\n"
710 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
711 		 "4:\n"
712 		 "	vmmcall\n"
713 		 "	movl %%cr0, %%eax\n"
714 		 "	btsl $0, %%eax\n" /* set PE  */
715 		 "	movl %%eax, %%cr0\n"
716 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
717 		 ".code32\n"
718 		 "5:\n"
719 		 "	movl %%cr4, %%eax\n"
720 		 "	btsl $5, %%eax\n" /* set PAE */
721 		 "	movl %%eax, %%cr4\n"
722 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
723 		 "	rdmsr\n"
724 		 "	btsl $8, %%eax\n" /* set LME */
725 		 "	wrmsr\n"
726 		 "	movl %%cr0, %%eax\n"
727 		 "	btsl  $31, %%eax\n" /* set PG */
728 		 "	movl %%eax, %%cr0\n"
729 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
730 		 ".code64\n\t"
731 		 "6:\n"
732 		 "	vmmcall\n"
733 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
734 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
735 		 : "rax", "rbx", "rcx", "rdx", "memory");
736 }
737 
738 static bool mode_switch_finished(struct test *test)
739 {
740     u64 cr0, cr4, efer;
741 
742     cr0  = test->vmcb->save.cr0;
743     cr4  = test->vmcb->save.cr4;
744     efer = test->vmcb->save.efer;
745 
746     /* Only expect VMMCALL intercepts */
747     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
748 	    return true;
749 
750     /* Jump over VMMCALL instruction */
751     test->vmcb->save.rip += 3;
752 
753     /* Do sanity checks */
754     switch (test->scratch) {
755     case 0:
756         /* Test should be in real mode now - check for this */
757         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
758             (cr4  & 0x00000020) || /* CR4.PAE */
759             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
760                 return true;
761         break;
762     case 2:
763         /* Test should be back in long-mode now - check for this */
764         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
765             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
766             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
767 		    return true;
768 	break;
769     }
770 
771     /* one step forward */
772     test->scratch += 1;
773 
774     return test->scratch == 2;
775 }
776 
777 static bool check_mode_switch(struct test *test)
778 {
779 	return test->scratch == 2;
780 }
781 
782 static void prepare_ioio(struct test *test)
783 {
784     test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
785     test->scratch = 0;
786     memset(io_bitmap, 0, 8192);
787     io_bitmap[8192] = 0xFF;
788 }
789 
790 static int get_test_stage(struct test *test)
791 {
792     barrier();
793     return test->scratch;
794 }
795 
796 static void set_test_stage(struct test *test, int s)
797 {
798     barrier();
799     test->scratch = s;
800     barrier();
801 }
802 
803 static void inc_test_stage(struct test *test)
804 {
805     barrier();
806     test->scratch++;
807     barrier();
808 }
809 
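/*
 * Every intercepted I/O access bumps the test stage in ioio_finished() and
 * clears the corresponding permission-map bits, so the stage value expected
 * after each access below encodes whether that access was intercepted.
 */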
810 static void test_ioio(struct test *test)
811 {
812     // stage 0, test IO pass
813     inb(0x5000);
814     outb(0x0, 0x5000);
815     if (get_test_stage(test) != 0)
816         goto fail;
817 
818     // test IO width, in/out
819     io_bitmap[0] = 0xFF;
820     inc_test_stage(test);
821     inb(0x0);
822     if (get_test_stage(test) != 2)
823         goto fail;
824 
825     outw(0x0, 0x0);
826     if (get_test_stage(test) != 3)
827         goto fail;
828 
829     inl(0x0);
830     if (get_test_stage(test) != 4)
831         goto fail;
832 
833     // test low/high IO port
834     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
835     inb(0x5000);
836     if (get_test_stage(test) != 5)
837         goto fail;
838 
839     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
840     inw(0x9000);
841     if (get_test_stage(test) != 6)
842         goto fail;
843 
844     // test partial pass
845     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
846     inl(0x4FFF);
847     if (get_test_stage(test) != 7)
848         goto fail;
849 
850     // test across pages
851     inc_test_stage(test);
852     inl(0x7FFF);
853     if (get_test_stage(test) != 8)
854         goto fail;
855 
856     inc_test_stage(test);
857     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
858     inl(0x7FFF);
859     if (get_test_stage(test) != 10)
860         goto fail;
861 
862     io_bitmap[0] = 0;
863     inl(0xFFFF);
864     if (get_test_stage(test) != 11)
865         goto fail;
866 
867     io_bitmap[0] = 0xFF;
868     io_bitmap[8192] = 0;
869     inl(0xFFFF);
870     inc_test_stage(test);
871     if (get_test_stage(test) != 12)
872         goto fail;
873 
874     return;
875 
876 fail:
877     report(false, "stage %d", get_test_stage(test));
878     test->scratch = -1;
879 }
880 
881 static bool ioio_finished(struct test *test)
882 {
883     unsigned port, size;
884 
885     /* Only expect IOIO intercepts */
886     if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
887         return true;
888 
889     if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
890         return true;
891 
892     /* one step forward */
893     test->scratch += 1;
894 
895     port = test->vmcb->control.exit_info_1 >> 16;
896     size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
897 
898     while (size--) {
899         io_bitmap[port / 8] &= ~(1 << (port & 7));
900         port++;
901     }
902 
903     return false;
904 }
905 
906 static bool check_ioio(struct test *test)
907 {
908     memset(io_bitmap, 0, 8193);
909     return test->scratch != -1;
910 }
911 
912 static void prepare_asid_zero(struct test *test)
913 {
914     test->vmcb->control.asid = 0;
915 }
916 
917 static void test_asid_zero(struct test *test)
918 {
919     asm volatile ("vmmcall\n\t");
920 }
921 
922 static bool check_asid_zero(struct test *test)
923 {
924     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
925 }
926 
927 static void sel_cr0_bug_prepare(struct test *test)
928 {
929     vmcb_ident(test->vmcb);
930     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
931 }
932 
933 static bool sel_cr0_bug_finished(struct test *test)
934 {
935 	return true;
936 }
937 
938 static void sel_cr0_bug_test(struct test *test)
939 {
940     unsigned long cr0;
941 
942     /* read cr0, set CD, and write back */
943     cr0  = read_cr0();
944     cr0 |= (1UL << 30);
945     write_cr0(cr0);
946 
947     /*
948      * If we are here the test failed, and we are not in guest mode
949      * anymore, so we can't trigger an intercept to get back to the host.
950      * Report the failure and exit.
951      */
952     report(false, "sel_cr0 test. Can not recover from this - exiting");
953     exit(report_summary());
954 }
955 
956 static bool sel_cr0_bug_check(struct test *test)
957 {
958     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
959 }
960 
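/*
 * For the NPT tests below, exit_info_1 on an NPF exit carries a page-fault
 * style error code in its low bits; bit 32 is set when the fault happened on
 * the final guest-physical translation and bit 33 when it happened while the
 * guest's own page tables were being walked. For example, 0x100000015 is
 * present + user + instruction fetch on the final translation.
 */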
961 static void npt_nx_prepare(struct test *test)
962 {
963 
964     u64 *pte;
965 
966     vmcb_ident(test->vmcb);
967     pte = npt_get_pte((u64)null_test);
968 
969     *pte |= (1ULL << 63);
970 }
971 
972 static bool npt_nx_check(struct test *test)
973 {
974     u64 *pte = npt_get_pte((u64)null_test);
975 
976     *pte &= ~(1ULL << 63);
977 
978     test->vmcb->save.efer |= (1 << 11);
979 
980     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
981            && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
982 }
983 
984 static void npt_us_prepare(struct test *test)
985 {
986     u64 *pte;
987 
988     vmcb_ident(test->vmcb);
989     pte = npt_get_pte((u64)scratch_page);
990 
991     *pte &= ~(1ULL << 2);
992 }
993 
994 static void npt_us_test(struct test *test)
995 {
996     (void) *(volatile u64 *)scratch_page;
997 }
998 
999 static bool npt_us_check(struct test *test)
1000 {
1001     u64 *pte = npt_get_pte((u64)scratch_page);
1002 
1003     *pte |= (1ULL << 2);
1004 
1005     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1006            && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
1007 }
1008 
1009 u64 save_pde;
1010 
1011 static void npt_rsvd_prepare(struct test *test)
1012 {
1013     u64 *pde;
1014 
1015     vmcb_ident(test->vmcb);
1016     pde = npt_get_pde((u64) null_test);
1017 
1018     save_pde = *pde;
1019     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
1020 }
1021 
1022 static bool npt_rsvd_check(struct test *test)
1023 {
1024     u64 *pde = npt_get_pde((u64) null_test);
1025 
1026     *pde = save_pde;
1027 
1028     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1029             && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
1030 }
1031 
1032 static void npt_rw_prepare(struct test *test)
1033 {
1034 
1035     u64 *pte;
1036 
1037     vmcb_ident(test->vmcb);
1038     pte = npt_get_pte(0x80000);
1039 
1040     *pte &= ~(1ULL << 1);
1041 }
1042 
1043 static void npt_rw_test(struct test *test)
1044 {
1045     u64 *data = (void*)(0x80000);
1046 
1047     *data = 0;
1048 }
1049 
1050 static bool npt_rw_check(struct test *test)
1051 {
1052     u64 *pte = npt_get_pte(0x80000);
1053 
1054     *pte |= (1ULL << 1);
1055 
1056     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1057            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
1058 }
1059 
1060 static void npt_rw_pfwalk_prepare(struct test *test)
1061 {
1062 
1063     u64 *pte;
1064 
1065     vmcb_ident(test->vmcb);
1066     pte = npt_get_pte(read_cr3());
1067 
1068     *pte &= ~(1ULL << 1);
1069 }
1070 
1071 static bool npt_rw_pfwalk_check(struct test *test)
1072 {
1073     u64 *pte = npt_get_pte(read_cr3());
1074 
1075     *pte |= (1ULL << 1);
1076 
1077     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1078            && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
1079 	   && (test->vmcb->control.exit_info_2 == read_cr3());
1080 }
1081 
1082 static void npt_rsvd_pfwalk_prepare(struct test *test)
1083 {
1084 
1085     vmcb_ident(test->vmcb);
1086 
1087     pdpe[0] |= (1ULL << 8);
1088 }
1089 
1090 static bool npt_rsvd_pfwalk_check(struct test *test)
1091 {
1092     pdpe[0] &= ~(1ULL << 8);
1093 
1094     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1095             && (test->vmcb->control.exit_info_1 == 0x20000000eULL);
1096 }
1097 
1098 static void npt_l1mmio_prepare(struct test *test)
1099 {
1100     vmcb_ident(test->vmcb);
1101 }
1102 
1103 u32 nested_apic_version1;
1104 u32 nested_apic_version2;
1105 
1106 static void npt_l1mmio_test(struct test *test)
1107 {
1108     volatile u32 *data = (volatile void*)(0xfee00030UL);
1109 
1110     nested_apic_version1 = *data;
1111     nested_apic_version2 = *data;
1112 }
1113 
1114 static bool npt_l1mmio_check(struct test *test)
1115 {
1116     volatile u32 *data = (volatile void*)(0xfee00030);
1117     u32 lvr = *data;
1118 
1119     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
1120 }
1121 
1122 static void npt_rw_l1mmio_prepare(struct test *test)
1123 {
1124 
1125     u64 *pte;
1126 
1127     vmcb_ident(test->vmcb);
1128     pte = npt_get_pte(0xfee00080);
1129 
1130     *pte &= ~(1ULL << 1);
1131 }
1132 
1133 static void npt_rw_l1mmio_test(struct test *test)
1134 {
1135     volatile u32 *data = (volatile void*)(0xfee00080);
1136 
1137     *data = *data;
1138 }
1139 
1140 static bool npt_rw_l1mmio_check(struct test *test)
1141 {
1142     u64 *pte = npt_get_pte(0xfee00080);
1143 
1144     *pte |= (1ULL << 1);
1145 
1146     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
1147            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
1148 }
1149 
1150 #define TSC_ADJUST_VALUE    (1ll << 32)
1151 #define TSC_OFFSET_VALUE    (~0ull << 48)
1152 static bool ok;
1153 
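/*
 * The guest runs with a large negative TSC offset. Writes to IA32_TSC are
 * expected to move IA32_TSC_ADJUST by the same delta, so the test can check
 * both that the offset is applied and that TSC/TSC_ADJUST stay consistent
 * when the guest rewinds its TSC.
 */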
1154 static void tsc_adjust_prepare(struct test *test)
1155 {
1156     default_prepare(test);
1157     test->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
1158 
1159     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
1160     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1161     ok = adjust == -TSC_ADJUST_VALUE;
1162 }
1163 
1164 static void tsc_adjust_test(struct test *test)
1165 {
1166     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1167     ok &= adjust == -TSC_ADJUST_VALUE;
1168 
1169     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
1170     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
1171 
1172     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1173     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
1174 
1175     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
1176     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
1177 
1178     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
1179     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
1180 }
1181 
1182 static bool tsc_adjust_check(struct test *test)
1183 {
1184     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
1185 
1186     wrmsr(MSR_IA32_TSC_ADJUST, 0);
1187     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
1188 }
1189 
1190 static void latency_prepare(struct test *test)
1191 {
1192     default_prepare(test);
1193     runs = LATENCY_RUNS;
1194     latvmrun_min = latvmexit_min = -1ULL;
1195     latvmrun_max = latvmexit_max = 0;
1196     vmrun_sum = vmexit_sum = 0;
1197 }
1198 
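/*
 * tsc_start is taken in test_run() right before VMRUN and read again here,
 * giving the world-switch-in (VMRUN) latency; a fresh tsc_start is then
 * taken before VMMCALL and completed in latency_finished(), giving the
 * VMEXIT latency. This repeats LATENCY_RUNS times.
 */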
1199 static void latency_test(struct test *test)
1200 {
1201     u64 cycles;
1202 
1203 start:
1204     tsc_end = rdtsc();
1205 
1206     cycles = tsc_end - tsc_start;
1207 
1208     if (cycles > latvmrun_max)
1209         latvmrun_max = cycles;
1210 
1211     if (cycles < latvmrun_min)
1212         latvmrun_min = cycles;
1213 
1214     vmrun_sum += cycles;
1215 
1216     tsc_start = rdtsc();
1217 
1218     asm volatile ("vmmcall" : : : "memory");
1219     goto start;
1220 }
1221 
1222 static bool latency_finished(struct test *test)
1223 {
1224     u64 cycles;
1225 
1226     tsc_end = rdtsc();
1227 
1228     cycles = tsc_end - tsc_start;
1229 
1230     if (cycles > latvmexit_max)
1231         latvmexit_max = cycles;
1232 
1233     if (cycles < latvmexit_min)
1234         latvmexit_min = cycles;
1235 
1236     vmexit_sum += cycles;
1237 
1238     test->vmcb->save.rip += 3;
1239 
1240     runs -= 1;
1241 
1242     return runs == 0;
1243 }
1244 
1245 static bool latency_check(struct test *test)
1246 {
1247     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
1248             latvmrun_min, vmrun_sum / LATENCY_RUNS);
1249     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
1250             latvmexit_min, vmexit_sum / LATENCY_RUNS);
1251     return true;
1252 }
1253 
1254 static void lat_svm_insn_prepare(struct test *test)
1255 {
1256     default_prepare(test);
1257     runs = LATENCY_RUNS;
1258     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
1259     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
1260     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
1261 }
1262 
1263 static bool lat_svm_insn_finished(struct test *test)
1264 {
1265     u64 vmcb_phys = virt_to_phys(test->vmcb);
1266     u64 cycles;
1267 
1268     for ( ; runs != 0; runs--) {
1269         tsc_start = rdtsc();
1270         asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
1271         cycles = rdtsc() - tsc_start;
1272         if (cycles > latvmload_max)
1273             latvmload_max = cycles;
1274         if (cycles < latvmload_min)
1275             latvmload_min = cycles;
1276         vmload_sum += cycles;
1277 
1278         tsc_start = rdtsc();
1279         asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
1280         cycles = rdtsc() - tsc_start;
1281         if (cycles > latvmsave_max)
1282             latvmsave_max = cycles;
1283         if (cycles < latvmsave_min)
1284             latvmsave_min = cycles;
1285         vmsave_sum += cycles;
1286 
1287         tsc_start = rdtsc();
1288         asm volatile("stgi\n\t");
1289         cycles = rdtsc() - tsc_start;
1290         if (cycles > latstgi_max)
1291             latstgi_max = cycles;
1292         if (cycles < latstgi_min)
1293             latstgi_min = cycles;
1294         stgi_sum += cycles;
1295 
1296         tsc_start = rdtsc();
1297         asm volatile("clgi\n\t");
1298         cycles = rdtsc() - tsc_start;
1299         if (cycles > latclgi_max)
1300             latclgi_max = cycles;
1301         if (cycles < latclgi_min)
1302             latclgi_min = cycles;
1303         clgi_sum += cycles;
1304     }
1305 
1306     return true;
1307 }
1308 
1309 static bool lat_svm_insn_check(struct test *test)
1310 {
1311     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1312             latvmload_min, vmload_sum / LATENCY_RUNS);
1313     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1314             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1315     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1316             latstgi_min, stgi_sum / LATENCY_RUNS);
1317     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1318             latclgi_min, clgi_sum / LATENCY_RUNS);
1319     return true;
1320 }
1321 
1322 bool pending_event_ipi_fired;
1323 bool pending_event_guest_run;
1324 
1325 static void pending_event_ipi_isr(isr_regs_t *regs)
1326 {
1327     pending_event_ipi_fired = true;
1328     eoi();
1329 }
1330 
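/*
 * A self-IPI is posted while host interrupts are disabled, and the guest is
 * entered with the INTR intercept and V_INTR_MASKING set. The first VMRUN
 * should therefore exit with SVM_EXIT_INTR before any guest code runs; the
 * host then takes the IPI once it briefly enables interrupts, and a second
 * run with nothing pending must let the guest execute normally.
 */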
1331 static void pending_event_prepare(struct test *test)
1332 {
1333     int ipi_vector = 0xf1;
1334 
1335     default_prepare(test);
1336 
1337     pending_event_ipi_fired = false;
1338 
1339     handle_irq(ipi_vector, pending_event_ipi_isr);
1340 
1341     pending_event_guest_run = false;
1342 
1343     test->vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1344     test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1345 
1346     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1347                   APIC_DM_FIXED | ipi_vector, 0);
1348 
1349     set_test_stage(test, 0);
1350 }
1351 
1352 static void pending_event_test(struct test *test)
1353 {
1354     pending_event_guest_run = true;
1355 }
1356 
1357 static bool pending_event_finished(struct test *test)
1358 {
1359     switch (get_test_stage(test)) {
1360     case 0:
1361         if (test->vmcb->control.exit_code != SVM_EXIT_INTR) {
1362             report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
1363                    test->vmcb->control.exit_code);
1364             return true;
1365         }
1366 
1367         test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1368         test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1369 
1370         if (pending_event_guest_run) {
1371             report(false, "Guest ran before host received IPI");
1372             return true;
1373         }
1374 
1375         irq_enable();
1376         asm volatile ("nop");
1377         irq_disable();
1378 
1379         if (!pending_event_ipi_fired) {
1380             report(false, "Pending interrupt not dispatched after IRQ enabled");
1381             return true;
1382         }
1383         break;
1384 
1385     case 1:
1386         if (!pending_event_guest_run) {
1387             report(false, "Guest did not resume when no interrupt was pending");
1388             return true;
1389         }
1390         break;
1391     }
1392 
1393     inc_test_stage(test);
1394 
1395     return get_test_stage(test) == 2;
1396 }
1397 
1398 static bool pending_event_check(struct test *test)
1399 {
1400     return get_test_stage(test) == 2;
1401 }
1402 
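/*
 * Same self-IPI, but without an INTR intercept. On the first run
 * V_INTR_MASKING is clear, so the pending interrupt must be taken as soon as
 * the guest sets RFLAGS.IF (the guest shares the host IDT, so the same ISR
 * fires). On the second run V_INTR_MASKING is set and the guest is entered
 * with host RFLAGS.IF clear (the CLI in prepare_gif_clear), so the interrupt
 * must be held back until the host re-enables interrupts after the exit.
 */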
1403 static void pending_event_cli_prepare(struct test *test)
1404 {
1405     default_prepare(test);
1406 
1407     pending_event_ipi_fired = false;
1408 
1409     handle_irq(0xf1, pending_event_ipi_isr);
1410 
1411     apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1412               APIC_DM_FIXED | 0xf1, 0);
1413 
1414     set_test_stage(test, 0);
1415 }
1416 
1417 static void pending_event_cli_prepare_gif_clear(struct test *test)
1418 {
1419     asm("cli");
1420 }
1421 
1422 static void pending_event_cli_test(struct test *test)
1423 {
1424     if (pending_event_ipi_fired == true) {
1425         set_test_stage(test, -1);
1426         report(false, "Interrupt preceded guest");
1427         vmmcall();
1428     }
1429 
1430     /* VINTR_MASKING is zero.  This should cause the IPI to fire.  */
1431     irq_enable();
1432     asm volatile ("nop");
1433     irq_disable();
1434 
1435     if (pending_event_ipi_fired != true) {
1436         set_test_stage(test, -1);
1437         report(false, "Interrupt not triggered by guest");
1438     }
1439 
1440     vmmcall();
1441 
1442     /*
1443      * Now VINTR_MASKING=1, but no interrupt is pending so
1444      * the VINTR interception should be clear in VMCB02.  Check
1445      * that L0 did not leave a stale VINTR in the VMCB.
1446      */
1447     irq_enable();
1448     asm volatile ("nop");
1449     irq_disable();
1450 }
1451 
1452 static bool pending_event_cli_finished(struct test *test)
1453 {
1454     if ( test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1455         report(false, "VM_EXIT return to host is not EXIT_VMMCALL. Exit reason 0x%x",
1456                test->vmcb->control.exit_code);
1457         return true;
1458     }
1459 
1460     switch (get_test_stage(test)) {
1461     case 0:
1462         test->vmcb->save.rip += 3;
1463 
1464         pending_event_ipi_fired = false;
1465 
1466         test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1467 
1468 	/* Now entering again with VINTR_MASKING=1.  */
1469         apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
1470               APIC_DM_FIXED | 0xf1, 0);
1471 
1472         break;
1473 
1474     case 1:
1475         if (pending_event_ipi_fired == true) {
1476             report(false, "Interrupt triggered by guest");
1477             return true;
1478         }
1479 
1480         irq_enable();
1481         asm volatile ("nop");
1482         irq_disable();
1483 
1484         if (pending_event_ipi_fired != true) {
1485             report(false, "Interrupt not triggered by host");
1486             return true;
1487         }
1488 
1489         break;
1490 
1491     default:
1492         return true;
1493     }
1494 
1495     inc_test_stage(test);
1496 
1497     return get_test_stage(test) == 2;
1498 }
1499 
1500 static bool pending_event_cli_check(struct test *test)
1501 {
1502     return get_test_stage(test) == 2;
1503 }
1504 
1505 #define TIMER_VECTOR    222
1506 
1507 static volatile bool timer_fired;
1508 
1509 static void timer_isr(isr_regs_t *regs)
1510 {
1511     timer_fired = true;
1512     apic_write(APIC_EOI, 0);
1513 }
1514 
1515 static void interrupt_prepare(struct test *test)
1516 {
1517     default_prepare(test);
1518     handle_irq(TIMER_VECTOR, timer_isr);
1519     timer_fired = false;
1520     set_test_stage(test, 0);
1521 }
1522 
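/*
 * Exercise four combinations: an APIC timer interrupt taken directly by the
 * guest or intercepted by the host (interrupt_finished() toggles the INTR
 * intercept and V_INTR_MASKING between the VMMCALL exits), each once while
 * busy-waiting and once while halted.
 */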
1523 static void interrupt_test(struct test *test)
1524 {
1525     long long start, loops;
1526 
1527     apic_write(APIC_LVTT, TIMER_VECTOR);
1528     irq_enable();
1529     apic_write(APIC_TMICT, 1); // Timer Initial Count Register 0x380, one-shot
1530     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1531         asm volatile ("nop");
1532 
1533     report(timer_fired, "direct interrupt while running guest");
1534 
1535     if (!timer_fired) {
1536         set_test_stage(test, -1);
1537         vmmcall();
1538     }
1539 
1540     apic_write(APIC_TMICT, 0);
1541     irq_disable();
1542     vmmcall();
1543 
1544     timer_fired = false;
1545     apic_write(APIC_TMICT, 1);
1546     for (loops = 0; loops < 10000000 && !timer_fired; loops++)
1547         asm volatile ("nop");
1548 
1549     report(timer_fired, "intercepted interrupt while running guest");
1550 
1551     if (!timer_fired) {
1552         set_test_stage(test, -1);
1553         vmmcall();
1554     }
1555 
1556     irq_enable();
1557     apic_write(APIC_TMICT, 0);
1558     irq_disable();
1559 
1560     timer_fired = false;
1561     start = rdtsc();
1562     apic_write(APIC_TMICT, 1000000);
1563     asm volatile ("sti; hlt");
1564 
1565     report(rdtsc() - start > 10000 && timer_fired,
1566           "direct interrupt + hlt");
1567 
1568     if (!timer_fired) {
1569         set_test_stage(test, -1);
1570         vmmcall();
1571     }
1572 
1573     apic_write(APIC_TMICT, 0);
1574     irq_disable();
1575     vmmcall();
1576 
1577     timer_fired = false;
1578     start = rdtsc();
1579     apic_write(APIC_TMICT, 1000000);
1580     asm volatile ("hlt");
1581 
1582     report(rdtsc() - start > 10000 && timer_fired,
1583            "intercepted interrupt + hlt");
1584 
1585     if (!timer_fired) {
1586         set_test_stage(test, -1);
1587         vmmcall();
1588     }
1589 
1590     apic_write(APIC_TMICT, 0);
1591     irq_disable();
1592 }
1593 
1594 static bool interrupt_finished(struct test *test)
1595 {
1596     switch (get_test_stage(test)) {
1597     case 0:
1598     case 2:
1599         if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
1600             report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
1601                    test->vmcb->control.exit_code);
1602             return true;
1603         }
1604         test->vmcb->save.rip += 3;
1605 
1606         test->vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
1607         test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
1608         break;
1609 
1610     case 1:
1611     case 3:
1612         if (test->vmcb->control.exit_code != SVM_EXIT_INTR) {
1613             report(false, "VMEXIT not due to intr intercept. Exit reason 0x%x",
1614                    test->vmcb->control.exit_code);
1615             return true;
1616         }
1617 
1618         irq_enable();
1619         asm volatile ("nop");
1620         irq_disable();
1621 
1622         test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
1623         test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1624         break;
1625 
1626     case 4:
1627         break;
1628 
1629     default:
1630         return true;
1631     }
1632 
1633     inc_test_stage(test);
1634 
1635     return get_test_stage(test) == 5;
1636 }
1637 
1638 static bool interrupt_check(struct test *test)
1639 {
1640     return get_test_stage(test) == 5;
1641 }
1642 
1643 static struct test tests[] = {
1644     { "null", default_supported, default_prepare,
1645       default_prepare_gif_clear, null_test,
1646       default_finished, null_check },
1647     { "vmrun", default_supported, default_prepare,
1648       default_prepare_gif_clear, test_vmrun,
1649        default_finished, check_vmrun },
1650     { "ioio", default_supported, prepare_ioio,
1651        default_prepare_gif_clear, test_ioio,
1652        ioio_finished, check_ioio },
1653     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
1654       default_prepare_gif_clear, null_test, default_finished,
1655       check_no_vmrun_int },
1656     { "cr3 read intercept", default_supported,
1657       prepare_cr3_intercept, default_prepare_gif_clear,
1658       test_cr3_intercept, default_finished, check_cr3_intercept },
1659     { "cr3 read nointercept", default_supported, default_prepare,
1660       default_prepare_gif_clear, test_cr3_intercept, default_finished,
1661       check_cr3_nointercept },
1662     { "cr3 read intercept emulate", smp_supported,
1663       prepare_cr3_intercept_bypass, default_prepare_gif_clear,
1664       test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
1665     { "dr intercept check", default_supported, prepare_dr_intercept,
1666       default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
1667       check_dr_intercept },
1668     { "next_rip", next_rip_supported, prepare_next_rip,
1669       default_prepare_gif_clear, test_next_rip,
1670       default_finished, check_next_rip },
1671     { "msr intercept check", default_supported, prepare_msr_intercept,
1672       default_prepare_gif_clear, test_msr_intercept,
1673       msr_intercept_finished, check_msr_intercept },
1674     { "mode_switch", default_supported, prepare_mode_switch,
1675       default_prepare_gif_clear, test_mode_switch,
1676        mode_switch_finished, check_mode_switch },
1677     { "asid_zero", default_supported, prepare_asid_zero,
1678       default_prepare_gif_clear, test_asid_zero,
1679        default_finished, check_asid_zero },
1680     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
1681       default_prepare_gif_clear, sel_cr0_bug_test,
1682        sel_cr0_bug_finished, sel_cr0_bug_check },
1683     { "npt_nx", npt_supported, npt_nx_prepare,
1684       default_prepare_gif_clear, null_test,
1685       default_finished, npt_nx_check },
1686     { "npt_us", npt_supported, npt_us_prepare,
1687       default_prepare_gif_clear, npt_us_test,
1688       default_finished, npt_us_check },
1689     { "npt_rsvd", npt_supported, npt_rsvd_prepare,
1690       default_prepare_gif_clear, null_test,
1691       default_finished, npt_rsvd_check },
1692     { "npt_rw", npt_supported, npt_rw_prepare,
1693       default_prepare_gif_clear, npt_rw_test,
1694       default_finished, npt_rw_check },
1695     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare,
1696       default_prepare_gif_clear, null_test,
1697       default_finished, npt_rsvd_pfwalk_check },
1698     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
1699       default_prepare_gif_clear, null_test,
1700       default_finished, npt_rw_pfwalk_check },
1701     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
1702       default_prepare_gif_clear, npt_l1mmio_test,
1703       default_finished, npt_l1mmio_check },
1704     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
1705       default_prepare_gif_clear, npt_rw_l1mmio_test,
1706       default_finished, npt_rw_l1mmio_check },
1707     { "tsc_adjust", default_supported, tsc_adjust_prepare,
1708       default_prepare_gif_clear, tsc_adjust_test,
1709       default_finished, tsc_adjust_check },
1710     { "latency_run_exit", default_supported, latency_prepare,
1711       default_prepare_gif_clear, latency_test,
1712       latency_finished, latency_check },
1713     { "latency_svm_insn", default_supported, lat_svm_insn_prepare,
1714       default_prepare_gif_clear, null_test,
1715       lat_svm_insn_finished, lat_svm_insn_check },
1716     { "pending_event", default_supported, pending_event_prepare,
1717       default_prepare_gif_clear,
1718       pending_event_test, pending_event_finished, pending_event_check },
1719     { "pending_event_cli", default_supported, pending_event_cli_prepare,
1720       pending_event_cli_prepare_gif_clear,
1721       pending_event_cli_test, pending_event_cli_finished,
1722       pending_event_cli_check },
1723     { "interrupt", default_supported, interrupt_prepare,
1724       default_prepare_gif_clear, interrupt_test,
1725       interrupt_finished, interrupt_check },
1726 };
1727 
1728 int matched;
1729 
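/*
 * Match a test name (spaces replaced with underscores) against the command
 * line: globs prefixed with '-' exclude a test, any other glob includes it,
 * and if no inclusive filter is given every test not excluded is run.
 */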
1730 static bool
1731 test_wanted(const char *name, char *filters[], int filter_count)
1732 {
1733         int i;
1734         bool positive = false;
1735         bool match = false;
1736         char clean_name[strlen(name) + 1];
1737         char *c;
1738         const char *n;
1739 
1740         /* Replace spaces with underscores. */
1741         n = name;
1742         c = &clean_name[0];
1743         do *c++ = (*n == ' ') ? '_' : *n;
1744         while (*n++);
1745 
1746         for (i = 0; i < filter_count; i++) {
1747                 const char *filter = filters[i];
1748 
1749                 if (filter[0] == '-') {
1750                         if (simple_glob(clean_name, filter + 1))
1751                                 return false;
1752                 } else {
1753                         positive = true;
1754                         match |= simple_glob(clean_name, filter);
1755                 }
1756         }
1757 
1758         if (!positive || match) {
1759                 matched++;
1760                 return true;
1761         } else {
1762                 return false;
1763         }
1764 }
1765 
1766 int main(int ac, char **av)
1767 {
1768     int i, nr;
1769     struct vmcb *vmcb;
1770 
1771     ac--;
1772     av++;
1773 
1774     setup_vm();
1775     smp_init();
1776 
1777     if (!this_cpu_has(X86_FEATURE_SVM)) {
1778         printf("SVM not available\n");
1779         return report_summary();
1780     }
1781 
1782     setup_svm();
1783 
1784     vmcb = alloc_page();
1785 
1786     nr = ARRAY_SIZE(tests);
1787     for (i = 0; i < nr; ++i) {
1788         if (!test_wanted(tests[i].name, av, ac) || !tests[i].supported())
1789             continue;
1790         test_run(&tests[i], vmcb);
1791     }
1792 
1793     if (!matched)
1794         report(matched, "command line didn't match any tests!");
1795 
1796     return report_summary();
1797 }
1798