#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"

#define SVM_EXIT_MAX_DR_INTERCEPT 0x3f

/* For the nested page table */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

u8 *io_bitmap;
u8 io_bitmap_area[16384];

#define MSR_BITMAP_SIZE 8192

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];

static bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

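/*
 * Enable SVM on this CPU: register a host save area and set EFER.SVME
 * (plus EFER.NX for the NPT NX test), then hand out page-aligned I/O
 * and MSR permission bitmaps carved from the static areas above.  When
 * nested paging is available, also build a 4GB identity-mapped nested
 * page table out of 4k pages so the NPT tests can flip bits on
 * individual entries.
 */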
static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

    msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - build a nested page table.
     * Build the page table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit tests.
     */

    address = 0;

    /* PTE level */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;

        pte[i] = page;
    }

    /* PDE level */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

        pde[i] = page;
    }

    /* PDPe level */
    pdpe   = alloc_page();
    for (i = 0; i < 4; ++i)
       pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e    = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}

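/*
 * Return a pointer to the nested-page-table entry covering a
 * guest-physical address.  The tables above identity-map the first 4GB,
 * so they can be indexed directly with address bits: bits 31:30 select
 * one of the four PDE pages and bits 29:21 the entry within it; for
 * PTEs, bits 31:21 select one of the 2048 PTE pages and bits 20:12 the
 * entry.
 */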
static u64 *npt_get_pde(u64 address)
{
    int i1, i2;

    address >>= 21;
    i1 = (address >> 9) & 0x3;
    i2 = address & 0x1ff;

    return &pde[i1][i2];
}

static u64 *npt_get_pte(u64 address)
{
    int i1, i2;

    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}

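/*
 * Initialize a VMCB so the guest mirrors the host: segments, control
 * registers and the relevant MSR state are copied from the current CPU.
 * Only VMRUN (which must always be intercepted) and VMMCALL are
 * intercepted - the minimum needed to enter a guest and get back out -
 * and nested paging is enabled when supported.
 */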
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
    ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
    ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}

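/*
 * A test case.  prepare() runs in the host and sets up the VMCB,
 * guest_func() runs inside the guest, finished() runs in the host after
 * every #VMEXIT and returns true once the test is done, and succeeded()
 * delivers the final verdict.  scratch is shared guest/host state.
 */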
struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static inline void vmmcall(void)
{
    asm volatile ("vmmcall" : : : "memory");
}

static void test_thunk(struct test *test)
{
    test->guest_func(test);
    vmmcall();
}

struct regs {
        u64 rax;
        u64 rbx;
        u64 rcx;
        u64 rdx;
        u64 cr2;
        u64 rbp;
        u64 rsi;
        u64 rdi;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
        u64 r12;
        u64 r13;
        u64 r14;
        u64 r15;
        u64 rflags;
};

struct regs regs;

// rax handled specially below

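/*
 * Exchange the guest GPRs kept in the global 'regs' struct with the
 * live register values.  Since xchg swaps in both directions, the same
 * macro sequence loads guest state before VMRUN and saves it again
 * afterwards; the constants are the byte offsets of the fields in
 * struct regs.
 */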
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C

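/*
 * Run one test case: enter the guest at test_thunk() on a private stack
 * and keep re-entering with VMRUN until finished() reports completion.
 * The guest's GPRs live in the global 'regs' struct and are swapped in
 * and out around each VMRUN; RAX and RFLAGS are staged through slots in
 * the VMCB instead, since RAX itself carries the VMCB address.  GIF is
 * closed with CLGI around the world switch.
 */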
static void test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];

    irq_disable();
    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    regs.rdi = (ulong)test;
    do {
        tsc_start = rdtsc();
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "mov regs+0x80, %%r15\n\t"  // rflags
            "mov %%r15, 0x170(%0)\n\t"
            "mov regs, %%r15\n\t"       // rax
            "mov %%r15, 0x1f8(%0)\n\t"
            LOAD_GPR_C
            "sti \n\t"                  // only used if V_INTR_MASKING=1
            "vmrun \n\t"
            "cli \n\t"
            SAVE_GPR_C
            "mov 0x170(%0), %%r15\n\t"  // rflags
            "mov %%r15, regs+0x80\n\t"
            "mov 0x1f8(%0), %%r15\n\t"  // rax
            "mov %%r15, regs\n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));
    irq_enable();

    report("%s", test->succeeded(test), test->name);
}

static bool smp_supported(void)
{
	return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

static void prepare_dr_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_dr_read = 0xff;
    test->vmcb->control.intercept_dr_write = 0xff;
}

static void test_dr_intercept(struct test *test)
{
    unsigned int i, failcnt = 0;

    /* Loop testing debug register reads */
    for (i = 0; i < 8; i++) {

        switch (i) {
        case 0:
            asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 1:
            asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 2:
            asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 3:
            asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 4:
            asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 5:
            asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 6:
            asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
            break;
        case 7:
            asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
            break;
        }

        if (test->scratch != i) {
            report("dr%u read intercept", false, i);
            failcnt++;
        }
    }

    /* Loop testing debug register writes */
    for (i = 0; i < 8; i++) {

        switch (i) {
        case 0:
            asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
            break;
        case 1:
            asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
            break;
        case 2:
            asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
            break;
        case 3:
            asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
            break;
        case 4:
            asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
            break;
        case 5:
            asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
            break;
        case 6:
            asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
            break;
        case 7:
            asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
            break;
        }

        if (test->scratch != i) {
            report("dr%u write intercept", false, i);
            failcnt++;
        }
    }

    test->scratch = failcnt;
}

static bool dr_intercept_finished(struct test *test)
{
    ulong n = (test->vmcb->control.exit_code - SVM_EXIT_READ_DR0);

    /* Only expect DR intercepts */
    if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
        return true;

    /*
     * Compute debug register number.
     * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
     * Programmer's Manual Volume 2 - System Programming:
     * http://support.amd.com/TechDocs/24593.pdf
     * there are 16 VMEXIT codes each for DR read and write.
     */
    test->scratch = (n % 16);

    /* Jump over MOV instruction */
    test->vmcb->save.rip += 3;

    return false;
}

static bool check_dr_intercept(struct test *test)
{
    return !test->scratch;
}

static bool next_rip_supported(void)
{
	return this_cpu_has(X86_FEATURE_NRIPS);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}

static void prepare_msr_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
    memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
}

static void test_msr_intercept(struct test *test)
{
    unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
    unsigned long msr_index;

    for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
        if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
            /*
             * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
             * Programmer's Manual volume 2 - System Programming:
             * http://support.amd.com/TechDocs/24593.pdf
             * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
             */
            continue;
        }

        /* Skip gaps between supported MSR ranges */
        if (msr_index == 0x2000)
            msr_index = 0xc0000000;
        else if (msr_index == 0xc0002000)
            msr_index = 0xc0010000;

        test->scratch = -1;

        rdmsr(msr_index);

        /* Check that a read intercept occurred for MSR at msr_index */
        if (test->scratch != msr_index)
            report("MSR 0x%lx read intercept", false, msr_index);

        /*
         * Poor man's approach to generating a value that
         * seems arbitrary each time around the loop.
         */
        msr_value += (msr_value << 1);

        wrmsr(msr_index, msr_value);

        /* Check that a write intercept occurred for MSR with msr_value */
        if (test->scratch != msr_value)
            report("MSR 0x%lx write intercept", false, msr_index);
    }

    test->scratch = -2;
}

static bool msr_intercept_finished(struct test *test)
{
    u32 exit_code = test->vmcb->control.exit_code;
    u64 exit_info_1;
    u8 *opcode;

    if (exit_code == SVM_EXIT_MSR) {
        exit_info_1 = test->vmcb->control.exit_info_1;
    } else {
        /*
         * If a #GP exception occurs instead, check that it was
         * for RDMSR/WRMSR and set exit_info_1 accordingly.
         */

        if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
            return true;

        opcode = (u8 *)test->vmcb->save.rip;
        if (opcode[0] != 0x0f)
            return true;

        switch (opcode[1]) {
        case 0x30: /* WRMSR */
            exit_info_1 = 1;
            break;
        case 0x32: /* RDMSR */
            exit_info_1 = 0;
            break;
        default:
            return true;
        }

        /*
         * Warn that a #GP exception occurred instead.
         * RCX holds the MSR index.
         */
        printf("%s 0x%lx #GP exception\n",
            exit_info_1 ? "WRMSR" : "RDMSR", regs.rcx);
    }

    /* Jump over the RDMSR/WRMSR instruction */
    test->vmcb->save.rip += 2;

    /*
     * Test whether the intercept was for RDMSR or WRMSR.
     * For RDMSR, test->scratch is set to the MSR index;
     *      RCX holds the MSR index.
     * For WRMSR, test->scratch is set to the MSR value;
     *      RDX holds the upper 32 bits of the MSR value,
     *      while RAX holds its lower 32 bits.
     */
    if (exit_info_1)
        test->scratch =
            ((regs.rdx << 32) | (test->vmcb->save.rax & 0xffffffff));
    else
        test->scratch = regs.rcx;

    return false;
}

static bool check_msr_intercept(struct test *test)
{
    memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
    return (test->scratch == -2);
}

static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}

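/*
 * Walk the guest down from 64-bit long mode to real mode and back:
 * switch to a 32-bit code segment, clear CR0.PG, EFER.LME and CR4.PAE,
 * hop through a 16-bit segment, clear CR0.PE for real mode, then undo
 * each step in reverse order.  The VMMCALLs in real mode and back in
 * long mode hand control to mode_switch_finished() so the host can
 * sanity-check CR0/CR4/EFER along the way.
 */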
static void test_mode_switch(struct test *test)
{
    asm volatile("	cli\n"
		 "	ljmp *1f\n" /* jump to 32-bit code segment */
		 "1:\n"
		 "	.long 2f\n"
		 "	.long " xstr(KERNEL_CS32) "\n"
		 ".code32\n"
		 "2:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl  $31, %%eax\n" /* clear PG */
		 "	movl %%eax, %%cr0\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btcl $8, %%eax\n" /* clear LME */
		 "	wrmsr\n"
		 "	movl %%cr4, %%eax\n"
		 "	btcl $5, %%eax\n" /* clear PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movw %[ds16], %%ax\n"
		 "	movw %%ax, %%ds\n"
		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
		 ".code16\n"
		 "3:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl $0, %%eax\n" /* clear PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $0, $4f\n"   /* jump to real mode */
		 "4:\n"
		 "	vmmcall\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl $0, %%eax\n" /* set PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		 ".code32\n"
		 "5:\n"
		 "	movl %%cr4, %%eax\n"
		 "	btsl $5, %%eax\n" /* set PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btsl $8, %%eax\n" /* set LME */
		 "	wrmsr\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl  $31, %%eax\n" /* set PG */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
		 ".code64\n\t"
		 "6:\n"
		 "	vmmcall\n"
		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		 : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    case 2:
        /* Test should be back in long mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}

static void prepare_ioio(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;
}

static int get_test_stage(struct test *test)
{
    barrier();
    return test->scratch;
}

static void set_test_stage(struct test *test, int s)
{
    barrier();
    test->scratch = s;
    barrier();
}

static void inc_test_stage(struct test *test)
{
    barrier();
    test->scratch++;
    barrier();
}

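/*
 * Exercise the I/O permission map.  Each port has one bit in the IOPM
 * and an access is intercepted if any bit covered by its width is set.
 * The guest touches ports after flipping bitmap bits; every intercept
 * advances the stage counter in ioio_finished(), which also clears the
 * offending bits, so the stage value tells the guest exactly how many
 * intercepts each access generated.
 */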
static void test_ioio(struct test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;

    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;

    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;

    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;

    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;

    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;

    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    report("stage %d", false, get_test_stage(test));
    test->scratch = -1;
}

static bool ioio_finished(struct test *test)
{
    unsigned port, size;

    /* Only expect IOIO intercepts */
    if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;

    if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    port = test->vmcb->control.exit_info_1 >> 16;
    size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}

static bool check_ioio(struct test *test)
{
    memset(io_bitmap, 0, 8193);
    return test->scratch != -1;
}

static void prepare_asid_zero(struct test *test)
{
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write back */
    cr0  = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed: the write was not intercepted. We
     * are not in guest mode anymore, so we cannot trigger the intercept
     * again.  Report the failure and exit.
     */
    report("sel_cr0 test. Cannot recover from this - exiting", false);
    exit(report_summary());
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

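/*
 * The NPT tests share one pattern: prepare() flips a permission or
 * reserved bit in the nested page table, the guest touches the affected
 * address, and the check verifies a #VMEXIT(NPF) with the expected
 * error code in exit_info_1 - bit 32 set when the fault hit the final
 * guest-physical access, bit 33 when it hit the nested walk of a guest
 * page-table access.
 */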
static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)null_test);

    *pte |= (1ULL << 63);
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    test->vmcb->save.efer |= (1 << 11);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);
}

static void npt_us_test(struct test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
}

u64 save_pde;

static void npt_rsvd_prepare(struct test *test)
{
    u64 *pde;

    vmcb_ident(test->vmcb);
    pde = npt_get_pde((u64) null_test);

    save_pde = *pde;
    *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
}

static bool npt_rsvd_check(struct test *test)
{
    u64 *pde = npt_get_pde((u64) null_test);

    *pde = save_pde;

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
            && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
}

static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0x80000);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void*)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = npt_get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void npt_rw_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_rw_pfwalk_check(struct test *test)
{
    u64 *pte = npt_get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
           && (test->vmcb->control.exit_info_2 == read_cr3());
}

static void npt_rsvd_pfwalk_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_pfwalk_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
            && (test->vmcb->control.exit_info_1 == 0x20000000eULL);
}

static void npt_l1mmio_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030UL);

    nested_apic_version1 = *data;
    nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030);
    u32 lvr = *data;

    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void npt_rw_l1mmio_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0xfee00080);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00080);

    *data = *data;
}

static bool npt_rw_l1mmio_check(struct test *test)
{
    u64 *pte = npt_get_pte(0xfee00080);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

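/*
 * IA32_TSC_ADJUST tracks direct writes to IA32_TSC: writing the TSC
 * moves TSC_ADJUST by the same delta, while the VMCB tsc_offset only
 * shifts what the guest observes.  The test writes the TSC from inside
 * the guest and checks that TSC_ADJUST, the offset, and the TSC values
 * seen via RDTSC and via the MSR all stay consistent.
 */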
#define TSC_ADJUST_VALUE    (1ll << 32)
#define TSC_OFFSET_VALUE    (-1ll << 48)
static bool ok;

static void tsc_adjust_prepare(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;

    wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
    int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
    ok = adjust == -TSC_ADJUST_VALUE;
}

static void tsc_adjust_test(struct test *test)
{
    int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
    ok &= adjust == -TSC_ADJUST_VALUE;

    uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
    wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

    adjust = rdmsr(MSR_IA32_TSC_ADJUST);
    ok &= adjust <= -2 * TSC_ADJUST_VALUE;

    uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
    ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;

    uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
    ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
}

static bool tsc_adjust_check(struct test *test)
{
    int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

    wrmsr(MSR_IA32_TSC_ADJUST, 0);
    return ok && adjust <= -2 * TSC_ADJUST_VALUE;
}

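/*
 * Measure VMRUN and #VMEXIT round-trip latencies over LATENCY_RUNS
 * iterations: tsc_start is taken in the host right before VMRUN and
 * read by the guest to account a VMRUN, then taken again by the guest
 * right before VMMCALL so latency_finished() can account the VMEXIT.
 */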
static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    test->vmcb->save.rip += 3;

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
            latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
            latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}

static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
            latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
            latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
            latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
            latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

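/*
 * Check interrupt delivery around VMRUN with V_INTR_MASKING: a self-IPI
 * is sent while host interrupts are disabled, so the first VMRUN must
 * exit with SVM_EXIT_INTR before the guest executes, and the IPI must
 * reach the host ISR as soon as interrupts are re-enabled.  A second
 * run with no interrupt pending must let the guest execute normally.
 */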
bool pending_event_ipi_fired;
bool pending_event_guest_run;

static void pending_event_ipi_isr(isr_regs_t *regs)
{
    pending_event_ipi_fired = true;
    eoi();
}

static void pending_event_prepare(struct test *test)
{
    int ipi_vector = 0xf1;

    default_prepare(test);

    pending_event_ipi_fired = false;

    handle_irq(ipi_vector, pending_event_ipi_isr);

    pending_event_guest_run = false;

    test->vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
    test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

    apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
                   APIC_DM_FIXED | ipi_vector, 0);

    set_test_stage(test, 0);
}

static void pending_event_test(struct test *test)
{
    pending_event_guest_run = true;
}

static bool pending_event_finished(struct test *test)
{
    switch (get_test_stage(test)) {
    case 0:
        if (test->vmcb->control.exit_code != SVM_EXIT_INTR) {
            report("VMEXIT not due to pending interrupt. Exit reason 0x%x",
                   false, test->vmcb->control.exit_code);
            return true;
        }

        test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
        test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

        if (pending_event_guest_run) {
            report("Guest ran before host received IPI", false);
            return true;
        }

        irq_enable();
        asm volatile ("nop");
        irq_disable();

        if (!pending_event_ipi_fired) {
            report("Pending interrupt not dispatched after IRQ enabled", false);
            return true;
        }
        break;

    case 1:
        if (!pending_event_guest_run) {
            report("Guest did not resume when no interrupt was pending", false);
            return true;
        }
        break;
    }

    inc_test_stage(test);

    return get_test_stage(test) == 2;
}

static bool pending_event_check(struct test *test)
{
    return get_test_stage(test) == 2;
}

static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "ioio", default_supported, prepare_ioio, test_ioio,
      ioio_finished, check_ioio },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "dr intercept check", default_supported, prepare_dr_intercept,
      test_dr_intercept, dr_intercept_finished, check_dr_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "msr intercept check", default_supported, prepare_msr_intercept,
      test_msr_intercept, msr_intercept_finished, check_msr_intercept },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
      default_finished, npt_rsvd_pfwalk_check },
    { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
      default_finished, npt_rw_pfwalk_check },
    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
      default_finished, npt_l1mmio_check },
    { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
      default_finished, npt_rw_l1mmio_check },
    { "tsc_adjust", default_supported, tsc_adjust_prepare, tsc_adjust_test,
      default_finished, tsc_adjust_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
    { "pending_event", default_supported, pending_event_prepare,
      pending_event_test, pending_event_finished, pending_event_check },
};

int main(int ac, char **av)
{
    int i, nr;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

    if (!this_cpu_has(X86_FEATURE_SVM)) {
        printf("SVM not available\n");
        return report_summary();
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        test_run(&tests[i], vmcb);
    }

    return report_summary();
}
1469