xref: /kvm-unit-tests/x86/svm.c (revision bc0c0f49f6b4501b758a3ff6e1ed43a7aa7e53d5)
17d36db35SAvi Kivity #include "svm.h"
27d36db35SAvi Kivity #include "libcflat.h"
37d36db35SAvi Kivity #include "processor.h"
4b46094b4SPaolo Bonzini #include "desc.h"
57d36db35SAvi Kivity #include "msr.h"
67d36db35SAvi Kivity #include "vm.h"
77d36db35SAvi Kivity #include "smp.h"
87d36db35SAvi Kivity #include "types.h"
95aca024eSPaolo Bonzini #include "alloc_page.h"
107d36db35SAvi Kivity 
/*
 * Storage for the nested page table (NPT), built in setup_svm() when the
 * CPU supports nested paging: 2048 PTE pages map the low 4 GB with 4k
 * granularity, 4 PDE pages point at them, and one PDPE plus one PML4E
 * page sit on top.
 */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;   /* spare page for tests needing scratch memory */

#define LATENCY_RUNS 1000000   /* iterations for the latency tests */

/* rdtsc timestamps taken around each VMRUN in test_run() */
u64 tsc_start;
u64 tsc_end;

/* accumulated cycle counts and min/max latencies per SVM instruction */
u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

/*
 * I/O permission bitmap: io_bitmap is io_bitmap_area rounded up to the
 * next 4k boundary (see setup_svm()); the static area is sized so that
 * at least 8192+1 bytes remain usable after alignment (the extra byte
 * past the 8k bitmap is used by the port-wrap case in the ioio test).
 */
u8 *io_bitmap;
u8 io_bitmap_area[16384];
423d46571bSPaolo Bonzini 
431535bf0fSJoerg Roedel static bool npt_supported(void)
441535bf0fSJoerg Roedel {
451535bf0fSJoerg Roedel    return cpuid(0x8000000A).d & 1;
461535bf0fSJoerg Roedel }
471535bf0fSJoerg Roedel 
/*
 * One-time SVM host setup: register the host state-save area, enable
 * SVME and NX in EFER, align the I/O permission bitmap, and (when NPT
 * is available) build a nested page table mapping the low 4 GB with
 * 4k pages.
 */
static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i,j;

    /* A host state-save area must be registered before VMRUN is legal. */
    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    /* NX is needed by the npt_nx test, which marks a guest page no-exec. */
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    /* The IOPM base must be page aligned; round the static area up to 4k. */
    io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - Build a nested page table
     * Build the page-table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit-tests.
     *
     * NOTE(review): entries store virtual addresses of the lower tables
     * directly - assumes an identity mapping; confirm against vm.c.
     */

    address = 0;

    /* PTE level: 2048 pages x 512 entries identity-map 4 GB */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        /* 0x067 = present | writable | user | accessed | dirty */
        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;

        pte[i] = page;
    }

    /* PDE level; 0x027 = present | writable | user | accessed */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

        pde[i] = page;
    }

    /* PDPe level */
    pdpe   = alloc_page();
    for (i = 0; i < 4; ++i)
       pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e    = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}
1047d36db35SAvi Kivity 
105f6a2ca45SPaolo Bonzini static u64 *npt_get_pde(u64 address)
106f6a2ca45SPaolo Bonzini {
107f6a2ca45SPaolo Bonzini     int i1, i2;
108f6a2ca45SPaolo Bonzini 
109f6a2ca45SPaolo Bonzini     address >>= 21;
110f6a2ca45SPaolo Bonzini     i1 = (address >> 9) & 0x3;
111f6a2ca45SPaolo Bonzini     i2 = address & 0x1ff;
112f6a2ca45SPaolo Bonzini 
113f6a2ca45SPaolo Bonzini     return &pde[i1][i2];
114f6a2ca45SPaolo Bonzini }
115f6a2ca45SPaolo Bonzini 
116726a1dd7SPaolo Bonzini static u64 *npt_get_pte(u64 address)
1178594b943SJoerg Roedel {
1188594b943SJoerg Roedel     int i1, i2;
1198594b943SJoerg Roedel 
1208594b943SJoerg Roedel     address >>= 12;
1218594b943SJoerg Roedel     i1 = (address >> 9) & 0x7ff;
1228594b943SJoerg Roedel     i2 = address & 0x1ff;
1238594b943SJoerg Roedel 
1248594b943SJoerg Roedel     return &pte[i1][i2];
1258594b943SJoerg Roedel }
1268594b943SJoerg Roedel 
1277d36db35SAvi Kivity static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
1287d36db35SAvi Kivity                          u64 base, u32 limit, u32 attr)
1297d36db35SAvi Kivity {
1307d36db35SAvi Kivity     seg->selector = selector;
1317d36db35SAvi Kivity     seg->attrib = attr;
1327d36db35SAvi Kivity     seg->limit = limit;
1337d36db35SAvi Kivity     seg->base = base;
1347d36db35SAvi Kivity }
1357d36db35SAvi Kivity 
/*
 * Initialize @vmcb so that the guest starts as a clone of the current
 * host state (same segments, GDT/IDT, control registers, EFER, PAT and
 * debug state), with only VMRUN and VMMCALL intercepted.  Nested paging
 * is enabled when the CPU supports it.
 */
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    /* flat 64-bit data segment: type 3, S, P, DB, G */
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    /* flat 64-bit code segment: type 9, S, P, L, G */
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    /* VMSAVE fills FS/GS/TR/LDTR and the syscall MSR fields from host state. */
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    /* ASID 0 is illegal for a guest; use 1 (see the asid_zero test). */
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
    ctrl->iopm_base_pa = virt_to_phys(io_bitmap);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}
1767d36db35SAvi Kivity 
/*
 * One SVM unit test.  test_run() drives the callbacks:
 *   supported  - whether the test can run on this CPU/setup
 *   prepare    - set up the VMCB before the first VMRUN
 *   guest_func - runs inside the guest (invoked via test_thunk)
 *   finished   - called after every #VMEXIT; true ends the run loop
 *   succeeded  - final pass/fail verdict
 */
struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;      /* number of #VMEXITs taken so far */
    ulong scratch;  /* per-test scratch state (stage counters etc.) */
};
1887d36db35SAvi Kivity 
/* Issue VMMCALL; with the VMMCALL intercept set this forces a #VMEXIT. */
static inline void vmmcall(void)
{
    asm volatile ("vmmcall" : : : "memory");
}
193e0b6541cSPaolo Bonzini 
/* Guest-side entry point: run the test body, then exit via VMMCALL. */
static void test_thunk(struct test *test)
{
    test->guest_func(test);
    vmmcall();
}
1997d36db35SAvi Kivity 
/*
 * Guest GPR file saved/restored around VMRUN by the asm in test_run().
 * The SAVE_GPR_C/LOAD_GPR_C offsets (0x8, 0x10, ... 0x80) are byte
 * offsets into this struct - keep the field order in sync with those
 * macros.  rax and rflags live in the VMCB across VMRUN and are copied
 * in/out separately.
 */
struct regs {
        u64 rax;
        u64 rbx;
        u64 rcx;
        u64 rdx;
        u64 cr2;
        u64 rbp;
        u64 rsi;
        u64 rdi;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
        u64 r12;
        u64 r13;
        u64 r14;
        u64 r15;
        u64 rflags;
};

struct regs regs;
221a43baea0SPaolo Bonzini 
// rax handled specially below

/*
 * Exchange every general purpose register except rax/rsp with its slot
 * in the global "regs" struct (constants are byte offsets into
 * struct regs).  Since xchg is its own inverse, the identical sequence
 * both loads the guest GPRs before VMRUN and saves them after #VMEXIT -
 * hence LOAD_GPR_C is simply an alias.
 */
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C
241a43baea0SPaolo Bonzini 
/*
 * Run one test: set up the VMCB and guest entry state, then loop
 * VMRUN -> #VMEXIT until test->finished() returns true, and report the
 * verdict of test->succeeded().
 *
 * Guest rax/rflags live in the VMCB save area across VMRUN; the other
 * GPRs are swapped through the global "regs" struct by LOAD/SAVE_GPR_C.
 * NOTE(review): 0x170 and 0x1f8 are assumed to be the byte offsets of
 * save.rflags and save.rax within struct vmcb - keep in sync with svm.h.
 */
static void test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];

    test->vmcb = vmcb;
    test->prepare(test);
    /* Guest starts in test_thunk() with a private stack; regs.rdi
     * carries the "test" argument in. */
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    regs.rdi = (ulong)test;
    do {
        tsc_start = rdtsc();
        /* Global interrupts stay disabled (clgi/stgi) across the world
         * switch; vmload/vmsave handle the hidden segment state. */
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "mov regs+0x80, %%r15\n\t"  // rflags
            "mov %%r15, 0x170(%0)\n\t"
            "mov regs, %%r15\n\t"       // rax
            "mov %%r15, 0x1f8(%0)\n\t"
            LOAD_GPR_C
            "vmrun \n\t"
            SAVE_GPR_C
            "mov 0x170(%0), %%r15\n\t"  // rflags
            "mov %%r15, regs+0x80\n\t"
            "mov 0x1f8(%0), %%r15\n\t"  // rax
            "mov %%r15, regs\n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
              "memory");
	tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    report("%s", test->succeeded(test), test->name);
}
2807d36db35SAvi Kivity 
281095274b4SPrasad Joshi static bool smp_supported(void)
282095274b4SPrasad Joshi {
283095274b4SPrasad Joshi 	return cpu_count() > 1;
284095274b4SPrasad Joshi }
285095274b4SPrasad Joshi 
/* Fallback ->supported callback: no special hardware requirements. */
static bool default_supported(void)
{
    return true;
}
2907d36db35SAvi Kivity 
/* Default ->prepare callback: fresh identity VMCB, host interrupts off. */
static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}
2967d36db35SAvi Kivity 
2977d36db35SAvi Kivity static bool default_finished(struct test *test)
2987d36db35SAvi Kivity {
2997d36db35SAvi Kivity     return true; /* one vmexit */
3007d36db35SAvi Kivity }
3017d36db35SAvi Kivity 
/* Guest body that does nothing - test_thunk's VMMCALL is the only exit. */
static void null_test(struct test *test)
{
}
3057d36db35SAvi Kivity 
3067d36db35SAvi Kivity static bool null_check(struct test *test)
3077d36db35SAvi Kivity {
3087d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
3097d36db35SAvi Kivity }
3107d36db35SAvi Kivity 
3117d36db35SAvi Kivity static void prepare_no_vmrun_int(struct test *test)
3127d36db35SAvi Kivity {
3137d36db35SAvi Kivity     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
3147d36db35SAvi Kivity }
3157d36db35SAvi Kivity 
3167d36db35SAvi Kivity static bool check_no_vmrun_int(struct test *test)
3177d36db35SAvi Kivity {
3187d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
3197d36db35SAvi Kivity }
3207d36db35SAvi Kivity 
/* Guest body: execute VMRUN, which must be intercepted (see check_vmrun). */
static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}
3257d36db35SAvi Kivity 
3267d36db35SAvi Kivity static bool check_vmrun(struct test *test)
3277d36db35SAvi Kivity {
3287d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
3297d36db35SAvi Kivity }
3307d36db35SAvi Kivity 
3317d36db35SAvi Kivity static void prepare_cr3_intercept(struct test *test)
3327d36db35SAvi Kivity {
3337d36db35SAvi Kivity     default_prepare(test);
3347d36db35SAvi Kivity     test->vmcb->control.intercept_cr_read |= 1 << 3;
3357d36db35SAvi Kivity }
3367d36db35SAvi Kivity 
/* Guest body: read CR3 into scratch; intercepted or not per the prepare. */
static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}
3417d36db35SAvi Kivity 
3427d36db35SAvi Kivity static bool check_cr3_intercept(struct test *test)
3437d36db35SAvi Kivity {
3447d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
3457d36db35SAvi Kivity }
3467d36db35SAvi Kivity 
3477d36db35SAvi Kivity static bool check_cr3_nointercept(struct test *test)
3487d36db35SAvi Kivity {
3497d36db35SAvi Kivity     return null_check(test) && test->scratch == read_cr3();
3507d36db35SAvi Kivity }
3517d36db35SAvi Kivity 
/*
 * Runs on CPU 1: wait until the guest signals readiness (scratch 1 -> 2),
 * pause a few times so the guest can reach the target instruction, then
 * rewrite the bytes at "mmio_insn" into "mov %cr3, %rax; nop".  A buggy
 * hypervisor that cached its intercept decision for the old instruction
 * would let the patched CR3 read through without an intercept.
 */
static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}
3647d36db35SAvi Kivity 
/*
 * Like prepare_cr3_intercept, but also kick off the cross-CPU corruptor
 * that patches the guest instruction while the guest is running.
 */
static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}
3717d36db35SAvi Kivity 
/*
 * Guest body for the bypass test: signal readiness, wait for CPU 1 to
 * acknowledge (scratch == 2), then execute the instruction at label
 * "mmio_insn", which CPU 1 is concurrently patching into a CR3 read.
 * If the patched read is properly intercepted the exit code will be
 * SVM_EXIT_READ_CR3; otherwise scratch receives whatever the patched
 * instruction left in rax.
 */
static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}
3847d36db35SAvi Kivity 
3857d36db35SAvi Kivity static bool next_rip_supported(void)
3867d36db35SAvi Kivity {
3877d36db35SAvi Kivity     return (cpuid(SVM_CPUID_FUNC).d & 8);
3887d36db35SAvi Kivity }
3897d36db35SAvi Kivity 
3907d36db35SAvi Kivity static void prepare_next_rip(struct test *test)
3917d36db35SAvi Kivity {
3927d36db35SAvi Kivity     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
3937d36db35SAvi Kivity }
3947d36db35SAvi Kivity 
3957d36db35SAvi Kivity 
/*
 * Guest body: execute the intercepted RDTSC with a global label placed
 * immediately after it, so the host can verify control.next_rip.
 */
static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}
4027d36db35SAvi Kivity 
4037d36db35SAvi Kivity static bool check_next_rip(struct test *test)
4047d36db35SAvi Kivity {
4057d36db35SAvi Kivity     extern char exp_next_rip;
4067d36db35SAvi Kivity     unsigned long address = (unsigned long)&exp_next_rip;
4077d36db35SAvi Kivity 
4087d36db35SAvi Kivity     return address == test->vmcb->control.next_rip;
4097d36db35SAvi Kivity }
4107d36db35SAvi Kivity 
/*
 * Intercept the exceptions a broken mode switch would raise, so a
 * failure shows up as an unexpected exit code instead of killing the
 * guest.  scratch counts the completed VMMCALL stages.
 */
static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}
4197d36db35SAvi Kivity 
/*
 * Guest body: walk down from 64-bit long mode through protected and
 * 16-bit modes into real mode, VMMCALL there, then climb back up to
 * long mode and VMMCALL again.  mode_switch_finished() validates the
 * CPU state at each VMMCALL from the host side.
 */
static void test_mode_switch(struct test *test)
{
    asm volatile("	cli\n"
		 "	ljmp *1f\n" /* jump to 32-bit code segment */
		 "1:\n"
		 "	.long 2f\n"
		 "	.long " xstr(KERNEL_CS32) "\n"
		 ".code32\n"
		 "2:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl  $31, %%eax\n" /* clear PG */
		 "	movl %%eax, %%cr0\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btcl $8, %%eax\n" /* clear LME */
		 "	wrmsr\n"
		 "	movl %%cr4, %%eax\n"
		 "	btcl $5, %%eax\n" /* clear PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movw %[ds16], %%ax\n"
		 "	movw %%ax, %%ds\n"
		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
		 ".code16\n"
		 "3:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl $0, %%eax\n" /* clear PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
		 "4:\n"
		 "	vmmcall\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl $0, %%eax\n" /* set PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		 ".code32\n"
		 "5:\n"
		 "	movl %%cr4, %%eax\n"
		 "	btsl $5, %%eax\n" /* set PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btsl $8, %%eax\n" /* set LME */
		 "	wrmsr\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl  $31, %%eax\n" /* set PG */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
		 ".code64\n\t"
		 "6:\n"
		 "	vmmcall\n"
		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		 : "rax", "rbx", "rcx", "rdx", "memory");
}
4747d36db35SAvi Kivity 
/*
 * Host side of the mode-switch test: called after every #VMEXIT.
 * Verifies the guest really reached the expected CPU mode at each
 * VMMCALL and advances the stage counter in test->scratch.
 */
static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
	    return true;

    /* Jump over VMMCALL instruction (3 bytes) */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    case 2:
        /*
         * Test should be back in long-mode now - check for this.
         * NOTE(review): the guest issues two VMMCALLs and the loop ends
         * once scratch reaches 2 below, so this branch looks
         * unreachable - confirm against test_mode_switch().
         */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
		    return true;
	break;
    }

    /* one step forward */
    test->scratch += 1;

    /* Done once both VMMCALL stages have been observed. */
    return test->scratch == 2;
}
5137d36db35SAvi Kivity 
5147d36db35SAvi Kivity static bool check_mode_switch(struct test *test)
5157d36db35SAvi Kivity {
5167d36db35SAvi Kivity 	return test->scratch == 2;
5177d36db35SAvi Kivity }
5187d36db35SAvi Kivity 
/*
 * Enable the IOIO intercept with an initially all-pass (zeroed) bitmap.
 * Byte 8192 - just past the 8k covering ports 0..0xFFFF - is set so
 * that accesses wrapping past port 0xFFFF are intercepted (exercised
 * by the inl(0xFFFF) cases in test_ioio()).
 */
static void prepare_ioio(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;
}
526bcd9774aSPaolo Bonzini 
/*
 * Read the current test stage.  barrier() is a compiler barrier forcing
 * a reload of scratch, which the host side mutates between exits.
 */
static int get_test_stage(struct test *test)
{
    barrier();
    return test->scratch;
}
532bcd9774aSPaolo Bonzini 
/* Advance the test stage, with compiler barriers around the update. */
static void inc_test_stage(struct test *test)
{
    barrier();
    test->scratch++;
    barrier();
}
539bcd9774aSPaolo Bonzini 
/*
 * Guest body of the IOIO intercept test.  Every I/O access either
 * passes straight through or triggers an IOIO #VMEXIT; on each exit
 * ioio_finished() advances the stage and clears the intercepted port
 * bits so the retried instruction completes.  The stage counter
 * therefore tells us exactly which accesses were intercepted.
 */
static void test_ioio(struct test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;

    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;

    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;

    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass (access straddles an unblocked/blocked boundary)
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;

    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;

    // 4-byte access at 0xFFFF wraps; byte 8192 still blocks it
    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;

    // with the wrap byte cleared the same access must pass
    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    report("stage %d", false, get_test_stage(test));
    test->scratch = -1;
}
610bcd9774aSPaolo Bonzini 
/*
 * Host side of the IOIO test: on every IOIO exit, advance the stage
 * counter and clear the bitmap bits for the intercepted port range so
 * the retried instruction can complete.  Any non-IOIO, non-VMMCALL
 * exit ends the test (and will fail the stage checks).
 */
static bool ioio_finished(struct test *test)
{
    unsigned port, size;

    /* Only expect IOIO intercepts */
    if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;

    if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    /* exit_info_1: port number in bits 31:16, access size in the SZ field */
    port = test->vmcb->control.exit_info_1 >> 16;
    size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    /* Unblock one bit per byte of the access so the retry passes. */
    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}
635bcd9774aSPaolo Bonzini 
636bcd9774aSPaolo Bonzini static bool check_ioio(struct test *test)
637bcd9774aSPaolo Bonzini {
638bcd9774aSPaolo Bonzini     memset(io_bitmap, 0, 8193);
639bcd9774aSPaolo Bonzini     return test->scratch != -1;
640bcd9774aSPaolo Bonzini }
641bcd9774aSPaolo Bonzini 
6427d36db35SAvi Kivity static void prepare_asid_zero(struct test *test)
6437d36db35SAvi Kivity {
6447d36db35SAvi Kivity     test->vmcb->control.asid = 0;
6457d36db35SAvi Kivity }
6467d36db35SAvi Kivity 
/* Guest body: plain VMMCALL (expected never to run - VMRUN should fail). */
static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}
6517d36db35SAvi Kivity 
6527d36db35SAvi Kivity static bool check_asid_zero(struct test *test)
6537d36db35SAvi Kivity {
6547d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
6557d36db35SAvi Kivity }
6567d36db35SAvi Kivity 
/* Enable the selective CR0-write intercept for the sel_cr0 bug test. */
static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}
6624c8eb156SJoerg Roedel 
6634c8eb156SJoerg Roedel static bool sel_cr0_bug_finished(struct test *test)
6644c8eb156SJoerg Roedel {
6654c8eb156SJoerg Roedel 	return true;
6664c8eb156SJoerg Roedel }
6674c8eb156SJoerg Roedel 
6684c8eb156SJoerg Roedel static void sel_cr0_bug_test(struct test *test)
6694c8eb156SJoerg Roedel {
6704c8eb156SJoerg Roedel     unsigned long cr0;
6714c8eb156SJoerg Roedel 
6724c8eb156SJoerg Roedel     /* read cr0, clear CD, and write back */
6734c8eb156SJoerg Roedel     cr0  = read_cr0();
6744c8eb156SJoerg Roedel     cr0 |= (1UL << 30);
6754c8eb156SJoerg Roedel     write_cr0(cr0);
6764c8eb156SJoerg Roedel 
6774c8eb156SJoerg Roedel     /*
6784c8eb156SJoerg Roedel      * If we are here the test failed, not sure what to do now because we
6794c8eb156SJoerg Roedel      * are not in guest-mode anymore so we can't trigger an intercept.
6804c8eb156SJoerg Roedel      * Trigger a tripple-fault for now.
6814c8eb156SJoerg Roedel      */
682d637cb11SAndrew Jones     report("sel_cr0 test. Can not recover from this - exiting", false);
683a43ed2acSAndrew Jones     exit(report_summary());
6844c8eb156SJoerg Roedel }
6854c8eb156SJoerg Roedel 
6864c8eb156SJoerg Roedel static bool sel_cr0_bug_check(struct test *test)
6874c8eb156SJoerg Roedel {
6884c8eb156SJoerg Roedel     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
6894c8eb156SJoerg Roedel }
6904c8eb156SJoerg Roedel 
6918594b943SJoerg Roedel static void npt_nx_prepare(struct test *test)
6928594b943SJoerg Roedel {
6938594b943SJoerg Roedel 
6948594b943SJoerg Roedel     u64 *pte;
6958594b943SJoerg Roedel 
6968594b943SJoerg Roedel     vmcb_ident(test->vmcb);
697726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)null_test);
6988594b943SJoerg Roedel 
6998594b943SJoerg Roedel     *pte |= (1ULL << 63);
7008594b943SJoerg Roedel }
7018594b943SJoerg Roedel 
7028594b943SJoerg Roedel static bool npt_nx_check(struct test *test)
7038594b943SJoerg Roedel {
704726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)null_test);
7058594b943SJoerg Roedel 
7068594b943SJoerg Roedel     *pte &= ~(1ULL << 63);
7078594b943SJoerg Roedel 
7088594b943SJoerg Roedel     test->vmcb->save.efer |= (1 << 11);
7098594b943SJoerg Roedel 
7108594b943SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
711e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
7128594b943SJoerg Roedel }
7138594b943SJoerg Roedel 
714ea975120SJoerg Roedel static void npt_us_prepare(struct test *test)
715ea975120SJoerg Roedel {
716ea975120SJoerg Roedel     u64 *pte;
717ea975120SJoerg Roedel 
718ea975120SJoerg Roedel     vmcb_ident(test->vmcb);
719726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)scratch_page);
720ea975120SJoerg Roedel 
721ea975120SJoerg Roedel     *pte &= ~(1ULL << 2);
722ea975120SJoerg Roedel }
723ea975120SJoerg Roedel 
724ea975120SJoerg Roedel static void npt_us_test(struct test *test)
725ea975120SJoerg Roedel {
726c0a4e715SPaolo Bonzini     (void) *(volatile u64 *)scratch_page;
727ea975120SJoerg Roedel }
728ea975120SJoerg Roedel 
729ea975120SJoerg Roedel static bool npt_us_check(struct test *test)
730ea975120SJoerg Roedel {
731726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)scratch_page);
732ea975120SJoerg Roedel 
733ea975120SJoerg Roedel     *pte |= (1ULL << 2);
734ea975120SJoerg Roedel 
735ea975120SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
736e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
737ea975120SJoerg Roedel }
738ea975120SJoerg Roedel 
739f6a2ca45SPaolo Bonzini u64 save_pde;
740f6a2ca45SPaolo Bonzini 
741dd6ef43cSJoerg Roedel static void npt_rsvd_prepare(struct test *test)
742dd6ef43cSJoerg Roedel {
743f6a2ca45SPaolo Bonzini     u64 *pde;
744dd6ef43cSJoerg Roedel 
745dd6ef43cSJoerg Roedel     vmcb_ident(test->vmcb);
746f6a2ca45SPaolo Bonzini     pde = npt_get_pde((u64) null_test);
747dd6ef43cSJoerg Roedel 
748f6a2ca45SPaolo Bonzini     save_pde = *pde;
749f6a2ca45SPaolo Bonzini     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
750dd6ef43cSJoerg Roedel }
751dd6ef43cSJoerg Roedel 
752dd6ef43cSJoerg Roedel static bool npt_rsvd_check(struct test *test)
753dd6ef43cSJoerg Roedel {
754f6a2ca45SPaolo Bonzini     u64 *pde = npt_get_pde((u64) null_test);
755f6a2ca45SPaolo Bonzini 
756f6a2ca45SPaolo Bonzini     *pde = save_pde;
757dd6ef43cSJoerg Roedel 
758dd6ef43cSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
759f6a2ca45SPaolo Bonzini             && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
760dd6ef43cSJoerg Roedel }
761dd6ef43cSJoerg Roedel 
7625ebf82edSJoerg Roedel static void npt_rw_prepare(struct test *test)
7635ebf82edSJoerg Roedel {
7645ebf82edSJoerg Roedel 
7655ebf82edSJoerg Roedel     u64 *pte;
7665ebf82edSJoerg Roedel 
7675ebf82edSJoerg Roedel     vmcb_ident(test->vmcb);
768726a1dd7SPaolo Bonzini     pte = npt_get_pte(0x80000);
7695ebf82edSJoerg Roedel 
7705ebf82edSJoerg Roedel     *pte &= ~(1ULL << 1);
7715ebf82edSJoerg Roedel }
7725ebf82edSJoerg Roedel 
7735ebf82edSJoerg Roedel static void npt_rw_test(struct test *test)
7745ebf82edSJoerg Roedel {
7755ebf82edSJoerg Roedel     u64 *data = (void*)(0x80000);
7765ebf82edSJoerg Roedel 
7775ebf82edSJoerg Roedel     *data = 0;
7785ebf82edSJoerg Roedel }
7795ebf82edSJoerg Roedel 
7805ebf82edSJoerg Roedel static bool npt_rw_check(struct test *test)
7815ebf82edSJoerg Roedel {
782726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(0x80000);
7835ebf82edSJoerg Roedel 
7845ebf82edSJoerg Roedel     *pte |= (1ULL << 1);
7855ebf82edSJoerg Roedel 
7865ebf82edSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
787e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
7885ebf82edSJoerg Roedel }
7895ebf82edSJoerg Roedel 
790f6a2ca45SPaolo Bonzini static void npt_rw_pfwalk_prepare(struct test *test)
791590040ffSJoerg Roedel {
792590040ffSJoerg Roedel 
793590040ffSJoerg Roedel     u64 *pte;
794590040ffSJoerg Roedel 
795590040ffSJoerg Roedel     vmcb_ident(test->vmcb);
796726a1dd7SPaolo Bonzini     pte = npt_get_pte(read_cr3());
797590040ffSJoerg Roedel 
798590040ffSJoerg Roedel     *pte &= ~(1ULL << 1);
799590040ffSJoerg Roedel }
800590040ffSJoerg Roedel 
801f6a2ca45SPaolo Bonzini static bool npt_rw_pfwalk_check(struct test *test)
802590040ffSJoerg Roedel {
803726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(read_cr3());
804590040ffSJoerg Roedel 
805590040ffSJoerg Roedel     *pte |= (1ULL << 1);
806590040ffSJoerg Roedel 
807590040ffSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
808e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
809590040ffSJoerg Roedel 	   && (test->vmcb->control.exit_info_2 == read_cr3());
810590040ffSJoerg Roedel }
811590040ffSJoerg Roedel 
812f6a2ca45SPaolo Bonzini static void npt_rsvd_pfwalk_prepare(struct test *test)
813f6a2ca45SPaolo Bonzini {
814f6a2ca45SPaolo Bonzini 
815f6a2ca45SPaolo Bonzini     vmcb_ident(test->vmcb);
816f6a2ca45SPaolo Bonzini 
817f6a2ca45SPaolo Bonzini     pdpe[0] |= (1ULL << 8);
818f6a2ca45SPaolo Bonzini }
819f6a2ca45SPaolo Bonzini 
820f6a2ca45SPaolo Bonzini static bool npt_rsvd_pfwalk_check(struct test *test)
821f6a2ca45SPaolo Bonzini {
822f6a2ca45SPaolo Bonzini     pdpe[0] &= ~(1ULL << 8);
823f6a2ca45SPaolo Bonzini 
824f6a2ca45SPaolo Bonzini     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
825f6a2ca45SPaolo Bonzini             && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
826f6a2ca45SPaolo Bonzini }
827f6a2ca45SPaolo Bonzini 
828a2ab7740SPaolo Bonzini static void npt_l1mmio_prepare(struct test *test)
829a2ab7740SPaolo Bonzini {
830a2ab7740SPaolo Bonzini     vmcb_ident(test->vmcb);
831a2ab7740SPaolo Bonzini }
832a2ab7740SPaolo Bonzini 
8331e699ecbSPaolo Bonzini u32 nested_apic_version1;
8341e699ecbSPaolo Bonzini u32 nested_apic_version2;
835a2ab7740SPaolo Bonzini 
836a2ab7740SPaolo Bonzini static void npt_l1mmio_test(struct test *test)
837a2ab7740SPaolo Bonzini {
8381e699ecbSPaolo Bonzini     volatile u32 *data = (volatile void*)(0xfee00030UL);
839a2ab7740SPaolo Bonzini 
8401e699ecbSPaolo Bonzini     nested_apic_version1 = *data;
8411e699ecbSPaolo Bonzini     nested_apic_version2 = *data;
842a2ab7740SPaolo Bonzini }
843a2ab7740SPaolo Bonzini 
844a2ab7740SPaolo Bonzini static bool npt_l1mmio_check(struct test *test)
845a2ab7740SPaolo Bonzini {
8461e699ecbSPaolo Bonzini     volatile u32 *data = (volatile void*)(0xfee00030);
8471e699ecbSPaolo Bonzini     u32 lvr = *data;
848a2ab7740SPaolo Bonzini 
8491e699ecbSPaolo Bonzini     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
850a2ab7740SPaolo Bonzini }
851a2ab7740SPaolo Bonzini 
85269dd444aSPaolo Bonzini static void npt_rw_l1mmio_prepare(struct test *test)
85369dd444aSPaolo Bonzini {
85469dd444aSPaolo Bonzini 
85569dd444aSPaolo Bonzini     u64 *pte;
85669dd444aSPaolo Bonzini 
85769dd444aSPaolo Bonzini     vmcb_ident(test->vmcb);
85869dd444aSPaolo Bonzini     pte = npt_get_pte(0xfee00080);
85969dd444aSPaolo Bonzini 
86069dd444aSPaolo Bonzini     *pte &= ~(1ULL << 1);
86169dd444aSPaolo Bonzini }
86269dd444aSPaolo Bonzini 
86369dd444aSPaolo Bonzini static void npt_rw_l1mmio_test(struct test *test)
86469dd444aSPaolo Bonzini {
86569dd444aSPaolo Bonzini     volatile u32 *data = (volatile void*)(0xfee00080);
86669dd444aSPaolo Bonzini 
86769dd444aSPaolo Bonzini     *data = *data;
86869dd444aSPaolo Bonzini }
86969dd444aSPaolo Bonzini 
87069dd444aSPaolo Bonzini static bool npt_rw_l1mmio_check(struct test *test)
87169dd444aSPaolo Bonzini {
87269dd444aSPaolo Bonzini     u64 *pte = npt_get_pte(0xfee00080);
87369dd444aSPaolo Bonzini 
87469dd444aSPaolo Bonzini     *pte |= (1ULL << 1);
87569dd444aSPaolo Bonzini 
87669dd444aSPaolo Bonzini     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
87769dd444aSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
87869dd444aSPaolo Bonzini }
87969dd444aSPaolo Bonzini 
88036a7018aSPaolo Bonzini #define TSC_ADJUST_VALUE    (1ll << 32)
88136a7018aSPaolo Bonzini #define TSC_OFFSET_VALUE    (-1ll << 48)
88236a7018aSPaolo Bonzini static bool ok;
88336a7018aSPaolo Bonzini 
88436a7018aSPaolo Bonzini static void tsc_adjust_prepare(struct test *test)
88536a7018aSPaolo Bonzini {
88636a7018aSPaolo Bonzini     default_prepare(test);
88736a7018aSPaolo Bonzini     test->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
88836a7018aSPaolo Bonzini 
88936a7018aSPaolo Bonzini     wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
89036a7018aSPaolo Bonzini     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
89136a7018aSPaolo Bonzini     ok = adjust == -TSC_ADJUST_VALUE;
89236a7018aSPaolo Bonzini }
89336a7018aSPaolo Bonzini 
89436a7018aSPaolo Bonzini static void tsc_adjust_test(struct test *test)
89536a7018aSPaolo Bonzini {
89636a7018aSPaolo Bonzini     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
89736a7018aSPaolo Bonzini     ok &= adjust == -TSC_ADJUST_VALUE;
89836a7018aSPaolo Bonzini 
89936a7018aSPaolo Bonzini     uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
90036a7018aSPaolo Bonzini     wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
90136a7018aSPaolo Bonzini 
90236a7018aSPaolo Bonzini     adjust = rdmsr(MSR_IA32_TSC_ADJUST);
90336a7018aSPaolo Bonzini     ok &= adjust <= -2 * TSC_ADJUST_VALUE;
90436a7018aSPaolo Bonzini 
90536a7018aSPaolo Bonzini     uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
90636a7018aSPaolo Bonzini     ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
90736a7018aSPaolo Bonzini 
90836a7018aSPaolo Bonzini     uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
90936a7018aSPaolo Bonzini     ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
91036a7018aSPaolo Bonzini }
91136a7018aSPaolo Bonzini 
91236a7018aSPaolo Bonzini static bool tsc_adjust_check(struct test *test)
91336a7018aSPaolo Bonzini {
91436a7018aSPaolo Bonzini     int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
91536a7018aSPaolo Bonzini 
91636a7018aSPaolo Bonzini     wrmsr(MSR_IA32_TSC_ADJUST, 0);
91736a7018aSPaolo Bonzini     return ok && adjust <= -2 * TSC_ADJUST_VALUE;
91836a7018aSPaolo Bonzini }
91936a7018aSPaolo Bonzini 
92021c23154SJoerg Roedel static void latency_prepare(struct test *test)
92121c23154SJoerg Roedel {
92221c23154SJoerg Roedel     default_prepare(test);
92321c23154SJoerg Roedel     runs = LATENCY_RUNS;
92421c23154SJoerg Roedel     latvmrun_min = latvmexit_min = -1ULL;
92521c23154SJoerg Roedel     latvmrun_max = latvmexit_max = 0;
92621c23154SJoerg Roedel     vmrun_sum = vmexit_sum = 0;
92721c23154SJoerg Roedel }
92821c23154SJoerg Roedel 
92921c23154SJoerg Roedel static void latency_test(struct test *test)
93021c23154SJoerg Roedel {
93121c23154SJoerg Roedel     u64 cycles;
93221c23154SJoerg Roedel 
93321c23154SJoerg Roedel start:
93421c23154SJoerg Roedel     tsc_end = rdtsc();
93521c23154SJoerg Roedel 
93621c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
93721c23154SJoerg Roedel 
93821c23154SJoerg Roedel     if (cycles > latvmrun_max)
93921c23154SJoerg Roedel         latvmrun_max = cycles;
94021c23154SJoerg Roedel 
94121c23154SJoerg Roedel     if (cycles < latvmrun_min)
94221c23154SJoerg Roedel         latvmrun_min = cycles;
94321c23154SJoerg Roedel 
94421c23154SJoerg Roedel     vmrun_sum += cycles;
94521c23154SJoerg Roedel 
94621c23154SJoerg Roedel     tsc_start = rdtsc();
94721c23154SJoerg Roedel 
94821c23154SJoerg Roedel     asm volatile ("vmmcall" : : : "memory");
94921c23154SJoerg Roedel     goto start;
95021c23154SJoerg Roedel }
95121c23154SJoerg Roedel 
95221c23154SJoerg Roedel static bool latency_finished(struct test *test)
95321c23154SJoerg Roedel {
95421c23154SJoerg Roedel     u64 cycles;
95521c23154SJoerg Roedel 
95621c23154SJoerg Roedel     tsc_end = rdtsc();
95721c23154SJoerg Roedel 
95821c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
95921c23154SJoerg Roedel 
96021c23154SJoerg Roedel     if (cycles > latvmexit_max)
96121c23154SJoerg Roedel         latvmexit_max = cycles;
96221c23154SJoerg Roedel 
96321c23154SJoerg Roedel     if (cycles < latvmexit_min)
96421c23154SJoerg Roedel         latvmexit_min = cycles;
96521c23154SJoerg Roedel 
96621c23154SJoerg Roedel     vmexit_sum += cycles;
96721c23154SJoerg Roedel 
96821c23154SJoerg Roedel     test->vmcb->save.rip += 3;
96921c23154SJoerg Roedel 
97021c23154SJoerg Roedel     runs -= 1;
97121c23154SJoerg Roedel 
97221c23154SJoerg Roedel     return runs == 0;
97321c23154SJoerg Roedel }
97421c23154SJoerg Roedel 
97521c23154SJoerg Roedel static bool latency_check(struct test *test)
97621c23154SJoerg Roedel {
977b006d7ebSAndrew Jones     printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
97821c23154SJoerg Roedel             latvmrun_min, vmrun_sum / LATENCY_RUNS);
979b006d7ebSAndrew Jones     printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
98021c23154SJoerg Roedel             latvmexit_min, vmexit_sum / LATENCY_RUNS);
98121c23154SJoerg Roedel     return true;
98221c23154SJoerg Roedel }
98321c23154SJoerg Roedel 
984ef101219SRoedel, Joerg static void lat_svm_insn_prepare(struct test *test)
985ef101219SRoedel, Joerg {
986ef101219SRoedel, Joerg     default_prepare(test);
987ef101219SRoedel, Joerg     runs = LATENCY_RUNS;
988ef101219SRoedel, Joerg     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
989ef101219SRoedel, Joerg     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
990ef101219SRoedel, Joerg     vmload_sum = vmsave_sum = stgi_sum = clgi_sum;
991ef101219SRoedel, Joerg }
992ef101219SRoedel, Joerg 
993ef101219SRoedel, Joerg static bool lat_svm_insn_finished(struct test *test)
994ef101219SRoedel, Joerg {
995ef101219SRoedel, Joerg     u64 vmcb_phys = virt_to_phys(test->vmcb);
996ef101219SRoedel, Joerg     u64 cycles;
997ef101219SRoedel, Joerg 
998ef101219SRoedel, Joerg     for ( ; runs != 0; runs--) {
999ef101219SRoedel, Joerg         tsc_start = rdtsc();
1000ef101219SRoedel, Joerg         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
1001ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
1002ef101219SRoedel, Joerg         if (cycles > latvmload_max)
1003ef101219SRoedel, Joerg             latvmload_max = cycles;
1004ef101219SRoedel, Joerg         if (cycles < latvmload_min)
1005ef101219SRoedel, Joerg             latvmload_min = cycles;
1006ef101219SRoedel, Joerg         vmload_sum += cycles;
1007ef101219SRoedel, Joerg 
1008ef101219SRoedel, Joerg         tsc_start = rdtsc();
1009ef101219SRoedel, Joerg         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
1010ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
1011ef101219SRoedel, Joerg         if (cycles > latvmsave_max)
1012ef101219SRoedel, Joerg             latvmsave_max = cycles;
1013ef101219SRoedel, Joerg         if (cycles < latvmsave_min)
1014ef101219SRoedel, Joerg             latvmsave_min = cycles;
1015ef101219SRoedel, Joerg         vmsave_sum += cycles;
1016ef101219SRoedel, Joerg 
1017ef101219SRoedel, Joerg         tsc_start = rdtsc();
1018ef101219SRoedel, Joerg         asm volatile("stgi\n\t");
1019ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
1020ef101219SRoedel, Joerg         if (cycles > latstgi_max)
1021ef101219SRoedel, Joerg             latstgi_max = cycles;
1022ef101219SRoedel, Joerg         if (cycles < latstgi_min)
1023ef101219SRoedel, Joerg             latstgi_min = cycles;
1024ef101219SRoedel, Joerg         stgi_sum += cycles;
1025ef101219SRoedel, Joerg 
1026ef101219SRoedel, Joerg         tsc_start = rdtsc();
1027ef101219SRoedel, Joerg         asm volatile("clgi\n\t");
1028ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
1029ef101219SRoedel, Joerg         if (cycles > latclgi_max)
1030ef101219SRoedel, Joerg             latclgi_max = cycles;
1031ef101219SRoedel, Joerg         if (cycles < latclgi_min)
1032ef101219SRoedel, Joerg             latclgi_min = cycles;
1033ef101219SRoedel, Joerg         clgi_sum += cycles;
1034ef101219SRoedel, Joerg     }
1035ef101219SRoedel, Joerg 
1036ef101219SRoedel, Joerg     return true;
1037ef101219SRoedel, Joerg }
1038ef101219SRoedel, Joerg 
1039ef101219SRoedel, Joerg static bool lat_svm_insn_check(struct test *test)
1040ef101219SRoedel, Joerg {
1041b006d7ebSAndrew Jones     printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
1042ef101219SRoedel, Joerg             latvmload_min, vmload_sum / LATENCY_RUNS);
1043b006d7ebSAndrew Jones     printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
1044ef101219SRoedel, Joerg             latvmsave_min, vmsave_sum / LATENCY_RUNS);
1045b006d7ebSAndrew Jones     printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
1046ef101219SRoedel, Joerg             latstgi_min, stgi_sum / LATENCY_RUNS);
1047b006d7ebSAndrew Jones     printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
1048ef101219SRoedel, Joerg             latclgi_min, clgi_sum / LATENCY_RUNS);
1049ef101219SRoedel, Joerg     return true;
1050ef101219SRoedel, Joerg }
10517d36db35SAvi Kivity static struct test tests[] = {
10527d36db35SAvi Kivity     { "null", default_supported, default_prepare, null_test,
10537d36db35SAvi Kivity       default_finished, null_check },
10547d36db35SAvi Kivity     { "vmrun", default_supported, default_prepare, test_vmrun,
10557d36db35SAvi Kivity        default_finished, check_vmrun },
1056bcd9774aSPaolo Bonzini     { "ioio", default_supported, prepare_ioio, test_ioio,
1057bcd9774aSPaolo Bonzini        ioio_finished, check_ioio },
10587d36db35SAvi Kivity     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
10597d36db35SAvi Kivity       null_test, default_finished, check_no_vmrun_int },
10607d36db35SAvi Kivity     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
10617d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_intercept },
10627d36db35SAvi Kivity     { "cr3 read nointercept", default_supported, default_prepare,
10637d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_nointercept },
1064095274b4SPrasad Joshi     { "cr3 read intercept emulate", smp_supported,
10657d36db35SAvi Kivity       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
10667d36db35SAvi Kivity       default_finished, check_cr3_intercept },
10677d36db35SAvi Kivity     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
10687d36db35SAvi Kivity       default_finished, check_next_rip },
10697d36db35SAvi Kivity     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
10707d36db35SAvi Kivity        mode_switch_finished, check_mode_switch },
10717d36db35SAvi Kivity     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
10727d36db35SAvi Kivity        default_finished, check_asid_zero },
10734c8eb156SJoerg Roedel     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
10744c8eb156SJoerg Roedel        sel_cr0_bug_finished, sel_cr0_bug_check },
10758594b943SJoerg Roedel     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
1076ea975120SJoerg Roedel 	    default_finished, npt_nx_check },
1077ea975120SJoerg Roedel     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
1078ea975120SJoerg Roedel 	    default_finished, npt_us_check },
1079dd6ef43cSJoerg Roedel     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
1080dd6ef43cSJoerg Roedel 	    default_finished, npt_rsvd_check },
10815ebf82edSJoerg Roedel     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
10825ebf82edSJoerg Roedel 	    default_finished, npt_rw_check },
1083f6a2ca45SPaolo Bonzini     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
1084f6a2ca45SPaolo Bonzini 	    default_finished, npt_rsvd_pfwalk_check },
1085f6a2ca45SPaolo Bonzini     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
1086f6a2ca45SPaolo Bonzini 	    default_finished, npt_rw_pfwalk_check },
1087a2ab7740SPaolo Bonzini     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
1088a2ab7740SPaolo Bonzini 	    default_finished, npt_l1mmio_check },
108969dd444aSPaolo Bonzini     { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
109069dd444aSPaolo Bonzini 	    default_finished, npt_rw_l1mmio_check },
109136a7018aSPaolo Bonzini     { "tsc_adjust", default_supported, tsc_adjust_prepare, tsc_adjust_test,
109236a7018aSPaolo Bonzini        default_finished, tsc_adjust_check },
109321c23154SJoerg Roedel     { "latency_run_exit", default_supported, latency_prepare, latency_test,
109421c23154SJoerg Roedel       latency_finished, latency_check },
1095ef101219SRoedel, Joerg     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
1096ef101219SRoedel, Joerg       lat_svm_insn_finished, lat_svm_insn_check },
10977d36db35SAvi Kivity };
10987d36db35SAvi Kivity 
10997d36db35SAvi Kivity int main(int ac, char **av)
11007d36db35SAvi Kivity {
1101a43ed2acSAndrew Jones     int i, nr;
11027d36db35SAvi Kivity     struct vmcb *vmcb;
11037d36db35SAvi Kivity 
11047d36db35SAvi Kivity     setup_vm();
11057d36db35SAvi Kivity     smp_init();
11067d36db35SAvi Kivity 
11077d36db35SAvi Kivity     if (!(cpuid(0x80000001).c & 4)) {
11087d36db35SAvi Kivity         printf("SVM not availble\n");
110932b9603cSRadim Krčmář         return report_summary();
11107d36db35SAvi Kivity     }
11117d36db35SAvi Kivity 
11127d36db35SAvi Kivity     setup_svm();
11137d36db35SAvi Kivity 
11147d36db35SAvi Kivity     vmcb = alloc_page();
11157d36db35SAvi Kivity 
11167d36db35SAvi Kivity     nr = ARRAY_SIZE(tests);
11177d36db35SAvi Kivity     for (i = 0; i < nr; ++i) {
11187d36db35SAvi Kivity         if (!tests[i].supported())
11197d36db35SAvi Kivity             continue;
1120a43ed2acSAndrew Jones         test_run(&tests[i], vmcb);
11217d36db35SAvi Kivity     }
11227d36db35SAvi Kivity 
1123a43ed2acSAndrew Jones     return report_summary();
11247d36db35SAvi Kivity }
1125