xref: /kvm-unit-tests/x86/svm.c (revision b46094b4f8d61a6955375bc0d5a99be83aa8a525)
17d36db35SAvi Kivity #include "svm.h"
27d36db35SAvi Kivity #include "libcflat.h"
37d36db35SAvi Kivity #include "processor.h"
4*b46094b4SPaolo Bonzini #include "desc.h"
57d36db35SAvi Kivity #include "msr.h"
67d36db35SAvi Kivity #include "vm.h"
77d36db35SAvi Kivity #include "smp.h"
87d36db35SAvi Kivity #include "types.h"
97d36db35SAvi Kivity 
101535bf0fSJoerg Roedel /* for the nested page table */
111535bf0fSJoerg Roedel u64 *pml4e;
121535bf0fSJoerg Roedel u64 *pdpe;
131535bf0fSJoerg Roedel u64 *pde[4];
141535bf0fSJoerg Roedel u64 *pte[2048];
15c0a4e715SPaolo Bonzini void *scratch_page;
161535bf0fSJoerg Roedel 
1721c23154SJoerg Roedel #define LATENCY_RUNS 1000000
1821c23154SJoerg Roedel 
1921c23154SJoerg Roedel u64 tsc_start;
2021c23154SJoerg Roedel u64 tsc_end;
2121c23154SJoerg Roedel 
2221c23154SJoerg Roedel u64 vmrun_sum, vmexit_sum;
23ef101219SRoedel, Joerg u64 vmsave_sum, vmload_sum;
24ef101219SRoedel, Joerg u64 stgi_sum, clgi_sum;
2521c23154SJoerg Roedel u64 latvmrun_max;
2621c23154SJoerg Roedel u64 latvmrun_min;
2721c23154SJoerg Roedel u64 latvmexit_max;
2821c23154SJoerg Roedel u64 latvmexit_min;
29ef101219SRoedel, Joerg u64 latvmload_max;
30ef101219SRoedel, Joerg u64 latvmload_min;
31ef101219SRoedel, Joerg u64 latvmsave_max;
32ef101219SRoedel, Joerg u64 latvmsave_min;
33ef101219SRoedel, Joerg u64 latstgi_max;
34ef101219SRoedel, Joerg u64 latstgi_min;
35ef101219SRoedel, Joerg u64 latclgi_max;
36ef101219SRoedel, Joerg u64 latclgi_min;
3721c23154SJoerg Roedel u64 runs;
3821c23154SJoerg Roedel 
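/*
 * CPUID Fn8000_000A EDX[0] is the nested paging (NP) feature bit; the
 * NPT-specific tests below are skipped when it is clear.
 */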
391535bf0fSJoerg Roedel static bool npt_supported(void)
401535bf0fSJoerg Roedel {
411535bf0fSJoerg Roedel    return cpuid(0x8000000A).d & 1;
421535bf0fSJoerg Roedel }
431535bf0fSJoerg Roedel 
447d36db35SAvi Kivity static void setup_svm(void)
457d36db35SAvi Kivity {
467d36db35SAvi Kivity     void *hsave = alloc_page();
471535bf0fSJoerg Roedel     u64 *page, address;
481535bf0fSJoerg Roedel     int i, j;
497d36db35SAvi Kivity 
507d36db35SAvi Kivity     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
517d36db35SAvi Kivity     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
528594b943SJoerg Roedel     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
531535bf0fSJoerg Roedel 
54ea975120SJoerg Roedel     scratch_page = alloc_page();
55ea975120SJoerg Roedel 
561535bf0fSJoerg Roedel     if (!npt_supported())
571535bf0fSJoerg Roedel         return;
581535bf0fSJoerg Roedel 
591535bf0fSJoerg Roedel     printf("NPT detected - running all tests with NPT enabled\n");
601535bf0fSJoerg Roedel 
611535bf0fSJoerg Roedel     /*
621535bf0fSJoerg Roedel      * Nested paging is supported - build a nested page table.
631535bf0fSJoerg Roedel      * Build the page table bottom-up and map everything with 4k pages
641535bf0fSJoerg Roedel      * to get enough granularity for the NPT unit tests.
651535bf0fSJoerg Roedel      */
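    /*
     * Sizing: 2048 PTE pages x 512 entries x 4 KiB map the low 4 GiB.
     * The 4 PDE pages (4 x 512 entries each) point at those 2048 PTE
     * pages, 4 PDPT entries cover the PDE pages, and a single PML4 entry
     * covers the PDPT. Flags: 0x067 = P|RW|US|A|D for the 4k leaf
     * entries, 0x027 = P|RW|US|A for the table entries.
     */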
661535bf0fSJoerg Roedel 
671535bf0fSJoerg Roedel     address = 0;
681535bf0fSJoerg Roedel 
691535bf0fSJoerg Roedel     /* PTE level */
701535bf0fSJoerg Roedel     for (i = 0; i < 2048; ++i) {
711535bf0fSJoerg Roedel         page = alloc_page();
721535bf0fSJoerg Roedel 
731535bf0fSJoerg Roedel         for (j = 0; j < 512; ++j, address += 4096)
741535bf0fSJoerg Roedel             page[j] = address | 0x067ULL;
751535bf0fSJoerg Roedel 
761535bf0fSJoerg Roedel         pte[i] = page;
771535bf0fSJoerg Roedel     }
781535bf0fSJoerg Roedel 
791535bf0fSJoerg Roedel     /* PDE level */
801535bf0fSJoerg Roedel     for (i = 0; i < 4; ++i) {
811535bf0fSJoerg Roedel         page = alloc_page();
821535bf0fSJoerg Roedel 
831535bf0fSJoerg Roedel         for (j = 0; j < 512; ++j)
841535bf0fSJoerg Roedel             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
851535bf0fSJoerg Roedel 
861535bf0fSJoerg Roedel         pde[i] = page;
871535bf0fSJoerg Roedel     }
881535bf0fSJoerg Roedel 
891535bf0fSJoerg Roedel     /* PDPe level */
901535bf0fSJoerg Roedel     pdpe   = alloc_page();
911535bf0fSJoerg Roedel     for (i = 0; i < 4; ++i)
921535bf0fSJoerg Roedel        pdpe[i] = ((u64)(pde[i])) | 0x27;
931535bf0fSJoerg Roedel 
941535bf0fSJoerg Roedel     /* PML4e level */
951535bf0fSJoerg Roedel     pml4e    = alloc_page();
961535bf0fSJoerg Roedel     pml4e[0] = ((u64)pdpe) | 0x27;
977d36db35SAvi Kivity }
987d36db35SAvi Kivity 
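/*
 * Return a pointer to the NPT PTE mapping the given guest-physical
 * address: bits 31:21 select one of the 2048 PTE pages built in
 * setup_svm() (i1) and bits 20:12 select the entry within it (i2).
 */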
99726a1dd7SPaolo Bonzini static u64 *npt_get_pte(u64 address)
1008594b943SJoerg Roedel {
1018594b943SJoerg Roedel     int i1, i2;
1028594b943SJoerg Roedel 
1038594b943SJoerg Roedel     address >>= 12;
1048594b943SJoerg Roedel     i1 = (address >> 9) & 0x7ff;
1058594b943SJoerg Roedel     i2 = address & 0x1ff;
1068594b943SJoerg Roedel 
1078594b943SJoerg Roedel     return &pte[i1][i2];
1088594b943SJoerg Roedel }
1098594b943SJoerg Roedel 
1107d36db35SAvi Kivity static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
1117d36db35SAvi Kivity                          u64 base, u32 limit, u32 attr)
1127d36db35SAvi Kivity {
1137d36db35SAvi Kivity     seg->selector = selector;
1147d36db35SAvi Kivity     seg->attrib = attr;
1157d36db35SAvi Kivity     seg->limit = limit;
1167d36db35SAvi Kivity     seg->base = base;
1177d36db35SAvi Kivity }
1187d36db35SAvi Kivity 
1197d36db35SAvi Kivity static void vmcb_ident(struct vmcb *vmcb)
1207d36db35SAvi Kivity {
1217d36db35SAvi Kivity     u64 vmcb_phys = virt_to_phys(vmcb);
1227d36db35SAvi Kivity     struct vmcb_save_area *save = &vmcb->save;
1237d36db35SAvi Kivity     struct vmcb_control_area *ctrl = &vmcb->control;
1247d36db35SAvi Kivity     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
1257d36db35SAvi Kivity         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
1267d36db35SAvi Kivity     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
1277d36db35SAvi Kivity         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
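    /*
     * The VMCB attrib field packs the descriptor attribute bits
     * (type[3:0], S, DPL[1:0], P, AVL, L, DB, G): type 3 is a read/write,
     * accessed data segment and type 9 an execute-only, accessed code
     * segment.
     */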
1287d36db35SAvi Kivity     struct descriptor_table_ptr desc_table_ptr;
1297d36db35SAvi Kivity 
1307d36db35SAvi Kivity     memset(vmcb, 0, sizeof(*vmcb));
1317d36db35SAvi Kivity     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
1327d36db35SAvi Kivity     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
1337d36db35SAvi Kivity     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
1347d36db35SAvi Kivity     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
1357d36db35SAvi Kivity     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
1367d36db35SAvi Kivity     sgdt(&desc_table_ptr);
1377d36db35SAvi Kivity     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
1387d36db35SAvi Kivity     sidt(&desc_table_ptr);
1397d36db35SAvi Kivity     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
1407d36db35SAvi Kivity     ctrl->asid = 1;
1417d36db35SAvi Kivity     save->cpl = 0;
1427d36db35SAvi Kivity     save->efer = rdmsr(MSR_EFER);
1437d36db35SAvi Kivity     save->cr4 = read_cr4();
1447d36db35SAvi Kivity     save->cr3 = read_cr3();
1457d36db35SAvi Kivity     save->cr0 = read_cr0();
1467d36db35SAvi Kivity     save->dr7 = read_dr7();
1477d36db35SAvi Kivity     save->dr6 = read_dr6();
1487d36db35SAvi Kivity     save->cr2 = read_cr2();
1497d36db35SAvi Kivity     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
1507d36db35SAvi Kivity     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1517d36db35SAvi Kivity     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
1521535bf0fSJoerg Roedel 
1531535bf0fSJoerg Roedel     if (npt_supported()) {
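        /*
         * nCR3 takes the physical address of the nested PML4; like the
         * table entries built in setup_svm(), this relies on alloc_page()
         * returning identity-mapped memory in this test environment, so
         * the virtual address is used directly.
         */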
1541535bf0fSJoerg Roedel         ctrl->nested_ctl = 1;
1551535bf0fSJoerg Roedel         ctrl->nested_cr3 = (u64)pml4e;
1561535bf0fSJoerg Roedel     }
1577d36db35SAvi Kivity }
1587d36db35SAvi Kivity 
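/*
 * Per-test callbacks, driven by test_run(): prepare() sets up the VMCB,
 * guest_func() runs as the guest (wrapped by test_thunk()), finished() is
 * called after every #VMEXIT and decides whether to re-enter the guest,
 * and succeeded() delivers the final PASS/FAIL verdict.
 */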
1597d36db35SAvi Kivity struct test {
1607d36db35SAvi Kivity     const char *name;
1617d36db35SAvi Kivity     bool (*supported)(void);
1627d36db35SAvi Kivity     void (*prepare)(struct test *test);
1637d36db35SAvi Kivity     void (*guest_func)(struct test *test);
1647d36db35SAvi Kivity     bool (*finished)(struct test *test);
1657d36db35SAvi Kivity     bool (*succeeded)(struct test *test);
1667d36db35SAvi Kivity     struct vmcb *vmcb;
1677d36db35SAvi Kivity     int exits;
1687d36db35SAvi Kivity     ulong scratch;
1697d36db35SAvi Kivity };
1707d36db35SAvi Kivity 
1717d36db35SAvi Kivity static void test_thunk(struct test *test)
1727d36db35SAvi Kivity {
1737d36db35SAvi Kivity     test->guest_func(test);
1747d36db35SAvi Kivity     asm volatile ("vmmcall" : : : "memory");
1757d36db35SAvi Kivity }
1767d36db35SAvi Kivity 
1777d36db35SAvi Kivity static bool test_run(struct test *test, struct vmcb *vmcb)
1787d36db35SAvi Kivity {
1797d36db35SAvi Kivity     u64 vmcb_phys = virt_to_phys(vmcb);
1807d36db35SAvi Kivity     u64 guest_stack[10000];
1817d36db35SAvi Kivity     bool success;
1827d36db35SAvi Kivity 
1837d36db35SAvi Kivity     test->vmcb = vmcb;
1847d36db35SAvi Kivity     test->prepare(test);
1857d36db35SAvi Kivity     vmcb->save.rip = (ulong)test_thunk;
1867d36db35SAvi Kivity     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
1877d36db35SAvi Kivity     do {
18821c23154SJoerg Roedel         tsc_start = rdtsc();
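        /*
         * CLGI/STGI bracket the world switch, and VMLOAD/VMSAVE handle
         * the state VMRUN does not switch (FS/GS/TR/LDTR and related
         * MSRs). RAX carries the VMCB physical address. VMRUN leaves the
         * other general purpose registers shared with the guest, which is
         * why RBP and RDI are saved around it - and why RDI still holds
         * the test pointer when test_thunk() starts running in the guest.
         */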
1897d36db35SAvi Kivity         asm volatile (
1907d36db35SAvi Kivity             "clgi \n\t"
1917d36db35SAvi Kivity             "vmload \n\t"
1927d36db35SAvi Kivity             "push %%rbp \n\t"
1937d36db35SAvi Kivity             "push %1 \n\t"
1947d36db35SAvi Kivity             "vmrun \n\t"
1957d36db35SAvi Kivity             "pop %1 \n\t"
1967d36db35SAvi Kivity             "pop %%rbp \n\t"
1977d36db35SAvi Kivity             "vmsave \n\t"
1987d36db35SAvi Kivity             "stgi"
1997d36db35SAvi Kivity             : : "a"(vmcb_phys), "D"(test)
2007d36db35SAvi Kivity             : "rbx", "rcx", "rdx", "rsi",
2017d36db35SAvi Kivity               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
2027d36db35SAvi Kivity               "memory");
20321c23154SJoerg Roedel         tsc_end = rdtsc();
2047d36db35SAvi Kivity         ++test->exits;
2057d36db35SAvi Kivity     } while (!test->finished(test));
2067d36db35SAvi Kivity 
2087d36db35SAvi Kivity     success = test->succeeded(test);
2097d36db35SAvi Kivity 
2107d36db35SAvi Kivity     printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
2117d36db35SAvi Kivity 
2127d36db35SAvi Kivity     return success;
2137d36db35SAvi Kivity }
2147d36db35SAvi Kivity 
215095274b4SPrasad Joshi static bool smp_supported(void)
216095274b4SPrasad Joshi {
217095274b4SPrasad Joshi 	return cpu_count() > 1;
218095274b4SPrasad Joshi }
219095274b4SPrasad Joshi 
2207d36db35SAvi Kivity static bool default_supported(void)
2217d36db35SAvi Kivity {
2227d36db35SAvi Kivity     return true;
2237d36db35SAvi Kivity }
2247d36db35SAvi Kivity 
2257d36db35SAvi Kivity static void default_prepare(struct test *test)
2267d36db35SAvi Kivity {
2277d36db35SAvi Kivity     vmcb_ident(test->vmcb);
2287d36db35SAvi Kivity     cli();
2297d36db35SAvi Kivity }
2307d36db35SAvi Kivity 
2317d36db35SAvi Kivity static bool default_finished(struct test *test)
2327d36db35SAvi Kivity {
2337d36db35SAvi Kivity     return true; /* one vmexit */
2347d36db35SAvi Kivity }
2357d36db35SAvi Kivity 
2367d36db35SAvi Kivity static void null_test(struct test *test)
2377d36db35SAvi Kivity {
2387d36db35SAvi Kivity }
2397d36db35SAvi Kivity 
2407d36db35SAvi Kivity static bool null_check(struct test *test)
2417d36db35SAvi Kivity {
2427d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
2437d36db35SAvi Kivity }
2447d36db35SAvi Kivity 
2457d36db35SAvi Kivity static void prepare_no_vmrun_int(struct test *test)
2467d36db35SAvi Kivity {
2477d36db35SAvi Kivity     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
2487d36db35SAvi Kivity }
2497d36db35SAvi Kivity 
2507d36db35SAvi Kivity static bool check_no_vmrun_int(struct test *test)
2517d36db35SAvi Kivity {
2527d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
2537d36db35SAvi Kivity }
2547d36db35SAvi Kivity 
2557d36db35SAvi Kivity static void test_vmrun(struct test *test)
2567d36db35SAvi Kivity {
2577d36db35SAvi Kivity     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
2587d36db35SAvi Kivity }
2597d36db35SAvi Kivity 
2607d36db35SAvi Kivity static bool check_vmrun(struct test *test)
2617d36db35SAvi Kivity {
2627d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
2637d36db35SAvi Kivity }
2647d36db35SAvi Kivity 
2657d36db35SAvi Kivity static void prepare_cr3_intercept(struct test *test)
2667d36db35SAvi Kivity {
2677d36db35SAvi Kivity     default_prepare(test);
2687d36db35SAvi Kivity     test->vmcb->control.intercept_cr_read |= 1 << 3;
2697d36db35SAvi Kivity }
2707d36db35SAvi Kivity 
2717d36db35SAvi Kivity static void test_cr3_intercept(struct test *test)
2727d36db35SAvi Kivity {
2737d36db35SAvi Kivity     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
2747d36db35SAvi Kivity }
2757d36db35SAvi Kivity 
2767d36db35SAvi Kivity static bool check_cr3_intercept(struct test *test)
2777d36db35SAvi Kivity {
2787d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
2797d36db35SAvi Kivity }
2807d36db35SAvi Kivity 
2817d36db35SAvi Kivity static bool check_cr3_nointercept(struct test *test)
2827d36db35SAvi Kivity {
2837d36db35SAvi Kivity     return null_check(test) && test->scratch == read_cr3();
2847d36db35SAvi Kivity }
2857d36db35SAvi Kivity 
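/*
 * Helper for the "cr3 read intercept emulate" test: once the guest sets
 * test->scratch to 1, CPU 1 rewrites the instruction at the mmio_insn
 * label with "mov %cr3, %rax; nop". The original instruction there
 * accesses 0xa0000 (legacy VGA space, typically MMIO and therefore
 * emulated); the emulation is expected to fetch the new bytes and still
 * trigger the CR3 read intercept, so the test passes on SVM_EXIT_READ_CR3.
 */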
2867d36db35SAvi Kivity static void corrupt_cr3_intercept_bypass(void *_test)
2877d36db35SAvi Kivity {
2887d36db35SAvi Kivity     struct test *test = _test;
2897d36db35SAvi Kivity     extern volatile u32 mmio_insn;
2907d36db35SAvi Kivity 
2917d36db35SAvi Kivity     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
2927d36db35SAvi Kivity         pause();
2937d36db35SAvi Kivity     pause();
2947d36db35SAvi Kivity     pause();
2957d36db35SAvi Kivity     pause();
2967d36db35SAvi Kivity     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
2977d36db35SAvi Kivity }
2987d36db35SAvi Kivity 
2997d36db35SAvi Kivity static void prepare_cr3_intercept_bypass(struct test *test)
3007d36db35SAvi Kivity {
3017d36db35SAvi Kivity     default_prepare(test);
3027d36db35SAvi Kivity     test->vmcb->control.intercept_cr_read |= 1 << 3;
3037d36db35SAvi Kivity     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
3047d36db35SAvi Kivity }
3057d36db35SAvi Kivity 
3067d36db35SAvi Kivity static void test_cr3_intercept_bypass(struct test *test)
3077d36db35SAvi Kivity {
3087d36db35SAvi Kivity     ulong a = 0xa0000;
3097d36db35SAvi Kivity 
3107d36db35SAvi Kivity     test->scratch = 1;
3117d36db35SAvi Kivity     while (test->scratch != 2)
3127d36db35SAvi Kivity         barrier();
3137d36db35SAvi Kivity 
3147d36db35SAvi Kivity     asm volatile ("mmio_insn: mov %0, (%0); nop"
3157d36db35SAvi Kivity                   : "+a"(a) : : "memory");
3167d36db35SAvi Kivity     test->scratch = a;
3177d36db35SAvi Kivity }
3187d36db35SAvi Kivity 
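/* CPUID Fn8000_000A EDX[3] is the NRIPS (next RIP save) feature bit. */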
3197d36db35SAvi Kivity static bool next_rip_supported(void)
3207d36db35SAvi Kivity {
3217d36db35SAvi Kivity     return (cpuid(SVM_CPUID_FUNC).d & 8);
3227d36db35SAvi Kivity }
3237d36db35SAvi Kivity 
3247d36db35SAvi Kivity static void prepare_next_rip(struct test *test)
3257d36db35SAvi Kivity {
3267d36db35SAvi Kivity     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
3277d36db35SAvi Kivity }
3287d36db35SAvi Kivity 
3297d36db35SAvi Kivity 
3307d36db35SAvi Kivity static void test_next_rip(struct test *test)
3317d36db35SAvi Kivity {
3327d36db35SAvi Kivity     asm volatile ("rdtsc\n\t"
3337d36db35SAvi Kivity                   ".globl exp_next_rip\n\t"
3347d36db35SAvi Kivity                   "exp_next_rip:\n\t" ::: "eax", "edx");
3357d36db35SAvi Kivity }
3367d36db35SAvi Kivity 
3377d36db35SAvi Kivity static bool check_next_rip(struct test *test)
3387d36db35SAvi Kivity {
3397d36db35SAvi Kivity     extern char exp_next_rip;
3407d36db35SAvi Kivity     unsigned long address = (unsigned long)&exp_next_rip;
3417d36db35SAvi Kivity 
3427d36db35SAvi Kivity     return address == test->vmcb->control.next_rip;
3437d36db35SAvi Kivity }
3447d36db35SAvi Kivity 
3457d36db35SAvi Kivity static void prepare_mode_switch(struct test *test)
3467d36db35SAvi Kivity {
3477d36db35SAvi Kivity     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
3487d36db35SAvi Kivity                                              |  (1ULL << UD_VECTOR)
3497d36db35SAvi Kivity                                              |  (1ULL << DF_VECTOR)
3507d36db35SAvi Kivity                                              |  (1ULL << PF_VECTOR);
3517d36db35SAvi Kivity     test->scratch = 0;
3527d36db35SAvi Kivity }
3537d36db35SAvi Kivity 
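/*
 * Guest code that walks down from 64-bit long mode to real mode and back:
 * long mode -> compatibility (32-bit) mode -> clear PG, LME, PAE ->
 * 16-bit protected mode -> clear PE -> real mode, then VMMCALL; afterwards
 * the same steps in reverse back to long mode, followed by a second
 * VMMCALL. mode_switch_finished() checks the saved CR0/CR4/EFER at each
 * exit.
 */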
3547d36db35SAvi Kivity static void test_mode_switch(struct test *test)
3557d36db35SAvi Kivity {
3567d36db35SAvi Kivity     asm volatile("	cli\n"
3577d36db35SAvi Kivity 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
3587d36db35SAvi Kivity 		 "1:\n"
3597d36db35SAvi Kivity 		 "	.long 2f\n"
360*b46094b4SPaolo Bonzini 		 "	.long " xstr(KERNEL_CS32) "\n"
3617d36db35SAvi Kivity 		 ".code32\n"
3627d36db35SAvi Kivity 		 "2:\n"
3637d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
3647d36db35SAvi Kivity 		 "	btcl  $31, %%eax\n" /* clear PG */
3657d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
3667d36db35SAvi Kivity 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
3677d36db35SAvi Kivity 		 "	rdmsr\n"
3687d36db35SAvi Kivity 		 "	btcl $8, %%eax\n" /* clear LME */
3697d36db35SAvi Kivity 		 "	wrmsr\n"
3707d36db35SAvi Kivity 		 "	movl %%cr4, %%eax\n"
3717d36db35SAvi Kivity 		 "	btcl $5, %%eax\n" /* clear PAE */
3727d36db35SAvi Kivity 		 "	movl %%eax, %%cr4\n"
373*b46094b4SPaolo Bonzini 		 "	movw %[ds16], %%ax\n"
3747d36db35SAvi Kivity 		 "	movw %%ax, %%ds\n"
375*b46094b4SPaolo Bonzini 		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
3767d36db35SAvi Kivity 		 ".code16\n"
3777d36db35SAvi Kivity 		 "3:\n"
3787d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
3797d36db35SAvi Kivity 		 "	btcl $0, %%eax\n" /* clear PE  */
3807d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
3817d36db35SAvi Kivity 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
3827d36db35SAvi Kivity 		 "4:\n"
3837d36db35SAvi Kivity 		 "	vmmcall\n"
3847d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
3857d36db35SAvi Kivity 		 "	btsl $0, %%eax\n" /* set PE  */
3867d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
387*b46094b4SPaolo Bonzini 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
3887d36db35SAvi Kivity 		 ".code32\n"
3897d36db35SAvi Kivity 		 "5:\n"
3907d36db35SAvi Kivity 		 "	movl %%cr4, %%eax\n"
3917d36db35SAvi Kivity 		 "	btsl $5, %%eax\n" /* set PAE */
3927d36db35SAvi Kivity 		 "	movl %%eax, %%cr4\n"
3937d36db35SAvi Kivity 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
3947d36db35SAvi Kivity 		 "	rdmsr\n"
3957d36db35SAvi Kivity 		 "	btsl $8, %%eax\n" /* set LME */
3967d36db35SAvi Kivity 		 "	wrmsr\n"
3977d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
3987d36db35SAvi Kivity 		 "	btsl  $31, %%eax\n" /* set PG */
3997d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
400*b46094b4SPaolo Bonzini 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
4017d36db35SAvi Kivity 		 ".code64\n\t"
4027d36db35SAvi Kivity 		 "6:\n"
4037d36db35SAvi Kivity 		 "	vmmcall\n"
404*b46094b4SPaolo Bonzini 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
405*b46094b4SPaolo Bonzini 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
406*b46094b4SPaolo Bonzini 		 : "rax", "rbx", "rcx", "rdx", "memory");
4077d36db35SAvi Kivity }
4087d36db35SAvi Kivity 
4097d36db35SAvi Kivity static bool mode_switch_finished(struct test *test)
4107d36db35SAvi Kivity {
4117d36db35SAvi Kivity     u64 cr0, cr4, efer;
4127d36db35SAvi Kivity 
4137d36db35SAvi Kivity     cr0  = test->vmcb->save.cr0;
4147d36db35SAvi Kivity     cr4  = test->vmcb->save.cr4;
4157d36db35SAvi Kivity     efer = test->vmcb->save.efer;
4167d36db35SAvi Kivity 
4177d36db35SAvi Kivity     /* Only expect VMMCALL intercepts */
4187d36db35SAvi Kivity     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
4197d36db35SAvi Kivity 	    return true;
4207d36db35SAvi Kivity 
4217d36db35SAvi Kivity     /* Jump over VMMCALL instruction */
4227d36db35SAvi Kivity     test->vmcb->save.rip += 3;
4237d36db35SAvi Kivity 
4247d36db35SAvi Kivity     /* Do sanity checks */
4257d36db35SAvi Kivity     switch (test->scratch) {
4267d36db35SAvi Kivity     case 0:
4277d36db35SAvi Kivity         /* Test should be in real mode now - check for this */
4287d36db35SAvi Kivity         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
4297d36db35SAvi Kivity             (cr4  & 0x00000020) || /* CR4.PAE */
4307d36db35SAvi Kivity             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
4317d36db35SAvi Kivity                 return true;
4327d36db35SAvi Kivity         break;
4337d36db35SAvi Kivity     case 1:
4347d36db35SAvi Kivity         /* Test should be back in long-mode now - check for this */
4357d36db35SAvi Kivity         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
4367d36db35SAvi Kivity             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
4377d36db35SAvi Kivity             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
4387d36db35SAvi Kivity 		    return true;
4397d36db35SAvi Kivity 	break;
4407d36db35SAvi Kivity     }
4417d36db35SAvi Kivity 
4427d36db35SAvi Kivity     /* one step forward */
4437d36db35SAvi Kivity     test->scratch += 1;
4447d36db35SAvi Kivity 
4457d36db35SAvi Kivity     return test->scratch == 2;
4467d36db35SAvi Kivity }
4477d36db35SAvi Kivity 
4487d36db35SAvi Kivity static bool check_mode_switch(struct test *test)
4497d36db35SAvi Kivity {
4507d36db35SAvi Kivity 	return test->scratch == 2;
4517d36db35SAvi Kivity }
4527d36db35SAvi Kivity 
4537d36db35SAvi Kivity static void prepare_asid_zero(struct test *test)
4547d36db35SAvi Kivity {
4557d36db35SAvi Kivity     test->vmcb->control.asid = 0;
4567d36db35SAvi Kivity }
4577d36db35SAvi Kivity 
4587d36db35SAvi Kivity static void test_asid_zero(struct test *test)
4597d36db35SAvi Kivity {
4607d36db35SAvi Kivity     asm volatile ("vmmcall\n\t");
4617d36db35SAvi Kivity }
4627d36db35SAvi Kivity 
4637d36db35SAvi Kivity static bool check_asid_zero(struct test *test)
4647d36db35SAvi Kivity {
4657d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
4667d36db35SAvi Kivity }
4677d36db35SAvi Kivity 
4684c8eb156SJoerg Roedel static void sel_cr0_bug_prepare(struct test *test)
4694c8eb156SJoerg Roedel {
4704c8eb156SJoerg Roedel     vmcb_ident(test->vmcb);
4714c8eb156SJoerg Roedel     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
4724c8eb156SJoerg Roedel }
4734c8eb156SJoerg Roedel 
4744c8eb156SJoerg Roedel static bool sel_cr0_bug_finished(struct test *test)
4754c8eb156SJoerg Roedel {
4764c8eb156SJoerg Roedel 	return true;
4774c8eb156SJoerg Roedel }
4784c8eb156SJoerg Roedel 
4794c8eb156SJoerg Roedel static void sel_cr0_bug_test(struct test *test)
4804c8eb156SJoerg Roedel {
4814c8eb156SJoerg Roedel     unsigned long cr0;
4824c8eb156SJoerg Roedel 
4834c8eb156SJoerg Roedel     /* read cr0, set CD, and write back */
4844c8eb156SJoerg Roedel     cr0  = read_cr0();
4854c8eb156SJoerg Roedel     cr0 |= (1UL << 30);
4864c8eb156SJoerg Roedel     write_cr0(cr0);
4874c8eb156SJoerg Roedel 
4884c8eb156SJoerg Roedel     /*
4894c8eb156SJoerg Roedel      * If we are here the test failed: we are not in guest mode anymore,
4904c8eb156SJoerg Roedel      * so there is no way to trigger the intercept from here.
4914c8eb156SJoerg Roedel      * Report the failure and exit.
4924c8eb156SJoerg Roedel      */
4934c8eb156SJoerg Roedel     printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
4944c8eb156SJoerg Roedel     exit(1);
4954c8eb156SJoerg Roedel }
4964c8eb156SJoerg Roedel 
4974c8eb156SJoerg Roedel static bool sel_cr0_bug_check(struct test *test)
4984c8eb156SJoerg Roedel {
4994c8eb156SJoerg Roedel     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
5004c8eb156SJoerg Roedel }
5014c8eb156SJoerg Roedel 
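/*
 * For the NPT tests below: on a nested page fault (SVM_EXIT_NPF),
 * exit_info_1 holds a page-fault-style error code (bit 0 present,
 * bit 1 write, bit 2 user, bit 3 reserved-bit, bit 4 instruction fetch)
 * and exit_info_2 holds the faulting guest-physical address.
 */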
5028594b943SJoerg Roedel static void npt_nx_prepare(struct test *test)
5038594b943SJoerg Roedel {
5048594b943SJoerg Roedel 
5058594b943SJoerg Roedel     u64 *pte;
5068594b943SJoerg Roedel 
5078594b943SJoerg Roedel     vmcb_ident(test->vmcb);
508726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)null_test);
5098594b943SJoerg Roedel 
5108594b943SJoerg Roedel     *pte |= (1ULL << 63);
5118594b943SJoerg Roedel }
5128594b943SJoerg Roedel 
5138594b943SJoerg Roedel static bool npt_nx_check(struct test *test)
5148594b943SJoerg Roedel {
515726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)null_test);
5168594b943SJoerg Roedel 
5178594b943SJoerg Roedel     *pte &= ~(1ULL << 63);
5188594b943SJoerg Roedel 
5198594b943SJoerg Roedel     test->vmcb->save.efer |= (1 << 11);
5208594b943SJoerg Roedel 
5218594b943SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
5228594b943SJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x15);
5238594b943SJoerg Roedel }
5248594b943SJoerg Roedel 
525ea975120SJoerg Roedel static void npt_us_prepare(struct test *test)
526ea975120SJoerg Roedel {
527ea975120SJoerg Roedel     u64 *pte;
528ea975120SJoerg Roedel 
529ea975120SJoerg Roedel     vmcb_ident(test->vmcb);
530726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)scratch_page);
531ea975120SJoerg Roedel 
532ea975120SJoerg Roedel     *pte &= ~(1ULL << 2);
533ea975120SJoerg Roedel }
534ea975120SJoerg Roedel 
535ea975120SJoerg Roedel static void npt_us_test(struct test *test)
536ea975120SJoerg Roedel {
537c0a4e715SPaolo Bonzini     (void) *(volatile u64 *)scratch_page;
538ea975120SJoerg Roedel }
539ea975120SJoerg Roedel 
540ea975120SJoerg Roedel static bool npt_us_check(struct test *test)
541ea975120SJoerg Roedel {
542726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)scratch_page);
543ea975120SJoerg Roedel 
544ea975120SJoerg Roedel     *pte |= (1ULL << 2);
545ea975120SJoerg Roedel 
546ea975120SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
547ea975120SJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x05);
548ea975120SJoerg Roedel }
549ea975120SJoerg Roedel 
550dd6ef43cSJoerg Roedel static void npt_rsvd_prepare(struct test *test)
551dd6ef43cSJoerg Roedel {
552dd6ef43cSJoerg Roedel 
553dd6ef43cSJoerg Roedel     vmcb_ident(test->vmcb);
554dd6ef43cSJoerg Roedel 
555dd6ef43cSJoerg Roedel     pdpe[0] |= (1ULL << 8);
556dd6ef43cSJoerg Roedel }
557dd6ef43cSJoerg Roedel 
558dd6ef43cSJoerg Roedel static bool npt_rsvd_check(struct test *test)
559dd6ef43cSJoerg Roedel {
560dd6ef43cSJoerg Roedel     pdpe[0] &= ~(1ULL << 8);
561dd6ef43cSJoerg Roedel 
562dd6ef43cSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
563dd6ef43cSJoerg Roedel             && (test->vmcb->control.exit_info_1 == 0x0f);
564dd6ef43cSJoerg Roedel }
565dd6ef43cSJoerg Roedel 
5665ebf82edSJoerg Roedel static void npt_rw_prepare(struct test *test)
5675ebf82edSJoerg Roedel {
5685ebf82edSJoerg Roedel 
5695ebf82edSJoerg Roedel     u64 *pte;
5705ebf82edSJoerg Roedel 
5715ebf82edSJoerg Roedel     vmcb_ident(test->vmcb);
572726a1dd7SPaolo Bonzini     pte = npt_get_pte(0x80000);
5735ebf82edSJoerg Roedel 
5745ebf82edSJoerg Roedel     *pte &= ~(1ULL << 1);
5755ebf82edSJoerg Roedel }
5765ebf82edSJoerg Roedel 
5775ebf82edSJoerg Roedel static void npt_rw_test(struct test *test)
5785ebf82edSJoerg Roedel {
5795ebf82edSJoerg Roedel     u64 *data = (void*)(0x80000);
5805ebf82edSJoerg Roedel 
5815ebf82edSJoerg Roedel     *data = 0;
5825ebf82edSJoerg Roedel }
5835ebf82edSJoerg Roedel 
5845ebf82edSJoerg Roedel static bool npt_rw_check(struct test *test)
5855ebf82edSJoerg Roedel {
586726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(0x80000);
5875ebf82edSJoerg Roedel 
5885ebf82edSJoerg Roedel     *pte |= (1ULL << 1);
5895ebf82edSJoerg Roedel 
5905ebf82edSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
5915ebf82edSJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x07);
5925ebf82edSJoerg Roedel }
5935ebf82edSJoerg Roedel 
594590040ffSJoerg Roedel static void npt_pfwalk_prepare(struct test *test)
595590040ffSJoerg Roedel {
596590040ffSJoerg Roedel 
597590040ffSJoerg Roedel     u64 *pte;
598590040ffSJoerg Roedel 
599590040ffSJoerg Roedel     vmcb_ident(test->vmcb);
600726a1dd7SPaolo Bonzini     pte = npt_get_pte(read_cr3());
601590040ffSJoerg Roedel 
602590040ffSJoerg Roedel     *pte &= ~(1ULL << 1);
603590040ffSJoerg Roedel }
604590040ffSJoerg Roedel 
605590040ffSJoerg Roedel static bool npt_pfwalk_check(struct test *test)
606590040ffSJoerg Roedel {
607726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(read_cr3());
608590040ffSJoerg Roedel 
609590040ffSJoerg Roedel     *pte |= (1ULL << 1);
610590040ffSJoerg Roedel 
611590040ffSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
612590040ffSJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x7)
613590040ffSJoerg Roedel 	   && (test->vmcb->control.exit_info_2 == read_cr3());
614590040ffSJoerg Roedel }
615590040ffSJoerg Roedel 
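/*
 * latency_run_exit measures world-switch round trips: tsc_start is taken
 * in the host (test_run()) just before VMRUN and tsc_end in the guest at
 * the top of the latency_test() loop (VMRUN latency); tsc_start is then
 * taken in the guest just before VMMCALL and tsc_end in latency_finished()
 * back in the host (VMEXIT latency).
 */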
61621c23154SJoerg Roedel static void latency_prepare(struct test *test)
61721c23154SJoerg Roedel {
61821c23154SJoerg Roedel     default_prepare(test);
61921c23154SJoerg Roedel     runs = LATENCY_RUNS;
62021c23154SJoerg Roedel     latvmrun_min = latvmexit_min = -1ULL;
62121c23154SJoerg Roedel     latvmrun_max = latvmexit_max = 0;
62221c23154SJoerg Roedel     vmrun_sum = vmexit_sum = 0;
62321c23154SJoerg Roedel }
62421c23154SJoerg Roedel 
62521c23154SJoerg Roedel static void latency_test(struct test *test)
62621c23154SJoerg Roedel {
62721c23154SJoerg Roedel     u64 cycles;
62821c23154SJoerg Roedel 
62921c23154SJoerg Roedel start:
63021c23154SJoerg Roedel     tsc_end = rdtsc();
63121c23154SJoerg Roedel 
63221c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
63321c23154SJoerg Roedel 
63421c23154SJoerg Roedel     if (cycles > latvmrun_max)
63521c23154SJoerg Roedel         latvmrun_max = cycles;
63621c23154SJoerg Roedel 
63721c23154SJoerg Roedel     if (cycles < latvmrun_min)
63821c23154SJoerg Roedel         latvmrun_min = cycles;
63921c23154SJoerg Roedel 
64021c23154SJoerg Roedel     vmrun_sum += cycles;
64121c23154SJoerg Roedel 
64221c23154SJoerg Roedel     tsc_start = rdtsc();
64321c23154SJoerg Roedel 
64421c23154SJoerg Roedel     asm volatile ("vmmcall" : : : "memory");
64521c23154SJoerg Roedel     goto start;
64621c23154SJoerg Roedel }
64721c23154SJoerg Roedel 
64821c23154SJoerg Roedel static bool latency_finished(struct test *test)
64921c23154SJoerg Roedel {
65021c23154SJoerg Roedel     u64 cycles;
65121c23154SJoerg Roedel 
65221c23154SJoerg Roedel     tsc_end = rdtsc();
65321c23154SJoerg Roedel 
65421c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
65521c23154SJoerg Roedel 
65621c23154SJoerg Roedel     if (cycles > latvmexit_max)
65721c23154SJoerg Roedel         latvmexit_max = cycles;
65821c23154SJoerg Roedel 
65921c23154SJoerg Roedel     if (cycles < latvmexit_min)
66021c23154SJoerg Roedel         latvmexit_min = cycles;
66121c23154SJoerg Roedel 
66221c23154SJoerg Roedel     vmexit_sum += cycles;
66321c23154SJoerg Roedel 
66421c23154SJoerg Roedel     test->vmcb->save.rip += 3;
66521c23154SJoerg Roedel 
66621c23154SJoerg Roedel     runs -= 1;
66721c23154SJoerg Roedel 
66821c23154SJoerg Roedel     return runs == 0;
66921c23154SJoerg Roedel }
67021c23154SJoerg Roedel 
67121c23154SJoerg Roedel static bool latency_check(struct test *test)
67221c23154SJoerg Roedel {
67321c23154SJoerg Roedel     printf("    Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max,
67421c23154SJoerg Roedel             latvmrun_min, vmrun_sum / LATENCY_RUNS);
67521c23154SJoerg Roedel     printf("    Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max,
67621c23154SJoerg Roedel             latvmexit_min, vmexit_sum / LATENCY_RUNS);
67721c23154SJoerg Roedel     return true;
67821c23154SJoerg Roedel }
67921c23154SJoerg Roedel 
680ef101219SRoedel, Joerg static void lat_svm_insn_prepare(struct test *test)
681ef101219SRoedel, Joerg {
682ef101219SRoedel, Joerg     default_prepare(test);
683ef101219SRoedel, Joerg     runs = LATENCY_RUNS;
684ef101219SRoedel, Joerg     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
685ef101219SRoedel, Joerg     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
686ef101219SRoedel, Joerg     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
687ef101219SRoedel, Joerg }
688ef101219SRoedel, Joerg 
689ef101219SRoedel, Joerg static bool lat_svm_insn_finished(struct test *test)
690ef101219SRoedel, Joerg {
691ef101219SRoedel, Joerg     u64 vmcb_phys = virt_to_phys(test->vmcb);
692ef101219SRoedel, Joerg     u64 cycles;
693ef101219SRoedel, Joerg 
694ef101219SRoedel, Joerg     for ( ; runs != 0; runs--) {
695ef101219SRoedel, Joerg         tsc_start = rdtsc();
696ef101219SRoedel, Joerg         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
697ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
698ef101219SRoedel, Joerg         if (cycles > latvmload_max)
699ef101219SRoedel, Joerg             latvmload_max = cycles;
700ef101219SRoedel, Joerg         if (cycles < latvmload_min)
701ef101219SRoedel, Joerg             latvmload_min = cycles;
702ef101219SRoedel, Joerg         vmload_sum += cycles;
703ef101219SRoedel, Joerg 
704ef101219SRoedel, Joerg         tsc_start = rdtsc();
705ef101219SRoedel, Joerg         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
706ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
707ef101219SRoedel, Joerg         if (cycles > latvmsave_max)
708ef101219SRoedel, Joerg             latvmsave_max = cycles;
709ef101219SRoedel, Joerg         if (cycles < latvmsave_min)
710ef101219SRoedel, Joerg             latvmsave_min = cycles;
711ef101219SRoedel, Joerg         vmsave_sum += cycles;
712ef101219SRoedel, Joerg 
713ef101219SRoedel, Joerg         tsc_start = rdtsc();
714ef101219SRoedel, Joerg         asm volatile("stgi\n\t");
715ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
716ef101219SRoedel, Joerg         if (cycles > latstgi_max)
717ef101219SRoedel, Joerg             latstgi_max = cycles;
718ef101219SRoedel, Joerg         if (cycles < latstgi_min)
719ef101219SRoedel, Joerg             latstgi_min = cycles;
720ef101219SRoedel, Joerg         stgi_sum += cycles;
721ef101219SRoedel, Joerg 
722ef101219SRoedel, Joerg         tsc_start = rdtsc();
723ef101219SRoedel, Joerg         asm volatile("clgi\n\t");
724ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
725ef101219SRoedel, Joerg         if (cycles > latclgi_max)
726ef101219SRoedel, Joerg             latclgi_max = cycles;
727ef101219SRoedel, Joerg         if (cycles < latclgi_min)
728ef101219SRoedel, Joerg             latclgi_min = cycles;
729ef101219SRoedel, Joerg         clgi_sum += cycles;
730ef101219SRoedel, Joerg     }
731ef101219SRoedel, Joerg 
732ef101219SRoedel, Joerg     return true;
733ef101219SRoedel, Joerg }
734ef101219SRoedel, Joerg 
735ef101219SRoedel, Joerg static bool lat_svm_insn_check(struct test *test)
736ef101219SRoedel, Joerg {
737ef101219SRoedel, Joerg     printf("    Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max,
738ef101219SRoedel, Joerg             latvmload_min, vmload_sum / LATENCY_RUNS);
739ef101219SRoedel, Joerg     printf("    Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max,
740ef101219SRoedel, Joerg             latvmsave_min, vmsave_sum / LATENCY_RUNS);
741ef101219SRoedel, Joerg     printf("    Latency STGI:   max: %d min: %d avg: %d\n", latstgi_max,
742ef101219SRoedel, Joerg             latstgi_min, stgi_sum / LATENCY_RUNS);
743ef101219SRoedel, Joerg     printf("    Latency CLGI:   max: %d min: %d avg: %d\n", latclgi_max,
744ef101219SRoedel, Joerg             latclgi_min, clgi_sum / LATENCY_RUNS);
745ef101219SRoedel, Joerg     return true;
746ef101219SRoedel, Joerg }
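
/*
 * The test table; each entry lists, in struct test order: name,
 * supported(), prepare(), guest_func(), finished(), succeeded().
 */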
7477d36db35SAvi Kivity static struct test tests[] = {
7487d36db35SAvi Kivity     { "null", default_supported, default_prepare, null_test,
7497d36db35SAvi Kivity       default_finished, null_check },
7507d36db35SAvi Kivity     { "vmrun", default_supported, default_prepare, test_vmrun,
7517d36db35SAvi Kivity        default_finished, check_vmrun },
7527d36db35SAvi Kivity     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
7537d36db35SAvi Kivity       null_test, default_finished, check_no_vmrun_int },
7547d36db35SAvi Kivity     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
7557d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_intercept },
7567d36db35SAvi Kivity     { "cr3 read nointercept", default_supported, default_prepare,
7577d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_nointercept },
758095274b4SPrasad Joshi     { "cr3 read intercept emulate", smp_supported,
7597d36db35SAvi Kivity       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
7607d36db35SAvi Kivity       default_finished, check_cr3_intercept },
7617d36db35SAvi Kivity     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
7627d36db35SAvi Kivity       default_finished, check_next_rip },
7637d36db35SAvi Kivity     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
7647d36db35SAvi Kivity        mode_switch_finished, check_mode_switch },
7657d36db35SAvi Kivity     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
7667d36db35SAvi Kivity        default_finished, check_asid_zero },
7674c8eb156SJoerg Roedel     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
7684c8eb156SJoerg Roedel        sel_cr0_bug_finished, sel_cr0_bug_check },
7698594b943SJoerg Roedel     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
770ea975120SJoerg Roedel 	    default_finished, npt_nx_check },
771ea975120SJoerg Roedel     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
772ea975120SJoerg Roedel 	    default_finished, npt_us_check },
773dd6ef43cSJoerg Roedel     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
774dd6ef43cSJoerg Roedel 	    default_finished, npt_rsvd_check },
7755ebf82edSJoerg Roedel     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
7765ebf82edSJoerg Roedel 	    default_finished, npt_rw_check },
777590040ffSJoerg Roedel     { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
778590040ffSJoerg Roedel 	    default_finished, npt_pfwalk_check },
77921c23154SJoerg Roedel     { "latency_run_exit", default_supported, latency_prepare, latency_test,
78021c23154SJoerg Roedel       latency_finished, latency_check },
781ef101219SRoedel, Joerg     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
782ef101219SRoedel, Joerg       lat_svm_insn_finished, lat_svm_insn_check },
7837d36db35SAvi Kivity };
7847d36db35SAvi Kivity 
7857d36db35SAvi Kivity int main(int ac, char **av)
7867d36db35SAvi Kivity {
7877d36db35SAvi Kivity     int i, nr, passed, done;
7887d36db35SAvi Kivity     struct vmcb *vmcb;
7897d36db35SAvi Kivity 
7907d36db35SAvi Kivity     setup_vm();
7917d36db35SAvi Kivity     smp_init();
7927d36db35SAvi Kivity 
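    /* CPUID Fn8000_0001 ECX[2] is the SVM feature bit. */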
7937d36db35SAvi Kivity     if (!(cpuid(0x80000001).c & 4)) {
7947d36db35SAvi Kivity         printf("SVM not available\n");
7957d36db35SAvi Kivity         return 0;
7967d36db35SAvi Kivity     }
7977d36db35SAvi Kivity 
7987d36db35SAvi Kivity     setup_svm();
7997d36db35SAvi Kivity 
8007d36db35SAvi Kivity     vmcb = alloc_page();
8017d36db35SAvi Kivity 
8027d36db35SAvi Kivity     nr = ARRAY_SIZE(tests);
8037d36db35SAvi Kivity     passed = done = 0;
8047d36db35SAvi Kivity     for (i = 0; i < nr; ++i) {
8057d36db35SAvi Kivity         if (!tests[i].supported())
8067d36db35SAvi Kivity             continue;
8077d36db35SAvi Kivity         done += 1;
8087d36db35SAvi Kivity         passed += test_run(&tests[i], vmcb);
8097d36db35SAvi Kivity     }
8107d36db35SAvi Kivity 
8117d36db35SAvi Kivity     printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
8127d36db35SAvi Kivity     return passed == done ? 0 : 1;
8137d36db35SAvi Kivity }
814