xref: /kvm-unit-tests/x86/svm.c (revision a43baea076d05ae25b4e6bef9f1032d8f1f30ee0)
17d36db35SAvi Kivity #include "svm.h"
27d36db35SAvi Kivity #include "libcflat.h"
37d36db35SAvi Kivity #include "processor.h"
4b46094b4SPaolo Bonzini #include "desc.h"
57d36db35SAvi Kivity #include "msr.h"
67d36db35SAvi Kivity #include "vm.h"
77d36db35SAvi Kivity #include "smp.h"
87d36db35SAvi Kivity #include "types.h"
97d36db35SAvi Kivity 
101535bf0fSJoerg Roedel /* for the nested page table */
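/*
 * Layout: one PML4 entry -> one PDP page with 4 entries -> 4 PD pages ->
 * 2048 PT pages, i.e. 4 * 512 * 512 * 4 KiB = 4 GiB of guest-physical
 * memory mapped with 4k pages (built in setup_svm()).
 */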
111535bf0fSJoerg Roedel u64 *pml4e;
121535bf0fSJoerg Roedel u64 *pdpe;
131535bf0fSJoerg Roedel u64 *pde[4];
141535bf0fSJoerg Roedel u64 *pte[2048];
15c0a4e715SPaolo Bonzini void *scratch_page;
161535bf0fSJoerg Roedel 
1721c23154SJoerg Roedel #define LATENCY_RUNS 1000000
1821c23154SJoerg Roedel 
1921c23154SJoerg Roedel u64 tsc_start;
2021c23154SJoerg Roedel u64 tsc_end;
2121c23154SJoerg Roedel 
2221c23154SJoerg Roedel u64 vmrun_sum, vmexit_sum;
23ef101219SRoedel, Joerg u64 vmsave_sum, vmload_sum;
24ef101219SRoedel, Joerg u64 stgi_sum, clgi_sum;
2521c23154SJoerg Roedel u64 latvmrun_max;
2621c23154SJoerg Roedel u64 latvmrun_min;
2721c23154SJoerg Roedel u64 latvmexit_max;
2821c23154SJoerg Roedel u64 latvmexit_min;
29ef101219SRoedel, Joerg u64 latvmload_max;
30ef101219SRoedel, Joerg u64 latvmload_min;
31ef101219SRoedel, Joerg u64 latvmsave_max;
32ef101219SRoedel, Joerg u64 latvmsave_min;
33ef101219SRoedel, Joerg u64 latstgi_max;
34ef101219SRoedel, Joerg u64 latstgi_min;
35ef101219SRoedel, Joerg u64 latclgi_max;
36ef101219SRoedel, Joerg u64 latclgi_min;
3721c23154SJoerg Roedel u64 runs;
3821c23154SJoerg Roedel 
391535bf0fSJoerg Roedel static bool npt_supported(void)
401535bf0fSJoerg Roedel {
411535bf0fSJoerg Roedel    return cpuid(0x8000000A).d & 1;
421535bf0fSJoerg Roedel }
431535bf0fSJoerg Roedel 
447d36db35SAvi Kivity static void setup_svm(void)
457d36db35SAvi Kivity {
467d36db35SAvi Kivity     void *hsave = alloc_page();
471535bf0fSJoerg Roedel     u64 *page, address;
481535bf0fSJoerg Roedel     int i,j;
497d36db35SAvi Kivity 
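    /*
     * Enable SVM: point MSR_VM_HSAVE_PA at a freshly allocated host save
     * area and set EFER.SVME; EFER.NX is set as well (the NPT NX test
     * relies on no-execute support).
     */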
507d36db35SAvi Kivity     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
517d36db35SAvi Kivity     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
528594b943SJoerg Roedel     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
531535bf0fSJoerg Roedel 
54ea975120SJoerg Roedel     scratch_page = alloc_page();
55ea975120SJoerg Roedel 
561535bf0fSJoerg Roedel     if (!npt_supported())
571535bf0fSJoerg Roedel         return;
581535bf0fSJoerg Roedel 
591535bf0fSJoerg Roedel     printf("NPT detected - running all tests with NPT enabled\n");
601535bf0fSJoerg Roedel 
611535bf0fSJoerg Roedel     /*
621535bf0fSJoerg Roedel      * Nested paging supported - Build a nested page table
631535bf0fSJoerg Roedel      * Build the page-table bottom-up and map everything with 4k pages
641535bf0fSJoerg Roedel      * to get enough granularity for the NPT unit-tests.
651535bf0fSJoerg Roedel      */
661535bf0fSJoerg Roedel 
671535bf0fSJoerg Roedel     address = 0;
681535bf0fSJoerg Roedel 
691535bf0fSJoerg Roedel     /* PTE level */
701535bf0fSJoerg Roedel     for (i = 0; i < 2048; ++i) {
711535bf0fSJoerg Roedel         page = alloc_page();
721535bf0fSJoerg Roedel 
731535bf0fSJoerg Roedel         for (j = 0; j < 512; ++j, address += 4096)
741535bf0fSJoerg Roedel             page[j] = address | 0x067ULL;
751535bf0fSJoerg Roedel 
761535bf0fSJoerg Roedel         pte[i] = page;
771535bf0fSJoerg Roedel     }
781535bf0fSJoerg Roedel 
791535bf0fSJoerg Roedel     /* PDE level */
801535bf0fSJoerg Roedel     for (i = 0; i < 4; ++i) {
811535bf0fSJoerg Roedel         page = alloc_page();
821535bf0fSJoerg Roedel 
831535bf0fSJoerg Roedel         for (j = 0; j < 512; ++j)
841535bf0fSJoerg Roedel             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
851535bf0fSJoerg Roedel 
861535bf0fSJoerg Roedel         pde[i] = page;
871535bf0fSJoerg Roedel     }
881535bf0fSJoerg Roedel 
891535bf0fSJoerg Roedel     /* PDPe level */
901535bf0fSJoerg Roedel     pdpe   = alloc_page();
911535bf0fSJoerg Roedel     for (i = 0; i < 4; ++i)
921535bf0fSJoerg Roedel        pdpe[i] = ((u64)(pde[i])) | 0x27;
931535bf0fSJoerg Roedel 
941535bf0fSJoerg Roedel     /* PML4e level */
951535bf0fSJoerg Roedel     pml4e    = alloc_page();
961535bf0fSJoerg Roedel     pml4e[0] = ((u64)pdpe) | 0x27;
977d36db35SAvi Kivity }
987d36db35SAvi Kivity 
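/*
 * Return a pointer to the NPT PTE that maps the given guest-physical
 * address: bits 21 and up select one of the 2048 PTE pages allocated in
 * setup_svm(), bits 12-20 select the entry within that page.
 */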
99726a1dd7SPaolo Bonzini static u64 *npt_get_pte(u64 address)
1008594b943SJoerg Roedel {
1018594b943SJoerg Roedel     int i1, i2;
1028594b943SJoerg Roedel 
1038594b943SJoerg Roedel     address >>= 12;
1048594b943SJoerg Roedel     i1 = (address >> 9) & 0x7ff;
1058594b943SJoerg Roedel     i2 = address & 0x1ff;
1068594b943SJoerg Roedel 
1078594b943SJoerg Roedel     return &pte[i1][i2];
1088594b943SJoerg Roedel }
1098594b943SJoerg Roedel 
1107d36db35SAvi Kivity static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
1117d36db35SAvi Kivity                          u64 base, u32 limit, u32 attr)
1127d36db35SAvi Kivity {
1137d36db35SAvi Kivity     seg->selector = selector;
1147d36db35SAvi Kivity     seg->attrib = attr;
1157d36db35SAvi Kivity     seg->limit = limit;
1167d36db35SAvi Kivity     seg->base = base;
1177d36db35SAvi Kivity }
1187d36db35SAvi Kivity 
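/*
 * Initialize the VMCB so the guest is an identity copy of the current
 * host state (segments, control registers, EFER, PAT, debug registers),
 * with only VMRUN and VMMCALL intercepted; nested paging is enabled
 * whenever the CPU supports it.
 */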
1197d36db35SAvi Kivity static void vmcb_ident(struct vmcb *vmcb)
1207d36db35SAvi Kivity {
1217d36db35SAvi Kivity     u64 vmcb_phys = virt_to_phys(vmcb);
1227d36db35SAvi Kivity     struct vmcb_save_area *save = &vmcb->save;
1237d36db35SAvi Kivity     struct vmcb_control_area *ctrl = &vmcb->control;
1247d36db35SAvi Kivity     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
1257d36db35SAvi Kivity         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
1267d36db35SAvi Kivity     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
1277d36db35SAvi Kivity         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
1287d36db35SAvi Kivity     struct descriptor_table_ptr desc_table_ptr;
1297d36db35SAvi Kivity 
1307d36db35SAvi Kivity     memset(vmcb, 0, sizeof(*vmcb));
1317d36db35SAvi Kivity     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
1327d36db35SAvi Kivity     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
1337d36db35SAvi Kivity     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
1347d36db35SAvi Kivity     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
1357d36db35SAvi Kivity     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
1367d36db35SAvi Kivity     sgdt(&desc_table_ptr);
1377d36db35SAvi Kivity     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
1387d36db35SAvi Kivity     sidt(&desc_table_ptr);
1397d36db35SAvi Kivity     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
1407d36db35SAvi Kivity     ctrl->asid = 1;
1417d36db35SAvi Kivity     save->cpl = 0;
1427d36db35SAvi Kivity     save->efer = rdmsr(MSR_EFER);
1437d36db35SAvi Kivity     save->cr4 = read_cr4();
1447d36db35SAvi Kivity     save->cr3 = read_cr3();
1457d36db35SAvi Kivity     save->cr0 = read_cr0();
1467d36db35SAvi Kivity     save->dr7 = read_dr7();
1477d36db35SAvi Kivity     save->dr6 = read_dr6();
1487d36db35SAvi Kivity     save->cr2 = read_cr2();
1497d36db35SAvi Kivity     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
1507d36db35SAvi Kivity     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1517d36db35SAvi Kivity     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
1521535bf0fSJoerg Roedel 
1531535bf0fSJoerg Roedel     if (npt_supported()) {
1541535bf0fSJoerg Roedel         ctrl->nested_ctl = 1;
1551535bf0fSJoerg Roedel         ctrl->nested_cr3 = (u64)pml4e;
1561535bf0fSJoerg Roedel     }
1577d36db35SAvi Kivity }
1587d36db35SAvi Kivity 
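/*
 * Per-test callbacks: prepare() sets up the VMCB, guest_func() runs
 * inside the guest, finished() is called after every #VMEXIT and returns
 * true once the test is done, and succeeded() evaluates the result.
 */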
1597d36db35SAvi Kivity struct test {
1607d36db35SAvi Kivity     const char *name;
1617d36db35SAvi Kivity     bool (*supported)(void);
1627d36db35SAvi Kivity     void (*prepare)(struct test *test);
1637d36db35SAvi Kivity     void (*guest_func)(struct test *test);
1647d36db35SAvi Kivity     bool (*finished)(struct test *test);
1657d36db35SAvi Kivity     bool (*succeeded)(struct test *test);
1667d36db35SAvi Kivity     struct vmcb *vmcb;
1677d36db35SAvi Kivity     int exits;
1687d36db35SAvi Kivity     ulong scratch;
1697d36db35SAvi Kivity };
1707d36db35SAvi Kivity 
1717d36db35SAvi Kivity static void test_thunk(struct test *test)
1727d36db35SAvi Kivity {
1737d36db35SAvi Kivity     test->guest_func(test);
1747d36db35SAvi Kivity     asm volatile ("vmmcall" : : : "memory");
1757d36db35SAvi Kivity }
1767d36db35SAvi Kivity 
177*a43baea0SPaolo Bonzini struct regs {
178*a43baea0SPaolo Bonzini         u64 rax;
179*a43baea0SPaolo Bonzini         u64 rbx;
180*a43baea0SPaolo Bonzini         u64 rcx;
181*a43baea0SPaolo Bonzini         u64 rdx;
182*a43baea0SPaolo Bonzini         u64 cr2;
183*a43baea0SPaolo Bonzini         u64 rbp;
184*a43baea0SPaolo Bonzini         u64 rsi;
185*a43baea0SPaolo Bonzini         u64 rdi;
186*a43baea0SPaolo Bonzini         u64 r8;
187*a43baea0SPaolo Bonzini         u64 r9;
188*a43baea0SPaolo Bonzini         u64 r10;
189*a43baea0SPaolo Bonzini         u64 r11;
190*a43baea0SPaolo Bonzini         u64 r12;
191*a43baea0SPaolo Bonzini         u64 r13;
192*a43baea0SPaolo Bonzini         u64 r14;
193*a43baea0SPaolo Bonzini         u64 r15;
194*a43baea0SPaolo Bonzini         u64 rflags;
195*a43baea0SPaolo Bonzini };
196*a43baea0SPaolo Bonzini 
197*a43baea0SPaolo Bonzini struct regs regs;
198*a43baea0SPaolo Bonzini 
199*a43baea0SPaolo Bonzini // rax handled specially below
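// SAVE_GPR_C exchanges the general purpose registers with the matching
// fields of 'regs' (offsets follow the struct layout above), so the same
// sequence both saves the guest values and restores the host values;
// LOAD_GPR_C is therefore the identical macro.  rax and rflags are
// transferred through the VMCB instead, see test_run().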
200*a43baea0SPaolo Bonzini 
201*a43baea0SPaolo Bonzini #define SAVE_GPR_C                              \
202*a43baea0SPaolo Bonzini         "xchg %%rbx, regs+0x8\n\t"              \
203*a43baea0SPaolo Bonzini         "xchg %%rcx, regs+0x10\n\t"             \
204*a43baea0SPaolo Bonzini         "xchg %%rdx, regs+0x18\n\t"             \
205*a43baea0SPaolo Bonzini         "xchg %%rbp, regs+0x28\n\t"             \
206*a43baea0SPaolo Bonzini         "xchg %%rsi, regs+0x30\n\t"             \
207*a43baea0SPaolo Bonzini         "xchg %%rdi, regs+0x38\n\t"             \
208*a43baea0SPaolo Bonzini         "xchg %%r8, regs+0x40\n\t"              \
209*a43baea0SPaolo Bonzini         "xchg %%r9, regs+0x48\n\t"              \
210*a43baea0SPaolo Bonzini         "xchg %%r10, regs+0x50\n\t"             \
211*a43baea0SPaolo Bonzini         "xchg %%r11, regs+0x58\n\t"             \
212*a43baea0SPaolo Bonzini         "xchg %%r12, regs+0x60\n\t"             \
213*a43baea0SPaolo Bonzini         "xchg %%r13, regs+0x68\n\t"             \
214*a43baea0SPaolo Bonzini         "xchg %%r14, regs+0x70\n\t"             \
215*a43baea0SPaolo Bonzini         "xchg %%r15, regs+0x78\n\t"
216*a43baea0SPaolo Bonzini 
217*a43baea0SPaolo Bonzini #define LOAD_GPR_C      SAVE_GPR_C
218*a43baea0SPaolo Bonzini 
2197d36db35SAvi Kivity static bool test_run(struct test *test, struct vmcb *vmcb)
2207d36db35SAvi Kivity {
2217d36db35SAvi Kivity     u64 vmcb_phys = virt_to_phys(vmcb);
2227d36db35SAvi Kivity     u64 guest_stack[10000];
2237d36db35SAvi Kivity     bool success;
2247d36db35SAvi Kivity 
2257d36db35SAvi Kivity     test->vmcb = vmcb;
2267d36db35SAvi Kivity     test->prepare(test);
2277d36db35SAvi Kivity     vmcb->save.rip = (ulong)test_thunk;
2287d36db35SAvi Kivity     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
229*a43baea0SPaolo Bonzini     regs.rdi = (ulong)test;
2307d36db35SAvi Kivity     do {
23121c23154SJoerg Roedel         tsc_start = rdtsc();
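        /*
         * Run the guest with GIF cleared: VMLOAD the extra guest state,
         * copy regs.rflags and regs.rax into the VMCB, swap in the guest
         * GPRs, VMRUN, then swap everything back and VMSAVE before
         * setting GIF again with STGI.
         */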
2327d36db35SAvi Kivity         asm volatile (
2337d36db35SAvi Kivity             "clgi \n\t"
2347d36db35SAvi Kivity             "vmload \n\t"
235*a43baea0SPaolo Bonzini             "mov regs+0x80, %%r15\n\t"  // rflags
236*a43baea0SPaolo Bonzini             "mov %%r15, 0x170(%0)\n\t"
237*a43baea0SPaolo Bonzini             "mov regs, %%r15\n\t"       // rax
238*a43baea0SPaolo Bonzini             "mov %%r15, 0x1f8(%0)\n\t"
239*a43baea0SPaolo Bonzini             LOAD_GPR_C
2407d36db35SAvi Kivity             "vmrun \n\t"
241*a43baea0SPaolo Bonzini             SAVE_GPR_C
242*a43baea0SPaolo Bonzini             "mov 0x170(%0), %%r15\n\t"  // rflags
243*a43baea0SPaolo Bonzini             "mov %%r15, regs+0x80\n\t"
244*a43baea0SPaolo Bonzini             "mov 0x1f8(%0), %%r15\n\t"  // rax
245*a43baea0SPaolo Bonzini             "mov %%r15, regs\n\t"
2467d36db35SAvi Kivity             "vmsave \n\t"
2477d36db35SAvi Kivity             "stgi"
248*a43baea0SPaolo Bonzini             : : "a"(vmcb_phys)
2497d36db35SAvi Kivity             : "rbx", "rcx", "rdx", "rsi",
2507d36db35SAvi Kivity               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
2517d36db35SAvi Kivity               "memory");
25221c23154SJoerg Roedel         tsc_end = rdtsc();
2537d36db35SAvi Kivity         ++test->exits;
2547d36db35SAvi Kivity     } while (!test->finished(test));
2557d36db35SAvi Kivity 
25621c23154SJoerg Roedel 
2577d36db35SAvi Kivity     success = test->succeeded(test);
2587d36db35SAvi Kivity 
2597d36db35SAvi Kivity     printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
2607d36db35SAvi Kivity 
2617d36db35SAvi Kivity     return success;
2627d36db35SAvi Kivity }
2637d36db35SAvi Kivity 
264095274b4SPrasad Joshi static bool smp_supported(void)
265095274b4SPrasad Joshi {
266095274b4SPrasad Joshi 	return cpu_count() > 1;
267095274b4SPrasad Joshi }
268095274b4SPrasad Joshi 
2697d36db35SAvi Kivity static bool default_supported(void)
2707d36db35SAvi Kivity {
2717d36db35SAvi Kivity     return true;
2727d36db35SAvi Kivity }
2737d36db35SAvi Kivity 
2747d36db35SAvi Kivity static void default_prepare(struct test *test)
2757d36db35SAvi Kivity {
2767d36db35SAvi Kivity     vmcb_ident(test->vmcb);
2777d36db35SAvi Kivity     cli();
2787d36db35SAvi Kivity }
2797d36db35SAvi Kivity 
2807d36db35SAvi Kivity static bool default_finished(struct test *test)
2817d36db35SAvi Kivity {
2827d36db35SAvi Kivity     return true; /* one vmexit */
2837d36db35SAvi Kivity }
2847d36db35SAvi Kivity 
2857d36db35SAvi Kivity static void null_test(struct test *test)
2867d36db35SAvi Kivity {
2877d36db35SAvi Kivity }
2887d36db35SAvi Kivity 
2897d36db35SAvi Kivity static bool null_check(struct test *test)
2907d36db35SAvi Kivity {
2917d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
2927d36db35SAvi Kivity }
2937d36db35SAvi Kivity 
2947d36db35SAvi Kivity static void prepare_no_vmrun_int(struct test *test)
2957d36db35SAvi Kivity {
2967d36db35SAvi Kivity     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
2977d36db35SAvi Kivity }
2987d36db35SAvi Kivity 
2997d36db35SAvi Kivity static bool check_no_vmrun_int(struct test *test)
3007d36db35SAvi Kivity {
3017d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
3027d36db35SAvi Kivity }
3037d36db35SAvi Kivity 
3047d36db35SAvi Kivity static void test_vmrun(struct test *test)
3057d36db35SAvi Kivity {
3067d36db35SAvi Kivity     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
3077d36db35SAvi Kivity }
3087d36db35SAvi Kivity 
3097d36db35SAvi Kivity static bool check_vmrun(struct test *test)
3107d36db35SAvi Kivity {
3117d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
3127d36db35SAvi Kivity }
3137d36db35SAvi Kivity 
3147d36db35SAvi Kivity static void prepare_cr3_intercept(struct test *test)
3157d36db35SAvi Kivity {
3167d36db35SAvi Kivity     default_prepare(test);
3177d36db35SAvi Kivity     test->vmcb->control.intercept_cr_read |= 1 << 3;
3187d36db35SAvi Kivity }
3197d36db35SAvi Kivity 
3207d36db35SAvi Kivity static void test_cr3_intercept(struct test *test)
3217d36db35SAvi Kivity {
3227d36db35SAvi Kivity     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
3237d36db35SAvi Kivity }
3247d36db35SAvi Kivity 
3257d36db35SAvi Kivity static bool check_cr3_intercept(struct test *test)
3267d36db35SAvi Kivity {
3277d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
3287d36db35SAvi Kivity }
3297d36db35SAvi Kivity 
3307d36db35SAvi Kivity static bool check_cr3_nointercept(struct test *test)
3317d36db35SAvi Kivity {
3327d36db35SAvi Kivity     return null_check(test) && test->scratch == read_cr3();
3337d36db35SAvi Kivity }
3347d36db35SAvi Kivity 
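/*
 * Runs on CPU 1: once the guest on CPU 0 signals readiness through
 * test->scratch, rewrite the instruction at mmio_insn from an MMIO
 * access into "mov %cr3, %rax; nop".  The intent (as the test name
 * "cr3 read intercept emulate" suggests) is that the CR3 read ends up
 * being emulated by the host, and the intercept must still fire.
 */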
3357d36db35SAvi Kivity static void corrupt_cr3_intercept_bypass(void *_test)
3367d36db35SAvi Kivity {
3377d36db35SAvi Kivity     struct test *test = _test;
3387d36db35SAvi Kivity     extern volatile u32 mmio_insn;
3397d36db35SAvi Kivity 
3407d36db35SAvi Kivity     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
3417d36db35SAvi Kivity         pause();
3427d36db35SAvi Kivity     pause();
3437d36db35SAvi Kivity     pause();
3447d36db35SAvi Kivity     pause();
3457d36db35SAvi Kivity     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
3467d36db35SAvi Kivity }
3477d36db35SAvi Kivity 
3487d36db35SAvi Kivity static void prepare_cr3_intercept_bypass(struct test *test)
3497d36db35SAvi Kivity {
3507d36db35SAvi Kivity     default_prepare(test);
3517d36db35SAvi Kivity     test->vmcb->control.intercept_cr_read |= 1 << 3;
3527d36db35SAvi Kivity     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
3537d36db35SAvi Kivity }
3547d36db35SAvi Kivity 
3557d36db35SAvi Kivity static void test_cr3_intercept_bypass(struct test *test)
3567d36db35SAvi Kivity {
3577d36db35SAvi Kivity     ulong a = 0xa0000;
3587d36db35SAvi Kivity 
3597d36db35SAvi Kivity     test->scratch = 1;
3607d36db35SAvi Kivity     while (test->scratch != 2)
3617d36db35SAvi Kivity         barrier();
3627d36db35SAvi Kivity 
3637d36db35SAvi Kivity     asm volatile ("mmio_insn: mov %0, (%0); nop"
3647d36db35SAvi Kivity                   : "+a"(a) : : "memory");
3657d36db35SAvi Kivity     test->scratch = a;
3667d36db35SAvi Kivity }
3677d36db35SAvi Kivity 
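/* NextRIP save support is advertised in CPUID 0x8000000A:EDX bit 3. */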
3687d36db35SAvi Kivity static bool next_rip_supported(void)
3697d36db35SAvi Kivity {
3707d36db35SAvi Kivity     return (cpuid(SVM_CPUID_FUNC).d & 8);
3717d36db35SAvi Kivity }
3727d36db35SAvi Kivity 
3737d36db35SAvi Kivity static void prepare_next_rip(struct test *test)
3747d36db35SAvi Kivity {
3757d36db35SAvi Kivity     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
3767d36db35SAvi Kivity }
3777d36db35SAvi Kivity 
3787d36db35SAvi Kivity 
3797d36db35SAvi Kivity static void test_next_rip(struct test *test)
3807d36db35SAvi Kivity {
3817d36db35SAvi Kivity     asm volatile ("rdtsc\n\t"
3827d36db35SAvi Kivity                   ".globl exp_next_rip\n\t"
3837d36db35SAvi Kivity                   "exp_next_rip:\n\t" ::: "eax", "edx");
3847d36db35SAvi Kivity }
3857d36db35SAvi Kivity 
3867d36db35SAvi Kivity static bool check_next_rip(struct test *test)
3877d36db35SAvi Kivity {
3887d36db35SAvi Kivity     extern char exp_next_rip;
3897d36db35SAvi Kivity     unsigned long address = (unsigned long)&exp_next_rip;
3907d36db35SAvi Kivity 
3917d36db35SAvi Kivity     return address == test->vmcb->control.next_rip;
3927d36db35SAvi Kivity }
3937d36db35SAvi Kivity 
3947d36db35SAvi Kivity static void prepare_mode_switch(struct test *test)
3957d36db35SAvi Kivity {
3967d36db35SAvi Kivity     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
3977d36db35SAvi Kivity                                              |  (1ULL << UD_VECTOR)
3987d36db35SAvi Kivity                                              |  (1ULL << DF_VECTOR)
3997d36db35SAvi Kivity                                              |  (1ULL << PF_VECTOR);
4007d36db35SAvi Kivity     test->scratch = 0;
4017d36db35SAvi Kivity }
4027d36db35SAvi Kivity 
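/*
 * Drop from 64-bit long mode to 32-bit protected mode, then 16-bit
 * protected mode and finally real mode, issue a VMMCALL there, and then
 * climb back up to long mode and issue a second VMMCALL.
 */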
4037d36db35SAvi Kivity static void test_mode_switch(struct test *test)
4047d36db35SAvi Kivity {
4057d36db35SAvi Kivity     asm volatile("	cli\n"
4067d36db35SAvi Kivity 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
4077d36db35SAvi Kivity 		 "1:\n"
4087d36db35SAvi Kivity 		 "	.long 2f\n"
409b46094b4SPaolo Bonzini 		 "	.long " xstr(KERNEL_CS32) "\n"
4107d36db35SAvi Kivity 		 ".code32\n"
4117d36db35SAvi Kivity 		 "2:\n"
4127d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4137d36db35SAvi Kivity 		 "	btcl  $31, %%eax\n" /* clear PG */
4147d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
4157d36db35SAvi Kivity 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
4167d36db35SAvi Kivity 		 "	rdmsr\n"
4177d36db35SAvi Kivity 		 "	btcl $8, %%eax\n" /* clear LME */
4187d36db35SAvi Kivity 		 "	wrmsr\n"
4197d36db35SAvi Kivity 		 "	movl %%cr4, %%eax\n"
4207d36db35SAvi Kivity 		 "	btcl $5, %%eax\n" /* clear PAE */
4217d36db35SAvi Kivity 		 "	movl %%eax, %%cr4\n"
422b46094b4SPaolo Bonzini 		 "	movw %[ds16], %%ax\n"
4237d36db35SAvi Kivity 		 "	movw %%ax, %%ds\n"
424b46094b4SPaolo Bonzini 		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
4257d36db35SAvi Kivity 		 ".code16\n"
4267d36db35SAvi Kivity 		 "3:\n"
4277d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4287d36db35SAvi Kivity 		 "	btcl $0, %%eax\n" /* clear PE  */
4297d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
4307d36db35SAvi Kivity 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
4317d36db35SAvi Kivity 		 "4:\n"
4327d36db35SAvi Kivity 		 "	vmmcall\n"
4337d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4347d36db35SAvi Kivity 		 "	btsl $0, %%eax\n" /* set PE  */
4357d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
436b46094b4SPaolo Bonzini 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
4377d36db35SAvi Kivity 		 ".code32\n"
4387d36db35SAvi Kivity 		 "5:\n"
4397d36db35SAvi Kivity 		 "	movl %%cr4, %%eax\n"
4407d36db35SAvi Kivity 		 "	btsl $5, %%eax\n" /* set PAE */
4417d36db35SAvi Kivity 		 "	movl %%eax, %%cr4\n"
4427d36db35SAvi Kivity 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
4437d36db35SAvi Kivity 		 "	rdmsr\n"
4447d36db35SAvi Kivity 		 "	btsl $8, %%eax\n" /* set LME */
4457d36db35SAvi Kivity 		 "	wrmsr\n"
4467d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4477d36db35SAvi Kivity 		 "	btsl  $31, %%eax\n" /* set PG */
4487d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
449b46094b4SPaolo Bonzini 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
4507d36db35SAvi Kivity 		 ".code64\n\t"
4517d36db35SAvi Kivity 		 "6:\n"
4527d36db35SAvi Kivity 		 "	vmmcall\n"
453b46094b4SPaolo Bonzini 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
454b46094b4SPaolo Bonzini 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
455b46094b4SPaolo Bonzini 		 : "rax", "rbx", "rcx", "rdx", "memory");
4567d36db35SAvi Kivity }
4577d36db35SAvi Kivity 
4587d36db35SAvi Kivity static bool mode_switch_finished(struct test *test)
4597d36db35SAvi Kivity {
4607d36db35SAvi Kivity     u64 cr0, cr4, efer;
4617d36db35SAvi Kivity 
4627d36db35SAvi Kivity     cr0  = test->vmcb->save.cr0;
4637d36db35SAvi Kivity     cr4  = test->vmcb->save.cr4;
4647d36db35SAvi Kivity     efer = test->vmcb->save.efer;
4657d36db35SAvi Kivity 
4667d36db35SAvi Kivity     /* Only expect VMMCALL intercepts */
4677d36db35SAvi Kivity     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
4687d36db35SAvi Kivity 	    return true;
4697d36db35SAvi Kivity 
4707d36db35SAvi Kivity     /* Jump over VMMCALL instruction */
4717d36db35SAvi Kivity     test->vmcb->save.rip += 3;
4727d36db35SAvi Kivity 
4737d36db35SAvi Kivity     /* Do sanity checks */
4747d36db35SAvi Kivity     switch (test->scratch) {
4757d36db35SAvi Kivity     case 0:
4767d36db35SAvi Kivity         /* Test should be in real mode now - check for this */
4777d36db35SAvi Kivity         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
4787d36db35SAvi Kivity             (cr4  & 0x00000020) || /* CR4.PAE */
4797d36db35SAvi Kivity             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
4807d36db35SAvi Kivity                 return true;
4817d36db35SAvi Kivity         break;
4827d36db35SAvi Kivity     case 2:
4837d36db35SAvi Kivity         /* Test should be back in long-mode now - check for this */
4847d36db35SAvi Kivity         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
4857d36db35SAvi Kivity             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
4867d36db35SAvi Kivity             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
4877d36db35SAvi Kivity                 return true;
4887d36db35SAvi Kivity         break;
4897d36db35SAvi Kivity     }
4907d36db35SAvi Kivity 
4917d36db35SAvi Kivity     /* one step forward */
4927d36db35SAvi Kivity     test->scratch += 1;
4937d36db35SAvi Kivity 
4947d36db35SAvi Kivity     return test->scratch == 2;
4957d36db35SAvi Kivity }
4967d36db35SAvi Kivity 
4977d36db35SAvi Kivity static bool check_mode_switch(struct test *test)
4987d36db35SAvi Kivity {
4997d36db35SAvi Kivity 	return test->scratch == 2;
5007d36db35SAvi Kivity }
5017d36db35SAvi Kivity 
5027d36db35SAvi Kivity static void prepare_asid_zero(struct test *test)
5037d36db35SAvi Kivity {
5047d36db35SAvi Kivity     test->vmcb->control.asid = 0;
5057d36db35SAvi Kivity }
5067d36db35SAvi Kivity 
5077d36db35SAvi Kivity static void test_asid_zero(struct test *test)
5087d36db35SAvi Kivity {
5097d36db35SAvi Kivity     asm volatile ("vmmcall\n\t");
5107d36db35SAvi Kivity }
5117d36db35SAvi Kivity 
5127d36db35SAvi Kivity static bool check_asid_zero(struct test *test)
5137d36db35SAvi Kivity {
5147d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
5157d36db35SAvi Kivity }
5167d36db35SAvi Kivity 
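/*
 * Regression test: a CR0 write that toggles CD must cause a selective
 * CR0 write intercept (SVM_EXIT_CR0_SEL_WRITE).  If the write is let
 * through instead, the guest reaches the failure path in
 * sel_cr0_bug_test().
 */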
5174c8eb156SJoerg Roedel static void sel_cr0_bug_prepare(struct test *test)
5184c8eb156SJoerg Roedel {
5194c8eb156SJoerg Roedel     vmcb_ident(test->vmcb);
5204c8eb156SJoerg Roedel     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
5214c8eb156SJoerg Roedel }
5224c8eb156SJoerg Roedel 
5234c8eb156SJoerg Roedel static bool sel_cr0_bug_finished(struct test *test)
5244c8eb156SJoerg Roedel {
5254c8eb156SJoerg Roedel 	return true;
5264c8eb156SJoerg Roedel }
5274c8eb156SJoerg Roedel 
5284c8eb156SJoerg Roedel static void sel_cr0_bug_test(struct test *test)
5294c8eb156SJoerg Roedel {
5304c8eb156SJoerg Roedel     unsigned long cr0;
5314c8eb156SJoerg Roedel 
5324c8eb156SJoerg Roedel     /* read cr0, clear CD, and write back */
5334c8eb156SJoerg Roedel     /* read cr0, set CD, and write back */
5344c8eb156SJoerg Roedel     cr0 |= (1UL << 30);
5354c8eb156SJoerg Roedel     write_cr0(cr0);
5364c8eb156SJoerg Roedel 
5374c8eb156SJoerg Roedel     /*
5384c8eb156SJoerg Roedel      * If we are here the test failed; not sure what to do now, because we
5394c8eb156SJoerg Roedel      * are not in guest-mode anymore so we can't trigger an intercept.
5404c8eb156SJoerg Roedel      * Report the failure and exit for now.
5414c8eb156SJoerg Roedel      */
5424c8eb156SJoerg Roedel     printf("sel_cr0 test failed. Can not recover from this - exiting\n");
5434c8eb156SJoerg Roedel     exit(1);
5444c8eb156SJoerg Roedel }
5454c8eb156SJoerg Roedel 
5464c8eb156SJoerg Roedel static bool sel_cr0_bug_check(struct test *test)
5474c8eb156SJoerg Roedel {
5484c8eb156SJoerg Roedel     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
5494c8eb156SJoerg Roedel }
5504c8eb156SJoerg Roedel 
5518594b943SJoerg Roedel static void npt_nx_prepare(struct test *test)
5528594b943SJoerg Roedel {
5538594b943SJoerg Roedel 
5548594b943SJoerg Roedel     u64 *pte;
5558594b943SJoerg Roedel 
5568594b943SJoerg Roedel     vmcb_ident(test->vmcb);
557726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)null_test);
5588594b943SJoerg Roedel 
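    /* Make the page containing null_test() non-executable in the NPT. */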
5598594b943SJoerg Roedel     *pte |= (1ULL << 63);
5608594b943SJoerg Roedel }
5618594b943SJoerg Roedel 
5628594b943SJoerg Roedel static bool npt_nx_check(struct test *test)
5638594b943SJoerg Roedel {
564726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)null_test);
5658594b943SJoerg Roedel 
5668594b943SJoerg Roedel     *pte &= ~(1ULL << 63);
5678594b943SJoerg Roedel 
5688594b943SJoerg Roedel     test->vmcb->save.efer |= (1 << 11);
5698594b943SJoerg Roedel 
5708594b943SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
5718594b943SJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x15);
5728594b943SJoerg Roedel }
5738594b943SJoerg Roedel 
574ea975120SJoerg Roedel static void npt_us_prepare(struct test *test)
575ea975120SJoerg Roedel {
576ea975120SJoerg Roedel     u64 *pte;
577ea975120SJoerg Roedel 
578ea975120SJoerg Roedel     vmcb_ident(test->vmcb);
579726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)scratch_page);
580ea975120SJoerg Roedel 
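    /*
     * Clear the user bit of scratch_page's NPT entry; guest accesses
     * through the nested page table are treated as user accesses, so the
     * read in npt_us_test() should fault with error code 0x05
     * (present, user).
     */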
581ea975120SJoerg Roedel     *pte &= ~(1ULL << 2);
582ea975120SJoerg Roedel }
583ea975120SJoerg Roedel 
584ea975120SJoerg Roedel static void npt_us_test(struct test *test)
585ea975120SJoerg Roedel {
586c0a4e715SPaolo Bonzini     (void) *(volatile u64 *)scratch_page;
587ea975120SJoerg Roedel }
588ea975120SJoerg Roedel 
589ea975120SJoerg Roedel static bool npt_us_check(struct test *test)
590ea975120SJoerg Roedel {
591726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)scratch_page);
592ea975120SJoerg Roedel 
593ea975120SJoerg Roedel     *pte |= (1ULL << 2);
594ea975120SJoerg Roedel 
595ea975120SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
596ea975120SJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x05);
597ea975120SJoerg Roedel }
598ea975120SJoerg Roedel 
599dd6ef43cSJoerg Roedel static void npt_rsvd_prepare(struct test *test)
600dd6ef43cSJoerg Roedel {
601dd6ef43cSJoerg Roedel 
602dd6ef43cSJoerg Roedel     vmcb_ident(test->vmcb);
603dd6ef43cSJoerg Roedel 
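    /*
     * Set a bit that must be zero in the PDPE so that the nested walk
     * should report a reserved-bit fault (error code 0x0f).
     */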
604dd6ef43cSJoerg Roedel     pdpe[0] |= (1ULL << 8);
605dd6ef43cSJoerg Roedel }
606dd6ef43cSJoerg Roedel 
607dd6ef43cSJoerg Roedel static bool npt_rsvd_check(struct test *test)
608dd6ef43cSJoerg Roedel {
609dd6ef43cSJoerg Roedel     pdpe[0] &= ~(1ULL << 8);
610dd6ef43cSJoerg Roedel 
611dd6ef43cSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
612dd6ef43cSJoerg Roedel             && (test->vmcb->control.exit_info_1 == 0x0f);
613dd6ef43cSJoerg Roedel }
614dd6ef43cSJoerg Roedel 
6155ebf82edSJoerg Roedel static void npt_rw_prepare(struct test *test)
6165ebf82edSJoerg Roedel {
6175ebf82edSJoerg Roedel 
6185ebf82edSJoerg Roedel     u64 *pte;
6195ebf82edSJoerg Roedel 
6205ebf82edSJoerg Roedel     vmcb_ident(test->vmcb);
621726a1dd7SPaolo Bonzini     pte = npt_get_pte(0x80000);
6225ebf82edSJoerg Roedel 
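    /*
     * Write-protect the page at 0x80000 in the NPT; the store in
     * npt_rw_test() should fault with error code 0x07 (present, write,
     * user).
     */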
6235ebf82edSJoerg Roedel     *pte &= ~(1ULL << 1);
6245ebf82edSJoerg Roedel }
6255ebf82edSJoerg Roedel 
6265ebf82edSJoerg Roedel static void npt_rw_test(struct test *test)
6275ebf82edSJoerg Roedel {
6285ebf82edSJoerg Roedel     u64 *data = (void*)(0x80000);
6295ebf82edSJoerg Roedel 
6305ebf82edSJoerg Roedel     *data = 0;
6315ebf82edSJoerg Roedel }
6325ebf82edSJoerg Roedel 
6335ebf82edSJoerg Roedel static bool npt_rw_check(struct test *test)
6345ebf82edSJoerg Roedel {
635726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(0x80000);
6365ebf82edSJoerg Roedel 
6375ebf82edSJoerg Roedel     *pte |= (1ULL << 1);
6385ebf82edSJoerg Roedel 
6395ebf82edSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
6405ebf82edSJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x07);
6415ebf82edSJoerg Roedel }
6425ebf82edSJoerg Roedel 
643590040ffSJoerg Roedel static void npt_pfwalk_prepare(struct test *test)
644590040ffSJoerg Roedel {
645590040ffSJoerg Roedel 
646590040ffSJoerg Roedel     u64 *pte;
647590040ffSJoerg Roedel 
648590040ffSJoerg Roedel     vmcb_ident(test->vmcb);
649726a1dd7SPaolo Bonzini     pte = npt_get_pte(read_cr3());
650590040ffSJoerg Roedel 
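    /*
     * Write-protect the page holding the guest page-table root.  The
     * hardware walker's accessed/dirty-bit update of the guest page table
     * should then take an NPF whose faulting guest-physical address
     * (exit_info_2) is CR3 itself.
     */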
651590040ffSJoerg Roedel     *pte &= ~(1ULL << 1);
652590040ffSJoerg Roedel }
653590040ffSJoerg Roedel 
654590040ffSJoerg Roedel static bool npt_pfwalk_check(struct test *test)
655590040ffSJoerg Roedel {
656726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(read_cr3());
657590040ffSJoerg Roedel 
658590040ffSJoerg Roedel     *pte |= (1ULL << 1);
659590040ffSJoerg Roedel 
660590040ffSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
661590040ffSJoerg Roedel            && (test->vmcb->control.exit_info_1 == 0x7)
662590040ffSJoerg Roedel 	   && (test->vmcb->control.exit_info_2 == read_cr3());
663590040ffSJoerg Roedel }
664590040ffSJoerg Roedel 
66521c23154SJoerg Roedel static void latency_prepare(struct test *test)
66621c23154SJoerg Roedel {
66721c23154SJoerg Roedel     default_prepare(test);
66821c23154SJoerg Roedel     runs = LATENCY_RUNS;
66921c23154SJoerg Roedel     latvmrun_min = latvmexit_min = -1ULL;
67021c23154SJoerg Roedel     latvmrun_max = latvmexit_max = 0;
67121c23154SJoerg Roedel     vmrun_sum = vmexit_sum = 0;
67221c23154SJoerg Roedel }
67321c23154SJoerg Roedel 
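/*
 * Guest side of the VMRUN/VMEXIT latency measurement: tsc_start is set in
 * test_run() right before VMRUN, so the delta taken here is the VMRUN
 * latency; tsc_start is then re-armed just before VMMCALL so that
 * latency_finished() can measure the VMEXIT latency on the host side.
 */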
67421c23154SJoerg Roedel static void latency_test(struct test *test)
67521c23154SJoerg Roedel {
67621c23154SJoerg Roedel     u64 cycles;
67721c23154SJoerg Roedel 
67821c23154SJoerg Roedel start:
67921c23154SJoerg Roedel     tsc_end = rdtsc();
68021c23154SJoerg Roedel 
68121c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
68221c23154SJoerg Roedel 
68321c23154SJoerg Roedel     if (cycles > latvmrun_max)
68421c23154SJoerg Roedel         latvmrun_max = cycles;
68521c23154SJoerg Roedel 
68621c23154SJoerg Roedel     if (cycles < latvmrun_min)
68721c23154SJoerg Roedel         latvmrun_min = cycles;
68821c23154SJoerg Roedel 
68921c23154SJoerg Roedel     vmrun_sum += cycles;
69021c23154SJoerg Roedel 
69121c23154SJoerg Roedel     tsc_start = rdtsc();
69221c23154SJoerg Roedel 
69321c23154SJoerg Roedel     asm volatile ("vmmcall" : : : "memory");
69421c23154SJoerg Roedel     goto start;
69521c23154SJoerg Roedel }
69621c23154SJoerg Roedel 
69721c23154SJoerg Roedel static bool latency_finished(struct test *test)
69821c23154SJoerg Roedel {
69921c23154SJoerg Roedel     u64 cycles;
70021c23154SJoerg Roedel 
70121c23154SJoerg Roedel     tsc_end = rdtsc();
70221c23154SJoerg Roedel 
70321c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
70421c23154SJoerg Roedel 
70521c23154SJoerg Roedel     if (cycles > latvmexit_max)
70621c23154SJoerg Roedel         latvmexit_max = cycles;
70721c23154SJoerg Roedel 
70821c23154SJoerg Roedel     if (cycles < latvmexit_min)
70921c23154SJoerg Roedel         latvmexit_min = cycles;
71021c23154SJoerg Roedel 
71121c23154SJoerg Roedel     vmexit_sum += cycles;
71221c23154SJoerg Roedel 
71321c23154SJoerg Roedel     test->vmcb->save.rip += 3;
71421c23154SJoerg Roedel 
71521c23154SJoerg Roedel     runs -= 1;
71621c23154SJoerg Roedel 
71721c23154SJoerg Roedel     return runs == 0;
71821c23154SJoerg Roedel }
71921c23154SJoerg Roedel 
72021c23154SJoerg Roedel static bool latency_check(struct test *test)
72121c23154SJoerg Roedel {
72221c23154SJoerg Roedel     printf("    Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max,
72321c23154SJoerg Roedel             latvmrun_min, vmrun_sum / LATENCY_RUNS);
72421c23154SJoerg Roedel     printf("    Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max,
72521c23154SJoerg Roedel             latvmexit_min, vmexit_sum / LATENCY_RUNS);
72621c23154SJoerg Roedel     return true;
72721c23154SJoerg Roedel }
72821c23154SJoerg Roedel 
729ef101219SRoedel, Joerg static void lat_svm_insn_prepare(struct test *test)
730ef101219SRoedel, Joerg {
731ef101219SRoedel, Joerg     default_prepare(test);
732ef101219SRoedel, Joerg     runs = LATENCY_RUNS;
733ef101219SRoedel, Joerg     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
734ef101219SRoedel, Joerg     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
735ef101219SRoedel, Joerg     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
736ef101219SRoedel, Joerg }
737ef101219SRoedel, Joerg 
738ef101219SRoedel, Joerg static bool lat_svm_insn_finished(struct test *test)
739ef101219SRoedel, Joerg {
740ef101219SRoedel, Joerg     u64 vmcb_phys = virt_to_phys(test->vmcb);
741ef101219SRoedel, Joerg     u64 cycles;
742ef101219SRoedel, Joerg 
743ef101219SRoedel, Joerg     for ( ; runs != 0; runs--) {
744ef101219SRoedel, Joerg         tsc_start = rdtsc();
745ef101219SRoedel, Joerg         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
746ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
747ef101219SRoedel, Joerg         if (cycles > latvmload_max)
748ef101219SRoedel, Joerg             latvmload_max = cycles;
749ef101219SRoedel, Joerg         if (cycles < latvmload_min)
750ef101219SRoedel, Joerg             latvmload_min = cycles;
751ef101219SRoedel, Joerg         vmload_sum += cycles;
752ef101219SRoedel, Joerg 
753ef101219SRoedel, Joerg         tsc_start = rdtsc();
754ef101219SRoedel, Joerg         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
755ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
756ef101219SRoedel, Joerg         if (cycles > latvmsave_max)
757ef101219SRoedel, Joerg             latvmsave_max = cycles;
758ef101219SRoedel, Joerg         if (cycles < latvmsave_min)
759ef101219SRoedel, Joerg             latvmsave_min = cycles;
760ef101219SRoedel, Joerg         vmsave_sum += cycles;
761ef101219SRoedel, Joerg 
762ef101219SRoedel, Joerg         tsc_start = rdtsc();
763ef101219SRoedel, Joerg         asm volatile("stgi\n\t");
764ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
765ef101219SRoedel, Joerg         if (cycles > latstgi_max)
766ef101219SRoedel, Joerg             latstgi_max = cycles;
767ef101219SRoedel, Joerg         if (cycles < latstgi_min)
768ef101219SRoedel, Joerg             latstgi_min = cycles;
769ef101219SRoedel, Joerg         stgi_sum += cycles;
770ef101219SRoedel, Joerg 
771ef101219SRoedel, Joerg         tsc_start = rdtsc();
772ef101219SRoedel, Joerg         asm volatile("clgi\n\t");
773ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
774ef101219SRoedel, Joerg         if (cycles > latclgi_max)
775ef101219SRoedel, Joerg             latclgi_max = cycles;
776ef101219SRoedel, Joerg         if (cycles < latclgi_min)
777ef101219SRoedel, Joerg             latclgi_min = cycles;
778ef101219SRoedel, Joerg         clgi_sum += cycles;
779ef101219SRoedel, Joerg     }
780ef101219SRoedel, Joerg 
781ef101219SRoedel, Joerg     return true;
782ef101219SRoedel, Joerg }
783ef101219SRoedel, Joerg 
784ef101219SRoedel, Joerg static bool lat_svm_insn_check(struct test *test)
785ef101219SRoedel, Joerg {
786ef101219SRoedel, Joerg     printf("    Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max,
787ef101219SRoedel, Joerg             latvmload_min, vmload_sum / LATENCY_RUNS);
788ef101219SRoedel, Joerg     printf("    Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max,
789ef101219SRoedel, Joerg             latvmsave_min, vmsave_sum / LATENCY_RUNS);
790ef101219SRoedel, Joerg     printf("    Latency STGI:   max: %d min: %d avg: %d\n", latstgi_max,
791ef101219SRoedel, Joerg             latstgi_min, stgi_sum / LATENCY_RUNS);
792ef101219SRoedel, Joerg     printf("    Latency CLGI:   max: %d min: %d avg: %d\n", latclgi_max,
793ef101219SRoedel, Joerg             latclgi_min, clgi_sum / LATENCY_RUNS);
794ef101219SRoedel, Joerg     return true;
795ef101219SRoedel, Joerg }
7967d36db35SAvi Kivity static struct test tests[] = {
7977d36db35SAvi Kivity     { "null", default_supported, default_prepare, null_test,
7987d36db35SAvi Kivity       default_finished, null_check },
7997d36db35SAvi Kivity     { "vmrun", default_supported, default_prepare, test_vmrun,
8007d36db35SAvi Kivity        default_finished, check_vmrun },
8017d36db35SAvi Kivity     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
8027d36db35SAvi Kivity       null_test, default_finished, check_no_vmrun_int },
8037d36db35SAvi Kivity     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
8047d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_intercept },
8057d36db35SAvi Kivity     { "cr3 read nointercept", default_supported, default_prepare,
8067d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_nointercept },
807095274b4SPrasad Joshi     { "cr3 read intercept emulate", smp_supported,
8087d36db35SAvi Kivity       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
8097d36db35SAvi Kivity       default_finished, check_cr3_intercept },
8107d36db35SAvi Kivity     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
8117d36db35SAvi Kivity       default_finished, check_next_rip },
8127d36db35SAvi Kivity     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
8137d36db35SAvi Kivity        mode_switch_finished, check_mode_switch },
8147d36db35SAvi Kivity     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
8157d36db35SAvi Kivity        default_finished, check_asid_zero },
8164c8eb156SJoerg Roedel     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
8174c8eb156SJoerg Roedel        sel_cr0_bug_finished, sel_cr0_bug_check },
8188594b943SJoerg Roedel     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
819ea975120SJoerg Roedel 	    default_finished, npt_nx_check },
820ea975120SJoerg Roedel     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
821ea975120SJoerg Roedel 	    default_finished, npt_us_check },
822dd6ef43cSJoerg Roedel     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
823dd6ef43cSJoerg Roedel 	    default_finished, npt_rsvd_check },
8245ebf82edSJoerg Roedel     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
8255ebf82edSJoerg Roedel 	    default_finished, npt_rw_check },
826590040ffSJoerg Roedel     { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
827590040ffSJoerg Roedel 	    default_finished, npt_pfwalk_check },
82821c23154SJoerg Roedel     { "latency_run_exit", default_supported, latency_prepare, latency_test,
82921c23154SJoerg Roedel       latency_finished, latency_check },
830ef101219SRoedel, Joerg     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
831ef101219SRoedel, Joerg       lat_svm_insn_finished, lat_svm_insn_check },
8327d36db35SAvi Kivity };
8337d36db35SAvi Kivity 
8347d36db35SAvi Kivity int main(int ac, char **av)
8357d36db35SAvi Kivity {
8367d36db35SAvi Kivity     int i, nr, passed, done;
8377d36db35SAvi Kivity     struct vmcb *vmcb;
8387d36db35SAvi Kivity 
8397d36db35SAvi Kivity     setup_vm();
8407d36db35SAvi Kivity     smp_init();
8417d36db35SAvi Kivity 
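    /* CPUID 0x80000001:ECX bit 2 advertises SVM support. */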
8427d36db35SAvi Kivity     if (!(cpuid(0x80000001).c & 4)) {
8437d36db35SAvi Kivity         printf("SVM not available\n");
8447d36db35SAvi Kivity         return 0;
8457d36db35SAvi Kivity     }
8467d36db35SAvi Kivity 
8477d36db35SAvi Kivity     setup_svm();
8487d36db35SAvi Kivity 
8497d36db35SAvi Kivity     vmcb = alloc_page();
8507d36db35SAvi Kivity 
8517d36db35SAvi Kivity     nr = ARRAY_SIZE(tests);
8527d36db35SAvi Kivity     passed = done = 0;
8537d36db35SAvi Kivity     for (i = 0; i < nr; ++i) {
8547d36db35SAvi Kivity         if (!tests[i].supported())
8557d36db35SAvi Kivity             continue;
8567d36db35SAvi Kivity         done += 1;
8577d36db35SAvi Kivity         passed += test_run(&tests[i], vmcb);
8587d36db35SAvi Kivity     }
8597d36db35SAvi Kivity 
8607d36db35SAvi Kivity     printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
8617d36db35SAvi Kivity     return passed == done ? 0 : 1;
8627d36db35SAvi Kivity }