xref: /kvm-unit-tests/x86/svm.c (revision e8b10c1ff110e5047c4706d71326260b6e6ca79c)
#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "io.h"

/* for the nested page table */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

u8 *io_bitmap;
u8 io_bitmap_area[16384];

static bool npt_supported(void)
{
    return cpuid(0x8000000A).d & 1;
}

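/*
 * Enable SVM on this CPU: a host save area must be registered in
 * MSR_VM_HSAVE_PA and EFER.SVME set before VMRUN can be used. If nested
 * paging is available, also build a 4-level nested page table that
 * identity-maps the first 4GB with 4k pages so individual NPT entries
 * can be manipulated by the NPT tests below.
 */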
static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - Build a nested page table
     * Build the page-table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit-tests.
     */

    address = 0;

    /* PTE level */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;

        pte[i] = page;
    }

    /* PDE level */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;

        pde[i] = page;
    }

    /* PDPe level */
    pdpe   = alloc_page();
    for (i = 0; i < 4; ++i)
        pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e    = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}

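/*
 * Look up the NPT PTE for a guest-physical address: the 2048 PTE pages
 * built in setup_svm() cover 0-4GB contiguously, so bits 29:21 of the
 * address select the PTE page and bits 20:12 select the entry in it.
 */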
static u64 *npt_get_pte(u64 address)
{
    int i1, i2;

    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}

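/*
 * Initialize a VMCB so the guest starts out as a copy of the current host
 * state: flat 64-bit segments, the host's control registers, GDT/IDT and
 * relevant MSRs, ASID 1, and intercepts for VMRUN and VMMCALL. Nested
 * paging is enabled whenever the CPU supports it.
 */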
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
    ctrl->iopm_base_pa = virt_to_phys(io_bitmap);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}

struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static inline void vmmcall(void)
{
    asm volatile ("vmmcall" : : : "memory");
}

static void test_thunk(struct test *test)
{
    test->guest_func(test);
    vmmcall();
}

struct regs {
        u64 rax;
        u64 rcx;
        u64 rdx;
        u64 rbx;
        u64 cr2;
        u64 rbp;
        u64 rsi;
        u64 rdi;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
        u64 r12;
        u64 r13;
        u64 r14;
        u64 r15;
        u64 rflags;
};

struct regs regs;

// rax handled specially below

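/*
 * Exchange the general-purpose registers with the slots of the global
 * "regs" struct around VMRUN. Because xchg is used, the same sequence both
 * saves the guest values and reloads the previously stashed host values;
 * rax and rflags are handled separately in the test_run() asm block.
 */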
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C      SAVE_GPR_C

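/*
 * Drive one test: prepare the VMCB, point the guest at test_thunk() on a
 * private stack, then loop - clear GIF, vmload the guest's extra state,
 * swap in the guest GPRs from "regs", VMRUN, swap them back out, vmsave
 * and re-set GIF - until test->finished() returns true, and finally
 * report PASS/FAIL from test->succeeded().
 */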
static bool test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];
    bool success;

    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    regs.rdi = (ulong)test;
    do {
        tsc_start = rdtsc();
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "mov regs+0x80, %%r15\n\t"  // rflags
            "mov %%r15, 0x170(%0)\n\t"
            "mov regs, %%r15\n\t"       // rax
            "mov %%r15, 0x1f8(%0)\n\t"
            LOAD_GPR_C
            "vmrun \n\t"
            SAVE_GPR_C
            "mov 0x170(%0), %%r15\n\t"  // rflags
            "mov %%r15, regs+0x80\n\t"
            "mov 0x1f8(%0), %%r15\n\t"  // rax
            "mov %%r15, regs\n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    success = test->succeeded(test);

    printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");

    return success;
}

static bool smp_supported(void)
{
    return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

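/*
 * Runs on a second CPU: once the guest signals readiness through
 * test->scratch, wait a few pause cycles and then rewrite the guest's
 * MMIO store at the mmio_insn label into "mov %cr3, %rax; nop". This is
 * apparently intended to exercise the case where the hypervisor's MMIO
 * emulation re-fetches the (now patched) instruction, which must still
 * honor the CR3 read intercept rather than be emulated past it.
 */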
static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

static bool next_rip_supported(void)
{
    return (cpuid(SVM_CPUID_FUNC).d & 8);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}

static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}

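/*
 * Guest code that walks down from 64-bit long mode to 32-bit protected
 * mode, 16-bit protected mode and finally real mode, issues a VMMCALL,
 * then re-enables PE/PAE/LME/PG and returns to long mode for a second
 * VMMCALL. mode_switch_finished() checks CR0/CR4/EFER at each stop.
 */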
static void test_mode_switch(struct test *test)
{
    asm volatile("	cli\n"
		 "	ljmp *1f\n" /* jump to 32-bit code segment */
		 "1:\n"
		 "	.long 2f\n"
		 "	.long " xstr(KERNEL_CS32) "\n"
		 ".code32\n"
		 "2:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl  $31, %%eax\n" /* clear PG */
		 "	movl %%eax, %%cr0\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btcl $8, %%eax\n" /* clear LME */
		 "	wrmsr\n"
		 "	movl %%cr4, %%eax\n"
		 "	btcl $5, %%eax\n" /* clear PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movw %[ds16], %%ax\n"
		 "	movw %%ax, %%ds\n"
		 "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
		 ".code16\n"
		 "3:\n"
		 "	movl %%cr0, %%eax\n"
		 "	btcl $0, %%eax\n" /* clear PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
		 "4:\n"
		 "	vmmcall\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl $0, %%eax\n" /* set PE  */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		 ".code32\n"
		 "5:\n"
		 "	movl %%cr4, %%eax\n"
		 "	btsl $5, %%eax\n" /* set PAE */
		 "	movl %%eax, %%cr4\n"
		 "	movl $0xc0000080, %%ecx\n" /* EFER */
		 "	rdmsr\n"
		 "	btsl $8, %%eax\n" /* set LME */
		 "	wrmsr\n"
		 "	movl %%cr0, %%eax\n"
		 "	btsl  $31, %%eax\n" /* set PG */
		 "	movl %%eax, %%cr0\n"
		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
		 ".code64\n\t"
		 "6:\n"
		 "	vmmcall\n"
		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		 : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    case 2:
        /* Test should be back in long-mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
                return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}

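/*
 * I/O intercept tests: the IOPM holds one bit per I/O port (plus a
 * trailing byte so accesses spanning the top of the port range can be
 * checked). A set bit causes an #VMEXIT(IOIO) for any access touching
 * that port. The guest advances through numbered stages; ioio_finished()
 * counts each expected intercept and unmasks the port again.
 */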
static void prepare_ioio(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;
}

int get_test_stage(struct test *test)
{
    barrier();
    return test->scratch;
}

void inc_test_stage(struct test *test)
{
    barrier();
    test->scratch++;
    barrier();
}

static void test_ioio(struct test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;

    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;

    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;

    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;

    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;

    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;

    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    printf("test failure, stage %d\n", get_test_stage(test));
    test->scratch = -1;
}

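/*
 * Called after every exit of the ioio test: anything other than an IOIO
 * intercept (or the final VMMCALL) ends the test. For each IOIO exit the
 * stage counter is advanced and the bitmap bits covering the bytes of the
 * faulting access are cleared so the retried instruction can complete.
 */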
static bool ioio_finished(struct test *test)
{
    unsigned port, size;

    /* Only expect IOIO intercepts */
    if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;

    if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    port = test->vmcb->control.exit_info_1 >> 16;
    size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}

static bool check_ioio(struct test *test)
{
    memset(io_bitmap, 0, 8193);
    return test->scratch != -1;
}

static void prepare_asid_zero(struct test *test)
{
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write it back */
    cr0  = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed, not sure what to do now because we
     * are not in guest-mode anymore so we can't trigger an intercept.
     * Trigger a triple fault for now.
     */
    printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
    exit(1);
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)null_test);

    *pte |= (1ULL << 63);
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    test->vmcb->save.efer |= (1 << 11);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);
}

static void npt_us_test(struct test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
}

static void npt_rsvd_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
}

static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0x80000);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void*)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = npt_get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void npt_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_pfwalk_check(struct test *test)
{
    u64 *pte = npt_get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
           && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
           && (test->vmcb->control.exit_info_2 == read_cr3());
}

static void npt_l1mmio_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030UL);

    nested_apic_version1 = *data;
    nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct test *test)
{
    volatile u32 *data = (volatile void*)(0xfee00030);
    u32 lvr = *data;

    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    test->vmcb->save.rip += 3;

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf("    Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max,
            latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max,
            latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}

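/*
 * latency_svm_insn: measure the raw cost of the VMLOAD, VMSAVE, STGI and
 * CLGI instructions themselves by timing them back-to-back with RDTSC in
 * the host for LATENCY_RUNS iterations, without entering the guest.
 */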
static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf("    Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max,
            latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max,
            latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %d min: %d avg: %d\n", latstgi_max,
            latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %d min: %d avg: %d\n", latclgi_max,
            latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

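/*
 * Registry of all SVM tests: each entry names the test and supplies its
 * support check plus the prepare/guest/finished/check callbacks used by
 * test_run().
 */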
static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "ioio", default_supported, prepare_ioio, test_ioio,
      ioio_finished, check_ioio },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
      default_finished, npt_pfwalk_check },
    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
      default_finished, npt_l1mmio_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
};

int main(int ac, char **av)
{
    int i, nr, passed, done;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

    if (!(cpuid(0x80000001).c & 4)) {
        printf("SVM not available\n");
        return 0;
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    passed = done = 0;
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        done += 1;
        passed += test_run(&tests[i], vmcb);
    }

    printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
    return passed == done ? 0 : 1;
}