xref: /kvm-unit-tests/x86/svm.c (revision f6a2ca456c195b30bfd11205b604334bcc1aec68)
17d36db35SAvi Kivity #include "svm.h"
27d36db35SAvi Kivity #include "libcflat.h"
37d36db35SAvi Kivity #include "processor.h"
4b46094b4SPaolo Bonzini #include "desc.h"
57d36db35SAvi Kivity #include "msr.h"
67d36db35SAvi Kivity #include "vm.h"
77d36db35SAvi Kivity #include "smp.h"
87d36db35SAvi Kivity #include "types.h"
9bcd9774aSPaolo Bonzini #include "io.h"
107d36db35SAvi Kivity 
111535bf0fSJoerg Roedel /* for the nested page table */
121535bf0fSJoerg Roedel u64 *pml4e;
131535bf0fSJoerg Roedel u64 *pdpe;
141535bf0fSJoerg Roedel u64 *pde[4];
151535bf0fSJoerg Roedel u64 *pte[2048];
16c0a4e715SPaolo Bonzini void *scratch_page;
171535bf0fSJoerg Roedel 
1821c23154SJoerg Roedel #define LATENCY_RUNS 1000000
1921c23154SJoerg Roedel 
2021c23154SJoerg Roedel u64 tsc_start;
2121c23154SJoerg Roedel u64 tsc_end;
2221c23154SJoerg Roedel 
2321c23154SJoerg Roedel u64 vmrun_sum, vmexit_sum;
24ef101219SRoedel, Joerg u64 vmsave_sum, vmload_sum;
25ef101219SRoedel, Joerg u64 stgi_sum, clgi_sum;
2621c23154SJoerg Roedel u64 latvmrun_max;
2721c23154SJoerg Roedel u64 latvmrun_min;
2821c23154SJoerg Roedel u64 latvmexit_max;
2921c23154SJoerg Roedel u64 latvmexit_min;
30ef101219SRoedel, Joerg u64 latvmload_max;
31ef101219SRoedel, Joerg u64 latvmload_min;
32ef101219SRoedel, Joerg u64 latvmsave_max;
33ef101219SRoedel, Joerg u64 latvmsave_min;
34ef101219SRoedel, Joerg u64 latstgi_max;
35ef101219SRoedel, Joerg u64 latstgi_min;
36ef101219SRoedel, Joerg u64 latclgi_max;
37ef101219SRoedel, Joerg u64 latclgi_min;
3821c23154SJoerg Roedel u64 runs;
3921c23154SJoerg Roedel 
403d46571bSPaolo Bonzini u8 *io_bitmap;
413d46571bSPaolo Bonzini u8 io_bitmap_area[16384];
423d46571bSPaolo Bonzini 
431535bf0fSJoerg Roedel static bool npt_supported(void)
441535bf0fSJoerg Roedel {
451535bf0fSJoerg Roedel    return cpuid(0x8000000A).d & 1;
461535bf0fSJoerg Roedel }
471535bf0fSJoerg Roedel 
487d36db35SAvi Kivity static void setup_svm(void)
497d36db35SAvi Kivity {
507d36db35SAvi Kivity     void *hsave = alloc_page();
511535bf0fSJoerg Roedel     u64 *page, address;
521535bf0fSJoerg Roedel     int i,j;
537d36db35SAvi Kivity 
547d36db35SAvi Kivity     wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
557d36db35SAvi Kivity     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
568594b943SJoerg Roedel     wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
571535bf0fSJoerg Roedel 
58ea975120SJoerg Roedel     scratch_page = alloc_page();
59ea975120SJoerg Roedel 
603d46571bSPaolo Bonzini     io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);
613d46571bSPaolo Bonzini 
621535bf0fSJoerg Roedel     if (!npt_supported())
631535bf0fSJoerg Roedel         return;
641535bf0fSJoerg Roedel 
651535bf0fSJoerg Roedel     printf("NPT detected - running all tests with NPT enabled\n");
661535bf0fSJoerg Roedel 
671535bf0fSJoerg Roedel     /*
681535bf0fSJoerg Roedel      * Nested paging supported - build a nested page table.
691535bf0fSJoerg Roedel      * Build it bottom-up and map everything with 4k pages
701535bf0fSJoerg Roedel      * to get enough granularity for the NPT unit tests.
711535bf0fSJoerg Roedel      */
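    /*
     * 2048 page tables x 512 entries x 4k pages identity-map the first 4GB
     * of guest-physical memory.  Leaf entries use 0x067 (P|RW|US|A|D), the
     * upper levels use 0x027 (P|RW|US|A).
     */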
721535bf0fSJoerg Roedel 
731535bf0fSJoerg Roedel     address = 0;
741535bf0fSJoerg Roedel 
751535bf0fSJoerg Roedel     /* PTE level */
761535bf0fSJoerg Roedel     for (i = 0; i < 2048; ++i) {
771535bf0fSJoerg Roedel         page = alloc_page();
781535bf0fSJoerg Roedel 
791535bf0fSJoerg Roedel         for (j = 0; j < 512; ++j, address += 4096)
801535bf0fSJoerg Roedel             page[j] = address | 0x067ULL;
811535bf0fSJoerg Roedel 
821535bf0fSJoerg Roedel         pte[i] = page;
831535bf0fSJoerg Roedel     }
841535bf0fSJoerg Roedel 
851535bf0fSJoerg Roedel     /* PDE level */
861535bf0fSJoerg Roedel     for (i = 0; i < 4; ++i) {
871535bf0fSJoerg Roedel         page = alloc_page();
881535bf0fSJoerg Roedel 
891535bf0fSJoerg Roedel         for (j = 0; j < 512; ++j)
9093b05099SPaolo Bonzini             page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
911535bf0fSJoerg Roedel 
921535bf0fSJoerg Roedel         pde[i] = page;
931535bf0fSJoerg Roedel     }
941535bf0fSJoerg Roedel 
951535bf0fSJoerg Roedel     /* PDPe level */
961535bf0fSJoerg Roedel     pdpe   = alloc_page();
971535bf0fSJoerg Roedel     for (i = 0; i < 4; ++i)
981535bf0fSJoerg Roedel        pdpe[i] = ((u64)(pde[i])) | 0x27;
991535bf0fSJoerg Roedel 
1001535bf0fSJoerg Roedel     /* PML4e level */
1011535bf0fSJoerg Roedel     pml4e    = alloc_page();
1021535bf0fSJoerg Roedel     pml4e[0] = ((u64)pdpe) | 0x27;
1037d36db35SAvi Kivity }
1047d36db35SAvi Kivity 
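/*
 * Index into the flat pde[]/pte[] arrays built in setup_svm(): for the PDE,
 * bits 31:30 of the guest-physical address select one of the four page
 * directories and bits 29:21 the entry; for the PTE, bits 31:21 select one
 * of the 2048 page tables and bits 20:12 the entry.
 */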
105*f6a2ca45SPaolo Bonzini static u64 *npt_get_pde(u64 address)
106*f6a2ca45SPaolo Bonzini {
107*f6a2ca45SPaolo Bonzini     int i1, i2;
108*f6a2ca45SPaolo Bonzini 
109*f6a2ca45SPaolo Bonzini     address >>= 21;
110*f6a2ca45SPaolo Bonzini     i1 = (address >> 9) & 0x3;
111*f6a2ca45SPaolo Bonzini     i2 = address & 0x1ff;
112*f6a2ca45SPaolo Bonzini 
113*f6a2ca45SPaolo Bonzini     return &pde[i1][i2];
114*f6a2ca45SPaolo Bonzini }
115*f6a2ca45SPaolo Bonzini 
116726a1dd7SPaolo Bonzini static u64 *npt_get_pte(u64 address)
1178594b943SJoerg Roedel {
1188594b943SJoerg Roedel     int i1, i2;
1198594b943SJoerg Roedel 
1208594b943SJoerg Roedel     address >>= 12;
1218594b943SJoerg Roedel     i1 = (address >> 9) & 0x7ff;
1228594b943SJoerg Roedel     i2 = address & 0x1ff;
1238594b943SJoerg Roedel 
1248594b943SJoerg Roedel     return &pte[i1][i2];
1258594b943SJoerg Roedel }
1268594b943SJoerg Roedel 
1277d36db35SAvi Kivity static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
1287d36db35SAvi Kivity                          u64 base, u32 limit, u32 attr)
1297d36db35SAvi Kivity {
1307d36db35SAvi Kivity     seg->selector = selector;
1317d36db35SAvi Kivity     seg->attrib = attr;
1327d36db35SAvi Kivity     seg->limit = limit;
1337d36db35SAvi Kivity     seg->base = base;
1347d36db35SAvi Kivity }
1357d36db35SAvi Kivity 
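/*
 * Initialize a VMCB whose guest state mirrors the current host state
 * (segments, GDT/IDT, control registers, EFER, PAT, debug registers), so
 * the guest runs this test binary directly.  Only VMRUN and VMMCALL are
 * intercepted by default; nested paging is enabled when supported.
 */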
1367d36db35SAvi Kivity static void vmcb_ident(struct vmcb *vmcb)
1377d36db35SAvi Kivity {
1387d36db35SAvi Kivity     u64 vmcb_phys = virt_to_phys(vmcb);
1397d36db35SAvi Kivity     struct vmcb_save_area *save = &vmcb->save;
1407d36db35SAvi Kivity     struct vmcb_control_area *ctrl = &vmcb->control;
1417d36db35SAvi Kivity     u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
1427d36db35SAvi Kivity         | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
1437d36db35SAvi Kivity     u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
1447d36db35SAvi Kivity         | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
1457d36db35SAvi Kivity     struct descriptor_table_ptr desc_table_ptr;
1467d36db35SAvi Kivity 
1477d36db35SAvi Kivity     memset(vmcb, 0, sizeof(*vmcb));
1487d36db35SAvi Kivity     asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
1497d36db35SAvi Kivity     vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
1507d36db35SAvi Kivity     vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
1517d36db35SAvi Kivity     vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
1527d36db35SAvi Kivity     vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
1537d36db35SAvi Kivity     sgdt(&desc_table_ptr);
1547d36db35SAvi Kivity     vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
1557d36db35SAvi Kivity     sidt(&desc_table_ptr);
1567d36db35SAvi Kivity     vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
1577d36db35SAvi Kivity     ctrl->asid = 1;
1587d36db35SAvi Kivity     save->cpl = 0;
1597d36db35SAvi Kivity     save->efer = rdmsr(MSR_EFER);
1607d36db35SAvi Kivity     save->cr4 = read_cr4();
1617d36db35SAvi Kivity     save->cr3 = read_cr3();
1627d36db35SAvi Kivity     save->cr0 = read_cr0();
1637d36db35SAvi Kivity     save->dr7 = read_dr7();
1647d36db35SAvi Kivity     save->dr6 = read_dr6();
1657d36db35SAvi Kivity     save->cr2 = read_cr2();
1667d36db35SAvi Kivity     save->g_pat = rdmsr(MSR_IA32_CR_PAT);
1677d36db35SAvi Kivity     save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
1687d36db35SAvi Kivity     ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
1693d46571bSPaolo Bonzini     ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
1701535bf0fSJoerg Roedel 
1711535bf0fSJoerg Roedel     if (npt_supported()) {
1721535bf0fSJoerg Roedel         ctrl->nested_ctl = 1;
1731535bf0fSJoerg Roedel         ctrl->nested_cr3 = (u64)pml4e;
1741535bf0fSJoerg Roedel     }
1757d36db35SAvi Kivity }
1767d36db35SAvi Kivity 
1777d36db35SAvi Kivity struct test {
1787d36db35SAvi Kivity     const char *name;
1797d36db35SAvi Kivity     bool (*supported)(void);
1807d36db35SAvi Kivity     void (*prepare)(struct test *test);
1817d36db35SAvi Kivity     void (*guest_func)(struct test *test);
1827d36db35SAvi Kivity     bool (*finished)(struct test *test);
1837d36db35SAvi Kivity     bool (*succeeded)(struct test *test);
1847d36db35SAvi Kivity     struct vmcb *vmcb;
1857d36db35SAvi Kivity     int exits;
1867d36db35SAvi Kivity     ulong scratch;
1877d36db35SAvi Kivity };
1887d36db35SAvi Kivity 
189e0b6541cSPaolo Bonzini static inline void vmmcall(void)
190e0b6541cSPaolo Bonzini {
191e0b6541cSPaolo Bonzini     asm volatile ("vmmcall" : : : "memory");
192e0b6541cSPaolo Bonzini }
193e0b6541cSPaolo Bonzini 
1947d36db35SAvi Kivity static void test_thunk(struct test *test)
1957d36db35SAvi Kivity {
1967d36db35SAvi Kivity     test->guest_func(test);
197e0b6541cSPaolo Bonzini     vmmcall();
1987d36db35SAvi Kivity }
1997d36db35SAvi Kivity 
200a43baea0SPaolo Bonzini struct regs {
201a43baea0SPaolo Bonzini         u64 rax;
202a43baea0SPaolo Bonzini         u64 rbx;
203a43baea0SPaolo Bonzini         u64 rcx;
204a43baea0SPaolo Bonzini         u64 rdx;
205a43baea0SPaolo Bonzini         u64 cr2;
206a43baea0SPaolo Bonzini         u64 rbp;
207a43baea0SPaolo Bonzini         u64 rsi;
208a43baea0SPaolo Bonzini         u64 rdi;
209a43baea0SPaolo Bonzini         u64 r8;
210a43baea0SPaolo Bonzini         u64 r9;
211a43baea0SPaolo Bonzini         u64 r10;
212a43baea0SPaolo Bonzini         u64 r11;
213a43baea0SPaolo Bonzini         u64 r12;
214a43baea0SPaolo Bonzini         u64 r13;
215a43baea0SPaolo Bonzini         u64 r14;
216a43baea0SPaolo Bonzini         u64 r15;
217a43baea0SPaolo Bonzini         u64 rflags;
218a43baea0SPaolo Bonzini };
219a43baea0SPaolo Bonzini 
220a43baea0SPaolo Bonzini struct regs regs;
221a43baea0SPaolo Bonzini 
222a43baea0SPaolo Bonzini // rax handled specially below
223a43baea0SPaolo Bonzini 
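/*
 * Swap the host GPRs with the guest values kept in "regs".  Because xchg is
 * its own inverse, the same sequence serves as both save and restore, hence
 * LOAD_GPR_C below.  The offsets match the field layout of struct regs
 * above; rax and rflags are instead transferred through the VMCB save area
 * in test_run().
 */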
224a43baea0SPaolo Bonzini #define SAVE_GPR_C                              \
225a43baea0SPaolo Bonzini         "xchg %%rbx, regs+0x8\n\t"              \
226a43baea0SPaolo Bonzini         "xchg %%rcx, regs+0x10\n\t"             \
227a43baea0SPaolo Bonzini         "xchg %%rdx, regs+0x18\n\t"             \
228a43baea0SPaolo Bonzini         "xchg %%rbp, regs+0x28\n\t"             \
229a43baea0SPaolo Bonzini         "xchg %%rsi, regs+0x30\n\t"             \
230a43baea0SPaolo Bonzini         "xchg %%rdi, regs+0x38\n\t"             \
231a43baea0SPaolo Bonzini         "xchg %%r8, regs+0x40\n\t"              \
232a43baea0SPaolo Bonzini         "xchg %%r9, regs+0x48\n\t"              \
233a43baea0SPaolo Bonzini         "xchg %%r10, regs+0x50\n\t"             \
234a43baea0SPaolo Bonzini         "xchg %%r11, regs+0x58\n\t"             \
235a43baea0SPaolo Bonzini         "xchg %%r12, regs+0x60\n\t"             \
236a43baea0SPaolo Bonzini         "xchg %%r13, regs+0x68\n\t"             \
237a43baea0SPaolo Bonzini         "xchg %%r14, regs+0x70\n\t"             \
238a43baea0SPaolo Bonzini         "xchg %%r15, regs+0x78\n\t"
239a43baea0SPaolo Bonzini 
240a43baea0SPaolo Bonzini #define LOAD_GPR_C      SAVE_GPR_C
241a43baea0SPaolo Bonzini 
2427d36db35SAvi Kivity static bool test_run(struct test *test, struct vmcb *vmcb)
2437d36db35SAvi Kivity {
2447d36db35SAvi Kivity     u64 vmcb_phys = virt_to_phys(vmcb);
2457d36db35SAvi Kivity     u64 guest_stack[10000];
2467d36db35SAvi Kivity     bool success;
2477d36db35SAvi Kivity 
2487d36db35SAvi Kivity     test->vmcb = vmcb;
2497d36db35SAvi Kivity     test->prepare(test);
2507d36db35SAvi Kivity     vmcb->save.rip = (ulong)test_thunk;
2517d36db35SAvi Kivity     vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
252a43baea0SPaolo Bonzini     regs.rdi = (ulong)test;
2537d36db35SAvi Kivity     do {
25421c23154SJoerg Roedel         tsc_start = rdtsc();
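        /*
         * World switch: CLGI blocks interrupts, VMLOAD loads the extra
         * guest state (FS/GS/TR/LDTR bases, syscall MSRs) that VMRUN does
         * not, rflags/rax are copied from "regs" into the VMCB save area,
         * the remaining GPRs are swapped in via LOAD_GPR_C, and VMRUN
         * enters the guest.  The reverse sequence runs after #VMEXIT.
         */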
2557d36db35SAvi Kivity         asm volatile (
2567d36db35SAvi Kivity             "clgi \n\t"
2577d36db35SAvi Kivity             "vmload \n\t"
258a43baea0SPaolo Bonzini             "mov regs+0x80, %%r15\n\t"  // rflags
259a43baea0SPaolo Bonzini             "mov %%r15, 0x170(%0)\n\t"
260a43baea0SPaolo Bonzini             "mov regs, %%r15\n\t"       // rax
261a43baea0SPaolo Bonzini             "mov %%r15, 0x1f8(%0)\n\t"
262a43baea0SPaolo Bonzini             LOAD_GPR_C
2637d36db35SAvi Kivity             "vmrun \n\t"
264a43baea0SPaolo Bonzini             SAVE_GPR_C
265a43baea0SPaolo Bonzini             "mov 0x170(%0), %%r15\n\t"  // rflags
266a43baea0SPaolo Bonzini             "mov %%r15, regs+0x80\n\t"
267a43baea0SPaolo Bonzini             "mov 0x1f8(%0), %%r15\n\t"  // rax
268a43baea0SPaolo Bonzini             "mov %%r15, regs\n\t"
2697d36db35SAvi Kivity             "vmsave \n\t"
2707d36db35SAvi Kivity             "stgi"
271a43baea0SPaolo Bonzini             : : "a"(vmcb_phys)
2727d36db35SAvi Kivity             : "rbx", "rcx", "rdx", "rsi",
2737d36db35SAvi Kivity               "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
2747d36db35SAvi Kivity               "memory");
27521c23154SJoerg Roedel         tsc_end = rdtsc();
2767d36db35SAvi Kivity         ++test->exits;
2777d36db35SAvi Kivity     } while (!test->finished(test));
2787d36db35SAvi Kivity 
27921c23154SJoerg Roedel 
2807d36db35SAvi Kivity     success = test->succeeded(test);
2817d36db35SAvi Kivity 
2827d36db35SAvi Kivity     printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
2837d36db35SAvi Kivity 
2847d36db35SAvi Kivity     return success;
2857d36db35SAvi Kivity }
2867d36db35SAvi Kivity 
287095274b4SPrasad Joshi static bool smp_supported(void)
288095274b4SPrasad Joshi {
289095274b4SPrasad Joshi 	return cpu_count() > 1;
290095274b4SPrasad Joshi }
291095274b4SPrasad Joshi 
2927d36db35SAvi Kivity static bool default_supported(void)
2937d36db35SAvi Kivity {
2947d36db35SAvi Kivity     return true;
2957d36db35SAvi Kivity }
2967d36db35SAvi Kivity 
2977d36db35SAvi Kivity static void default_prepare(struct test *test)
2987d36db35SAvi Kivity {
2997d36db35SAvi Kivity     vmcb_ident(test->vmcb);
3007d36db35SAvi Kivity     cli();
3017d36db35SAvi Kivity }
3027d36db35SAvi Kivity 
3037d36db35SAvi Kivity static bool default_finished(struct test *test)
3047d36db35SAvi Kivity {
3057d36db35SAvi Kivity     return true; /* one vmexit */
3067d36db35SAvi Kivity }
3077d36db35SAvi Kivity 
3087d36db35SAvi Kivity static void null_test(struct test *test)
3097d36db35SAvi Kivity {
3107d36db35SAvi Kivity }
3117d36db35SAvi Kivity 
3127d36db35SAvi Kivity static bool null_check(struct test *test)
3137d36db35SAvi Kivity {
3147d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
3157d36db35SAvi Kivity }
3167d36db35SAvi Kivity 
3177d36db35SAvi Kivity static void prepare_no_vmrun_int(struct test *test)
3187d36db35SAvi Kivity {
3197d36db35SAvi Kivity     test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
3207d36db35SAvi Kivity }
3217d36db35SAvi Kivity 
3227d36db35SAvi Kivity static bool check_no_vmrun_int(struct test *test)
3237d36db35SAvi Kivity {
3247d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
3257d36db35SAvi Kivity }
3267d36db35SAvi Kivity 
3277d36db35SAvi Kivity static void test_vmrun(struct test *test)
3287d36db35SAvi Kivity {
3297d36db35SAvi Kivity     asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
3307d36db35SAvi Kivity }
3317d36db35SAvi Kivity 
3327d36db35SAvi Kivity static bool check_vmrun(struct test *test)
3337d36db35SAvi Kivity {
3347d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
3357d36db35SAvi Kivity }
3367d36db35SAvi Kivity 
3377d36db35SAvi Kivity static void prepare_cr3_intercept(struct test *test)
3387d36db35SAvi Kivity {
3397d36db35SAvi Kivity     default_prepare(test);
3407d36db35SAvi Kivity     test->vmcb->control.intercept_cr_read |= 1 << 3;
3417d36db35SAvi Kivity }
3427d36db35SAvi Kivity 
3437d36db35SAvi Kivity static void test_cr3_intercept(struct test *test)
3447d36db35SAvi Kivity {
3457d36db35SAvi Kivity     asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
3467d36db35SAvi Kivity }
3477d36db35SAvi Kivity 
3487d36db35SAvi Kivity static bool check_cr3_intercept(struct test *test)
3497d36db35SAvi Kivity {
3507d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
3517d36db35SAvi Kivity }
3527d36db35SAvi Kivity 
3537d36db35SAvi Kivity static bool check_cr3_nointercept(struct test *test)
3547d36db35SAvi Kivity {
3557d36db35SAvi Kivity     return null_check(test) && test->scratch == read_cr3();
3567d36db35SAvi Kivity }
3577d36db35SAvi Kivity 
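/*
 * Runs on CPU 1: wait until the guest on the boot CPU signals that it is
 * about to execute the instruction at the mmio_insn label (a store to
 * 0xa0000, the legacy VGA window), then patch that instruction into
 * "mov %cr3, %rax; nop".  The intent is that the hypervisor ends up
 * emulating the patched instruction and must still report the CR3 read
 * intercept.
 */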
3587d36db35SAvi Kivity static void corrupt_cr3_intercept_bypass(void *_test)
3597d36db35SAvi Kivity {
3607d36db35SAvi Kivity     struct test *test = _test;
3617d36db35SAvi Kivity     extern volatile u32 mmio_insn;
3627d36db35SAvi Kivity 
3637d36db35SAvi Kivity     while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
3647d36db35SAvi Kivity         pause();
3657d36db35SAvi Kivity     pause();
3667d36db35SAvi Kivity     pause();
3677d36db35SAvi Kivity     pause();
3687d36db35SAvi Kivity     mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
3697d36db35SAvi Kivity }
3707d36db35SAvi Kivity 
3717d36db35SAvi Kivity static void prepare_cr3_intercept_bypass(struct test *test)
3727d36db35SAvi Kivity {
3737d36db35SAvi Kivity     default_prepare(test);
3747d36db35SAvi Kivity     test->vmcb->control.intercept_cr_read |= 1 << 3;
3757d36db35SAvi Kivity     on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
3767d36db35SAvi Kivity }
3777d36db35SAvi Kivity 
3787d36db35SAvi Kivity static void test_cr3_intercept_bypass(struct test *test)
3797d36db35SAvi Kivity {
3807d36db35SAvi Kivity     ulong a = 0xa0000;
3817d36db35SAvi Kivity 
3827d36db35SAvi Kivity     test->scratch = 1;
3837d36db35SAvi Kivity     while (test->scratch != 2)
3847d36db35SAvi Kivity         barrier();
3857d36db35SAvi Kivity 
3867d36db35SAvi Kivity     asm volatile ("mmio_insn: mov %0, (%0); nop"
3877d36db35SAvi Kivity                   : "+a"(a) : : "memory");
3887d36db35SAvi Kivity     test->scratch = a;
3897d36db35SAvi Kivity }
3907d36db35SAvi Kivity 
3917d36db35SAvi Kivity static bool next_rip_supported(void)
3927d36db35SAvi Kivity {
3937d36db35SAvi Kivity     return (cpuid(SVM_CPUID_FUNC).d & 8);
3947d36db35SAvi Kivity }
3957d36db35SAvi Kivity 
3967d36db35SAvi Kivity static void prepare_next_rip(struct test *test)
3977d36db35SAvi Kivity {
3987d36db35SAvi Kivity     test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
3997d36db35SAvi Kivity }
4007d36db35SAvi Kivity 
4017d36db35SAvi Kivity 
4027d36db35SAvi Kivity static void test_next_rip(struct test *test)
4037d36db35SAvi Kivity {
4047d36db35SAvi Kivity     asm volatile ("rdtsc\n\t"
4057d36db35SAvi Kivity                   ".globl exp_next_rip\n\t"
4067d36db35SAvi Kivity                   "exp_next_rip:\n\t" ::: "eax", "edx");
4077d36db35SAvi Kivity }
4087d36db35SAvi Kivity 
4097d36db35SAvi Kivity static bool check_next_rip(struct test *test)
4107d36db35SAvi Kivity {
4117d36db35SAvi Kivity     extern char exp_next_rip;
4127d36db35SAvi Kivity     unsigned long address = (unsigned long)&exp_next_rip;
4137d36db35SAvi Kivity 
4147d36db35SAvi Kivity     return address == test->vmcb->control.next_rip;
4157d36db35SAvi Kivity }
4167d36db35SAvi Kivity 
4177d36db35SAvi Kivity static void prepare_mode_switch(struct test *test)
4187d36db35SAvi Kivity {
4197d36db35SAvi Kivity     test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
4207d36db35SAvi Kivity                                              |  (1ULL << UD_VECTOR)
4217d36db35SAvi Kivity                                              |  (1ULL << DF_VECTOR)
4227d36db35SAvi Kivity                                              |  (1ULL << PF_VECTOR);
4237d36db35SAvi Kivity     test->scratch = 0;
4247d36db35SAvi Kivity }
4257d36db35SAvi Kivity 
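/*
 * Walk the guest from 64-bit long mode down to real mode and back:
 * long mode -> 32-bit protected mode (clear PG, LME, PAE) -> 16-bit
 * protected mode -> real mode, VMMCALL, then the same steps in reverse
 * back to long mode and a final VMMCALL.  mode_switch_finished() checks
 * CR0/CR4/EFER at each VMMCALL exit.
 */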
4267d36db35SAvi Kivity static void test_mode_switch(struct test *test)
4277d36db35SAvi Kivity {
4287d36db35SAvi Kivity     asm volatile("	cli\n"
4297d36db35SAvi Kivity 		 "	ljmp *1f\n" /* jump to 32-bit code segment */
4307d36db35SAvi Kivity 		 "1:\n"
4317d36db35SAvi Kivity 		 "	.long 2f\n"
432b46094b4SPaolo Bonzini 		 "	.long " xstr(KERNEL_CS32) "\n"
4337d36db35SAvi Kivity 		 ".code32\n"
4347d36db35SAvi Kivity 		 "2:\n"
4357d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4367d36db35SAvi Kivity 		 "	btcl  $31, %%eax\n" /* clear PG */
4377d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
4387d36db35SAvi Kivity 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
4397d36db35SAvi Kivity 		 "	rdmsr\n"
4407d36db35SAvi Kivity 		 "	btcl $8, %%eax\n" /* clear LME */
4417d36db35SAvi Kivity 		 "	wrmsr\n"
4427d36db35SAvi Kivity 		 "	movl %%cr4, %%eax\n"
4437d36db35SAvi Kivity 		 "	btcl $5, %%eax\n" /* clear PAE */
4447d36db35SAvi Kivity 		 "	movl %%eax, %%cr4\n"
445b46094b4SPaolo Bonzini 		 "	movw %[ds16], %%ax\n"
4467d36db35SAvi Kivity 		 "	movw %%ax, %%ds\n"
447b46094b4SPaolo Bonzini 		 "	ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
4487d36db35SAvi Kivity 		 ".code16\n"
4497d36db35SAvi Kivity 		 "3:\n"
4507d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4517d36db35SAvi Kivity 		 "	btcl $0, %%eax\n" /* clear PE  */
4527d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
4537d36db35SAvi Kivity 		 "	ljmpl $0, $4f\n"   /* jump to real-mode */
4547d36db35SAvi Kivity 		 "4:\n"
4557d36db35SAvi Kivity 		 "	vmmcall\n"
4567d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4577d36db35SAvi Kivity 		 "	btsl $0, %%eax\n" /* set PE  */
4587d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
459b46094b4SPaolo Bonzini 		 "	ljmpl %[cs32], $5f\n" /* back to protected mode */
4607d36db35SAvi Kivity 		 ".code32\n"
4617d36db35SAvi Kivity 		 "5:\n"
4627d36db35SAvi Kivity 		 "	movl %%cr4, %%eax\n"
4637d36db35SAvi Kivity 		 "	btsl $5, %%eax\n" /* set PAE */
4647d36db35SAvi Kivity 		 "	movl %%eax, %%cr4\n"
4657d36db35SAvi Kivity 		 "	movl $0xc0000080, %%ecx\n" /* EFER */
4667d36db35SAvi Kivity 		 "	rdmsr\n"
4677d36db35SAvi Kivity 		 "	btsl $8, %%eax\n" /* set LME */
4687d36db35SAvi Kivity 		 "	wrmsr\n"
4697d36db35SAvi Kivity 		 "	movl %%cr0, %%eax\n"
4707d36db35SAvi Kivity 		 "	btsl  $31, %%eax\n" /* set PG */
4717d36db35SAvi Kivity 		 "	movl %%eax, %%cr0\n"
472b46094b4SPaolo Bonzini 		 "	ljmpl %[cs64], $6f\n"    /* back to long mode */
4737d36db35SAvi Kivity 		 ".code64\n\t"
4747d36db35SAvi Kivity 		 "6:\n"
4757d36db35SAvi Kivity 		 "	vmmcall\n"
476b46094b4SPaolo Bonzini 		 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
477b46094b4SPaolo Bonzini 		    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
478b46094b4SPaolo Bonzini 		 : "rax", "rbx", "rcx", "rdx", "memory");
4797d36db35SAvi Kivity }
4807d36db35SAvi Kivity 
4817d36db35SAvi Kivity static bool mode_switch_finished(struct test *test)
4827d36db35SAvi Kivity {
4837d36db35SAvi Kivity     u64 cr0, cr4, efer;
4847d36db35SAvi Kivity 
4857d36db35SAvi Kivity     cr0  = test->vmcb->save.cr0;
4867d36db35SAvi Kivity     cr4  = test->vmcb->save.cr4;
4877d36db35SAvi Kivity     efer = test->vmcb->save.efer;
4887d36db35SAvi Kivity 
4897d36db35SAvi Kivity     /* Only expect VMMCALL intercepts */
4907d36db35SAvi Kivity     if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
4917d36db35SAvi Kivity 	    return true;
4927d36db35SAvi Kivity 
4937d36db35SAvi Kivity     /* Jump over VMMCALL instruction */
4947d36db35SAvi Kivity     test->vmcb->save.rip += 3;
4957d36db35SAvi Kivity 
4967d36db35SAvi Kivity     /* Do sanity checks */
4977d36db35SAvi Kivity     switch (test->scratch) {
4987d36db35SAvi Kivity     case 0:
4997d36db35SAvi Kivity         /* Test should be in real mode now - check for this */
5007d36db35SAvi Kivity         if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
5017d36db35SAvi Kivity             (cr4  & 0x00000020) || /* CR4.PAE */
5027d36db35SAvi Kivity             (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
5037d36db35SAvi Kivity                 return true;
5047d36db35SAvi Kivity         break;
5057d36db35SAvi Kivity     case 2:
5067d36db35SAvi Kivity         /* Test should be back in long-mode now - check for this */
5077d36db35SAvi Kivity         if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
5087d36db35SAvi Kivity             ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
5097d36db35SAvi Kivity             ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
5107d36db35SAvi Kivity 		    return true;
5117d36db35SAvi Kivity 	break;
5127d36db35SAvi Kivity     }
5137d36db35SAvi Kivity 
5147d36db35SAvi Kivity     /* one step forward */
5157d36db35SAvi Kivity     test->scratch += 1;
5167d36db35SAvi Kivity 
5177d36db35SAvi Kivity     return test->scratch == 2;
5187d36db35SAvi Kivity }
5197d36db35SAvi Kivity 
5207d36db35SAvi Kivity static bool check_mode_switch(struct test *test)
5217d36db35SAvi Kivity {
5227d36db35SAvi Kivity 	return test->scratch == 2;
5237d36db35SAvi Kivity }
5247d36db35SAvi Kivity 
525bcd9774aSPaolo Bonzini static void prepare_ioio(struct test *test)
526bcd9774aSPaolo Bonzini {
527bcd9774aSPaolo Bonzini     test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
528bcd9774aSPaolo Bonzini     test->scratch = 0;
529bcd9774aSPaolo Bonzini     memset(io_bitmap, 0, 8192);
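    /*
     * The byte just past the 8k bitmap covers multi-byte accesses that
     * extend beyond port 0xffff (see the inl(0xFFFF) cases in test_ioio).
     */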
530bcd9774aSPaolo Bonzini     io_bitmap[8192] = 0xFF;
531bcd9774aSPaolo Bonzini }
532bcd9774aSPaolo Bonzini 
533bcd9774aSPaolo Bonzini int get_test_stage(struct test *test)
534bcd9774aSPaolo Bonzini {
535bcd9774aSPaolo Bonzini     barrier();
536bcd9774aSPaolo Bonzini     return test->scratch;
537bcd9774aSPaolo Bonzini }
538bcd9774aSPaolo Bonzini 
539bcd9774aSPaolo Bonzini void inc_test_stage(struct test *test)
540bcd9774aSPaolo Bonzini {
541bcd9774aSPaolo Bonzini     barrier();
542bcd9774aSPaolo Bonzini     test->scratch++;
543bcd9774aSPaolo Bonzini     barrier();
544bcd9774aSPaolo Bonzini }
545bcd9774aSPaolo Bonzini 
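/*
 * Guest side of the IOIO test.  Each I/O access below either passes
 * straight through (its bits in io_bitmap are clear) or is intercepted;
 * every intercept makes ioio_finished() bump test->scratch and clear the
 * offending bitmap bits, so the stage checks tell which accesses trapped.
 */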
546bcd9774aSPaolo Bonzini static void test_ioio(struct test *test)
547bcd9774aSPaolo Bonzini {
548bcd9774aSPaolo Bonzini     // stage 0, test IO pass
549bcd9774aSPaolo Bonzini     inb(0x5000);
550bcd9774aSPaolo Bonzini     outb(0x0, 0x5000);
551bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 0)
552bcd9774aSPaolo Bonzini         goto fail;
553bcd9774aSPaolo Bonzini 
554bcd9774aSPaolo Bonzini     // test IO width, in/out
555bcd9774aSPaolo Bonzini     io_bitmap[0] = 0xFF;
556bcd9774aSPaolo Bonzini     inc_test_stage(test);
557bcd9774aSPaolo Bonzini     inb(0x0);
558bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 2)
559bcd9774aSPaolo Bonzini         goto fail;
560bcd9774aSPaolo Bonzini 
561bcd9774aSPaolo Bonzini     outw(0x0, 0x0);
562bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 3)
563bcd9774aSPaolo Bonzini         goto fail;
564bcd9774aSPaolo Bonzini 
565bcd9774aSPaolo Bonzini     inl(0x0);
566bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 4)
567bcd9774aSPaolo Bonzini         goto fail;
568bcd9774aSPaolo Bonzini 
569bcd9774aSPaolo Bonzini     // test low/high IO port
570bcd9774aSPaolo Bonzini     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
571bcd9774aSPaolo Bonzini     inb(0x5000);
572bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 5)
573bcd9774aSPaolo Bonzini         goto fail;
574bcd9774aSPaolo Bonzini 
575bcd9774aSPaolo Bonzini     io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
576bcd9774aSPaolo Bonzini     inw(0x9000);
577bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 6)
578bcd9774aSPaolo Bonzini         goto fail;
579bcd9774aSPaolo Bonzini 
580bcd9774aSPaolo Bonzini     // test partial pass
581bcd9774aSPaolo Bonzini     io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
582bcd9774aSPaolo Bonzini     inl(0x4FFF);
583bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 7)
584bcd9774aSPaolo Bonzini         goto fail;
585bcd9774aSPaolo Bonzini 
586bcd9774aSPaolo Bonzini     // test across pages
587bcd9774aSPaolo Bonzini     inc_test_stage(test);
588bcd9774aSPaolo Bonzini     inl(0x7FFF);
589bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 8)
590bcd9774aSPaolo Bonzini         goto fail;
591bcd9774aSPaolo Bonzini 
592bcd9774aSPaolo Bonzini     inc_test_stage(test);
593bcd9774aSPaolo Bonzini     io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
594bcd9774aSPaolo Bonzini     inl(0x7FFF);
595bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 10)
596bcd9774aSPaolo Bonzini         goto fail;
597bcd9774aSPaolo Bonzini 
598bcd9774aSPaolo Bonzini     io_bitmap[0] = 0;
599bcd9774aSPaolo Bonzini     inl(0xFFFF);
600bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 11)
601bcd9774aSPaolo Bonzini         goto fail;
602bcd9774aSPaolo Bonzini 
603bcd9774aSPaolo Bonzini     io_bitmap[0] = 0xFF;
604bcd9774aSPaolo Bonzini     io_bitmap[8192] = 0;
605bcd9774aSPaolo Bonzini     inl(0xFFFF);
606bcd9774aSPaolo Bonzini     inc_test_stage(test);
607bcd9774aSPaolo Bonzini     if (get_test_stage(test) != 12)
608bcd9774aSPaolo Bonzini         goto fail;
609bcd9774aSPaolo Bonzini 
610bcd9774aSPaolo Bonzini     return;
611bcd9774aSPaolo Bonzini 
612bcd9774aSPaolo Bonzini fail:
613bcd9774aSPaolo Bonzini     printf("test failure, stage %d\n", get_test_stage(test));
614bcd9774aSPaolo Bonzini     test->scratch = -1;
615bcd9774aSPaolo Bonzini }
616bcd9774aSPaolo Bonzini 
617bcd9774aSPaolo Bonzini static bool ioio_finished(struct test *test)
618bcd9774aSPaolo Bonzini {
619bcd9774aSPaolo Bonzini     unsigned port, size;
620bcd9774aSPaolo Bonzini 
621bcd9774aSPaolo Bonzini     /* Only expect IOIO intercepts */
622bcd9774aSPaolo Bonzini     if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
623bcd9774aSPaolo Bonzini         return true;
624bcd9774aSPaolo Bonzini 
625bcd9774aSPaolo Bonzini     if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
626bcd9774aSPaolo Bonzini         return true;
627bcd9774aSPaolo Bonzini 
628bcd9774aSPaolo Bonzini     /* one step forward */
629bcd9774aSPaolo Bonzini     test->scratch += 1;
630bcd9774aSPaolo Bonzini 
631bcd9774aSPaolo Bonzini     port = test->vmcb->control.exit_info_1 >> 16;
632bcd9774aSPaolo Bonzini     size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;
633bcd9774aSPaolo Bonzini 
634bcd9774aSPaolo Bonzini     while (size--) {
635bcd9774aSPaolo Bonzini         io_bitmap[port / 8] &= ~(1 << (port & 7));
636bcd9774aSPaolo Bonzini         port++;
637bcd9774aSPaolo Bonzini     }
638bcd9774aSPaolo Bonzini 
639bcd9774aSPaolo Bonzini     return false;
640bcd9774aSPaolo Bonzini }
641bcd9774aSPaolo Bonzini 
642bcd9774aSPaolo Bonzini static bool check_ioio(struct test *test)
643bcd9774aSPaolo Bonzini {
644bcd9774aSPaolo Bonzini     memset(io_bitmap, 0, 8193);
645bcd9774aSPaolo Bonzini     return test->scratch != -1;
646bcd9774aSPaolo Bonzini }
647bcd9774aSPaolo Bonzini 
6487d36db35SAvi Kivity static void prepare_asid_zero(struct test *test)
6497d36db35SAvi Kivity {
6507d36db35SAvi Kivity     test->vmcb->control.asid = 0;
6517d36db35SAvi Kivity }
6527d36db35SAvi Kivity 
6537d36db35SAvi Kivity static void test_asid_zero(struct test *test)
6547d36db35SAvi Kivity {
6557d36db35SAvi Kivity     asm volatile ("vmmcall\n\t");
6567d36db35SAvi Kivity }
6577d36db35SAvi Kivity 
6587d36db35SAvi Kivity static bool check_asid_zero(struct test *test)
6597d36db35SAvi Kivity {
6607d36db35SAvi Kivity     return test->vmcb->control.exit_code == SVM_EXIT_ERR;
6617d36db35SAvi Kivity }
6627d36db35SAvi Kivity 
6634c8eb156SJoerg Roedel static void sel_cr0_bug_prepare(struct test *test)
6644c8eb156SJoerg Roedel {
6654c8eb156SJoerg Roedel     vmcb_ident(test->vmcb);
6664c8eb156SJoerg Roedel     test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
6674c8eb156SJoerg Roedel }
6684c8eb156SJoerg Roedel 
6694c8eb156SJoerg Roedel static bool sel_cr0_bug_finished(struct test *test)
6704c8eb156SJoerg Roedel {
6714c8eb156SJoerg Roedel 	return true;
6724c8eb156SJoerg Roedel }
6734c8eb156SJoerg Roedel 
6744c8eb156SJoerg Roedel static void sel_cr0_bug_test(struct test *test)
6754c8eb156SJoerg Roedel {
6764c8eb156SJoerg Roedel     unsigned long cr0;
6774c8eb156SJoerg Roedel 
6784c8eb156SJoerg Roedel     /* read cr0, clear CD, and write back */
6794c8eb156SJoerg Roedel     cr0  = read_cr0();
6804c8eb156SJoerg Roedel     cr0 |= (1UL << 30);
6814c8eb156SJoerg Roedel     write_cr0(cr0);
6824c8eb156SJoerg Roedel 
6834c8eb156SJoerg Roedel     /*
6844c8eb156SJoerg Roedel      * If we are here the test failed: we are not in guest mode anymore,
6854c8eb156SJoerg Roedel      * so we cannot trigger an intercept to report the failure.
6864c8eb156SJoerg Roedel      * Trigger a triple fault for now.
6874c8eb156SJoerg Roedel      */
6884c8eb156SJoerg Roedel     printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
6894c8eb156SJoerg Roedel     exit(1);
6904c8eb156SJoerg Roedel }
6914c8eb156SJoerg Roedel 
6924c8eb156SJoerg Roedel static bool sel_cr0_bug_check(struct test *test)
6934c8eb156SJoerg Roedel {
6944c8eb156SJoerg Roedel     return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
6954c8eb156SJoerg Roedel }
6964c8eb156SJoerg Roedel 
6978594b943SJoerg Roedel static void npt_nx_prepare(struct test *test)
6988594b943SJoerg Roedel {
6998594b943SJoerg Roedel 
7008594b943SJoerg Roedel     u64 *pte;
7018594b943SJoerg Roedel 
7028594b943SJoerg Roedel     vmcb_ident(test->vmcb);
703726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)null_test);
7048594b943SJoerg Roedel 
7058594b943SJoerg Roedel     *pte |= (1ULL << 63);
7068594b943SJoerg Roedel }
7078594b943SJoerg Roedel 
7088594b943SJoerg Roedel static bool npt_nx_check(struct test *test)
7098594b943SJoerg Roedel {
710726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)null_test);
7118594b943SJoerg Roedel 
7128594b943SJoerg Roedel     *pte &= ~(1ULL << 63);
7138594b943SJoerg Roedel 
7148594b943SJoerg Roedel     test->vmcb->save.efer |= (1 << 11);
7158594b943SJoerg Roedel 
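    /*
     * exit_info_1 carries a #PF-style error code: bit 0 P, bit 1 W,
     * bit 2 US, bit 3 RSVD, bit 4 I/D; bit 32 flags a fault on the guest's
     * final physical address, bit 33 a fault during the guest page-table
     * walk.  Here 0x100000015 = final-PA | I/D | US | P, i.e. an
     * instruction fetch blocked by the NX bit set in npt_nx_prepare().
     */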
7168594b943SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
717e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
7188594b943SJoerg Roedel }
7198594b943SJoerg Roedel 
720ea975120SJoerg Roedel static void npt_us_prepare(struct test *test)
721ea975120SJoerg Roedel {
722ea975120SJoerg Roedel     u64 *pte;
723ea975120SJoerg Roedel 
724ea975120SJoerg Roedel     vmcb_ident(test->vmcb);
725726a1dd7SPaolo Bonzini     pte = npt_get_pte((u64)scratch_page);
726ea975120SJoerg Roedel 
727ea975120SJoerg Roedel     *pte &= ~(1ULL << 2);
728ea975120SJoerg Roedel }
729ea975120SJoerg Roedel 
730ea975120SJoerg Roedel static void npt_us_test(struct test *test)
731ea975120SJoerg Roedel {
732c0a4e715SPaolo Bonzini     (void) *(volatile u64 *)scratch_page;
733ea975120SJoerg Roedel }
734ea975120SJoerg Roedel 
735ea975120SJoerg Roedel static bool npt_us_check(struct test *test)
736ea975120SJoerg Roedel {
737726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte((u64)scratch_page);
738ea975120SJoerg Roedel 
739ea975120SJoerg Roedel     *pte |= (1ULL << 2);
740ea975120SJoerg Roedel 
741ea975120SJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
742e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
743ea975120SJoerg Roedel }
744ea975120SJoerg Roedel 
745*f6a2ca45SPaolo Bonzini u64 save_pde;
746*f6a2ca45SPaolo Bonzini 
747dd6ef43cSJoerg Roedel static void npt_rsvd_prepare(struct test *test)
748dd6ef43cSJoerg Roedel {
749*f6a2ca45SPaolo Bonzini     u64 *pde;
750dd6ef43cSJoerg Roedel 
751dd6ef43cSJoerg Roedel     vmcb_ident(test->vmcb);
752*f6a2ca45SPaolo Bonzini     pde = npt_get_pde((u64) null_test);
753dd6ef43cSJoerg Roedel 
754*f6a2ca45SPaolo Bonzini     save_pde = *pde;
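    /*
     * Bit 7 (PS) turns this PDE into a 2MB mapping and bit 19 falls in the
     * range that must be zero for a large-page PDE, so the nested walk
     * should fault with the RSVD bit set in the error code.
     */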
755*f6a2ca45SPaolo Bonzini     *pde = (1ULL << 19) | (1ULL << 7) | 0x27;
756dd6ef43cSJoerg Roedel }
757dd6ef43cSJoerg Roedel 
758dd6ef43cSJoerg Roedel static bool npt_rsvd_check(struct test *test)
759dd6ef43cSJoerg Roedel {
760*f6a2ca45SPaolo Bonzini     u64 *pde = npt_get_pde((u64) null_test);
761*f6a2ca45SPaolo Bonzini 
762*f6a2ca45SPaolo Bonzini     *pde = save_pde;
763dd6ef43cSJoerg Roedel 
764dd6ef43cSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
765*f6a2ca45SPaolo Bonzini             && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
766dd6ef43cSJoerg Roedel }
767dd6ef43cSJoerg Roedel 
7685ebf82edSJoerg Roedel static void npt_rw_prepare(struct test *test)
7695ebf82edSJoerg Roedel {
7705ebf82edSJoerg Roedel 
7715ebf82edSJoerg Roedel     u64 *pte;
7725ebf82edSJoerg Roedel 
7735ebf82edSJoerg Roedel     vmcb_ident(test->vmcb);
774726a1dd7SPaolo Bonzini     pte = npt_get_pte(0x80000);
7755ebf82edSJoerg Roedel 
7765ebf82edSJoerg Roedel     *pte &= ~(1ULL << 1);
7775ebf82edSJoerg Roedel }
7785ebf82edSJoerg Roedel 
7795ebf82edSJoerg Roedel static void npt_rw_test(struct test *test)
7805ebf82edSJoerg Roedel {
7815ebf82edSJoerg Roedel     u64 *data = (void*)(0x80000);
7825ebf82edSJoerg Roedel 
7835ebf82edSJoerg Roedel     *data = 0;
7845ebf82edSJoerg Roedel }
7855ebf82edSJoerg Roedel 
7865ebf82edSJoerg Roedel static bool npt_rw_check(struct test *test)
7875ebf82edSJoerg Roedel {
788726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(0x80000);
7895ebf82edSJoerg Roedel 
7905ebf82edSJoerg Roedel     *pte |= (1ULL << 1);
7915ebf82edSJoerg Roedel 
7925ebf82edSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
793e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
7945ebf82edSJoerg Roedel }
7955ebf82edSJoerg Roedel 
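/*
 * Write-protect, in the NPT, the page that guest CR3 points to.  The
 * hardware walker has to write that page (e.g. to set accessed bits), so
 * the fault is reported as occurring during the guest page-table walk
 * (exit_info_1 bit 33) with exit_info_2 holding the CR3 value.
 */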
796*f6a2ca45SPaolo Bonzini static void npt_rw_pfwalk_prepare(struct test *test)
797590040ffSJoerg Roedel {
798590040ffSJoerg Roedel 
799590040ffSJoerg Roedel     u64 *pte;
800590040ffSJoerg Roedel 
801590040ffSJoerg Roedel     vmcb_ident(test->vmcb);
802726a1dd7SPaolo Bonzini     pte = npt_get_pte(read_cr3());
803590040ffSJoerg Roedel 
804590040ffSJoerg Roedel     *pte &= ~(1ULL << 1);
805590040ffSJoerg Roedel }
806590040ffSJoerg Roedel 
807*f6a2ca45SPaolo Bonzini static bool npt_rw_pfwalk_check(struct test *test)
808590040ffSJoerg Roedel {
809726a1dd7SPaolo Bonzini     u64 *pte = npt_get_pte(read_cr3());
810590040ffSJoerg Roedel 
811590040ffSJoerg Roedel     *pte |= (1ULL << 1);
812590040ffSJoerg Roedel 
813590040ffSJoerg Roedel     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
814e8b10c1fSPaolo Bonzini            && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
815590040ffSJoerg Roedel 	   && (test->vmcb->control.exit_info_2 == read_cr3());
816590040ffSJoerg Roedel }
817590040ffSJoerg Roedel 
818*f6a2ca45SPaolo Bonzini static void npt_rsvd_pfwalk_prepare(struct test *test)
819*f6a2ca45SPaolo Bonzini {
820*f6a2ca45SPaolo Bonzini 
821*f6a2ca45SPaolo Bonzini     vmcb_ident(test->vmcb);
822*f6a2ca45SPaolo Bonzini 
823*f6a2ca45SPaolo Bonzini     pdpe[0] |= (1ULL << 8);
824*f6a2ca45SPaolo Bonzini }
825*f6a2ca45SPaolo Bonzini 
826*f6a2ca45SPaolo Bonzini static bool npt_rsvd_pfwalk_check(struct test *test)
827*f6a2ca45SPaolo Bonzini {
828*f6a2ca45SPaolo Bonzini     pdpe[0] &= ~(1ULL << 8);
829*f6a2ca45SPaolo Bonzini 
830*f6a2ca45SPaolo Bonzini     return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
831*f6a2ca45SPaolo Bonzini             && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
832*f6a2ca45SPaolo Bonzini }
833*f6a2ca45SPaolo Bonzini 
834a2ab7740SPaolo Bonzini static void npt_l1mmio_prepare(struct test *test)
835a2ab7740SPaolo Bonzini {
836a2ab7740SPaolo Bonzini     vmcb_ident(test->vmcb);
837a2ab7740SPaolo Bonzini }
838a2ab7740SPaolo Bonzini 
8391e699ecbSPaolo Bonzini u32 nested_apic_version1;
8401e699ecbSPaolo Bonzini u32 nested_apic_version2;
841a2ab7740SPaolo Bonzini 
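/*
 * 0xfee00030 is the local APIC version register.  Read it twice from the
 * guest; npt_l1mmio_check() then compares both values against a read done
 * on the host (L1) side to verify APIC MMIO works through the nested page
 * tables.
 */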
842a2ab7740SPaolo Bonzini static void npt_l1mmio_test(struct test *test)
843a2ab7740SPaolo Bonzini {
8441e699ecbSPaolo Bonzini     volatile u32 *data = (volatile void*)(0xfee00030UL);
845a2ab7740SPaolo Bonzini 
8461e699ecbSPaolo Bonzini     nested_apic_version1 = *data;
8471e699ecbSPaolo Bonzini     nested_apic_version2 = *data;
848a2ab7740SPaolo Bonzini }
849a2ab7740SPaolo Bonzini 
850a2ab7740SPaolo Bonzini static bool npt_l1mmio_check(struct test *test)
851a2ab7740SPaolo Bonzini {
8521e699ecbSPaolo Bonzini     volatile u32 *data = (volatile void*)(0xfee00030);
8531e699ecbSPaolo Bonzini     u32 lvr = *data;
854a2ab7740SPaolo Bonzini 
8551e699ecbSPaolo Bonzini     return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
856a2ab7740SPaolo Bonzini }
857a2ab7740SPaolo Bonzini 
85821c23154SJoerg Roedel static void latency_prepare(struct test *test)
85921c23154SJoerg Roedel {
86021c23154SJoerg Roedel     default_prepare(test);
86121c23154SJoerg Roedel     runs = LATENCY_RUNS;
86221c23154SJoerg Roedel     latvmrun_min = latvmexit_min = -1ULL;
86321c23154SJoerg Roedel     latvmrun_max = latvmexit_max = 0;
86421c23154SJoerg Roedel     vmrun_sum = vmexit_sum = 0;
86521c23154SJoerg Roedel }
86621c23154SJoerg Roedel 
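/*
 * Guest half of the vmrun/vmexit latency measurement: tsc_start is taken
 * in test_run() right before VMRUN and tsc_end here right after entering
 * the guest, giving the VMRUN latency.  The guest then stamps tsc_start
 * again and exits with VMMCALL; latency_finished() reads the TSC on the
 * host side to get the VMEXIT latency.
 */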
86721c23154SJoerg Roedel static void latency_test(struct test *test)
86821c23154SJoerg Roedel {
86921c23154SJoerg Roedel     u64 cycles;
87021c23154SJoerg Roedel 
87121c23154SJoerg Roedel start:
87221c23154SJoerg Roedel     tsc_end = rdtsc();
87321c23154SJoerg Roedel 
87421c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
87521c23154SJoerg Roedel 
87621c23154SJoerg Roedel     if (cycles > latvmrun_max)
87721c23154SJoerg Roedel         latvmrun_max = cycles;
87821c23154SJoerg Roedel 
87921c23154SJoerg Roedel     if (cycles < latvmrun_min)
88021c23154SJoerg Roedel         latvmrun_min = cycles;
88121c23154SJoerg Roedel 
88221c23154SJoerg Roedel     vmrun_sum += cycles;
88321c23154SJoerg Roedel 
88421c23154SJoerg Roedel     tsc_start = rdtsc();
88521c23154SJoerg Roedel 
88621c23154SJoerg Roedel     asm volatile ("vmmcall" : : : "memory");
88721c23154SJoerg Roedel     goto start;
88821c23154SJoerg Roedel }
88921c23154SJoerg Roedel 
89021c23154SJoerg Roedel static bool latency_finished(struct test *test)
89121c23154SJoerg Roedel {
89221c23154SJoerg Roedel     u64 cycles;
89321c23154SJoerg Roedel 
89421c23154SJoerg Roedel     tsc_end = rdtsc();
89521c23154SJoerg Roedel 
89621c23154SJoerg Roedel     cycles = tsc_end - tsc_start;
89721c23154SJoerg Roedel 
89821c23154SJoerg Roedel     if (cycles > latvmexit_max)
89921c23154SJoerg Roedel         latvmexit_max = cycles;
90021c23154SJoerg Roedel 
90121c23154SJoerg Roedel     if (cycles < latvmexit_min)
90221c23154SJoerg Roedel         latvmexit_min = cycles;
90321c23154SJoerg Roedel 
90421c23154SJoerg Roedel     vmexit_sum += cycles;
90521c23154SJoerg Roedel 
90621c23154SJoerg Roedel     test->vmcb->save.rip += 3;
90721c23154SJoerg Roedel 
90821c23154SJoerg Roedel     runs -= 1;
90921c23154SJoerg Roedel 
91021c23154SJoerg Roedel     return runs == 0;
91121c23154SJoerg Roedel }
91221c23154SJoerg Roedel 
91321c23154SJoerg Roedel static bool latency_check(struct test *test)
91421c23154SJoerg Roedel {
91521c23154SJoerg Roedel     printf("    Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max,
91621c23154SJoerg Roedel             latvmrun_min, vmrun_sum / LATENCY_RUNS);
91721c23154SJoerg Roedel     printf("    Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max,
91821c23154SJoerg Roedel             latvmexit_min, vmexit_sum / LATENCY_RUNS);
91921c23154SJoerg Roedel     return true;
92021c23154SJoerg Roedel }
92121c23154SJoerg Roedel 
922ef101219SRoedel, Joerg static void lat_svm_insn_prepare(struct test *test)
923ef101219SRoedel, Joerg {
924ef101219SRoedel, Joerg     default_prepare(test);
925ef101219SRoedel, Joerg     runs = LATENCY_RUNS;
926ef101219SRoedel, Joerg     latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
927ef101219SRoedel, Joerg     latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
928ef101219SRoedel, Joerg     vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
929ef101219SRoedel, Joerg }
930ef101219SRoedel, Joerg 
931ef101219SRoedel, Joerg static bool lat_svm_insn_finished(struct test *test)
932ef101219SRoedel, Joerg {
933ef101219SRoedel, Joerg     u64 vmcb_phys = virt_to_phys(test->vmcb);
934ef101219SRoedel, Joerg     u64 cycles;
935ef101219SRoedel, Joerg 
936ef101219SRoedel, Joerg     for ( ; runs != 0; runs--) {
937ef101219SRoedel, Joerg         tsc_start = rdtsc();
938ef101219SRoedel, Joerg         asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
939ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
940ef101219SRoedel, Joerg         if (cycles > latvmload_max)
941ef101219SRoedel, Joerg             latvmload_max = cycles;
942ef101219SRoedel, Joerg         if (cycles < latvmload_min)
943ef101219SRoedel, Joerg             latvmload_min = cycles;
944ef101219SRoedel, Joerg         vmload_sum += cycles;
945ef101219SRoedel, Joerg 
946ef101219SRoedel, Joerg         tsc_start = rdtsc();
947ef101219SRoedel, Joerg         asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
948ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
949ef101219SRoedel, Joerg         if (cycles > latvmsave_max)
950ef101219SRoedel, Joerg             latvmsave_max = cycles;
951ef101219SRoedel, Joerg         if (cycles < latvmsave_min)
952ef101219SRoedel, Joerg             latvmsave_min = cycles;
953ef101219SRoedel, Joerg         vmsave_sum += cycles;
954ef101219SRoedel, Joerg 
955ef101219SRoedel, Joerg         tsc_start = rdtsc();
956ef101219SRoedel, Joerg         asm volatile("stgi\n\t");
957ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
958ef101219SRoedel, Joerg         if (cycles > latstgi_max)
959ef101219SRoedel, Joerg             latstgi_max = cycles;
960ef101219SRoedel, Joerg         if (cycles < latstgi_min)
961ef101219SRoedel, Joerg             latstgi_min = cycles;
962ef101219SRoedel, Joerg         stgi_sum += cycles;
963ef101219SRoedel, Joerg 
964ef101219SRoedel, Joerg         tsc_start = rdtsc();
965ef101219SRoedel, Joerg         asm volatile("clgi\n\t");
966ef101219SRoedel, Joerg         cycles = rdtsc() - tsc_start;
967ef101219SRoedel, Joerg         if (cycles > latclgi_max)
968ef101219SRoedel, Joerg             latclgi_max = cycles;
969ef101219SRoedel, Joerg         if (cycles < latclgi_min)
970ef101219SRoedel, Joerg             latclgi_min = cycles;
971ef101219SRoedel, Joerg         clgi_sum += cycles;
972ef101219SRoedel, Joerg     }
973ef101219SRoedel, Joerg 
974ef101219SRoedel, Joerg     return true;
975ef101219SRoedel, Joerg }
976ef101219SRoedel, Joerg 
977ef101219SRoedel, Joerg static bool lat_svm_insn_check(struct test *test)
978ef101219SRoedel, Joerg {
979ef101219SRoedel, Joerg     printf("    Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max,
980ef101219SRoedel, Joerg             latvmload_min, vmload_sum / LATENCY_RUNS);
981ef101219SRoedel, Joerg     printf("    Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max,
982ef101219SRoedel, Joerg             latvmsave_min, vmsave_sum / LATENCY_RUNS);
983ef101219SRoedel, Joerg     printf("    Latency STGI:   max: %d min: %d avg: %d\n", latstgi_max,
984ef101219SRoedel, Joerg             latstgi_min, stgi_sum / LATENCY_RUNS);
985ef101219SRoedel, Joerg     printf("    Latency CLGI:   max: %d min: %d avg: %d\n", latclgi_max,
986ef101219SRoedel, Joerg             latclgi_min, clgi_sum / LATENCY_RUNS);
987ef101219SRoedel, Joerg     return true;
988ef101219SRoedel, Joerg }
9897d36db35SAvi Kivity static struct test tests[] = {
9907d36db35SAvi Kivity     { "null", default_supported, default_prepare, null_test,
9917d36db35SAvi Kivity       default_finished, null_check },
9927d36db35SAvi Kivity     { "vmrun", default_supported, default_prepare, test_vmrun,
9937d36db35SAvi Kivity        default_finished, check_vmrun },
994bcd9774aSPaolo Bonzini     { "ioio", default_supported, prepare_ioio, test_ioio,
995bcd9774aSPaolo Bonzini        ioio_finished, check_ioio },
9967d36db35SAvi Kivity     { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
9977d36db35SAvi Kivity       null_test, default_finished, check_no_vmrun_int },
9987d36db35SAvi Kivity     { "cr3 read intercept", default_supported, prepare_cr3_intercept,
9997d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_intercept },
10007d36db35SAvi Kivity     { "cr3 read nointercept", default_supported, default_prepare,
10017d36db35SAvi Kivity       test_cr3_intercept, default_finished, check_cr3_nointercept },
1002095274b4SPrasad Joshi     { "cr3 read intercept emulate", smp_supported,
10037d36db35SAvi Kivity       prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
10047d36db35SAvi Kivity       default_finished, check_cr3_intercept },
10057d36db35SAvi Kivity     { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
10067d36db35SAvi Kivity       default_finished, check_next_rip },
10077d36db35SAvi Kivity     { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
10087d36db35SAvi Kivity        mode_switch_finished, check_mode_switch },
10097d36db35SAvi Kivity     { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
10107d36db35SAvi Kivity        default_finished, check_asid_zero },
10114c8eb156SJoerg Roedel     { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
10124c8eb156SJoerg Roedel        sel_cr0_bug_finished, sel_cr0_bug_check },
10138594b943SJoerg Roedel     { "npt_nx", npt_supported, npt_nx_prepare, null_test,
1014ea975120SJoerg Roedel 	    default_finished, npt_nx_check },
1015ea975120SJoerg Roedel     { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
1016ea975120SJoerg Roedel 	    default_finished, npt_us_check },
1017dd6ef43cSJoerg Roedel     { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
1018dd6ef43cSJoerg Roedel 	    default_finished, npt_rsvd_check },
10195ebf82edSJoerg Roedel     { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
10205ebf82edSJoerg Roedel 	    default_finished, npt_rw_check },
1021*f6a2ca45SPaolo Bonzini     { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
1022*f6a2ca45SPaolo Bonzini 	    default_finished, npt_rsvd_pfwalk_check },
1023*f6a2ca45SPaolo Bonzini     { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
1024*f6a2ca45SPaolo Bonzini 	    default_finished, npt_rw_pfwalk_check },
1025a2ab7740SPaolo Bonzini     { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
1026a2ab7740SPaolo Bonzini 	    default_finished, npt_l1mmio_check },
102721c23154SJoerg Roedel     { "latency_run_exit", default_supported, latency_prepare, latency_test,
102821c23154SJoerg Roedel       latency_finished, latency_check },
1029ef101219SRoedel, Joerg     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
1030ef101219SRoedel, Joerg       lat_svm_insn_finished, lat_svm_insn_check },
10317d36db35SAvi Kivity };
10327d36db35SAvi Kivity 
10337d36db35SAvi Kivity int main(int ac, char **av)
10347d36db35SAvi Kivity {
10357d36db35SAvi Kivity     int i, nr, passed, done;
10367d36db35SAvi Kivity     struct vmcb *vmcb;
10377d36db35SAvi Kivity 
10387d36db35SAvi Kivity     setup_vm();
10397d36db35SAvi Kivity     smp_init();
10407d36db35SAvi Kivity 
10417d36db35SAvi Kivity     if (!(cpuid(0x80000001).c & 4)) {
10427d36db35SAvi Kivity         printf("SVM not availble\n");
10437d36db35SAvi Kivity         return 0;
10447d36db35SAvi Kivity     }
10457d36db35SAvi Kivity 
10467d36db35SAvi Kivity     setup_svm();
10477d36db35SAvi Kivity 
10487d36db35SAvi Kivity     vmcb = alloc_page();
10497d36db35SAvi Kivity 
10507d36db35SAvi Kivity     nr = ARRAY_SIZE(tests);
10517d36db35SAvi Kivity     passed = done = 0;
10527d36db35SAvi Kivity     for (i = 0; i < nr; ++i) {
10537d36db35SAvi Kivity         if (!tests[i].supported())
10547d36db35SAvi Kivity             continue;
10557d36db35SAvi Kivity         done += 1;
10567d36db35SAvi Kivity         passed += test_run(&tests[i], vmcb);
10577d36db35SAvi Kivity     }
10587d36db35SAvi Kivity 
10597d36db35SAvi Kivity     printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
10607d36db35SAvi Kivity     return passed == done ? 0 : 1;
10617d36db35SAvi Kivity }
1062