#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"

/* Highest SVM exit code that corresponds to a DR read/write intercept. */
#define SVM_EXIT_MAX_DR_INTERCEPT 0x3f

/* for the nested page table */
u64 *pml4e;		/* top level (one page, entry 0 used) */
u64 *pdpe;		/* PDP table (4 entries -> 4GB) */
u64 *pde[4];		/* one page directory per GB */
u64 *pte[2048];		/* one page table per 2MB region (4k mappings) */
void *scratch_page;	/* spare page for tests to play with */

/* Iterations used by the latency measurement tests. */
#define LATENCY_RUNS 1000000

/* Timestamps taken around each VMRUN in test_run(). */
u64 tsc_start;
u64 tsc_end;

/* Accumulators and min/max trackers for the latency tests
 * (consumers not visible in this chunk — presumably the
 * latency_* tests further down the file). */
u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

/* I/O permission bitmap: io_bitmap is the page-aligned pointer into
 * the oversized backing array (aligned in setup_svm()). */
u8 *io_bitmap;
u8 io_bitmap_area[16384];

/* When 0, default_prepare_gif_clear() clears IF again before VMRUN. */
u8 set_host_if;

/* Size in bytes of the MSR permission bitmap. */
#define MSR_BITMAP_SIZE 8192

5106a8c023STambe, William u8 *msr_bitmap; 5206a8c023STambe, William u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE]; 5306a8c023STambe, William 541535bf0fSJoerg Roedel static bool npt_supported(void) 551535bf0fSJoerg Roedel { 56badc98caSKrish Sadhukhan return this_cpu_has(X86_FEATURE_NPT); 571535bf0fSJoerg Roedel } 581535bf0fSJoerg Roedel 597d36db35SAvi Kivity static void setup_svm(void) 607d36db35SAvi Kivity { 617d36db35SAvi Kivity void *hsave = alloc_page(); 621535bf0fSJoerg Roedel u64 *page, address; 631535bf0fSJoerg Roedel int i,j; 647d36db35SAvi Kivity 657d36db35SAvi Kivity wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave)); 667d36db35SAvi Kivity wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME); 678594b943SJoerg Roedel wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX); 681535bf0fSJoerg Roedel 69ea975120SJoerg Roedel scratch_page = alloc_page(); 70ea975120SJoerg Roedel 713d46571bSPaolo Bonzini io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095); 723d46571bSPaolo Bonzini 7306a8c023STambe, William msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE); 7406a8c023STambe, William 751535bf0fSJoerg Roedel if (!npt_supported()) 761535bf0fSJoerg Roedel return; 771535bf0fSJoerg Roedel 781535bf0fSJoerg Roedel printf("NPT detected - running all tests with NPT enabled\n"); 791535bf0fSJoerg Roedel 801535bf0fSJoerg Roedel /* 811535bf0fSJoerg Roedel * Nested paging supported - Build a nested page table 821535bf0fSJoerg Roedel * Build the page-table bottom-up and map everything with 4k pages 831535bf0fSJoerg Roedel * to get enough granularity for the NPT unit-tests. 
841535bf0fSJoerg Roedel */ 851535bf0fSJoerg Roedel 861535bf0fSJoerg Roedel address = 0; 871535bf0fSJoerg Roedel 881535bf0fSJoerg Roedel /* PTE level */ 891535bf0fSJoerg Roedel for (i = 0; i < 2048; ++i) { 901535bf0fSJoerg Roedel page = alloc_page(); 911535bf0fSJoerg Roedel 921535bf0fSJoerg Roedel for (j = 0; j < 512; ++j, address += 4096) 931535bf0fSJoerg Roedel page[j] = address | 0x067ULL; 941535bf0fSJoerg Roedel 951535bf0fSJoerg Roedel pte[i] = page; 961535bf0fSJoerg Roedel } 971535bf0fSJoerg Roedel 981535bf0fSJoerg Roedel /* PDE level */ 991535bf0fSJoerg Roedel for (i = 0; i < 4; ++i) { 1001535bf0fSJoerg Roedel page = alloc_page(); 1011535bf0fSJoerg Roedel 1021535bf0fSJoerg Roedel for (j = 0; j < 512; ++j) 10393b05099SPaolo Bonzini page[j] = (u64)pte[(i * 512) + j] | 0x027ULL; 1041535bf0fSJoerg Roedel 1051535bf0fSJoerg Roedel pde[i] = page; 1061535bf0fSJoerg Roedel } 1071535bf0fSJoerg Roedel 1081535bf0fSJoerg Roedel /* PDPe level */ 1091535bf0fSJoerg Roedel pdpe = alloc_page(); 1101535bf0fSJoerg Roedel for (i = 0; i < 4; ++i) 1111535bf0fSJoerg Roedel pdpe[i] = ((u64)(pde[i])) | 0x27; 1121535bf0fSJoerg Roedel 1131535bf0fSJoerg Roedel /* PML4e level */ 1141535bf0fSJoerg Roedel pml4e = alloc_page(); 1151535bf0fSJoerg Roedel pml4e[0] = ((u64)pdpe) | 0x27; 1167d36db35SAvi Kivity } 1177d36db35SAvi Kivity 118f6a2ca45SPaolo Bonzini static u64 *npt_get_pde(u64 address) 119f6a2ca45SPaolo Bonzini { 120f6a2ca45SPaolo Bonzini int i1, i2; 121f6a2ca45SPaolo Bonzini 122f6a2ca45SPaolo Bonzini address >>= 21; 123f6a2ca45SPaolo Bonzini i1 = (address >> 9) & 0x3; 124f6a2ca45SPaolo Bonzini i2 = address & 0x1ff; 125f6a2ca45SPaolo Bonzini 126f6a2ca45SPaolo Bonzini return &pde[i1][i2]; 127f6a2ca45SPaolo Bonzini } 128f6a2ca45SPaolo Bonzini 129726a1dd7SPaolo Bonzini static u64 *npt_get_pte(u64 address) 1308594b943SJoerg Roedel { 1318594b943SJoerg Roedel int i1, i2; 1328594b943SJoerg Roedel 1338594b943SJoerg Roedel address >>= 12; 1348594b943SJoerg Roedel i1 = (address >> 9) & 0x7ff; 
1358594b943SJoerg Roedel i2 = address & 0x1ff; 1368594b943SJoerg Roedel 1378594b943SJoerg Roedel return &pte[i1][i2]; 1388594b943SJoerg Roedel } 1398594b943SJoerg Roedel 1407d36db35SAvi Kivity static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector, 1417d36db35SAvi Kivity u64 base, u32 limit, u32 attr) 1427d36db35SAvi Kivity { 1437d36db35SAvi Kivity seg->selector = selector; 1447d36db35SAvi Kivity seg->attrib = attr; 1457d36db35SAvi Kivity seg->limit = limit; 1467d36db35SAvi Kivity seg->base = base; 1477d36db35SAvi Kivity } 1487d36db35SAvi Kivity 1497d36db35SAvi Kivity static void vmcb_ident(struct vmcb *vmcb) 1507d36db35SAvi Kivity { 1517d36db35SAvi Kivity u64 vmcb_phys = virt_to_phys(vmcb); 1527d36db35SAvi Kivity struct vmcb_save_area *save = &vmcb->save; 1537d36db35SAvi Kivity struct vmcb_control_area *ctrl = &vmcb->control; 1547d36db35SAvi Kivity u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 1557d36db35SAvi Kivity | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; 1567d36db35SAvi Kivity u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 1577d36db35SAvi Kivity | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; 1587d36db35SAvi Kivity struct descriptor_table_ptr desc_table_ptr; 1597d36db35SAvi Kivity 1607d36db35SAvi Kivity memset(vmcb, 0, sizeof(*vmcb)); 1612c6589bcSPeter Shier asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory"); 1627d36db35SAvi Kivity vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr); 1637d36db35SAvi Kivity vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr); 1647d36db35SAvi Kivity vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr); 1657d36db35SAvi Kivity vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr); 1667d36db35SAvi Kivity sgdt(&desc_table_ptr); 1677d36db35SAvi Kivity vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0); 1687d36db35SAvi Kivity sidt(&desc_table_ptr); 1697d36db35SAvi Kivity vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 
0); 1707d36db35SAvi Kivity ctrl->asid = 1; 1717d36db35SAvi Kivity save->cpl = 0; 1727d36db35SAvi Kivity save->efer = rdmsr(MSR_EFER); 1737d36db35SAvi Kivity save->cr4 = read_cr4(); 1747d36db35SAvi Kivity save->cr3 = read_cr3(); 1757d36db35SAvi Kivity save->cr0 = read_cr0(); 1767d36db35SAvi Kivity save->dr7 = read_dr7(); 1777d36db35SAvi Kivity save->dr6 = read_dr6(); 1787d36db35SAvi Kivity save->cr2 = read_cr2(); 1797d36db35SAvi Kivity save->g_pat = rdmsr(MSR_IA32_CR_PAT); 1807d36db35SAvi Kivity save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 1817d36db35SAvi Kivity ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL); 1823d46571bSPaolo Bonzini ctrl->iopm_base_pa = virt_to_phys(io_bitmap); 18306a8c023STambe, William ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap); 1841535bf0fSJoerg Roedel 1851535bf0fSJoerg Roedel if (npt_supported()) { 1861535bf0fSJoerg Roedel ctrl->nested_ctl = 1; 1871535bf0fSJoerg Roedel ctrl->nested_cr3 = (u64)pml4e; 1881535bf0fSJoerg Roedel } 1897d36db35SAvi Kivity } 1907d36db35SAvi Kivity 1917d36db35SAvi Kivity struct test { 1927d36db35SAvi Kivity const char *name; 1937d36db35SAvi Kivity bool (*supported)(void); 1947d36db35SAvi Kivity void (*prepare)(struct test *test); 195*e7bce343SPaolo Bonzini void (*prepare_gif_clear)(struct test *test); 1967d36db35SAvi Kivity void (*guest_func)(struct test *test); 1977d36db35SAvi Kivity bool (*finished)(struct test *test); 1987d36db35SAvi Kivity bool (*succeeded)(struct test *test); 1997d36db35SAvi Kivity struct vmcb *vmcb; 2007d36db35SAvi Kivity int exits; 2017d36db35SAvi Kivity ulong scratch; 2027d36db35SAvi Kivity }; 2037d36db35SAvi Kivity 204e0b6541cSPaolo Bonzini static inline void vmmcall(void) 205e0b6541cSPaolo Bonzini { 206e0b6541cSPaolo Bonzini asm volatile ("vmmcall" : : : "memory"); 207e0b6541cSPaolo Bonzini } 208e0b6541cSPaolo Bonzini 2097d36db35SAvi Kivity static void test_thunk(struct test *test) 2107d36db35SAvi Kivity { 2117d36db35SAvi Kivity test->guest_func(test); 
/*
 * Guest general-purpose register state exchanged around VMRUN.
 * rax and rflags live in the VMCB save area instead and are copied
 * in/out explicitly by the asm in test_run().
 */
struct regs {
	u64 rax;
	u64 rbx;
	u64 rcx;
	u64 rdx;
	u64 cr2;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u64 rflags;
};

struct regs regs;

// rax handled specially below

/*
 * Exchange the live GPRs with the values stored in 'regs'.  The
 * offsets are the byte offsets of the fields in struct regs above
 * (rbx at +0x8, rcx at +0x10, ... r15 at +0x78).  Because xchg swaps,
 * the same macro both saves host state and loads guest state, which
 * is why LOAD_GPR_C is simply an alias.
 */
#define SAVE_GPR_C                              \
        "xchg %%rbx, regs+0x8\n\t"              \
        "xchg %%rcx, regs+0x10\n\t"             \
        "xchg %%rdx, regs+0x18\n\t"             \
        "xchg %%rbp, regs+0x28\n\t"             \
        "xchg %%rsi, regs+0x30\n\t"             \
        "xchg %%rdi, regs+0x38\n\t"             \
        "xchg %%r8, regs+0x40\n\t"              \
        "xchg %%r9, regs+0x48\n\t"              \
        "xchg %%r10, regs+0x50\n\t"             \
        "xchg %%r11, regs+0x58\n\t"             \
        "xchg %%r12, regs+0x60\n\t"             \
        "xchg %%r13, regs+0x68\n\t"             \
        "xchg %%r14, regs+0x70\n\t"             \
        "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C SAVE_GPR_C

/*
 * Run one test: set up the VMCB via test->prepare(), then VMRUN the
 * guest in a loop until test->finished() says stop, and report the
 * verdict of test->succeeded().
 *
 * Around each VMRUN the asm block:
 *   - CLGI, then STI so host IF is nominally set while GIF is clear;
 *   - calls test->prepare_gif_clear() (still under GIF clear);
 *   - VMLOADs the VMCB, copies regs.rflags/regs.rax into the VMCB
 *     save area (offsets 0x170 / 0x1f8), swaps in the guest GPRs;
 *   - VMRUN, then the mirror-image restore sequence, VMSAVE, CLI, STGI.
 * the_test/the_vmcb are declared "+D"/"+b" because the guest may
 * clobber rdi (first argument) and rbx across the VMRUN.
 */
static void test_run(struct test *test, struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	u64 guest_stack[10000];

	irq_disable();
	test->vmcb = vmcb;
	set_host_if = 1;
	test->prepare(test);
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		tsc_start = rdtsc();
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
			"mov %[vmcb_phys], %%rax \n\t"
			"vmload %%rax\n\t"
			"mov regs+0x80, %%r15\n\t"  // rflags
			"mov %%r15, 0x170(%%rax)\n\t"
			"mov regs, %%r15\n\t"  // rax
			"mov %%r15, 0x1f8(%%rax)\n\t"
			LOAD_GPR_C
			"vmrun %%rax\n\t"
			SAVE_GPR_C
			"mov 0x170(%%rax), %%r15\n\t"  // rflags
			"mov %%r15, regs+0x80\n\t"
			"mov 0x1f8(%%rax), %%r15\n\t"  // rax
			"mov %%r15, regs\n\t"
			"vmsave %%rax\n\t"
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			  "+D" (the_test),  // first argument register
			  "+b" (the_vmcb)   // callee save register!
			: [test] "0" (the_test),
			  [vmcb_phys] "1"(the_vmcb),
			  [PREPARE_GIF_CLEAR] "i" (offsetof(struct test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
			  "memory");
		tsc_end = rdtsc();
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);
}

/* Some tests (e.g. the CR3 intercept bypass) need a second CPU. */
static bool smp_supported(void)
{
	return cpu_count() > 1;
}

static bool default_supported(void)
{
	return true;
}

static void default_prepare(struct test *test)
{
	vmcb_ident(test->vmcb);
}

/* Runs with GIF clear just before VMRUN; undo the unconditional STI
 * in test_run() when the test requested host IF clear. */
static void default_prepare_gif_clear(struct test *test)
{
	if (!set_host_if)
		asm("cli");
}

static bool default_finished(struct test *test)
{
	return true; /* one vmexit */
}

/* Empty guest body: exits immediately via test_thunk()'s vmmcall. */
static void null_test(struct test *test)
{
}

/* Success == the guest exited through the VMMCALL intercept. */
static bool null_check(struct test *test)
{
	return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}
static void prepare_no_vmrun_int(struct test *test)
{
	/* Intentionally illegal: VMRUN must always be intercepted. */
	test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

/* VMRUN with the VMRUN intercept clear must fail with VMEXIT_INVALID. */
static bool check_no_vmrun_int(struct test *test)
{
	return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

/* Guest executes VMRUN on its own VMCB; should hit the VMRUN intercept. */
static void test_vmrun(struct test *test)
{
	asm volatile ("vmrun %0" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
	return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
	default_prepare(test);
	/* bit 3 == intercept reads of CR3 */
	test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
	asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
	return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

/* Without the intercept the guest must read the real CR3 value. */
static bool check_cr3_nointercept(struct test *test)
{
	return null_check(test) && test->scratch == read_cr3();
}

/*
 * Runs on CPU 1: once the guest signals readiness (scratch 1 -> 2),
 * patch the guest's memory access instruction at label mmio_insn into
 * "mov %cr3, %rax; nop" behind the hypervisor's back, to check that
 * the CR3 read intercept still fires on the rewritten instruction.
 */
static void corrupt_cr3_intercept_bypass(void *_test)
{
	struct test *test = _test;
	extern volatile u32 mmio_insn;

	while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
		pause();
	pause();
	pause();
	pause();
	mmio_insn = 0x90d8200f; // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
	default_prepare(test);
	test->vmcb->control.intercept_cr_read |= 1 << 3;
	on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
	ulong a = 0xa0000;

	test->scratch = 1;
	while (test->scratch != 2)
		barrier();

	/* This instruction is overwritten by CPU 1 (see above). */
	asm volatile ("mmio_insn: mov %0, (%0); nop"
		      : "+a"(a) : : "memory");
	test->scratch = a;
}

static void prepare_dr_intercept(struct test *test)
{
	default_prepare(test);
	/* Intercept reads and writes of DR0-DR7. */
	test->vmcb->control.intercept_dr_read = 0xff;
	test->vmcb->control.intercept_dr_write = 0xff;
}

/*
 * Guest: access each debug register; every access should be
 * intercepted and dr_intercept_finished() stores the DR number in
 * test->scratch, which is checked here after each access.
 * Leaves the failure count in test->scratch.
 */
static void test_dr_intercept(struct test *test)
{
	unsigned int i, failcnt = 0;

	/* Loop testing debug register reads */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 1:
			asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 2:
			asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 3:
			asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 4:
			asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 5:
			asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 6:
			asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 7:
			asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
			break;
		}

		if (test->scratch != i) {
			report(false, "dr%u read intercept", i);
			failcnt++;
		}
	}

	/* Loop testing debug register writes */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
			break;
		case 1:
			asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
			break;
		case 2:
			asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
			break;
		case 3:
			asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
			break;
		case 4:
			asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
			break;
		case 5:
			asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
			break;
		case 6:
			asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
			break;
		case 7:
			asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
			break;
		}

		if (test->scratch != i) {
			report(false, "dr%u write intercept", i);
			failcnt++;
		}
	}

	test->scratch = failcnt;
}

/*
 * Host-side vmexit handler for the DR test: record which DR was
 * accessed in test->scratch and skip the MOV in the guest.  Any
 * non-DR exit ends the test.
 */
static bool dr_intercept_finished(struct test *test)
{
	ulong n = (test->vmcb->control.exit_code - SVM_EXIT_READ_DR0);

	/* Only expect DR intercepts */
	if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
		return true;

	/*
	 * Compute debug register number.
	 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
	 * Programmer's Manual Volume 2 - System Programming:
	 * http://support.amd.com/TechDocs/24593.pdf
	 * there are 16 VMEXIT codes each for DR read and write.
	 */
	test->scratch = (n % 16);

	/* Jump over MOV instruction */
	test->vmcb->save.rip += 3;

	return false;
}
/* Success == test_dr_intercept() counted zero failures. */
static bool check_dr_intercept(struct test *test)
{
	return !test->scratch;
}

static bool next_rip_supported(void)
{
	return this_cpu_has(X86_FEATURE_NRIPS);
}

static void prepare_next_rip(struct test *test)
{
	test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}


/* Guest: execute RDTSC; exp_next_rip marks the instruction after it. */
static void test_next_rip(struct test *test)
{
	asm volatile ("rdtsc\n\t"
		      ".globl exp_next_rip\n\t"
		      "exp_next_rip:\n\t" ::: "eax", "edx");
}

/* The NRIPS field must point exactly at the instruction after RDTSC. */
static bool check_next_rip(struct test *test)
{
	extern char exp_next_rip;
	unsigned long address = (unsigned long)&exp_next_rip;

	return address == test->vmcb->control.next_rip;
}

static void prepare_msr_intercept(struct test *test)
{
	default_prepare(test);
	test->vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
	/* Also catch #GP, raised by RDMSR/WRMSR on invalid MSRs. */
	test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
	/* All bits set: intercept every MSR read and write. */
	memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
}

/*
 * Guest: read and write every MSR in the architecturally defined
 * ranges and verify (via the scratch protocol implemented in
 * msr_intercept_finished()) that each access was intercepted.
 * Finishes by setting scratch to -2 as the "all done" marker.
 */
static void test_msr_intercept(struct test *test)
{
	unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
	unsigned long msr_index;

	for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
		if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
			/*
			 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
			 * Programmer's Manual volume 2 - System Programming:
			 * http://support.amd.com/TechDocs/24593.pdf
			 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
			 */
			continue;
		}

		/* Skips gaps between supported MSR ranges */
		if (msr_index == 0x2000)
			msr_index = 0xc0000000;
		else if (msr_index == 0xc0002000)
			msr_index = 0xc0010000;

		test->scratch = -1;

		rdmsr(msr_index);

		/* Check that a read intercept occurred for MSR at msr_index */
		if (test->scratch != msr_index)
			report(false, "MSR 0x%lx read intercept", msr_index);

		/*
		 * Poor man approach to generate a value that
		 * seems arbitrary each time around the loop.
		 */
		msr_value += (msr_value << 1);

		wrmsr(msr_index, msr_value);

		/* Check that a write intercept occurred for MSR with msr_value */
		if (test->scratch != msr_value)
			report(false, "MSR 0x%lx write intercept", msr_index);
	}

	test->scratch = -2;
}
/*
 * Host-side vmexit handler for the MSR test: for each MSR intercept
 * (or #GP raised by RDMSR/WRMSR) record what the guest accessed in
 * test->scratch - the MSR index for reads, the written value for
 * writes - and skip the instruction.  Any other exit ends the test.
 */
static bool msr_intercept_finished(struct test *test)
{
	u32 exit_code = test->vmcb->control.exit_code;
	u64 exit_info_1;
	u8 *opcode;

	if (exit_code == SVM_EXIT_MSR) {
		exit_info_1 = test->vmcb->control.exit_info_1;
	} else {
		/*
		 * If #GP exception occurs instead, check that it was
		 * for RDMSR/WRMSR and set exit_info_1 accordingly.
		 */

		if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
			return true;

		opcode = (u8 *)test->vmcb->save.rip;
		if (opcode[0] != 0x0f)
			return true;

		switch (opcode[1]) {
		case 0x30: /* WRMSR */
			exit_info_1 = 1;
			break;
		case 0x32: /* RDMSR */
			exit_info_1 = 0;
			break;
		default:
			return true;
		}

		/*
		 * Warn that #GP exception occurred instead.
		 * RCX holds the MSR index.
		 */
		printf("%s 0x%lx #GP exception\n",
		       exit_info_1 ? "WRMSR" : "RDMSR", regs.rcx);
	}

	/* Jump over RDMSR/WRMSR instruction */
	test->vmcb->save.rip += 2;

	/*
	 * Test whether the intercept was for RDMSR/WRMSR.
	 * For RDMSR, test->scratch is set to the MSR index;
	 * RCX holds the MSR index.
	 * For WRMSR, test->scratch is set to the MSR value;
	 * RDX holds the upper 32 bits of the MSR value,
	 * while RAX hold its lower 32 bits.
	 */
	if (exit_info_1)
		test->scratch =
			((regs.rdx << 32) | (test->vmcb->save.rax & 0xffffffff));
	else
		test->scratch = regs.rcx;

	return false;
}

/* Success == test_msr_intercept() reached its -2 "all done" marker. */
static bool check_msr_intercept(struct test *test)
{
	memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
	return (test->scratch == -2);
}

static void prepare_mode_switch(struct test *test)
{
	/* Catch the faults a botched mode switch would raise. */
	test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
						 |  (1ULL << UD_VECTOR)
						 |  (1ULL << DF_VECTOR)
						 |  (1ULL << PF_VECTOR);
	test->scratch = 0;
}

/*
 * Guest: walk down from 64-bit long mode through 32-bit protected
 * mode and 16-bit protected mode into real mode (clearing PG, LME,
 * PAE, PE in that order), VMMCALL from real mode, then walk back up
 * and VMMCALL again from long mode.  mode_switch_finished() (below)
 * checks the saved CR0/CR4/EFER at each of the two exits.
 */
static void test_mode_switch(struct test *test)
{
	asm volatile("	cli\n"
		     "	ljmp *1f\n" /* jump to 32-bit code segment */
		     "1:\n"
		     "	.long 2f\n"
		     "	.long " xstr(KERNEL_CS32) "\n"
		     ".code32\n"
		     "2:\n"
		     "	movl %%cr0, %%eax\n"
		     "	btcl $31, %%eax\n" /* clear PG */
		     "	movl %%eax, %%cr0\n"
		     "	movl $0xc0000080, %%ecx\n" /* EFER */
		     "	rdmsr\n"
		     "	btcl $8, %%eax\n" /* clear LME */
		     "	wrmsr\n"
		     "	movl %%cr4, %%eax\n"
		     "	btcl $5, %%eax\n" /* clear PAE */
		     "	movl %%eax, %%cr4\n"
		     "	movw %[ds16], %%ax\n"
		     "	movw %%ax, %%ds\n"
		     "	ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */
		     ".code16\n"
		     "3:\n"
		     "	movl %%cr0, %%eax\n"
		     "	btcl $0, %%eax\n" /* clear PE */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl $0, $4f\n" /* jump to real-mode */
		     "4:\n"
		     "	vmmcall\n"
		     "	movl %%cr0, %%eax\n"
		     "	btsl $0, %%eax\n" /* set PE */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl %[cs32], $5f\n" /* back to protected mode */
		     ".code32\n"
		     "5:\n"
		     "	movl %%cr4, %%eax\n"
		     "	btsl $5, %%eax\n" /* set PAE */
		     "	movl %%eax, %%cr4\n"
		     "	movl $0xc0000080, %%ecx\n" /* EFER */
		     "	rdmsr\n"
		     "	btsl $8, %%eax\n" /* set LME */
		     "	wrmsr\n"
		     "	movl %%cr0, %%eax\n"
		     "	btsl $31, %%eax\n" /* set PG */
		     "	movl %%eax, %%cr0\n"
		     "	ljmpl %[cs64], $6f\n" /* back to long mode */
		     ".code64\n\t"
		     "6:\n"
		     "	vmmcall\n"
		     :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
		        [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		     : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct test *test)
{
	u64 cr0, cr4, efer;

	cr0 = test->vmcb->save.cr0;
cr4 = test->vmcb->save.cr4; 7497d36db35SAvi Kivity efer = test->vmcb->save.efer; 7507d36db35SAvi Kivity 7517d36db35SAvi Kivity /* Only expect VMMCALL intercepts */ 7527d36db35SAvi Kivity if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) 7537d36db35SAvi Kivity return true; 7547d36db35SAvi Kivity 7557d36db35SAvi Kivity /* Jump over VMMCALL instruction */ 7567d36db35SAvi Kivity test->vmcb->save.rip += 3; 7577d36db35SAvi Kivity 7587d36db35SAvi Kivity /* Do sanity checks */ 7597d36db35SAvi Kivity switch (test->scratch) { 7607d36db35SAvi Kivity case 0: 7617d36db35SAvi Kivity /* Test should be in real mode now - check for this */ 7627d36db35SAvi Kivity if ((cr0 & 0x80000001) || /* CR0.PG, CR0.PE */ 7637d36db35SAvi Kivity (cr4 & 0x00000020) || /* CR4.PAE */ 7647d36db35SAvi Kivity (efer & 0x00000500)) /* EFER.LMA, EFER.LME */ 7657d36db35SAvi Kivity return true; 7667d36db35SAvi Kivity break; 7677d36db35SAvi Kivity case 2: 7687d36db35SAvi Kivity /* Test should be back in long-mode now - check for this */ 7697d36db35SAvi Kivity if (((cr0 & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */ 7707d36db35SAvi Kivity ((cr4 & 0x00000020) != 0x00000020) || /* CR4.PAE */ 7717d36db35SAvi Kivity ((efer & 0x00000500) != 0x00000500)) /* EFER.LMA, EFER.LME */ 7727d36db35SAvi Kivity return true; 7737d36db35SAvi Kivity break; 7747d36db35SAvi Kivity } 7757d36db35SAvi Kivity 7767d36db35SAvi Kivity /* one step forward */ 7777d36db35SAvi Kivity test->scratch += 1; 7787d36db35SAvi Kivity 7797d36db35SAvi Kivity return test->scratch == 2; 7807d36db35SAvi Kivity } 7817d36db35SAvi Kivity 7827d36db35SAvi Kivity static bool check_mode_switch(struct test *test) 7837d36db35SAvi Kivity { 7847d36db35SAvi Kivity return test->scratch == 2; 7857d36db35SAvi Kivity } 7867d36db35SAvi Kivity 787bcd9774aSPaolo Bonzini static void prepare_ioio(struct test *test) 788bcd9774aSPaolo Bonzini { 789bcd9774aSPaolo Bonzini test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT); 790bcd9774aSPaolo Bonzini 
test->scratch = 0; 791bcd9774aSPaolo Bonzini memset(io_bitmap, 0, 8192); 792bcd9774aSPaolo Bonzini io_bitmap[8192] = 0xFF; 793bcd9774aSPaolo Bonzini } 794bcd9774aSPaolo Bonzini 795db4898e8SThomas Huth static int get_test_stage(struct test *test) 796bcd9774aSPaolo Bonzini { 797bcd9774aSPaolo Bonzini barrier(); 798bcd9774aSPaolo Bonzini return test->scratch; 799bcd9774aSPaolo Bonzini } 800bcd9774aSPaolo Bonzini 801306bb7dbSCathy Avery static void set_test_stage(struct test *test, int s) 802306bb7dbSCathy Avery { 803306bb7dbSCathy Avery barrier(); 804306bb7dbSCathy Avery test->scratch = s; 805306bb7dbSCathy Avery barrier(); 806306bb7dbSCathy Avery } 807306bb7dbSCathy Avery 808db4898e8SThomas Huth static void inc_test_stage(struct test *test) 809bcd9774aSPaolo Bonzini { 810bcd9774aSPaolo Bonzini barrier(); 811bcd9774aSPaolo Bonzini test->scratch++; 812bcd9774aSPaolo Bonzini barrier(); 813bcd9774aSPaolo Bonzini } 814bcd9774aSPaolo Bonzini 815bcd9774aSPaolo Bonzini static void test_ioio(struct test *test) 816bcd9774aSPaolo Bonzini { 817bcd9774aSPaolo Bonzini // stage 0, test IO pass 818bcd9774aSPaolo Bonzini inb(0x5000); 819bcd9774aSPaolo Bonzini outb(0x0, 0x5000); 820bcd9774aSPaolo Bonzini if (get_test_stage(test) != 0) 821bcd9774aSPaolo Bonzini goto fail; 822bcd9774aSPaolo Bonzini 823bcd9774aSPaolo Bonzini // test IO width, in/out 824bcd9774aSPaolo Bonzini io_bitmap[0] = 0xFF; 825bcd9774aSPaolo Bonzini inc_test_stage(test); 826bcd9774aSPaolo Bonzini inb(0x0); 827bcd9774aSPaolo Bonzini if (get_test_stage(test) != 2) 828bcd9774aSPaolo Bonzini goto fail; 829bcd9774aSPaolo Bonzini 830bcd9774aSPaolo Bonzini outw(0x0, 0x0); 831bcd9774aSPaolo Bonzini if (get_test_stage(test) != 3) 832bcd9774aSPaolo Bonzini goto fail; 833bcd9774aSPaolo Bonzini 834bcd9774aSPaolo Bonzini inl(0x0); 835bcd9774aSPaolo Bonzini if (get_test_stage(test) != 4) 836bcd9774aSPaolo Bonzini goto fail; 837bcd9774aSPaolo Bonzini 838bcd9774aSPaolo Bonzini // test low/high IO port 839bcd9774aSPaolo Bonzini 
io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 840bcd9774aSPaolo Bonzini inb(0x5000); 841bcd9774aSPaolo Bonzini if (get_test_stage(test) != 5) 842bcd9774aSPaolo Bonzini goto fail; 843bcd9774aSPaolo Bonzini 844bcd9774aSPaolo Bonzini io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8)); 845bcd9774aSPaolo Bonzini inw(0x9000); 846bcd9774aSPaolo Bonzini if (get_test_stage(test) != 6) 847bcd9774aSPaolo Bonzini goto fail; 848bcd9774aSPaolo Bonzini 849bcd9774aSPaolo Bonzini // test partial pass 850bcd9774aSPaolo Bonzini io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 851bcd9774aSPaolo Bonzini inl(0x4FFF); 852bcd9774aSPaolo Bonzini if (get_test_stage(test) != 7) 853bcd9774aSPaolo Bonzini goto fail; 854bcd9774aSPaolo Bonzini 855bcd9774aSPaolo Bonzini // test across pages 856bcd9774aSPaolo Bonzini inc_test_stage(test); 857bcd9774aSPaolo Bonzini inl(0x7FFF); 858bcd9774aSPaolo Bonzini if (get_test_stage(test) != 8) 859bcd9774aSPaolo Bonzini goto fail; 860bcd9774aSPaolo Bonzini 861bcd9774aSPaolo Bonzini inc_test_stage(test); 862bcd9774aSPaolo Bonzini io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8); 863bcd9774aSPaolo Bonzini inl(0x7FFF); 864bcd9774aSPaolo Bonzini if (get_test_stage(test) != 10) 865bcd9774aSPaolo Bonzini goto fail; 866bcd9774aSPaolo Bonzini 867bcd9774aSPaolo Bonzini io_bitmap[0] = 0; 868bcd9774aSPaolo Bonzini inl(0xFFFF); 869bcd9774aSPaolo Bonzini if (get_test_stage(test) != 11) 870bcd9774aSPaolo Bonzini goto fail; 871bcd9774aSPaolo Bonzini 872bcd9774aSPaolo Bonzini io_bitmap[0] = 0xFF; 873bcd9774aSPaolo Bonzini io_bitmap[8192] = 0; 874bcd9774aSPaolo Bonzini inl(0xFFFF); 875bcd9774aSPaolo Bonzini inc_test_stage(test); 876bcd9774aSPaolo Bonzini if (get_test_stage(test) != 12) 877bcd9774aSPaolo Bonzini goto fail; 878bcd9774aSPaolo Bonzini 879bcd9774aSPaolo Bonzini return; 880bcd9774aSPaolo Bonzini 881bcd9774aSPaolo Bonzini fail: 882a299895bSThomas Huth report(false, "stage %d", get_test_stage(test)); 883bcd9774aSPaolo Bonzini test->scratch = -1; 884bcd9774aSPaolo Bonzini } 
/*
 * Host-side handler for the IOIO test: on every IOIO VMEXIT, bump the
 * stage, then clear the bitmap bits for the intercepted port range so the
 * retried access passes through.  VMMCALL or any other exit ends the test.
 */
static bool ioio_finished(struct test *test)
{
	unsigned port, size;

	/* Only expect IOIO intercepts */
	if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
		return true;

	if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
		return true;

	/* one step forward */
	test->scratch += 1;

	/* exit_info_1 encodes the port (bits 16+) and access size */
	port = test->vmcb->control.exit_info_1 >> 16;
	size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

	while (size--) {
		io_bitmap[port / 8] &= ~(1 << (port & 7));
		port++;
	}

	return false;
}

/* Reset the bitmap (including the extra byte) and report pass/fail. */
static bool check_ioio(struct test *test)
{
	memset(io_bitmap, 0, 8193);
	return test->scratch != -1;
}

/* ASID 0 is illegal for a guest; VMRUN is expected to fail. */
static void prepare_asid_zero(struct test *test)
{
	test->vmcb->control.asid = 0;
}

/* Guest body; never expected to actually run. */
static void test_asid_zero(struct test *test)
{
	asm volatile ("vmmcall\n\t");
}

/* VMRUN with ASID 0 must exit with SVM_EXIT_ERR. */
static bool check_asid_zero(struct test *test)
{
	return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

/* Intercept selective CR0 writes only. */
static void sel_cr0_bug_prepare(struct test *test)
{
	vmcb_ident(test->vmcb);
	test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

/* A single VMEXIT ends this test. */
static bool sel_cr0_bug_finished(struct test *test)
{
	return true;
}

/*
 * Guest body: setting CR0.CD must trigger the selective CR0 write
 * intercept.  If execution continues past write_cr0(), the intercept
 * did not fire.
 */
static void sel_cr0_bug_test(struct test *test)
{
	unsigned long cr0;

	/* read cr0, set CD (bit 30), and write back */
	cr0  = read_cr0();
	cr0 |= (1UL << 30);
	write_cr0(cr0);

	/*
	 * If we are here the test failed, not sure what to do now because
	 * we are not in guest-mode anymore so we can't trigger an
	 * intercept.  Report the failure and exit the test run.
	 */
	report(false, "sel_cr0 test. Can not recover from this - exiting");
	exit(report_summary());
}

/* Pass iff the exit was the selective CR0 write intercept. */
static bool sel_cr0_bug_check(struct test *test)
{
	return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

/* Set the NX bit (63) in the NPT PTE covering null_test's code. */
static void npt_nx_prepare(struct test *test)
{

	u64 *pte;

	vmcb_ident(test->vmcb);
	pte = npt_get_pte((u64)null_test);

	*pte |= (1ULL << 63);
}

/*
 * Expect a nested page fault on instruction fetch; restore the PTE and
 * re-enable EFER.NXE (bit 11) for subsequent runs.
 */
static bool npt_nx_check(struct test *test)
{
	u64 *pte = npt_get_pte((u64)null_test);

	*pte &= ~(1ULL << 63);

	test->vmcb->save.efer |= (1 << 11);

	return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
}

/* Clear the user bit (2) in the NPT PTE covering the scratch page. */
static void npt_us_prepare(struct test *test)
{
	u64 *pte;

	vmcb_ident(test->vmcb);
	pte = npt_get_pte((u64)scratch_page);

	*pte &= ~(1ULL << 2);
}

/* Guest body: a read of the now supervisor-only page must fault. */
static void npt_us_test(struct test *test)
{
	(void) *(volatile u64 *)scratch_page;
}
static bool npt_us_check(struct test *test) 1005ea975120SJoerg Roedel { 1006726a1dd7SPaolo Bonzini u64 *pte = npt_get_pte((u64)scratch_page); 1007ea975120SJoerg Roedel 1008ea975120SJoerg Roedel *pte |= (1ULL << 2); 1009ea975120SJoerg Roedel 1010ea975120SJoerg Roedel return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 1011e8b10c1fSPaolo Bonzini && (test->vmcb->control.exit_info_1 == 0x100000005ULL); 1012ea975120SJoerg Roedel } 1013ea975120SJoerg Roedel 1014f6a2ca45SPaolo Bonzini u64 save_pde; 1015f6a2ca45SPaolo Bonzini 1016dd6ef43cSJoerg Roedel static void npt_rsvd_prepare(struct test *test) 1017dd6ef43cSJoerg Roedel { 1018f6a2ca45SPaolo Bonzini u64 *pde; 1019dd6ef43cSJoerg Roedel 1020dd6ef43cSJoerg Roedel vmcb_ident(test->vmcb); 1021f6a2ca45SPaolo Bonzini pde = npt_get_pde((u64) null_test); 1022dd6ef43cSJoerg Roedel 1023f6a2ca45SPaolo Bonzini save_pde = *pde; 1024f6a2ca45SPaolo Bonzini *pde = (1ULL << 19) | (1ULL << 7) | 0x27; 1025dd6ef43cSJoerg Roedel } 1026dd6ef43cSJoerg Roedel 1027dd6ef43cSJoerg Roedel static bool npt_rsvd_check(struct test *test) 1028dd6ef43cSJoerg Roedel { 1029f6a2ca45SPaolo Bonzini u64 *pde = npt_get_pde((u64) null_test); 1030f6a2ca45SPaolo Bonzini 1031f6a2ca45SPaolo Bonzini *pde = save_pde; 1032dd6ef43cSJoerg Roedel 1033dd6ef43cSJoerg Roedel return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 1034f6a2ca45SPaolo Bonzini && (test->vmcb->control.exit_info_1 == 0x10000001dULL); 1035dd6ef43cSJoerg Roedel } 1036dd6ef43cSJoerg Roedel 10375ebf82edSJoerg Roedel static void npt_rw_prepare(struct test *test) 10385ebf82edSJoerg Roedel { 10395ebf82edSJoerg Roedel 10405ebf82edSJoerg Roedel u64 *pte; 10415ebf82edSJoerg Roedel 10425ebf82edSJoerg Roedel vmcb_ident(test->vmcb); 1043726a1dd7SPaolo Bonzini pte = npt_get_pte(0x80000); 10445ebf82edSJoerg Roedel 10455ebf82edSJoerg Roedel *pte &= ~(1ULL << 1); 10465ebf82edSJoerg Roedel } 10475ebf82edSJoerg Roedel 10485ebf82edSJoerg Roedel static void npt_rw_test(struct test *test) 10495ebf82edSJoerg Roedel { 
10505ebf82edSJoerg Roedel u64 *data = (void*)(0x80000); 10515ebf82edSJoerg Roedel 10525ebf82edSJoerg Roedel *data = 0; 10535ebf82edSJoerg Roedel } 10545ebf82edSJoerg Roedel 10555ebf82edSJoerg Roedel static bool npt_rw_check(struct test *test) 10565ebf82edSJoerg Roedel { 1057726a1dd7SPaolo Bonzini u64 *pte = npt_get_pte(0x80000); 10585ebf82edSJoerg Roedel 10595ebf82edSJoerg Roedel *pte |= (1ULL << 1); 10605ebf82edSJoerg Roedel 10615ebf82edSJoerg Roedel return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 1062e8b10c1fSPaolo Bonzini && (test->vmcb->control.exit_info_1 == 0x100000007ULL); 10635ebf82edSJoerg Roedel } 10645ebf82edSJoerg Roedel 1065f6a2ca45SPaolo Bonzini static void npt_rw_pfwalk_prepare(struct test *test) 1066590040ffSJoerg Roedel { 1067590040ffSJoerg Roedel 1068590040ffSJoerg Roedel u64 *pte; 1069590040ffSJoerg Roedel 1070590040ffSJoerg Roedel vmcb_ident(test->vmcb); 1071726a1dd7SPaolo Bonzini pte = npt_get_pte(read_cr3()); 1072590040ffSJoerg Roedel 1073590040ffSJoerg Roedel *pte &= ~(1ULL << 1); 1074590040ffSJoerg Roedel } 1075590040ffSJoerg Roedel 1076f6a2ca45SPaolo Bonzini static bool npt_rw_pfwalk_check(struct test *test) 1077590040ffSJoerg Roedel { 1078726a1dd7SPaolo Bonzini u64 *pte = npt_get_pte(read_cr3()); 1079590040ffSJoerg Roedel 1080590040ffSJoerg Roedel *pte |= (1ULL << 1); 1081590040ffSJoerg Roedel 1082590040ffSJoerg Roedel return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 1083e8b10c1fSPaolo Bonzini && (test->vmcb->control.exit_info_1 == 0x200000006ULL) 1084590040ffSJoerg Roedel && (test->vmcb->control.exit_info_2 == read_cr3()); 1085590040ffSJoerg Roedel } 1086590040ffSJoerg Roedel 1087f6a2ca45SPaolo Bonzini static void npt_rsvd_pfwalk_prepare(struct test *test) 1088f6a2ca45SPaolo Bonzini { 1089f6a2ca45SPaolo Bonzini 1090f6a2ca45SPaolo Bonzini vmcb_ident(test->vmcb); 1091f6a2ca45SPaolo Bonzini 1092f6a2ca45SPaolo Bonzini pdpe[0] |= (1ULL << 8); 1093f6a2ca45SPaolo Bonzini } 1094f6a2ca45SPaolo Bonzini 1095f6a2ca45SPaolo Bonzini static 
bool npt_rsvd_pfwalk_check(struct test *test) 1096f6a2ca45SPaolo Bonzini { 1097f6a2ca45SPaolo Bonzini pdpe[0] &= ~(1ULL << 8); 1098f6a2ca45SPaolo Bonzini 1099f6a2ca45SPaolo Bonzini return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 11003fc91a19SCathy Avery && (test->vmcb->control.exit_info_1 == 0x20000000eULL); 1101f6a2ca45SPaolo Bonzini } 1102f6a2ca45SPaolo Bonzini 1103a2ab7740SPaolo Bonzini static void npt_l1mmio_prepare(struct test *test) 1104a2ab7740SPaolo Bonzini { 1105a2ab7740SPaolo Bonzini vmcb_ident(test->vmcb); 1106a2ab7740SPaolo Bonzini } 1107a2ab7740SPaolo Bonzini 11081e699ecbSPaolo Bonzini u32 nested_apic_version1; 11091e699ecbSPaolo Bonzini u32 nested_apic_version2; 1110a2ab7740SPaolo Bonzini 1111a2ab7740SPaolo Bonzini static void npt_l1mmio_test(struct test *test) 1112a2ab7740SPaolo Bonzini { 11131e699ecbSPaolo Bonzini volatile u32 *data = (volatile void*)(0xfee00030UL); 1114a2ab7740SPaolo Bonzini 11151e699ecbSPaolo Bonzini nested_apic_version1 = *data; 11161e699ecbSPaolo Bonzini nested_apic_version2 = *data; 1117a2ab7740SPaolo Bonzini } 1118a2ab7740SPaolo Bonzini 1119a2ab7740SPaolo Bonzini static bool npt_l1mmio_check(struct test *test) 1120a2ab7740SPaolo Bonzini { 11211e699ecbSPaolo Bonzini volatile u32 *data = (volatile void*)(0xfee00030); 11221e699ecbSPaolo Bonzini u32 lvr = *data; 1123a2ab7740SPaolo Bonzini 11241e699ecbSPaolo Bonzini return nested_apic_version1 == lvr && nested_apic_version2 == lvr; 1125a2ab7740SPaolo Bonzini } 1126a2ab7740SPaolo Bonzini 112769dd444aSPaolo Bonzini static void npt_rw_l1mmio_prepare(struct test *test) 112869dd444aSPaolo Bonzini { 112969dd444aSPaolo Bonzini 113069dd444aSPaolo Bonzini u64 *pte; 113169dd444aSPaolo Bonzini 113269dd444aSPaolo Bonzini vmcb_ident(test->vmcb); 113369dd444aSPaolo Bonzini pte = npt_get_pte(0xfee00080); 113469dd444aSPaolo Bonzini 113569dd444aSPaolo Bonzini *pte &= ~(1ULL << 1); 113669dd444aSPaolo Bonzini } 113769dd444aSPaolo Bonzini 113869dd444aSPaolo Bonzini static void 
npt_rw_l1mmio_test(struct test *test) 113969dd444aSPaolo Bonzini { 114069dd444aSPaolo Bonzini volatile u32 *data = (volatile void*)(0xfee00080); 114169dd444aSPaolo Bonzini 114269dd444aSPaolo Bonzini *data = *data; 114369dd444aSPaolo Bonzini } 114469dd444aSPaolo Bonzini 114569dd444aSPaolo Bonzini static bool npt_rw_l1mmio_check(struct test *test) 114669dd444aSPaolo Bonzini { 114769dd444aSPaolo Bonzini u64 *pte = npt_get_pte(0xfee00080); 114869dd444aSPaolo Bonzini 114969dd444aSPaolo Bonzini *pte |= (1ULL << 1); 115069dd444aSPaolo Bonzini 115169dd444aSPaolo Bonzini return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 115269dd444aSPaolo Bonzini && (test->vmcb->control.exit_info_1 == 0x100000007ULL); 115369dd444aSPaolo Bonzini } 115469dd444aSPaolo Bonzini 115536a7018aSPaolo Bonzini #define TSC_ADJUST_VALUE (1ll << 32) 115636a7018aSPaolo Bonzini #define TSC_OFFSET_VALUE (-1ll << 48) 115736a7018aSPaolo Bonzini static bool ok; 115836a7018aSPaolo Bonzini 115936a7018aSPaolo Bonzini static void tsc_adjust_prepare(struct test *test) 116036a7018aSPaolo Bonzini { 116136a7018aSPaolo Bonzini default_prepare(test); 116236a7018aSPaolo Bonzini test->vmcb->control.tsc_offset = TSC_OFFSET_VALUE; 116336a7018aSPaolo Bonzini 116436a7018aSPaolo Bonzini wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE); 116536a7018aSPaolo Bonzini int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 116636a7018aSPaolo Bonzini ok = adjust == -TSC_ADJUST_VALUE; 116736a7018aSPaolo Bonzini } 116836a7018aSPaolo Bonzini 116936a7018aSPaolo Bonzini static void tsc_adjust_test(struct test *test) 117036a7018aSPaolo Bonzini { 117136a7018aSPaolo Bonzini int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 117236a7018aSPaolo Bonzini ok &= adjust == -TSC_ADJUST_VALUE; 117336a7018aSPaolo Bonzini 117436a7018aSPaolo Bonzini uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; 117536a7018aSPaolo Bonzini wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); 117636a7018aSPaolo Bonzini 117736a7018aSPaolo Bonzini adjust = rdmsr(MSR_IA32_TSC_ADJUST); 
117836a7018aSPaolo Bonzini ok &= adjust <= -2 * TSC_ADJUST_VALUE; 117936a7018aSPaolo Bonzini 118036a7018aSPaolo Bonzini uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE; 118136a7018aSPaolo Bonzini ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE; 118236a7018aSPaolo Bonzini 118336a7018aSPaolo Bonzini uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE; 118436a7018aSPaolo Bonzini ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE; 118536a7018aSPaolo Bonzini } 118636a7018aSPaolo Bonzini 118736a7018aSPaolo Bonzini static bool tsc_adjust_check(struct test *test) 118836a7018aSPaolo Bonzini { 118936a7018aSPaolo Bonzini int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 119036a7018aSPaolo Bonzini 119136a7018aSPaolo Bonzini wrmsr(MSR_IA32_TSC_ADJUST, 0); 119236a7018aSPaolo Bonzini return ok && adjust <= -2 * TSC_ADJUST_VALUE; 119336a7018aSPaolo Bonzini } 119436a7018aSPaolo Bonzini 119521c23154SJoerg Roedel static void latency_prepare(struct test *test) 119621c23154SJoerg Roedel { 119721c23154SJoerg Roedel default_prepare(test); 119821c23154SJoerg Roedel runs = LATENCY_RUNS; 119921c23154SJoerg Roedel latvmrun_min = latvmexit_min = -1ULL; 120021c23154SJoerg Roedel latvmrun_max = latvmexit_max = 0; 120121c23154SJoerg Roedel vmrun_sum = vmexit_sum = 0; 120221c23154SJoerg Roedel } 120321c23154SJoerg Roedel 120421c23154SJoerg Roedel static void latency_test(struct test *test) 120521c23154SJoerg Roedel { 120621c23154SJoerg Roedel u64 cycles; 120721c23154SJoerg Roedel 120821c23154SJoerg Roedel start: 120921c23154SJoerg Roedel tsc_end = rdtsc(); 121021c23154SJoerg Roedel 121121c23154SJoerg Roedel cycles = tsc_end - tsc_start; 121221c23154SJoerg Roedel 121321c23154SJoerg Roedel if (cycles > latvmrun_max) 121421c23154SJoerg Roedel latvmrun_max = cycles; 121521c23154SJoerg Roedel 121621c23154SJoerg Roedel if (cycles < latvmrun_min) 121721c23154SJoerg Roedel latvmrun_min = cycles; 121821c23154SJoerg Roedel 121921c23154SJoerg Roedel vmrun_sum += cycles; 
122021c23154SJoerg Roedel 122121c23154SJoerg Roedel tsc_start = rdtsc(); 122221c23154SJoerg Roedel 122321c23154SJoerg Roedel asm volatile ("vmmcall" : : : "memory"); 122421c23154SJoerg Roedel goto start; 122521c23154SJoerg Roedel } 122621c23154SJoerg Roedel 122721c23154SJoerg Roedel static bool latency_finished(struct test *test) 122821c23154SJoerg Roedel { 122921c23154SJoerg Roedel u64 cycles; 123021c23154SJoerg Roedel 123121c23154SJoerg Roedel tsc_end = rdtsc(); 123221c23154SJoerg Roedel 123321c23154SJoerg Roedel cycles = tsc_end - tsc_start; 123421c23154SJoerg Roedel 123521c23154SJoerg Roedel if (cycles > latvmexit_max) 123621c23154SJoerg Roedel latvmexit_max = cycles; 123721c23154SJoerg Roedel 123821c23154SJoerg Roedel if (cycles < latvmexit_min) 123921c23154SJoerg Roedel latvmexit_min = cycles; 124021c23154SJoerg Roedel 124121c23154SJoerg Roedel vmexit_sum += cycles; 124221c23154SJoerg Roedel 124321c23154SJoerg Roedel test->vmcb->save.rip += 3; 124421c23154SJoerg Roedel 124521c23154SJoerg Roedel runs -= 1; 124621c23154SJoerg Roedel 124721c23154SJoerg Roedel return runs == 0; 124821c23154SJoerg Roedel } 124921c23154SJoerg Roedel 125021c23154SJoerg Roedel static bool latency_check(struct test *test) 125121c23154SJoerg Roedel { 1252b006d7ebSAndrew Jones printf(" Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max, 125321c23154SJoerg Roedel latvmrun_min, vmrun_sum / LATENCY_RUNS); 1254b006d7ebSAndrew Jones printf(" Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max, 125521c23154SJoerg Roedel latvmexit_min, vmexit_sum / LATENCY_RUNS); 125621c23154SJoerg Roedel return true; 125721c23154SJoerg Roedel } 125821c23154SJoerg Roedel 1259ef101219SRoedel, Joerg static void lat_svm_insn_prepare(struct test *test) 1260ef101219SRoedel, Joerg { 1261ef101219SRoedel, Joerg default_prepare(test); 1262ef101219SRoedel, Joerg runs = LATENCY_RUNS; 1263ef101219SRoedel, Joerg latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL; 1264ef101219SRoedel, Joerg 
latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0; 1265ef101219SRoedel, Joerg vmload_sum = vmsave_sum = stgi_sum = clgi_sum; 1266ef101219SRoedel, Joerg } 1267ef101219SRoedel, Joerg 1268ef101219SRoedel, Joerg static bool lat_svm_insn_finished(struct test *test) 1269ef101219SRoedel, Joerg { 1270ef101219SRoedel, Joerg u64 vmcb_phys = virt_to_phys(test->vmcb); 1271ef101219SRoedel, Joerg u64 cycles; 1272ef101219SRoedel, Joerg 1273ef101219SRoedel, Joerg for ( ; runs != 0; runs--) { 1274ef101219SRoedel, Joerg tsc_start = rdtsc(); 12752c6589bcSPeter Shier asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory"); 1276ef101219SRoedel, Joerg cycles = rdtsc() - tsc_start; 1277ef101219SRoedel, Joerg if (cycles > latvmload_max) 1278ef101219SRoedel, Joerg latvmload_max = cycles; 1279ef101219SRoedel, Joerg if (cycles < latvmload_min) 1280ef101219SRoedel, Joerg latvmload_min = cycles; 1281ef101219SRoedel, Joerg vmload_sum += cycles; 1282ef101219SRoedel, Joerg 1283ef101219SRoedel, Joerg tsc_start = rdtsc(); 12842c6589bcSPeter Shier asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory"); 1285ef101219SRoedel, Joerg cycles = rdtsc() - tsc_start; 1286ef101219SRoedel, Joerg if (cycles > latvmsave_max) 1287ef101219SRoedel, Joerg latvmsave_max = cycles; 1288ef101219SRoedel, Joerg if (cycles < latvmsave_min) 1289ef101219SRoedel, Joerg latvmsave_min = cycles; 1290ef101219SRoedel, Joerg vmsave_sum += cycles; 1291ef101219SRoedel, Joerg 1292ef101219SRoedel, Joerg tsc_start = rdtsc(); 1293ef101219SRoedel, Joerg asm volatile("stgi\n\t"); 1294ef101219SRoedel, Joerg cycles = rdtsc() - tsc_start; 1295ef101219SRoedel, Joerg if (cycles > latstgi_max) 1296ef101219SRoedel, Joerg latstgi_max = cycles; 1297ef101219SRoedel, Joerg if (cycles < latstgi_min) 1298ef101219SRoedel, Joerg latstgi_min = cycles; 1299ef101219SRoedel, Joerg stgi_sum += cycles; 1300ef101219SRoedel, Joerg 1301ef101219SRoedel, Joerg tsc_start = rdtsc(); 1302ef101219SRoedel, Joerg asm volatile("clgi\n\t"); 
1303ef101219SRoedel, Joerg cycles = rdtsc() - tsc_start; 1304ef101219SRoedel, Joerg if (cycles > latclgi_max) 1305ef101219SRoedel, Joerg latclgi_max = cycles; 1306ef101219SRoedel, Joerg if (cycles < latclgi_min) 1307ef101219SRoedel, Joerg latclgi_min = cycles; 1308ef101219SRoedel, Joerg clgi_sum += cycles; 1309ef101219SRoedel, Joerg } 1310ef101219SRoedel, Joerg 1311ef101219SRoedel, Joerg return true; 1312ef101219SRoedel, Joerg } 1313ef101219SRoedel, Joerg 1314ef101219SRoedel, Joerg static bool lat_svm_insn_check(struct test *test) 1315ef101219SRoedel, Joerg { 1316b006d7ebSAndrew Jones printf(" Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max, 1317ef101219SRoedel, Joerg latvmload_min, vmload_sum / LATENCY_RUNS); 1318b006d7ebSAndrew Jones printf(" Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max, 1319ef101219SRoedel, Joerg latvmsave_min, vmsave_sum / LATENCY_RUNS); 1320b006d7ebSAndrew Jones printf(" Latency STGI: max: %ld min: %ld avg: %ld\n", latstgi_max, 1321ef101219SRoedel, Joerg latstgi_min, stgi_sum / LATENCY_RUNS); 1322b006d7ebSAndrew Jones printf(" Latency CLGI: max: %ld min: %ld avg: %ld\n", latclgi_max, 1323ef101219SRoedel, Joerg latclgi_min, clgi_sum / LATENCY_RUNS); 1324ef101219SRoedel, Joerg return true; 1325ef101219SRoedel, Joerg } 1326306bb7dbSCathy Avery 1327306bb7dbSCathy Avery bool pending_event_ipi_fired; 1328306bb7dbSCathy Avery bool pending_event_guest_run; 1329306bb7dbSCathy Avery 1330306bb7dbSCathy Avery static void pending_event_ipi_isr(isr_regs_t *regs) 1331306bb7dbSCathy Avery { 1332306bb7dbSCathy Avery pending_event_ipi_fired = true; 1333306bb7dbSCathy Avery eoi(); 1334306bb7dbSCathy Avery } 1335306bb7dbSCathy Avery 1336306bb7dbSCathy Avery static void pending_event_prepare(struct test *test) 1337306bb7dbSCathy Avery { 1338306bb7dbSCathy Avery int ipi_vector = 0xf1; 1339306bb7dbSCathy Avery 1340306bb7dbSCathy Avery default_prepare(test); 1341306bb7dbSCathy Avery 1342306bb7dbSCathy Avery pending_event_ipi_fired = 
false; 1343306bb7dbSCathy Avery 1344306bb7dbSCathy Avery handle_irq(ipi_vector, pending_event_ipi_isr); 1345306bb7dbSCathy Avery 1346306bb7dbSCathy Avery pending_event_guest_run = false; 1347306bb7dbSCathy Avery 1348306bb7dbSCathy Avery test->vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1349306bb7dbSCathy Avery test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1350306bb7dbSCathy Avery 1351306bb7dbSCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1352306bb7dbSCathy Avery APIC_DM_FIXED | ipi_vector, 0); 1353306bb7dbSCathy Avery 1354306bb7dbSCathy Avery set_test_stage(test, 0); 1355306bb7dbSCathy Avery } 1356306bb7dbSCathy Avery 1357306bb7dbSCathy Avery static void pending_event_test(struct test *test) 1358306bb7dbSCathy Avery { 1359306bb7dbSCathy Avery pending_event_guest_run = true; 1360306bb7dbSCathy Avery } 1361306bb7dbSCathy Avery 1362306bb7dbSCathy Avery static bool pending_event_finished(struct test *test) 1363306bb7dbSCathy Avery { 1364306bb7dbSCathy Avery switch (get_test_stage(test)) { 1365306bb7dbSCathy Avery case 0: 1366306bb7dbSCathy Avery if (test->vmcb->control.exit_code != SVM_EXIT_INTR) { 1367a299895bSThomas Huth report(false, "VMEXIT not due to pending interrupt. 
Exit reason 0x%x", 1368a299895bSThomas Huth test->vmcb->control.exit_code); 1369306bb7dbSCathy Avery return true; 1370306bb7dbSCathy Avery } 1371306bb7dbSCathy Avery 1372306bb7dbSCathy Avery test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1373306bb7dbSCathy Avery test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 1374306bb7dbSCathy Avery 1375306bb7dbSCathy Avery if (pending_event_guest_run) { 1376a299895bSThomas Huth report(false, "Guest ran before host received IPI\n"); 1377306bb7dbSCathy Avery return true; 1378306bb7dbSCathy Avery } 1379306bb7dbSCathy Avery 1380306bb7dbSCathy Avery irq_enable(); 1381306bb7dbSCathy Avery asm volatile ("nop"); 1382306bb7dbSCathy Avery irq_disable(); 1383306bb7dbSCathy Avery 1384306bb7dbSCathy Avery if (!pending_event_ipi_fired) { 1385a299895bSThomas Huth report(false, "Pending interrupt not dispatched after IRQ enabled\n"); 1386306bb7dbSCathy Avery return true; 1387306bb7dbSCathy Avery } 1388306bb7dbSCathy Avery break; 1389306bb7dbSCathy Avery 1390306bb7dbSCathy Avery case 1: 1391306bb7dbSCathy Avery if (!pending_event_guest_run) { 1392a299895bSThomas Huth report(false, "Guest did not resume when no interrupt\n"); 1393306bb7dbSCathy Avery return true; 1394306bb7dbSCathy Avery } 1395306bb7dbSCathy Avery break; 1396306bb7dbSCathy Avery } 1397306bb7dbSCathy Avery 1398306bb7dbSCathy Avery inc_test_stage(test); 1399306bb7dbSCathy Avery 1400306bb7dbSCathy Avery return get_test_stage(test) == 2; 1401306bb7dbSCathy Avery } 1402306bb7dbSCathy Avery 1403306bb7dbSCathy Avery static bool pending_event_check(struct test *test) 1404306bb7dbSCathy Avery { 1405306bb7dbSCathy Avery return get_test_stage(test) == 2; 1406306bb7dbSCathy Avery } 1407306bb7dbSCathy Avery 14082e7dd780SCathy Avery static void pending_event_prepare_vmask(struct test *test) 14092e7dd780SCathy Avery { 14102e7dd780SCathy Avery default_prepare(test); 14112e7dd780SCathy Avery 14122e7dd780SCathy Avery pending_event_ipi_fired = false; 14132e7dd780SCathy Avery 
14142e7dd780SCathy Avery set_host_if = 0; 14152e7dd780SCathy Avery 14162e7dd780SCathy Avery handle_irq(0xf1, pending_event_ipi_isr); 14172e7dd780SCathy Avery 14182e7dd780SCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 14192e7dd780SCathy Avery APIC_DM_FIXED | 0xf1, 0); 14202e7dd780SCathy Avery 14212e7dd780SCathy Avery set_test_stage(test, 0); 14222e7dd780SCathy Avery } 14232e7dd780SCathy Avery 14242e7dd780SCathy Avery static void pending_event_test_vmask(struct test *test) 14252e7dd780SCathy Avery { 14262e7dd780SCathy Avery if (pending_event_ipi_fired == true) { 14272e7dd780SCathy Avery set_test_stage(test, -1); 14282e7dd780SCathy Avery report(false, "Interrupt preceeded guest"); 14292e7dd780SCathy Avery vmmcall(); 14302e7dd780SCathy Avery } 14312e7dd780SCathy Avery 14322e7dd780SCathy Avery irq_enable(); 14332e7dd780SCathy Avery asm volatile ("nop"); 14342e7dd780SCathy Avery irq_disable(); 14352e7dd780SCathy Avery 14362e7dd780SCathy Avery if (pending_event_ipi_fired != true) { 14372e7dd780SCathy Avery set_test_stage(test, -1); 14382e7dd780SCathy Avery report(false, "Interrupt not triggered by guest"); 14392e7dd780SCathy Avery } 14402e7dd780SCathy Avery 14412e7dd780SCathy Avery vmmcall(); 14422e7dd780SCathy Avery 14432e7dd780SCathy Avery irq_enable(); 14442e7dd780SCathy Avery asm volatile ("nop"); 14452e7dd780SCathy Avery irq_disable(); 14462e7dd780SCathy Avery } 14472e7dd780SCathy Avery 14482e7dd780SCathy Avery static bool pending_event_finished_vmask(struct test *test) 14492e7dd780SCathy Avery { 14502e7dd780SCathy Avery if ( test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 14512e7dd780SCathy Avery report(false, "VM_EXIT return to host is not EXIT_VMMCALL exit reason 0x%x", 14522e7dd780SCathy Avery test->vmcb->control.exit_code); 14532e7dd780SCathy Avery return true; 14542e7dd780SCathy Avery } 14552e7dd780SCathy Avery 14562e7dd780SCathy Avery switch (get_test_stage(test)) { 14572e7dd780SCathy Avery case 0: 14582e7dd780SCathy Avery 
test->vmcb->save.rip += 3; 14592e7dd780SCathy Avery 14602e7dd780SCathy Avery pending_event_ipi_fired = false; 14612e7dd780SCathy Avery 14622e7dd780SCathy Avery test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 14632e7dd780SCathy Avery 14642e7dd780SCathy Avery apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 14652e7dd780SCathy Avery APIC_DM_FIXED | 0xf1, 0); 14662e7dd780SCathy Avery 14672e7dd780SCathy Avery break; 14682e7dd780SCathy Avery 14692e7dd780SCathy Avery case 1: 14702e7dd780SCathy Avery if (pending_event_ipi_fired == true) { 14712e7dd780SCathy Avery report(false, "Interrupt triggered by guest"); 14722e7dd780SCathy Avery return true; 14732e7dd780SCathy Avery } 14742e7dd780SCathy Avery 14752e7dd780SCathy Avery irq_enable(); 14762e7dd780SCathy Avery asm volatile ("nop"); 14772e7dd780SCathy Avery irq_disable(); 14782e7dd780SCathy Avery 14792e7dd780SCathy Avery if (pending_event_ipi_fired != true) { 14802e7dd780SCathy Avery report(false, "Interrupt not triggered by host"); 14812e7dd780SCathy Avery return true; 14822e7dd780SCathy Avery } 14832e7dd780SCathy Avery 14842e7dd780SCathy Avery break; 14852e7dd780SCathy Avery 14862e7dd780SCathy Avery default: 14872e7dd780SCathy Avery return true; 14882e7dd780SCathy Avery } 14892e7dd780SCathy Avery 14902e7dd780SCathy Avery inc_test_stage(test); 14912e7dd780SCathy Avery 14922e7dd780SCathy Avery return get_test_stage(test) == 2; 14932e7dd780SCathy Avery } 14942e7dd780SCathy Avery 14952e7dd780SCathy Avery static bool pending_event_check_vmask(struct test *test) 14962e7dd780SCathy Avery { 14972e7dd780SCathy Avery return get_test_stage(test) == 2; 14982e7dd780SCathy Avery } 14992e7dd780SCathy Avery 15007d36db35SAvi Kivity static struct test tests[] = { 1501*e7bce343SPaolo Bonzini { "null", default_supported, default_prepare, 1502*e7bce343SPaolo Bonzini default_prepare_gif_clear, null_test, 15037d36db35SAvi Kivity default_finished, null_check }, 1504*e7bce343SPaolo Bonzini { "vmrun", default_supported, default_prepare, 
1505*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_vmrun, 15067d36db35SAvi Kivity default_finished, check_vmrun }, 1507*e7bce343SPaolo Bonzini { "ioio", default_supported, prepare_ioio, 1508*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_ioio, 1509bcd9774aSPaolo Bonzini ioio_finished, check_ioio }, 15107d36db35SAvi Kivity { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 1511*e7bce343SPaolo Bonzini default_prepare_gif_clear, null_test, default_finished, 1512*e7bce343SPaolo Bonzini check_no_vmrun_int }, 1513*e7bce343SPaolo Bonzini { "cr3 read intercept", default_supported, 1514*e7bce343SPaolo Bonzini prepare_cr3_intercept, default_prepare_gif_clear, 15157d36db35SAvi Kivity test_cr3_intercept, default_finished, check_cr3_intercept }, 15167d36db35SAvi Kivity { "cr3 read nointercept", default_supported, default_prepare, 1517*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_cr3_intercept, default_finished, 1518*e7bce343SPaolo Bonzini check_cr3_nointercept }, 1519095274b4SPrasad Joshi { "cr3 read intercept emulate", smp_supported, 1520*e7bce343SPaolo Bonzini prepare_cr3_intercept_bypass, default_prepare_gif_clear, 1521*e7bce343SPaolo Bonzini test_cr3_intercept_bypass, default_finished, check_cr3_intercept }, 15228c6286f1STambe, William { "dr intercept check", default_supported, prepare_dr_intercept, 1523*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished, 1524*e7bce343SPaolo Bonzini check_dr_intercept }, 1525*e7bce343SPaolo Bonzini { "next_rip", next_rip_supported, prepare_next_rip, 1526*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_next_rip, 15277d36db35SAvi Kivity default_finished, check_next_rip }, 152806a8c023STambe, William { "msr intercept check", default_supported, prepare_msr_intercept, 1529*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_msr_intercept, 1530*e7bce343SPaolo Bonzini msr_intercept_finished, check_msr_intercept }, 1531*e7bce343SPaolo Bonzini { 
"mode_switch", default_supported, prepare_mode_switch, 1532*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_mode_switch, 15337d36db35SAvi Kivity mode_switch_finished, check_mode_switch }, 1534*e7bce343SPaolo Bonzini { "asid_zero", default_supported, prepare_asid_zero, 1535*e7bce343SPaolo Bonzini default_prepare_gif_clear, test_asid_zero, 15367d36db35SAvi Kivity default_finished, check_asid_zero }, 1537*e7bce343SPaolo Bonzini { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, 1538*e7bce343SPaolo Bonzini default_prepare_gif_clear, sel_cr0_bug_test, 15394c8eb156SJoerg Roedel sel_cr0_bug_finished, sel_cr0_bug_check }, 1540*e7bce343SPaolo Bonzini { "npt_nx", npt_supported, npt_nx_prepare, 1541*e7bce343SPaolo Bonzini default_prepare_gif_clear, null_test, 1542ea975120SJoerg Roedel default_finished, npt_nx_check }, 1543*e7bce343SPaolo Bonzini { "npt_us", npt_supported, npt_us_prepare, 1544*e7bce343SPaolo Bonzini default_prepare_gif_clear, npt_us_test, 1545ea975120SJoerg Roedel default_finished, npt_us_check }, 1546*e7bce343SPaolo Bonzini { "npt_rsvd", npt_supported, npt_rsvd_prepare, 1547*e7bce343SPaolo Bonzini default_prepare_gif_clear, null_test, 1548dd6ef43cSJoerg Roedel default_finished, npt_rsvd_check }, 1549*e7bce343SPaolo Bonzini { "npt_rw", npt_supported, npt_rw_prepare, 1550*e7bce343SPaolo Bonzini default_prepare_gif_clear, npt_rw_test, 15515ebf82edSJoerg Roedel default_finished, npt_rw_check }, 1552*e7bce343SPaolo Bonzini { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, 1553*e7bce343SPaolo Bonzini default_prepare_gif_clear, null_test, 1554f6a2ca45SPaolo Bonzini default_finished, npt_rsvd_pfwalk_check }, 1555*e7bce343SPaolo Bonzini { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, 1556*e7bce343SPaolo Bonzini default_prepare_gif_clear, null_test, 1557f6a2ca45SPaolo Bonzini default_finished, npt_rw_pfwalk_check }, 1558*e7bce343SPaolo Bonzini { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, 1559*e7bce343SPaolo Bonzini 
default_prepare_gif_clear, npt_l1mmio_test, 1560a2ab7740SPaolo Bonzini default_finished, npt_l1mmio_check }, 1561*e7bce343SPaolo Bonzini { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, 1562*e7bce343SPaolo Bonzini default_prepare_gif_clear, npt_rw_l1mmio_test, 156369dd444aSPaolo Bonzini default_finished, npt_rw_l1mmio_check }, 1564*e7bce343SPaolo Bonzini { "tsc_adjust", default_supported, tsc_adjust_prepare, 1565*e7bce343SPaolo Bonzini default_prepare_gif_clear, tsc_adjust_test, 156636a7018aSPaolo Bonzini default_finished, tsc_adjust_check }, 1567*e7bce343SPaolo Bonzini { "latency_run_exit", default_supported, latency_prepare, 1568*e7bce343SPaolo Bonzini default_prepare_gif_clear, latency_test, 156921c23154SJoerg Roedel latency_finished, latency_check }, 1570*e7bce343SPaolo Bonzini { "latency_svm_insn", default_supported, lat_svm_insn_prepare, 1571*e7bce343SPaolo Bonzini default_prepare_gif_clear, null_test, 1572ef101219SRoedel, Joerg lat_svm_insn_finished, lat_svm_insn_check }, 1573306bb7dbSCathy Avery { "pending_event", default_supported, pending_event_prepare, 1574*e7bce343SPaolo Bonzini default_prepare_gif_clear, 1575306bb7dbSCathy Avery pending_event_test, pending_event_finished, pending_event_check }, 15762e7dd780SCathy Avery { "pending_event_vmask", default_supported, pending_event_prepare_vmask, 1577*e7bce343SPaolo Bonzini default_prepare_gif_clear, 15782e7dd780SCathy Avery pending_event_test_vmask, pending_event_finished_vmask, 15792e7dd780SCathy Avery pending_event_check_vmask }, 15807d36db35SAvi Kivity }; 15817d36db35SAvi Kivity 15827d36db35SAvi Kivity int main(int ac, char **av) 15837d36db35SAvi Kivity { 1584a43ed2acSAndrew Jones int i, nr; 15857d36db35SAvi Kivity struct vmcb *vmcb; 15867d36db35SAvi Kivity 15877d36db35SAvi Kivity setup_vm(); 15887d36db35SAvi Kivity smp_init(); 15897d36db35SAvi Kivity 1590badc98caSKrish Sadhukhan if (!this_cpu_has(X86_FEATURE_SVM)) { 15917d36db35SAvi Kivity printf("SVM not availble\n"); 159232b9603cSRadim Krčmář 
return report_summary(); 15937d36db35SAvi Kivity } 15947d36db35SAvi Kivity 15957d36db35SAvi Kivity setup_svm(); 15967d36db35SAvi Kivity 15977d36db35SAvi Kivity vmcb = alloc_page(); 15987d36db35SAvi Kivity 15997d36db35SAvi Kivity nr = ARRAY_SIZE(tests); 16007d36db35SAvi Kivity for (i = 0; i < nr; ++i) { 16017d36db35SAvi Kivity if (!tests[i].supported()) 16027d36db35SAvi Kivity continue; 1603a43ed2acSAndrew Jones test_run(&tests[i], vmcb); 16047d36db35SAvi Kivity } 16057d36db35SAvi Kivity 1606a43ed2acSAndrew Jones return report_summary(); 16077d36db35SAvi Kivity } 1608