/*
 * Framework for testing nested virtualization
 */

#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"

/* for the nested page table */
u64 *pte[2048];
u64 *pde[4];
u64 *pdpe;
u64 *pml4e;

struct vmcb *vmcb;

u64 *npt_get_pte(u64 address)
{
	int i1, i2;

	address >>= 12;
	i1 = (address >> 9) & 0x7ff;
	i2 = address & 0x1ff;

	return &pte[i1][i2];
}

u64 *npt_get_pde(u64 address)
{
	int i1, i2;

	address >>= 21;
	i1 = (address >> 9) & 0x3;
	i2 = address & 0x1ff;

	return &pde[i1][i2];
}

u64 *npt_get_pdpe(void)
{
	return pdpe;
}

u64 *npt_get_pml4e(void)
{
	return pml4e;
}

bool smp_supported(void)
{
	return cpu_count() > 1;
}

bool default_supported(void)
{
	return true;
}

void default_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

void default_prepare_gif_clear(struct svm_test *test)
{
}

bool default_finished(struct svm_test *test)
{
	return true; /* one vmexit */
}

bool npt_supported(void)
{
	return this_cpu_has(X86_FEATURE_NPT);
}

int get_test_stage(struct svm_test *test)
{
	barrier();
	return test->scratch;
}

void set_test_stage(struct svm_test *test, int s)
{
	barrier();
	test->scratch = s;
	barrier();
}

void inc_test_stage(struct svm_test *test)
{
	barrier();
	test->scratch++;
	barrier();
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

inline void vmmcall(void)
{
	asm volatile ("vmmcall" : : : "memory");
}

static test_guest_func guest_main;

void test_set_guest(test_guest_func func)
{
	guest_main = func;
}

static void test_thunk(struct svm_test *test)
{
	guest_main(test);
	vmmcall();
}

u8 *io_bitmap;
u8 io_bitmap_area[16384];	/* 12 KiB IOPM plus slack for page alignment */

u8 *msr_bitmap;
u8 msr_bitmap_area[MSR_BITMAP_SIZE + PAGE_SIZE];

void vmcb_ident(struct vmcb *vmcb)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
			  | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
			  | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	struct descriptor_table_ptr desc_table_ptr;

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
	sgdt(&desc_table_ptr);
	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	sidt(&desc_table_ptr);
	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	save->cr4 = read_cr4();
	save->cr3 = read_cr3();
	save->cr0 = read_cr0();
	save->dr7 = read_dr7();
	save->dr6 = read_dr6();
	save->cr2 = read_cr2();
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);

	if (npt_supported()) {
		ctrl->nested_ctl = 1;
		ctrl->nested_cr3 = (u64)pml4e;
		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}
}

struct regs regs;

struct regs get_regs(void)
{
	return regs;
}

// rax handled specially below

#define SAVE_GPR_C			\
	"xchg %%rbx, regs+0x8\n\t"	\
	"xchg %%rcx, regs+0x10\n\t"	\
	"xchg %%rdx, regs+0x18\n\t"	\
	"xchg %%rbp, regs+0x28\n\t"	\
	"xchg %%rsi, regs+0x30\n\t"	\
	"xchg %%rdi, regs+0x38\n\t"	\
	"xchg %%r8, regs+0x40\n\t"	\
	"xchg %%r9, regs+0x48\n\t"	\
	"xchg %%r10, regs+0x50\n\t"	\
	"xchg %%r11, regs+0x58\n\t"	\
	"xchg %%r12, regs+0x60\n\t"	\
	"xchg %%r13, regs+0x68\n\t"	\
	"xchg %%r14, regs+0x70\n\t"	\
	"xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C SAVE_GPR_C

struct svm_test *v2_test;

/*
 * rax and rflags are not covered by SAVE_GPR_C above; they travel through
 * the VMCB save area instead: regs.rflags (regs+0x80) goes to the saved
 * RFLAGS slot at 0x170(%rax) and regs.rax (regs+0x0) to the saved RAX
 * slot at 0x1f8(%rax), with %rax holding the VMCB address.
 */
#define ASM_PRE_VMRUN_CMD		\
	"vmload %%rax\n\t"		\
	"mov regs+0x80, %%r15\n\t"	\
	"mov %%r15, 0x170(%%rax)\n\t"	\
	"mov regs, %%r15\n\t"		\
	"mov %%r15, 0x1f8(%%rax)\n\t"	\
	LOAD_GPR_C

#define ASM_POST_VMRUN_CMD		\
	SAVE_GPR_C			\
	"mov 0x170(%%rax), %%r15\n\t"	\
	"mov %%r15, regs+0x80\n\t"	\
	"mov 0x1f8(%%rax), %%r15\n\t"	\
	"mov %%r15, regs\n\t"		\
	"vmsave %%rax\n\t"

u64 guest_stack[10000];

int svm_vmrun(void)
{
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)v2_test;

	asm volatile (
		ASM_PRE_VMRUN_CMD
		"vmrun %%rax\n\t"
		ASM_POST_VMRUN_CMD
		:
		: "a" (virt_to_phys(vmcb))
		: "memory", "r15");

	return vmcb->control.exit_code;
}
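/*
 * Sketch of a typical v2-style caller of svm_vmrun() -- the function
 * names and report message are illustrative, not part of this file:
 *
 *	static void my_guest(struct svm_test *test)
 *	{
 *		// guest code; returning lets test_thunk() issue vmmcall
 *	}
 *
 *	static void my_v2_test(void)
 *	{
 *		test_set_guest(my_guest);
 *		report(svm_vmrun() == SVM_EXIT_VMMCALL,
 *		       "guest exited via vmmcall");
 *	}
 */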
extern u64 *vmrun_rip;

static void test_run(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);

	irq_disable();
	vmcb_ident(vmcb);

	test->prepare(test);
	guest_main = test->guest_func;
	vmcb->save.rip = (ulong)test_thunk;
	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
	regs.rdi = (ulong)test;
	do {
		struct svm_test *the_test = test;
		u64 the_vmcb = vmcb_phys;
		asm volatile (
			"clgi;\n\t" // semi-colon needed for LLVM compatibility
			"sti \n\t"
			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n\t"
			"mov %[vmcb_phys], %%rax \n\t"
			ASM_PRE_VMRUN_CMD
			".global vmrun_rip\n\t"
			"vmrun_rip: vmrun %%rax\n\t"
			ASM_POST_VMRUN_CMD
			"cli \n\t"
			"stgi"
			: // inputs clobbered by the guest:
			  "=D" (the_test),  // first argument register
			  "=b" (the_vmcb)   // callee save register!
			: [test] "0" (the_test),
			  [vmcb_phys] "1" (the_vmcb),
			  [PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
			: "rax", "rcx", "rdx", "rsi",
			  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
			  "memory");
		++test->exits;
	} while (!test->finished(test));
	irq_enable();

	report(test->succeeded(test), "%s", test->name);

	if (test->on_vcpu)
		test->on_vcpu_done = true;
}

static void set_additional_vcpu_msr(void *msr_efer)
{
	void *hsave = alloc_page();

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}

static void setup_svm(void)
{
	void *hsave = alloc_page();
	u64 *page, address;
	int i, j;

	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);

	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);

	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);

	if (!npt_supported())
		return;

	for (i = 1; i < cpu_count(); i++)
		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));

	printf("NPT detected - running all tests with NPT enabled\n");

	/*
	 * Nested paging supported - build the page table bottom-up,
	 * identity-mapping the first 4 GiB with 4k pages to get enough
	 * granularity for the NPT unit-tests.
	 */

	address = 0;

	/* PTE level */
	for (i = 0; i < 2048; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j, address += 4096)
			page[j] = address | 0x067ULL;	/* P | RW | US | A | D */

		pte[i] = page;
	}

	/* PDE level */
	for (i = 0; i < 4; ++i) {
		page = alloc_page();

		for (j = 0; j < 512; ++j)
			page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;	/* P | RW | US | A */

		pde[i] = page;
	}

	/* PDPE level */
	pdpe = alloc_page();
	for (i = 0; i < 4; ++i)
		pdpe[i] = ((u64)(pde[i])) | 0x27;

	/* PML4E level */
	pml4e = alloc_page();
	pml4e[0] = ((u64)pdpe) | 0x27;
}
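/*
 * With NPT enabled, tests can manipulate the nested page table through
 * the npt_get_*() accessors above. A sketch (the page variable and the
 * expected exit are illustrative, assuming a GPA below 4 GiB):
 *
 *	u64 *pte = npt_get_pte((u64)some_page);
 *
 *	*pte &= ~1ULL;		// hide the page: clear the present bit
 *	// ... enter the guest and touch some_page; the access should
 *	// exit with vmcb->control.exit_code == SVM_EXIT_NPF and the
 *	// faulting guest-physical address in exit_info_2.
 */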
int matched;

static bool
test_wanted(const char *name, char *filters[], int filter_count)
{
	int i;
	bool positive = false;
	bool match = false;
	char clean_name[strlen(name) + 1];
	char *c;
	const char *n;

	/* Replace spaces with underscores. */
	n = name;
	c = &clean_name[0];
	do
		*c++ = (*n == ' ') ? '_' : *n;
	while (*n++);

	for (i = 0; i < filter_count; i++) {
		const char *filter = filters[i];

		if (filter[0] == '-') {
			if (simple_glob(clean_name, filter + 1))
				return false;
		} else {
			positive = true;
			match |= simple_glob(clean_name, filter);
		}
	}

	if (!positive || match) {
		matched++;
		return true;
	} else {
		return false;
	}
}
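/*
 * Filter semantics, by example: each command line argument is a glob
 * matched against the test name (spaces replaced by underscores), and a
 * leading '-' turns a pattern into an exclusion. So an invocation such
 * as (binary path and test names illustrative)
 *
 *	./svm.flat npt* -npt_rw
 *
 * runs every test whose name starts with "npt" except "npt_rw", while
 * passing no positive pattern runs all tests not explicitly excluded.
 */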
int main(int ac, char **av)
{
	int i = 0;

	ac--;
	av++;

	setup_vm();

	if (!this_cpu_has(X86_FEATURE_SVM)) {
		printf("SVM not available\n");
		return report_summary();
	}

	setup_svm();

	vmcb = alloc_page();

	for (; svm_tests[i].name != NULL; i++) {
		if (!test_wanted(svm_tests[i].name, av, ac))
			continue;
		if (svm_tests[i].supported && !svm_tests[i].supported())
			continue;
		if (svm_tests[i].v2 == NULL) {
			if (svm_tests[i].on_vcpu) {
				if (cpu_count() <= svm_tests[i].on_vcpu)
					continue;
				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
				while (!svm_tests[i].on_vcpu_done)
					cpu_relax();
			} else {
				test_run(&svm_tests[i]);
			}
		} else {
			vmcb_ident(vmcb);
			v2_test = &(svm_tests[i]);
			svm_tests[i].v2();
		}
	}

	if (!matched)
		report(matched, "command line didn't match any tests!");

	return report_summary();
}