#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "io.h"

/* for the nested page table */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

u8 *io_bitmap;
u8 io_bitmap_area[16384];

static bool npt_supported(void)
{
    return cpuid(0x8000000A).d & 1;
}

static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - build a nested page table.
     * Build the page table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit tests.
     */

    address = 0;

    /* PTE level */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL;   /* P, RW, US, A, D */

        pte[i] = page;
    }

    /* PDE level */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;   /* P, RW, US, A */

        pde[i] = page;
    }

    /* PDPe level */
    pdpe = alloc_page();
    for (i = 0; i < 4; ++i)
        pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}

static u64 *npt_get_pde(u64 address)
{
    int i1, i2;

    address >>= 21;
    i1 = (address >> 9) & 0x3;
    i2 = address & 0x1ff;

    return &pde[i1][i2];
}

static u64 *npt_get_pte(u64 address)
{
    int i1, i2;

    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}

static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
                          | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
                          | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
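    /*
     * Mirror the host's descriptor tables, control registers and MSR
     * state into the guest save area so the guest initially runs with
     * a context identical to the host's.
     */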
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
    ctrl->iopm_base_pa = virt_to_phys(io_bitmap);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}

struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static inline void vmmcall(void)
{
    asm volatile ("vmmcall" : : : "memory");
}

static void test_thunk(struct test *test)
{
    test->guest_func(test);
    vmmcall();
}

/* Field order must match the offsets used in SAVE_GPR_C below. */
struct regs {
    u64 rax;
    u64 rbx;
    u64 rcx;
    u64 rdx;
    u64 cr2;
    u64 rbp;
    u64 rsi;
    u64 rdi;
    u64 r8;
    u64 r9;
    u64 r10;
    u64 r11;
    u64 r12;
    u64 r13;
    u64 r14;
    u64 r15;
    u64 rflags;
};

struct regs regs;

// rax handled specially below

#define SAVE_GPR_C                  \
    "xchg %%rbx, regs+0x8\n\t"      \
    "xchg %%rcx, regs+0x10\n\t"     \
    "xchg %%rdx, regs+0x18\n\t"     \
    "xchg %%rbp, regs+0x28\n\t"     \
    "xchg %%rsi, regs+0x30\n\t"     \
    "xchg %%rdi, regs+0x38\n\t"     \
    "xchg %%r8, regs+0x40\n\t"      \
    "xchg %%r9, regs+0x48\n\t"      \
    "xchg %%r10, regs+0x50\n\t"     \
    "xchg %%r11, regs+0x58\n\t"     \
    "xchg %%r12, regs+0x60\n\t"     \
    "xchg %%r13, regs+0x68\n\t"     \
    "xchg %%r14, regs+0x70\n\t"     \
    "xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C SAVE_GPR_C

/*
 * Enter the guest once. Guest rflags and rax are kept in the VMCB itself
 * (the fixed 0x170/0x1f8 offsets in the asm below); all other GPRs are
 * exchanged with the global 'regs' struct via LOAD_GPR_C/SAVE_GPR_C.
 */
static bool test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];
    bool success;

    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    regs.rdi = (ulong)test;
    do {
        tsc_start = rdtsc();
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "mov regs+0x80, %%r15\n\t"  // rflags
            "mov %%r15, 0x170(%0)\n\t"
            "mov regs, %%r15\n\t"       // rax
            "mov %%r15, 0x1f8(%0)\n\t"
            LOAD_GPR_C
            "vmrun \n\t"
            SAVE_GPR_C
            "mov 0x170(%0), %%r15\n\t"  // rflags
            "mov %%r15, regs+0x80\n\t"
            "mov 0x1f8(%0), %%r15\n\t"  // rax
            "mov %%r15, regs\n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    success = test->succeeded(test);

    printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");

    return success;
}

static bool smp_supported(void)
{
    return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}

static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

static bool next_rip_supported(void)
{
    return (cpuid(SVM_CPUID_FUNC).d & 8);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}

static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                              | (1ULL << UD_VECTOR)
                                              | (1ULL << DF_VECTOR)
                                              | (1ULL << PF_VECTOR);
    test->scratch = 0;
}

/*
 * Walk the guest down from 64-bit long mode through 32-bit and 16-bit
 * protected mode to real mode, VMMCALL there, then climb back up to long
 * mode and VMMCALL again; mode_switch_finished() sanity-checks
 * CR0/CR4/EFER at both stops.
 */
static void test_mode_switch(struct test *test)
{
    asm volatile("  cli\n"
                 "  ljmp *1f\n" /* jump to 32-bit code segment */
                 "1:\n"
                 "  .long 2f\n"
                 "  .long " xstr(KERNEL_CS32) "\n"
                 ".code32\n"
                 "2:\n"
                 "  movl %%cr0, %%eax\n"
                 "  btcl $31, %%eax\n" /* clear PG */
                 "  movl %%eax, %%cr0\n"
                 "  movl $0xc0000080, %%ecx\n" /* EFER */
                 "  rdmsr\n"
                 "  btcl $8, %%eax\n" /* clear LME */
                 "  wrmsr\n"
                 "  movl %%cr4, %%eax\n"
                 "  btcl $5, %%eax\n" /* clear PAE */
                 "  movl %%eax, %%cr4\n"
                 "  movw %[ds16], %%ax\n"
                 "  movw %%ax, %%ds\n"
                 "  ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
                 ".code16\n"
                 "3:\n"
                 "  movl %%cr0, %%eax\n"
                 "  btcl $0, %%eax\n" /* clear PE */
                 "  movl %%eax, %%cr0\n"
                 "  ljmpl $0, $4f\n" /* jump to real mode */
                 "4:\n"
                 "  vmmcall\n"
                 "  movl %%cr0, %%eax\n"
                 "  btsl $0, %%eax\n" /* set PE */
                 "  movl %%eax, %%cr0\n"
                 "  ljmpl %[cs32], $5f\n" /* back to protected mode */
                 ".code32\n"
                 "5:\n"
                 "  movl %%cr4, %%eax\n"
                 "  btsl $5, %%eax\n" /* set PAE */
                 "  movl %%eax, %%cr4\n"
                 "  movl $0xc0000080, %%ecx\n" /* EFER */
                 "  rdmsr\n"
                 "  btsl $8, %%eax\n" /* set LME */
                 "  wrmsr\n"
                 "  movl %%cr0, %%eax\n"
                 "  btsl $31, %%eax\n" /* set PG */
                 "  movl %%eax, %%cr0\n"
                 "  ljmpl %[cs64], $6f\n" /* back to long mode */
                 ".code64\n\t"
                 "6:\n"
                 "  vmmcall\n"
                 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
                    [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
                 : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0 = test->vmcb->save.cr0;
    cr4 = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over the VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0 & 0x80000001) ||  /* CR0.PG, CR0.PE */
            (cr4 & 0x00000020) ||  /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    case 1:
        /* Test should be back in long mode now - check for this */
        if (((cr0 & 0x80000001) != 0x80000001) ||  /* CR0.PG, CR0.PE */
            ((cr4 & 0x00000020) != 0x00000020) ||  /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}

static void prepare_ioio(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
    test->scratch = 0;
    memset(io_bitmap, 0, 8192);
    io_bitmap[8192] = 0xFF;  /* intercept accesses that wrap past port 0xffff */
}

int get_test_stage(struct test *test)
{
    barrier();
    return test->scratch;
}

void inc_test_stage(struct test *test)
{
    barrier();
    test->scratch++;
    barrier();
}
/*
 * The IOPM holds one intercept bit per port; an access of N bytes traps if
 * any of the N consecutive bits is set. ioio_finished() below clears the
 * bits that triggered each exit, so every intercepted access here is
 * retried exactly once without being intercepted again.
 */
static void test_ioio(struct test *test)
{
    // stage 0, test IO pass
    inb(0x5000);
    outb(0x0, 0x5000);
    if (get_test_stage(test) != 0)
        goto fail;

    // test IO width, in/out
    io_bitmap[0] = 0xFF;
    inc_test_stage(test);
    inb(0x0);
    if (get_test_stage(test) != 2)
        goto fail;

    outw(0x0, 0x0);
    if (get_test_stage(test) != 3)
        goto fail;

    inl(0x0);
    if (get_test_stage(test) != 4)
        goto fail;

    // test low/high IO port
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inb(0x5000);
    if (get_test_stage(test) != 5)
        goto fail;

    io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
    inw(0x9000);
    if (get_test_stage(test) != 6)
        goto fail;

    // test partial pass
    io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
    inl(0x4FFF);
    if (get_test_stage(test) != 7)
        goto fail;

    // test across pages
    inc_test_stage(test);
    inl(0x7FFF);
    if (get_test_stage(test) != 8)
        goto fail;

    inc_test_stage(test);
    io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
    inl(0x7FFF);
    if (get_test_stage(test) != 10)
        goto fail;

    io_bitmap[0] = 0;
    inl(0xFFFF);
    if (get_test_stage(test) != 11)
        goto fail;

    io_bitmap[0] = 0xFF;
    io_bitmap[8192] = 0;
    inl(0xFFFF);
    inc_test_stage(test);
    if (get_test_stage(test) != 12)
        goto fail;

    return;

fail:
    printf("test failure, stage %d\n", get_test_stage(test));
    test->scratch = -1;
}

static bool ioio_finished(struct test *test)
{
    unsigned port, size;

    /* Only expect IOIO intercepts */
    if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL)
        return true;

    if (test->vmcb->control.exit_code != SVM_EXIT_IOIO)
        return true;

    /* one step forward */
    test->scratch += 1;

    port = test->vmcb->control.exit_info_1 >> 16;
    size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

    /* Uninhibit the access that just trapped, then let the guest retry it. */
    while (size--) {
        io_bitmap[port / 8] &= ~(1 << (port & 7));
        port++;
    }

    return false;
}

static bool check_ioio(struct test *test)
{
    memset(io_bitmap, 0, 8193);
    return test->scratch != -1;
}

static void prepare_asid_zero(struct test *test)
{
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}
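/*
 * Selective CR0-write intercept test. Per the AMD APM, a MOV-to-CR0 that
 * changes any bit other than CR0.TS or CR0.MP must raise an
 * SVM_EXIT_CR0_SEL_WRITE exit, so setting CR0.CD below must never reach
 * the instruction following the write.
 */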
static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write back */
    cr0 = read_cr0();
    cr0 |= (1UL << 30);  /* CR0.CD */
    write_cr0(cr0);

    /*
     * If we are here the test failed, and since we are not in guest mode
     * anymore we cannot trigger another intercept. Just exit for now.
     */
    printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
    exit(1);
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)null_test);

    *pte |= (1ULL << 63);  /* set NX in the nested PTE */
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    test->vmcb->save.efer |= (1 << 11);  /* EFER.NX */

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2);  /* clear user/supervisor */
}

static void npt_us_test(struct test *test)
{
    (void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = npt_get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x100000005ULL);
}

u64 save_pde;

static void npt_rsvd_prepare(struct test *test)
{
    u64 *pde;

    vmcb_ident(test->vmcb);
    pde = npt_get_pde((u64)null_test);

    save_pde = *pde;
    *pde = (1ULL << 19) | (1ULL << 7) | 0x27;  /* 2M page with a reserved bit set */
}

static bool npt_rsvd_check(struct test *test)
{
    u64 *pde = npt_get_pde((u64)null_test);

    *pde = save_pde;

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x10000001dULL);
}

static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0x80000);

    *pte &= ~(1ULL << 1);  /* clear read/write */
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void *)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = npt_get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void npt_rw_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_rw_pfwalk_check(struct test *test)
{
    u64 *pte = npt_get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x200000006ULL)
        && (test->vmcb->control.exit_info_2 == read_cr3());
}

static void npt_rsvd_pfwalk_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_pfwalk_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x200000006ULL);
}
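/*
 * Reads of L1 MMIO - here the local APIC version register at 0xfee00030 -
 * must work through the nested page table and return stable values.
 */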
static void npt_l1mmio_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void *)(0xfee00030UL);

    nested_apic_version1 = *data;
    nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct test *test)
{
    volatile u32 *data = (volatile void *)(0xfee00030);
    u32 lvr = *data;

    return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void npt_rw_l1mmio_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = npt_get_pte(0xfee00080);

    *pte &= ~(1ULL << 1);
}

static void npt_rw_l1mmio_test(struct test *test)
{
    volatile u32 *data = (volatile void *)(0xfee00080);

    *data = *data;
}

static bool npt_rw_l1mmio_check(struct test *test)
{
    u64 *pte = npt_get_pte(0xfee00080);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x100000007ULL);
}

static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    /* Jump over the VMMCALL instruction */
    test->vmcb->save.rip += 3;

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
           latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
           latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}

static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}
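/*
 * Time each SVM instruction with back-to-back RDTSCs. RDTSC is not a
 * serializing instruction, so the individual measurements are approximate.
 */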
static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
           latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
           latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
           latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
           latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "ioio", default_supported, prepare_ioio, test_ioio,
      ioio_finished, check_ioio },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, null_test,
      default_finished, npt_rsvd_pfwalk_check },
    { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, null_test,
      default_finished, npt_rw_pfwalk_check },
    { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, npt_l1mmio_test,
      default_finished, npt_l1mmio_check },
    { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, npt_rw_l1mmio_test,
      default_finished, npt_rw_l1mmio_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
};
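/*
 * SVM support is advertised in CPUID function 0x80000001, ECX bit 2,
 * which is exactly what the check below tests before running anything.
 */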
int main(int ac, char **av)
{
    int i, nr, passed, done;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

    if (!(cpuid(0x80000001).c & 4)) {
        printf("SVM not available\n");
        return 0;
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    passed = done = 0;
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        done += 1;
        passed += test_run(&tests[i], vmcb);
    }

    printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, done - passed);
    return passed == done ? 0 : 1;
}