1 #include "svm.h" 2 #include "libcflat.h" 3 #include "processor.h" 4 #include "desc.h" 5 #include "msr.h" 6 #include "vm.h" 7 #include "smp.h" 8 #include "types.h" 9 #include "io.h" 10 11 /* for the nested page table*/ 12 u64 *pml4e; 13 u64 *pdpe; 14 u64 *pde[4]; 15 u64 *pte[2048]; 16 void *scratch_page; 17 18 #define LATENCY_RUNS 1000000 19 20 u64 tsc_start; 21 u64 tsc_end; 22 23 u64 vmrun_sum, vmexit_sum; 24 u64 vmsave_sum, vmload_sum; 25 u64 stgi_sum, clgi_sum; 26 u64 latvmrun_max; 27 u64 latvmrun_min; 28 u64 latvmexit_max; 29 u64 latvmexit_min; 30 u64 latvmload_max; 31 u64 latvmload_min; 32 u64 latvmsave_max; 33 u64 latvmsave_min; 34 u64 latstgi_max; 35 u64 latstgi_min; 36 u64 latclgi_max; 37 u64 latclgi_min; 38 u64 runs; 39 40 u8 *io_bitmap; 41 u8 io_bitmap_area[16384]; 42 43 static bool npt_supported(void) 44 { 45 return cpuid(0x8000000A).d & 1; 46 } 47 48 static void setup_svm(void) 49 { 50 void *hsave = alloc_page(); 51 u64 *page, address; 52 int i,j; 53 54 wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave)); 55 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME); 56 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX); 57 58 scratch_page = alloc_page(); 59 60 io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095); 61 62 if (!npt_supported()) 63 return; 64 65 printf("NPT detected - running all tests with NPT enabled\n"); 66 67 /* 68 * Nested paging supported - Build a nested page table 69 * Build the page-table bottom-up and map everything with 4k pages 70 * to get enough granularity for the NPT unit-tests. 71 */ 72 73 address = 0; 74 75 /* PTE level */ 76 for (i = 0; i < 2048; ++i) { 77 page = alloc_page(); 78 79 for (j = 0; j < 512; ++j, address += 4096) 80 page[j] = address | 0x067ULL; 81 82 pte[i] = page; 83 } 84 85 /* PDE level */ 86 for (i = 0; i < 4; ++i) { 87 page = alloc_page(); 88 89 for (j = 0; j < 512; ++j) 90 page[j] = (u64)pte[(i * 514) + j] | 0x027ULL; 91 92 pde[i] = page; 93 } 94 95 /* PDPe level */ 96 pdpe = alloc_page(); 97 for (i = 0; i < 4; ++i) 98 pdpe[i] = ((u64)(pde[i])) | 0x27; 99 100 /* PML4e level */ 101 pml4e = alloc_page(); 102 pml4e[0] = ((u64)pdpe) | 0x27; 103 } 104 105 static u64 *npt_get_pte(u64 address) 106 { 107 int i1, i2; 108 109 address >>= 12; 110 i1 = (address >> 9) & 0x7ff; 111 i2 = address & 0x1ff; 112 113 return &pte[i1][i2]; 114 } 115 116 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector, 117 u64 base, u32 limit, u32 attr) 118 { 119 seg->selector = selector; 120 seg->attrib = attr; 121 seg->limit = limit; 122 seg->base = base; 123 } 124 125 static void vmcb_ident(struct vmcb *vmcb) 126 { 127 u64 vmcb_phys = virt_to_phys(vmcb); 128 struct vmcb_save_area *save = &vmcb->save; 129 struct vmcb_control_area *ctrl = &vmcb->control; 130 u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 131 | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; 132 u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 133 | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; 134 struct descriptor_table_ptr desc_table_ptr; 135 136 memset(vmcb, 0, sizeof(*vmcb)); 137 asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory"); 138 vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr); 139 vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr); 140 vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr); 141 vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr); 142 sgdt(&desc_table_ptr); 143 vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0); 144 sidt(&desc_table_ptr); 145 vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0); 146 ctrl->asid = 1; 147 save->cpl = 0; 148 save->efer = rdmsr(MSR_EFER); 149 save->cr4 = read_cr4(); 150 save->cr3 = read_cr3(); 151 save->cr0 = read_cr0(); 152 save->dr7 = read_dr7(); 153 save->dr6 = read_dr6(); 154 save->cr2 = read_cr2(); 155 save->g_pat = rdmsr(MSR_IA32_CR_PAT); 156 save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 157 ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL); 158 ctrl->iopm_base_pa = virt_to_phys(io_bitmap); 159 160 if (npt_supported()) { 161 ctrl->nested_ctl = 1; 162 ctrl->nested_cr3 = (u64)pml4e; 163 } 164 } 165 166 struct test { 167 const char *name; 168 bool (*supported)(void); 169 void (*prepare)(struct test *test); 170 void (*guest_func)(struct test *test); 171 bool (*finished)(struct test *test); 172 bool (*succeeded)(struct test *test); 173 struct vmcb *vmcb; 174 int exits; 175 ulong scratch; 176 }; 177 178 static void test_thunk(struct test *test) 179 { 180 test->guest_func(test); 181 asm volatile ("vmmcall" : : : "memory"); 182 } 183 184 struct regs { 185 u64 rax; 186 u64 rcx; 187 u64 rdx; 188 u64 rbx; 189 u64 cr2; 190 u64 rbp; 191 u64 rsi; 192 u64 rdi; 193 u64 r8; 194 u64 r9; 195 u64 r10; 196 u64 r11; 197 u64 r12; 198 u64 r13; 199 u64 r14; 200 u64 r15; 201 u64 rflags; 202 }; 203 204 struct regs regs; 205 206 // rax handled specially below 207 208 #define SAVE_GPR_C \ 209 "xchg %%rbx, regs+0x8\n\t" \ 210 "xchg %%rcx, regs+0x10\n\t" \ 211 "xchg %%rdx, regs+0x18\n\t" \ 212 "xchg %%rbp, regs+0x28\n\t" \ 213 "xchg %%rsi, regs+0x30\n\t" \ 214 "xchg %%rdi, regs+0x38\n\t" \ 215 "xchg %%r8, regs+0x40\n\t" \ 216 "xchg %%r9, regs+0x48\n\t" \ 217 "xchg %%r10, regs+0x50\n\t" \ 218 "xchg %%r11, regs+0x58\n\t" \ 219 "xchg %%r12, regs+0x60\n\t" \ 220 "xchg %%r13, regs+0x68\n\t" \ 221 "xchg %%r14, regs+0x70\n\t" \ 222 "xchg %%r15, regs+0x78\n\t" 223 224 #define LOAD_GPR_C SAVE_GPR_C 225 226 static bool test_run(struct test *test, struct vmcb *vmcb) 227 { 228 u64 vmcb_phys = virt_to_phys(vmcb); 229 u64 guest_stack[10000]; 230 bool success; 231 232 test->vmcb = vmcb; 233 test->prepare(test); 234 vmcb->save.rip = (ulong)test_thunk; 235 vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack)); 236 regs.rdi = (ulong)test; 237 do { 238 tsc_start = rdtsc(); 239 asm volatile ( 240 "clgi \n\t" 241 "vmload \n\t" 242 "mov regs+0x80, %%r15\n\t" // rflags 243 "mov %%r15, 0x170(%0)\n\t" 244 "mov regs, %%r15\n\t" // rax 245 "mov %%r15, 0x1f8(%0)\n\t" 246 LOAD_GPR_C 247 "vmrun \n\t" 248 SAVE_GPR_C 249 "mov 0x170(%0), %%r15\n\t" // rflags 250 "mov %%r15, regs+0x80\n\t" 251 "mov 0x1f8(%0), %%r15\n\t" // rax 252 "mov %%r15, regs\n\t" 253 "vmsave \n\t" 254 "stgi" 255 : : "a"(vmcb_phys) 256 : "rbx", "rcx", "rdx", "rsi", 257 "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15", 258 "memory"); 259 tsc_end = rdtsc(); 260 ++test->exits; 261 } while (!test->finished(test)); 262 263 264 success = test->succeeded(test); 265 266 printf("%s: %s\n", test->name, success ? "PASS" : "FAIL"); 267 268 return success; 269 } 270 271 static bool smp_supported(void) 272 { 273 return cpu_count() > 1; 274 } 275 276 static bool default_supported(void) 277 { 278 return true; 279 } 280 281 static void default_prepare(struct test *test) 282 { 283 vmcb_ident(test->vmcb); 284 cli(); 285 } 286 287 static bool default_finished(struct test *test) 288 { 289 return true; /* one vmexit */ 290 } 291 292 static void null_test(struct test *test) 293 { 294 } 295 296 static bool null_check(struct test *test) 297 { 298 return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL; 299 } 300 301 static void prepare_no_vmrun_int(struct test *test) 302 { 303 test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN); 304 } 305 306 static bool check_no_vmrun_int(struct test *test) 307 { 308 return test->vmcb->control.exit_code == SVM_EXIT_ERR; 309 } 310 311 static void test_vmrun(struct test *test) 312 { 313 asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb))); 314 } 315 316 static bool check_vmrun(struct test *test) 317 { 318 return test->vmcb->control.exit_code == SVM_EXIT_VMRUN; 319 } 320 321 static void prepare_cr3_intercept(struct test *test) 322 { 323 default_prepare(test); 324 test->vmcb->control.intercept_cr_read |= 1 << 3; 325 } 326 327 static void test_cr3_intercept(struct test *test) 328 { 329 asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory"); 330 } 331 332 static bool check_cr3_intercept(struct test *test) 333 { 334 return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3; 335 } 336 337 static bool check_cr3_nointercept(struct test *test) 338 { 339 return null_check(test) && test->scratch == read_cr3(); 340 } 341 342 static void corrupt_cr3_intercept_bypass(void *_test) 343 { 344 struct test *test = _test; 345 extern volatile u32 mmio_insn; 346 347 while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2)) 348 pause(); 349 pause(); 350 pause(); 351 pause(); 352 mmio_insn = 0x90d8200f; // mov %cr3, %rax; nop 353 } 354 355 static void prepare_cr3_intercept_bypass(struct test *test) 356 { 357 default_prepare(test); 358 test->vmcb->control.intercept_cr_read |= 1 << 3; 359 on_cpu_async(1, corrupt_cr3_intercept_bypass, test); 360 } 361 362 static void test_cr3_intercept_bypass(struct test *test) 363 { 364 ulong a = 0xa0000; 365 366 test->scratch = 1; 367 while (test->scratch != 2) 368 barrier(); 369 370 asm volatile ("mmio_insn: mov %0, (%0); nop" 371 : "+a"(a) : : "memory"); 372 test->scratch = a; 373 } 374 375 static bool next_rip_supported(void) 376 { 377 return (cpuid(SVM_CPUID_FUNC).d & 8); 378 } 379 380 static void prepare_next_rip(struct test *test) 381 { 382 test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC); 383 } 384 385 386 static void test_next_rip(struct test *test) 387 { 388 asm volatile ("rdtsc\n\t" 389 ".globl exp_next_rip\n\t" 390 "exp_next_rip:\n\t" ::: "eax", "edx"); 391 } 392 393 static bool check_next_rip(struct test *test) 394 { 395 extern char exp_next_rip; 396 unsigned long address = (unsigned long)&exp_next_rip; 397 398 return address == test->vmcb->control.next_rip; 399 } 400 401 static void prepare_mode_switch(struct test *test) 402 { 403 test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR) 404 | (1ULL << UD_VECTOR) 405 | (1ULL << DF_VECTOR) 406 | (1ULL << PF_VECTOR); 407 test->scratch = 0; 408 } 409 410 static void test_mode_switch(struct test *test) 411 { 412 asm volatile(" cli\n" 413 " ljmp *1f\n" /* jump to 32-bit code segment */ 414 "1:\n" 415 " .long 2f\n" 416 " .long " xstr(KERNEL_CS32) "\n" 417 ".code32\n" 418 "2:\n" 419 " movl %%cr0, %%eax\n" 420 " btcl $31, %%eax\n" /* clear PG */ 421 " movl %%eax, %%cr0\n" 422 " movl $0xc0000080, %%ecx\n" /* EFER */ 423 " rdmsr\n" 424 " btcl $8, %%eax\n" /* clear LME */ 425 " wrmsr\n" 426 " movl %%cr4, %%eax\n" 427 " btcl $5, %%eax\n" /* clear PAE */ 428 " movl %%eax, %%cr4\n" 429 " movw %[ds16], %%ax\n" 430 " movw %%ax, %%ds\n" 431 " ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */ 432 ".code16\n" 433 "3:\n" 434 " movl %%cr0, %%eax\n" 435 " btcl $0, %%eax\n" /* clear PE */ 436 " movl %%eax, %%cr0\n" 437 " ljmpl $0, $4f\n" /* jump to real-mode */ 438 "4:\n" 439 " vmmcall\n" 440 " movl %%cr0, %%eax\n" 441 " btsl $0, %%eax\n" /* set PE */ 442 " movl %%eax, %%cr0\n" 443 " ljmpl %[cs32], $5f\n" /* back to protected mode */ 444 ".code32\n" 445 "5:\n" 446 " movl %%cr4, %%eax\n" 447 " btsl $5, %%eax\n" /* set PAE */ 448 " movl %%eax, %%cr4\n" 449 " movl $0xc0000080, %%ecx\n" /* EFER */ 450 " rdmsr\n" 451 " btsl $8, %%eax\n" /* set LME */ 452 " wrmsr\n" 453 " movl %%cr0, %%eax\n" 454 " btsl $31, %%eax\n" /* set PG */ 455 " movl %%eax, %%cr0\n" 456 " ljmpl %[cs64], $6f\n" /* back to long mode */ 457 ".code64\n\t" 458 "6:\n" 459 " vmmcall\n" 460 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16), 461 [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64) 462 : "rax", "rbx", "rcx", "rdx", "memory"); 463 } 464 465 static bool mode_switch_finished(struct test *test) 466 { 467 u64 cr0, cr4, efer; 468 469 cr0 = test->vmcb->save.cr0; 470 cr4 = test->vmcb->save.cr4; 471 efer = test->vmcb->save.efer; 472 473 /* Only expect VMMCALL intercepts */ 474 if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) 475 return true; 476 477 /* Jump over VMMCALL instruction */ 478 test->vmcb->save.rip += 3; 479 480 /* Do sanity checks */ 481 switch (test->scratch) { 482 case 0: 483 /* Test should be in real mode now - check for this */ 484 if ((cr0 & 0x80000001) || /* CR0.PG, CR0.PE */ 485 (cr4 & 0x00000020) || /* CR4.PAE */ 486 (efer & 0x00000500)) /* EFER.LMA, EFER.LME */ 487 return true; 488 break; 489 case 2: 490 /* Test should be back in long-mode now - check for this */ 491 if (((cr0 & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */ 492 ((cr4 & 0x00000020) != 0x00000020) || /* CR4.PAE */ 493 ((efer & 0x00000500) != 0x00000500)) /* EFER.LMA, EFER.LME */ 494 return true; 495 break; 496 } 497 498 /* one step forward */ 499 test->scratch += 1; 500 501 return test->scratch == 2; 502 } 503 504 static bool check_mode_switch(struct test *test) 505 { 506 return test->scratch == 2; 507 } 508 509 static void prepare_ioio(struct test *test) 510 { 511 test->vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT); 512 test->scratch = 0; 513 memset(io_bitmap, 0, 8192); 514 io_bitmap[8192] = 0xFF; 515 } 516 517 int get_test_stage(struct test *test) 518 { 519 barrier(); 520 return test->scratch; 521 } 522 523 void inc_test_stage(struct test *test) 524 { 525 barrier(); 526 test->scratch++; 527 barrier(); 528 } 529 530 static void test_ioio(struct test *test) 531 { 532 // stage 0, test IO pass 533 inb(0x5000); 534 outb(0x0, 0x5000); 535 if (get_test_stage(test) != 0) 536 goto fail; 537 538 // test IO width, in/out 539 io_bitmap[0] = 0xFF; 540 inc_test_stage(test); 541 inb(0x0); 542 if (get_test_stage(test) != 2) 543 goto fail; 544 545 outw(0x0, 0x0); 546 if (get_test_stage(test) != 3) 547 goto fail; 548 549 inl(0x0); 550 if (get_test_stage(test) != 4) 551 goto fail; 552 553 // test low/high IO port 554 io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 555 inb(0x5000); 556 if (get_test_stage(test) != 5) 557 goto fail; 558 559 io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8)); 560 inw(0x9000); 561 if (get_test_stage(test) != 6) 562 goto fail; 563 564 // test partial pass 565 io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 566 inl(0x4FFF); 567 if (get_test_stage(test) != 7) 568 goto fail; 569 570 // test across pages 571 inc_test_stage(test); 572 inl(0x7FFF); 573 if (get_test_stage(test) != 8) 574 goto fail; 575 576 inc_test_stage(test); 577 io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8); 578 inl(0x7FFF); 579 if (get_test_stage(test) != 10) 580 goto fail; 581 582 io_bitmap[0] = 0; 583 inl(0xFFFF); 584 if (get_test_stage(test) != 11) 585 goto fail; 586 587 io_bitmap[0] = 0xFF; 588 io_bitmap[8192] = 0; 589 inl(0xFFFF); 590 inc_test_stage(test); 591 if (get_test_stage(test) != 12) 592 goto fail; 593 594 return; 595 596 fail: 597 printf("test failure, stage %d\n", get_test_stage(test)); 598 test->scratch = -1; 599 } 600 601 static bool ioio_finished(struct test *test) 602 { 603 unsigned port, size; 604 605 /* Only expect IOIO intercepts */ 606 if (test->vmcb->control.exit_code == SVM_EXIT_VMMCALL) 607 return true; 608 609 if (test->vmcb->control.exit_code != SVM_EXIT_IOIO) 610 return true; 611 612 /* one step forward */ 613 test->scratch += 1; 614 615 port = test->vmcb->control.exit_info_1 >> 16; 616 size = (test->vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7; 617 618 while (size--) { 619 io_bitmap[port / 8] &= ~(1 << (port & 7)); 620 port++; 621 } 622 623 return false; 624 } 625 626 static bool check_ioio(struct test *test) 627 { 628 memset(io_bitmap, 0, 8193); 629 return test->scratch != -1; 630 } 631 632 static void prepare_asid_zero(struct test *test) 633 { 634 test->vmcb->control.asid = 0; 635 } 636 637 static void test_asid_zero(struct test *test) 638 { 639 asm volatile ("vmmcall\n\t"); 640 } 641 642 static bool check_asid_zero(struct test *test) 643 { 644 return test->vmcb->control.exit_code == SVM_EXIT_ERR; 645 } 646 647 static void sel_cr0_bug_prepare(struct test *test) 648 { 649 vmcb_ident(test->vmcb); 650 test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0); 651 } 652 653 static bool sel_cr0_bug_finished(struct test *test) 654 { 655 return true; 656 } 657 658 static void sel_cr0_bug_test(struct test *test) 659 { 660 unsigned long cr0; 661 662 /* read cr0, clear CD, and write back */ 663 cr0 = read_cr0(); 664 cr0 |= (1UL << 30); 665 write_cr0(cr0); 666 667 /* 668 * If we are here the test failed, not sure what to do now because we 669 * are not in guest-mode anymore so we can't trigger an intercept. 670 * Trigger a tripple-fault for now. 671 */ 672 printf("sel_cr0 test failed. Can not recover from this - exiting\n"); 673 exit(1); 674 } 675 676 static bool sel_cr0_bug_check(struct test *test) 677 { 678 return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE; 679 } 680 681 static void npt_nx_prepare(struct test *test) 682 { 683 684 u64 *pte; 685 686 vmcb_ident(test->vmcb); 687 pte = npt_get_pte((u64)null_test); 688 689 *pte |= (1ULL << 63); 690 } 691 692 static bool npt_nx_check(struct test *test) 693 { 694 u64 *pte = npt_get_pte((u64)null_test); 695 696 *pte &= ~(1ULL << 63); 697 698 test->vmcb->save.efer |= (1 << 11); 699 700 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 701 && (test->vmcb->control.exit_info_1 == 0x15); 702 } 703 704 static void npt_us_prepare(struct test *test) 705 { 706 u64 *pte; 707 708 vmcb_ident(test->vmcb); 709 pte = npt_get_pte((u64)scratch_page); 710 711 *pte &= ~(1ULL << 2); 712 } 713 714 static void npt_us_test(struct test *test) 715 { 716 (void) *(volatile u64 *)scratch_page; 717 } 718 719 static bool npt_us_check(struct test *test) 720 { 721 u64 *pte = npt_get_pte((u64)scratch_page); 722 723 *pte |= (1ULL << 2); 724 725 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 726 && (test->vmcb->control.exit_info_1 == 0x05); 727 } 728 729 static void npt_rsvd_prepare(struct test *test) 730 { 731 732 vmcb_ident(test->vmcb); 733 734 pdpe[0] |= (1ULL << 8); 735 } 736 737 static bool npt_rsvd_check(struct test *test) 738 { 739 pdpe[0] &= ~(1ULL << 8); 740 741 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 742 && (test->vmcb->control.exit_info_1 == 0x0f); 743 } 744 745 static void npt_rw_prepare(struct test *test) 746 { 747 748 u64 *pte; 749 750 vmcb_ident(test->vmcb); 751 pte = npt_get_pte(0x80000); 752 753 *pte &= ~(1ULL << 1); 754 } 755 756 static void npt_rw_test(struct test *test) 757 { 758 u64 *data = (void*)(0x80000); 759 760 *data = 0; 761 } 762 763 static bool npt_rw_check(struct test *test) 764 { 765 u64 *pte = npt_get_pte(0x80000); 766 767 *pte |= (1ULL << 1); 768 769 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 770 && (test->vmcb->control.exit_info_1 == 0x07); 771 } 772 773 static void npt_pfwalk_prepare(struct test *test) 774 { 775 776 u64 *pte; 777 778 vmcb_ident(test->vmcb); 779 pte = npt_get_pte(read_cr3()); 780 781 *pte &= ~(1ULL << 1); 782 } 783 784 static bool npt_pfwalk_check(struct test *test) 785 { 786 u64 *pte = npt_get_pte(read_cr3()); 787 788 *pte |= (1ULL << 1); 789 790 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 791 && (test->vmcb->control.exit_info_1 == 0x7) 792 && (test->vmcb->control.exit_info_2 == read_cr3()); 793 } 794 795 static void latency_prepare(struct test *test) 796 { 797 default_prepare(test); 798 runs = LATENCY_RUNS; 799 latvmrun_min = latvmexit_min = -1ULL; 800 latvmrun_max = latvmexit_max = 0; 801 vmrun_sum = vmexit_sum = 0; 802 } 803 804 static void latency_test(struct test *test) 805 { 806 u64 cycles; 807 808 start: 809 tsc_end = rdtsc(); 810 811 cycles = tsc_end - tsc_start; 812 813 if (cycles > latvmrun_max) 814 latvmrun_max = cycles; 815 816 if (cycles < latvmrun_min) 817 latvmrun_min = cycles; 818 819 vmrun_sum += cycles; 820 821 tsc_start = rdtsc(); 822 823 asm volatile ("vmmcall" : : : "memory"); 824 goto start; 825 } 826 827 static bool latency_finished(struct test *test) 828 { 829 u64 cycles; 830 831 tsc_end = rdtsc(); 832 833 cycles = tsc_end - tsc_start; 834 835 if (cycles > latvmexit_max) 836 latvmexit_max = cycles; 837 838 if (cycles < latvmexit_min) 839 latvmexit_min = cycles; 840 841 vmexit_sum += cycles; 842 843 test->vmcb->save.rip += 3; 844 845 runs -= 1; 846 847 return runs == 0; 848 } 849 850 static bool latency_check(struct test *test) 851 { 852 printf(" Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max, 853 latvmrun_min, vmrun_sum / LATENCY_RUNS); 854 printf(" Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max, 855 latvmexit_min, vmexit_sum / LATENCY_RUNS); 856 return true; 857 } 858 859 static void lat_svm_insn_prepare(struct test *test) 860 { 861 default_prepare(test); 862 runs = LATENCY_RUNS; 863 latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL; 864 latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0; 865 vmload_sum = vmsave_sum = stgi_sum = clgi_sum; 866 } 867 868 static bool lat_svm_insn_finished(struct test *test) 869 { 870 u64 vmcb_phys = virt_to_phys(test->vmcb); 871 u64 cycles; 872 873 for ( ; runs != 0; runs--) { 874 tsc_start = rdtsc(); 875 asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory"); 876 cycles = rdtsc() - tsc_start; 877 if (cycles > latvmload_max) 878 latvmload_max = cycles; 879 if (cycles < latvmload_min) 880 latvmload_min = cycles; 881 vmload_sum += cycles; 882 883 tsc_start = rdtsc(); 884 asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory"); 885 cycles = rdtsc() - tsc_start; 886 if (cycles > latvmsave_max) 887 latvmsave_max = cycles; 888 if (cycles < latvmsave_min) 889 latvmsave_min = cycles; 890 vmsave_sum += cycles; 891 892 tsc_start = rdtsc(); 893 asm volatile("stgi\n\t"); 894 cycles = rdtsc() - tsc_start; 895 if (cycles > latstgi_max) 896 latstgi_max = cycles; 897 if (cycles < latstgi_min) 898 latstgi_min = cycles; 899 stgi_sum += cycles; 900 901 tsc_start = rdtsc(); 902 asm volatile("clgi\n\t"); 903 cycles = rdtsc() - tsc_start; 904 if (cycles > latclgi_max) 905 latclgi_max = cycles; 906 if (cycles < latclgi_min) 907 latclgi_min = cycles; 908 clgi_sum += cycles; 909 } 910 911 return true; 912 } 913 914 static bool lat_svm_insn_check(struct test *test) 915 { 916 printf(" Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max, 917 latvmload_min, vmload_sum / LATENCY_RUNS); 918 printf(" Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max, 919 latvmsave_min, vmsave_sum / LATENCY_RUNS); 920 printf(" Latency STGI: max: %d min: %d avg: %d\n", latstgi_max, 921 latstgi_min, stgi_sum / LATENCY_RUNS); 922 printf(" Latency CLGI: max: %d min: %d avg: %d\n", latclgi_max, 923 latclgi_min, clgi_sum / LATENCY_RUNS); 924 return true; 925 } 926 static struct test tests[] = { 927 { "null", default_supported, default_prepare, null_test, 928 default_finished, null_check }, 929 { "vmrun", default_supported, default_prepare, test_vmrun, 930 default_finished, check_vmrun }, 931 { "ioio", default_supported, prepare_ioio, test_ioio, 932 ioio_finished, check_ioio }, 933 { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 934 null_test, default_finished, check_no_vmrun_int }, 935 { "cr3 read intercept", default_supported, prepare_cr3_intercept, 936 test_cr3_intercept, default_finished, check_cr3_intercept }, 937 { "cr3 read nointercept", default_supported, default_prepare, 938 test_cr3_intercept, default_finished, check_cr3_nointercept }, 939 { "cr3 read intercept emulate", smp_supported, 940 prepare_cr3_intercept_bypass, test_cr3_intercept_bypass, 941 default_finished, check_cr3_intercept }, 942 { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip, 943 default_finished, check_next_rip }, 944 { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch, 945 mode_switch_finished, check_mode_switch }, 946 { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero, 947 default_finished, check_asid_zero }, 948 { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test, 949 sel_cr0_bug_finished, sel_cr0_bug_check }, 950 { "npt_nx", npt_supported, npt_nx_prepare, null_test, 951 default_finished, npt_nx_check }, 952 { "npt_us", npt_supported, npt_us_prepare, npt_us_test, 953 default_finished, npt_us_check }, 954 { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test, 955 default_finished, npt_rsvd_check }, 956 { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test, 957 default_finished, npt_rw_check }, 958 { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test, 959 default_finished, npt_pfwalk_check }, 960 { "latency_run_exit", default_supported, latency_prepare, latency_test, 961 latency_finished, latency_check }, 962 { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test, 963 lat_svm_insn_finished, lat_svm_insn_check }, 964 }; 965 966 int main(int ac, char **av) 967 { 968 int i, nr, passed, done; 969 struct vmcb *vmcb; 970 971 setup_vm(); 972 smp_init(); 973 974 if (!(cpuid(0x80000001).c & 4)) { 975 printf("SVM not availble\n"); 976 return 0; 977 } 978 979 setup_svm(); 980 981 vmcb = alloc_page(); 982 983 nr = ARRAY_SIZE(tests); 984 passed = done = 0; 985 for (i = 0; i < nr; ++i) { 986 if (!tests[i].supported()) 987 continue; 988 done += 1; 989 passed += test_run(&tests[i], vmcb); 990 } 991 992 printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed)); 993 return passed == done ? 0 : 1; 994 } 995