1 #include "svm.h" 2 #include "libcflat.h" 3 #include "processor.h" 4 #include "desc.h" 5 #include "msr.h" 6 #include "vm.h" 7 #include "smp.h" 8 #include "types.h" 9 10 /* for the nested page table*/ 11 u64 *pml4e; 12 u64 *pdpe; 13 u64 *pde[4]; 14 u64 *pte[2048]; 15 void *scratch_page; 16 17 #define LATENCY_RUNS 1000000 18 19 u64 tsc_start; 20 u64 tsc_end; 21 22 u64 vmrun_sum, vmexit_sum; 23 u64 vmsave_sum, vmload_sum; 24 u64 stgi_sum, clgi_sum; 25 u64 latvmrun_max; 26 u64 latvmrun_min; 27 u64 latvmexit_max; 28 u64 latvmexit_min; 29 u64 latvmload_max; 30 u64 latvmload_min; 31 u64 latvmsave_max; 32 u64 latvmsave_min; 33 u64 latstgi_max; 34 u64 latstgi_min; 35 u64 latclgi_max; 36 u64 latclgi_min; 37 u64 runs; 38 39 static bool npt_supported(void) 40 { 41 return cpuid(0x8000000A).d & 1; 42 } 43 44 static void setup_svm(void) 45 { 46 void *hsave = alloc_page(); 47 u64 *page, address; 48 int i,j; 49 50 wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave)); 51 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME); 52 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX); 53 54 scratch_page = alloc_page(); 55 56 if (!npt_supported()) 57 return; 58 59 printf("NPT detected - running all tests with NPT enabled\n"); 60 61 /* 62 * Nested paging supported - Build a nested page table 63 * Build the page-table bottom-up and map everything with 4k pages 64 * to get enough granularity for the NPT unit-tests. 65 */ 66 67 address = 0; 68 69 /* PTE level */ 70 for (i = 0; i < 2048; ++i) { 71 page = alloc_page(); 72 73 for (j = 0; j < 512; ++j, address += 4096) 74 page[j] = address | 0x067ULL; 75 76 pte[i] = page; 77 } 78 79 /* PDE level */ 80 for (i = 0; i < 4; ++i) { 81 page = alloc_page(); 82 83 for (j = 0; j < 512; ++j) 84 page[j] = (u64)pte[(i * 514) + j] | 0x027ULL; 85 86 pde[i] = page; 87 } 88 89 /* PDPe level */ 90 pdpe = alloc_page(); 91 for (i = 0; i < 4; ++i) 92 pdpe[i] = ((u64)(pde[i])) | 0x27; 93 94 /* PML4e level */ 95 pml4e = alloc_page(); 96 pml4e[0] = ((u64)pdpe) | 0x27; 97 } 98 99 static u64 *npt_get_pte(u64 address) 100 { 101 int i1, i2; 102 103 address >>= 12; 104 i1 = (address >> 9) & 0x7ff; 105 i2 = address & 0x1ff; 106 107 return &pte[i1][i2]; 108 } 109 110 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector, 111 u64 base, u32 limit, u32 attr) 112 { 113 seg->selector = selector; 114 seg->attrib = attr; 115 seg->limit = limit; 116 seg->base = base; 117 } 118 119 static void vmcb_ident(struct vmcb *vmcb) 120 { 121 u64 vmcb_phys = virt_to_phys(vmcb); 122 struct vmcb_save_area *save = &vmcb->save; 123 struct vmcb_control_area *ctrl = &vmcb->control; 124 u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 125 | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; 126 u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK 127 | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; 128 struct descriptor_table_ptr desc_table_ptr; 129 130 memset(vmcb, 0, sizeof(*vmcb)); 131 asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory"); 132 vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr); 133 vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr); 134 vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr); 135 vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr); 136 sgdt(&desc_table_ptr); 137 vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0); 138 sidt(&desc_table_ptr); 139 vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0); 140 ctrl->asid = 1; 141 save->cpl = 0; 142 save->efer = rdmsr(MSR_EFER); 143 save->cr4 = read_cr4(); 144 save->cr3 = read_cr3(); 145 save->cr0 = read_cr0(); 146 save->dr7 = read_dr7(); 147 save->dr6 = read_dr6(); 148 save->cr2 = read_cr2(); 149 save->g_pat = rdmsr(MSR_IA32_CR_PAT); 150 save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 151 ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL); 152 153 if (npt_supported()) { 154 ctrl->nested_ctl = 1; 155 ctrl->nested_cr3 = (u64)pml4e; 156 } 157 } 158 159 struct test { 160 const char *name; 161 bool (*supported)(void); 162 void (*prepare)(struct test *test); 163 void (*guest_func)(struct test *test); 164 bool (*finished)(struct test *test); 165 bool (*succeeded)(struct test *test); 166 struct vmcb *vmcb; 167 int exits; 168 ulong scratch; 169 }; 170 171 static void test_thunk(struct test *test) 172 { 173 test->guest_func(test); 174 asm volatile ("vmmcall" : : : "memory"); 175 } 176 177 static bool test_run(struct test *test, struct vmcb *vmcb) 178 { 179 u64 vmcb_phys = virt_to_phys(vmcb); 180 u64 guest_stack[10000]; 181 bool success; 182 183 test->vmcb = vmcb; 184 test->prepare(test); 185 vmcb->save.rip = (ulong)test_thunk; 186 vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack)); 187 do { 188 tsc_start = rdtsc(); 189 asm volatile ( 190 "clgi \n\t" 191 "vmload \n\t" 192 "push %%rbp \n\t" 193 "push %1 \n\t" 194 "vmrun \n\t" 195 "pop %1 \n\t" 196 "pop %%rbp \n\t" 197 "vmsave \n\t" 198 "stgi" 199 : : "a"(vmcb_phys), "D"(test) 200 : "rbx", "rcx", "rdx", "rsi", 201 "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15", 202 "memory"); 203 tsc_end = rdtsc(); 204 ++test->exits; 205 } while (!test->finished(test)); 206 207 208 success = test->succeeded(test); 209 210 printf("%s: %s\n", test->name, success ? "PASS" : "FAIL"); 211 212 return success; 213 } 214 215 static bool smp_supported(void) 216 { 217 return cpu_count() > 1; 218 } 219 220 static bool default_supported(void) 221 { 222 return true; 223 } 224 225 static void default_prepare(struct test *test) 226 { 227 vmcb_ident(test->vmcb); 228 cli(); 229 } 230 231 static bool default_finished(struct test *test) 232 { 233 return true; /* one vmexit */ 234 } 235 236 static void null_test(struct test *test) 237 { 238 } 239 240 static bool null_check(struct test *test) 241 { 242 return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL; 243 } 244 245 static void prepare_no_vmrun_int(struct test *test) 246 { 247 test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN); 248 } 249 250 static bool check_no_vmrun_int(struct test *test) 251 { 252 return test->vmcb->control.exit_code == SVM_EXIT_ERR; 253 } 254 255 static void test_vmrun(struct test *test) 256 { 257 asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb))); 258 } 259 260 static bool check_vmrun(struct test *test) 261 { 262 return test->vmcb->control.exit_code == SVM_EXIT_VMRUN; 263 } 264 265 static void prepare_cr3_intercept(struct test *test) 266 { 267 default_prepare(test); 268 test->vmcb->control.intercept_cr_read |= 1 << 3; 269 } 270 271 static void test_cr3_intercept(struct test *test) 272 { 273 asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory"); 274 } 275 276 static bool check_cr3_intercept(struct test *test) 277 { 278 return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3; 279 } 280 281 static bool check_cr3_nointercept(struct test *test) 282 { 283 return null_check(test) && test->scratch == read_cr3(); 284 } 285 286 static void corrupt_cr3_intercept_bypass(void *_test) 287 { 288 struct test *test = _test; 289 extern volatile u32 mmio_insn; 290 291 while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2)) 292 pause(); 293 pause(); 294 pause(); 295 pause(); 296 mmio_insn = 0x90d8200f; // mov %cr3, %rax; nop 297 } 298 299 static void prepare_cr3_intercept_bypass(struct test *test) 300 { 301 default_prepare(test); 302 test->vmcb->control.intercept_cr_read |= 1 << 3; 303 on_cpu_async(1, corrupt_cr3_intercept_bypass, test); 304 } 305 306 static void test_cr3_intercept_bypass(struct test *test) 307 { 308 ulong a = 0xa0000; 309 310 test->scratch = 1; 311 while (test->scratch != 2) 312 barrier(); 313 314 asm volatile ("mmio_insn: mov %0, (%0); nop" 315 : "+a"(a) : : "memory"); 316 test->scratch = a; 317 } 318 319 static bool next_rip_supported(void) 320 { 321 return (cpuid(SVM_CPUID_FUNC).d & 8); 322 } 323 324 static void prepare_next_rip(struct test *test) 325 { 326 test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC); 327 } 328 329 330 static void test_next_rip(struct test *test) 331 { 332 asm volatile ("rdtsc\n\t" 333 ".globl exp_next_rip\n\t" 334 "exp_next_rip:\n\t" ::: "eax", "edx"); 335 } 336 337 static bool check_next_rip(struct test *test) 338 { 339 extern char exp_next_rip; 340 unsigned long address = (unsigned long)&exp_next_rip; 341 342 return address == test->vmcb->control.next_rip; 343 } 344 345 static void prepare_mode_switch(struct test *test) 346 { 347 test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR) 348 | (1ULL << UD_VECTOR) 349 | (1ULL << DF_VECTOR) 350 | (1ULL << PF_VECTOR); 351 test->scratch = 0; 352 } 353 354 static void test_mode_switch(struct test *test) 355 { 356 asm volatile(" cli\n" 357 " ljmp *1f\n" /* jump to 32-bit code segment */ 358 "1:\n" 359 " .long 2f\n" 360 " .long " xstr(KERNEL_CS32) "\n" 361 ".code32\n" 362 "2:\n" 363 " movl %%cr0, %%eax\n" 364 " btcl $31, %%eax\n" /* clear PG */ 365 " movl %%eax, %%cr0\n" 366 " movl $0xc0000080, %%ecx\n" /* EFER */ 367 " rdmsr\n" 368 " btcl $8, %%eax\n" /* clear LME */ 369 " wrmsr\n" 370 " movl %%cr4, %%eax\n" 371 " btcl $5, %%eax\n" /* clear PAE */ 372 " movl %%eax, %%cr4\n" 373 " movw %[ds16], %%ax\n" 374 " movw %%ax, %%ds\n" 375 " ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */ 376 ".code16\n" 377 "3:\n" 378 " movl %%cr0, %%eax\n" 379 " btcl $0, %%eax\n" /* clear PE */ 380 " movl %%eax, %%cr0\n" 381 " ljmpl $0, $4f\n" /* jump to real-mode */ 382 "4:\n" 383 " vmmcall\n" 384 " movl %%cr0, %%eax\n" 385 " btsl $0, %%eax\n" /* set PE */ 386 " movl %%eax, %%cr0\n" 387 " ljmpl %[cs32], $5f\n" /* back to protected mode */ 388 ".code32\n" 389 "5:\n" 390 " movl %%cr4, %%eax\n" 391 " btsl $5, %%eax\n" /* set PAE */ 392 " movl %%eax, %%cr4\n" 393 " movl $0xc0000080, %%ecx\n" /* EFER */ 394 " rdmsr\n" 395 " btsl $8, %%eax\n" /* set LME */ 396 " wrmsr\n" 397 " movl %%cr0, %%eax\n" 398 " btsl $31, %%eax\n" /* set PG */ 399 " movl %%eax, %%cr0\n" 400 " ljmpl %[cs64], $6f\n" /* back to long mode */ 401 ".code64\n\t" 402 "6:\n" 403 " vmmcall\n" 404 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16), 405 [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64) 406 : "rax", "rbx", "rcx", "rdx", "memory"); 407 } 408 409 static bool mode_switch_finished(struct test *test) 410 { 411 u64 cr0, cr4, efer; 412 413 cr0 = test->vmcb->save.cr0; 414 cr4 = test->vmcb->save.cr4; 415 efer = test->vmcb->save.efer; 416 417 /* Only expect VMMCALL intercepts */ 418 if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL) 419 return true; 420 421 /* Jump over VMMCALL instruction */ 422 test->vmcb->save.rip += 3; 423 424 /* Do sanity checks */ 425 switch (test->scratch) { 426 case 0: 427 /* Test should be in real mode now - check for this */ 428 if ((cr0 & 0x80000001) || /* CR0.PG, CR0.PE */ 429 (cr4 & 0x00000020) || /* CR4.PAE */ 430 (efer & 0x00000500)) /* EFER.LMA, EFER.LME */ 431 return true; 432 break; 433 case 2: 434 /* Test should be back in long-mode now - check for this */ 435 if (((cr0 & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */ 436 ((cr4 & 0x00000020) != 0x00000020) || /* CR4.PAE */ 437 ((efer & 0x00000500) != 0x00000500)) /* EFER.LMA, EFER.LME */ 438 return true; 439 break; 440 } 441 442 /* one step forward */ 443 test->scratch += 1; 444 445 return test->scratch == 2; 446 } 447 448 static bool check_mode_switch(struct test *test) 449 { 450 return test->scratch == 2; 451 } 452 453 static void prepare_asid_zero(struct test *test) 454 { 455 test->vmcb->control.asid = 0; 456 } 457 458 static void test_asid_zero(struct test *test) 459 { 460 asm volatile ("vmmcall\n\t"); 461 } 462 463 static bool check_asid_zero(struct test *test) 464 { 465 return test->vmcb->control.exit_code == SVM_EXIT_ERR; 466 } 467 468 static void sel_cr0_bug_prepare(struct test *test) 469 { 470 vmcb_ident(test->vmcb); 471 test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0); 472 } 473 474 static bool sel_cr0_bug_finished(struct test *test) 475 { 476 return true; 477 } 478 479 static void sel_cr0_bug_test(struct test *test) 480 { 481 unsigned long cr0; 482 483 /* read cr0, clear CD, and write back */ 484 cr0 = read_cr0(); 485 cr0 |= (1UL << 30); 486 write_cr0(cr0); 487 488 /* 489 * If we are here the test failed, not sure what to do now because we 490 * are not in guest-mode anymore so we can't trigger an intercept. 491 * Trigger a tripple-fault for now. 492 */ 493 printf("sel_cr0 test failed. Can not recover from this - exiting\n"); 494 exit(1); 495 } 496 497 static bool sel_cr0_bug_check(struct test *test) 498 { 499 return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE; 500 } 501 502 static void npt_nx_prepare(struct test *test) 503 { 504 505 u64 *pte; 506 507 vmcb_ident(test->vmcb); 508 pte = npt_get_pte((u64)null_test); 509 510 *pte |= (1ULL << 63); 511 } 512 513 static bool npt_nx_check(struct test *test) 514 { 515 u64 *pte = npt_get_pte((u64)null_test); 516 517 *pte &= ~(1ULL << 63); 518 519 test->vmcb->save.efer |= (1 << 11); 520 521 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 522 && (test->vmcb->control.exit_info_1 == 0x15); 523 } 524 525 static void npt_us_prepare(struct test *test) 526 { 527 u64 *pte; 528 529 vmcb_ident(test->vmcb); 530 pte = npt_get_pte((u64)scratch_page); 531 532 *pte &= ~(1ULL << 2); 533 } 534 535 static void npt_us_test(struct test *test) 536 { 537 (void) *(volatile u64 *)scratch_page; 538 } 539 540 static bool npt_us_check(struct test *test) 541 { 542 u64 *pte = npt_get_pte((u64)scratch_page); 543 544 *pte |= (1ULL << 2); 545 546 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 547 && (test->vmcb->control.exit_info_1 == 0x05); 548 } 549 550 static void npt_rsvd_prepare(struct test *test) 551 { 552 553 vmcb_ident(test->vmcb); 554 555 pdpe[0] |= (1ULL << 8); 556 } 557 558 static bool npt_rsvd_check(struct test *test) 559 { 560 pdpe[0] &= ~(1ULL << 8); 561 562 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 563 && (test->vmcb->control.exit_info_1 == 0x0f); 564 } 565 566 static void npt_rw_prepare(struct test *test) 567 { 568 569 u64 *pte; 570 571 vmcb_ident(test->vmcb); 572 pte = npt_get_pte(0x80000); 573 574 *pte &= ~(1ULL << 1); 575 } 576 577 static void npt_rw_test(struct test *test) 578 { 579 u64 *data = (void*)(0x80000); 580 581 *data = 0; 582 } 583 584 static bool npt_rw_check(struct test *test) 585 { 586 u64 *pte = npt_get_pte(0x80000); 587 588 *pte |= (1ULL << 1); 589 590 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 591 && (test->vmcb->control.exit_info_1 == 0x07); 592 } 593 594 static void npt_pfwalk_prepare(struct test *test) 595 { 596 597 u64 *pte; 598 599 vmcb_ident(test->vmcb); 600 pte = npt_get_pte(read_cr3()); 601 602 *pte &= ~(1ULL << 1); 603 } 604 605 static bool npt_pfwalk_check(struct test *test) 606 { 607 u64 *pte = npt_get_pte(read_cr3()); 608 609 *pte |= (1ULL << 1); 610 611 return (test->vmcb->control.exit_code == SVM_EXIT_NPF) 612 && (test->vmcb->control.exit_info_1 == 0x7) 613 && (test->vmcb->control.exit_info_2 == read_cr3()); 614 } 615 616 static void latency_prepare(struct test *test) 617 { 618 default_prepare(test); 619 runs = LATENCY_RUNS; 620 latvmrun_min = latvmexit_min = -1ULL; 621 latvmrun_max = latvmexit_max = 0; 622 vmrun_sum = vmexit_sum = 0; 623 } 624 625 static void latency_test(struct test *test) 626 { 627 u64 cycles; 628 629 start: 630 tsc_end = rdtsc(); 631 632 cycles = tsc_end - tsc_start; 633 634 if (cycles > latvmrun_max) 635 latvmrun_max = cycles; 636 637 if (cycles < latvmrun_min) 638 latvmrun_min = cycles; 639 640 vmrun_sum += cycles; 641 642 tsc_start = rdtsc(); 643 644 asm volatile ("vmmcall" : : : "memory"); 645 goto start; 646 } 647 648 static bool latency_finished(struct test *test) 649 { 650 u64 cycles; 651 652 tsc_end = rdtsc(); 653 654 cycles = tsc_end - tsc_start; 655 656 if (cycles > latvmexit_max) 657 latvmexit_max = cycles; 658 659 if (cycles < latvmexit_min) 660 latvmexit_min = cycles; 661 662 vmexit_sum += cycles; 663 664 test->vmcb->save.rip += 3; 665 666 runs -= 1; 667 668 return runs == 0; 669 } 670 671 static bool latency_check(struct test *test) 672 { 673 printf(" Latency VMRUN : max: %d min: %d avg: %d\n", latvmrun_max, 674 latvmrun_min, vmrun_sum / LATENCY_RUNS); 675 printf(" Latency VMEXIT: max: %d min: %d avg: %d\n", latvmexit_max, 676 latvmexit_min, vmexit_sum / LATENCY_RUNS); 677 return true; 678 } 679 680 static void lat_svm_insn_prepare(struct test *test) 681 { 682 default_prepare(test); 683 runs = LATENCY_RUNS; 684 latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL; 685 latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0; 686 vmload_sum = vmsave_sum = stgi_sum = clgi_sum; 687 } 688 689 static bool lat_svm_insn_finished(struct test *test) 690 { 691 u64 vmcb_phys = virt_to_phys(test->vmcb); 692 u64 cycles; 693 694 for ( ; runs != 0; runs--) { 695 tsc_start = rdtsc(); 696 asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory"); 697 cycles = rdtsc() - tsc_start; 698 if (cycles > latvmload_max) 699 latvmload_max = cycles; 700 if (cycles < latvmload_min) 701 latvmload_min = cycles; 702 vmload_sum += cycles; 703 704 tsc_start = rdtsc(); 705 asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory"); 706 cycles = rdtsc() - tsc_start; 707 if (cycles > latvmsave_max) 708 latvmsave_max = cycles; 709 if (cycles < latvmsave_min) 710 latvmsave_min = cycles; 711 vmsave_sum += cycles; 712 713 tsc_start = rdtsc(); 714 asm volatile("stgi\n\t"); 715 cycles = rdtsc() - tsc_start; 716 if (cycles > latstgi_max) 717 latstgi_max = cycles; 718 if (cycles < latstgi_min) 719 latstgi_min = cycles; 720 stgi_sum += cycles; 721 722 tsc_start = rdtsc(); 723 asm volatile("clgi\n\t"); 724 cycles = rdtsc() - tsc_start; 725 if (cycles > latclgi_max) 726 latclgi_max = cycles; 727 if (cycles < latclgi_min) 728 latclgi_min = cycles; 729 clgi_sum += cycles; 730 } 731 732 return true; 733 } 734 735 static bool lat_svm_insn_check(struct test *test) 736 { 737 printf(" Latency VMLOAD: max: %d min: %d avg: %d\n", latvmload_max, 738 latvmload_min, vmload_sum / LATENCY_RUNS); 739 printf(" Latency VMSAVE: max: %d min: %d avg: %d\n", latvmsave_max, 740 latvmsave_min, vmsave_sum / LATENCY_RUNS); 741 printf(" Latency STGI: max: %d min: %d avg: %d\n", latstgi_max, 742 latstgi_min, stgi_sum / LATENCY_RUNS); 743 printf(" Latency CLGI: max: %d min: %d avg: %d\n", latclgi_max, 744 latclgi_min, clgi_sum / LATENCY_RUNS); 745 return true; 746 } 747 static struct test tests[] = { 748 { "null", default_supported, default_prepare, null_test, 749 default_finished, null_check }, 750 { "vmrun", default_supported, default_prepare, test_vmrun, 751 default_finished, check_vmrun }, 752 { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 753 null_test, default_finished, check_no_vmrun_int }, 754 { "cr3 read intercept", default_supported, prepare_cr3_intercept, 755 test_cr3_intercept, default_finished, check_cr3_intercept }, 756 { "cr3 read nointercept", default_supported, default_prepare, 757 test_cr3_intercept, default_finished, check_cr3_nointercept }, 758 { "cr3 read intercept emulate", smp_supported, 759 prepare_cr3_intercept_bypass, test_cr3_intercept_bypass, 760 default_finished, check_cr3_intercept }, 761 { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip, 762 default_finished, check_next_rip }, 763 { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch, 764 mode_switch_finished, check_mode_switch }, 765 { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero, 766 default_finished, check_asid_zero }, 767 { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test, 768 sel_cr0_bug_finished, sel_cr0_bug_check }, 769 { "npt_nx", npt_supported, npt_nx_prepare, null_test, 770 default_finished, npt_nx_check }, 771 { "npt_us", npt_supported, npt_us_prepare, npt_us_test, 772 default_finished, npt_us_check }, 773 { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test, 774 default_finished, npt_rsvd_check }, 775 { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test, 776 default_finished, npt_rw_check }, 777 { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test, 778 default_finished, npt_pfwalk_check }, 779 { "latency_run_exit", default_supported, latency_prepare, latency_test, 780 latency_finished, latency_check }, 781 { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test, 782 lat_svm_insn_finished, lat_svm_insn_check }, 783 }; 784 785 int main(int ac, char **av) 786 { 787 int i, nr, passed, done; 788 struct vmcb *vmcb; 789 790 setup_vm(); 791 smp_init(); 792 793 if (!(cpuid(0x80000001).c & 4)) { 794 printf("SVM not availble\n"); 795 return 0; 796 } 797 798 setup_svm(); 799 800 vmcb = alloc_page(); 801 802 nr = ARRAY_SIZE(tests); 803 passed = done = 0; 804 for (i = 0; i < nr; ++i) { 805 if (!tests[i].supported()) 806 continue; 807 done += 1; 808 passed += test_run(&tests[i], vmcb); 809 } 810 811 printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed)); 812 return passed == done ? 0 : 1; 813 } 814