#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"

/* for the nested page table */
u64 *pml4e;
u64 *pdpe;
u64 *pde[4];
u64 *pte[2048];
u64 *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

static bool npt_supported(void)
{
    return cpuid(0x8000000A).d & 1;
}

static void setup_svm(void)
{
    void *hsave = alloc_page();
    u64 *page, address;
    int i, j;

    wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);

    scratch_page = alloc_page();

    if (!npt_supported())
        return;

    printf("NPT detected - running all tests with NPT enabled\n");

    /*
     * Nested paging supported - build a nested page table.
     * Build the page table bottom-up and map everything with 4k pages
     * to get enough granularity for the NPT unit tests.
     */

    address = 0;

    /* PTE level */
    for (i = 0; i < 2048; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j, address += 4096)
            page[j] = address | 0x067ULL; /* P, RW, US, A, D */

        pte[i] = page;
    }

    /* PDE level - each PDE page covers 512 consecutive PTE pages */
    for (i = 0; i < 4; ++i) {
        page = alloc_page();

        for (j = 0; j < 512; ++j)
            page[j] = (u64)pte[(i * 512) + j] | 0x027ULL; /* P, RW, US, A */

        pde[i] = page;
    }

    /* PDPe level */
    pdpe = alloc_page();
    for (i = 0; i < 4; ++i)
        pdpe[i] = ((u64)(pde[i])) | 0x27;

    /* PML4e level */
    pml4e = alloc_page();
    pml4e[0] = ((u64)pdpe) | 0x27;
}

static u64 *get_pte(u64 address)
{
    int i1, i2;

    /* split the address into PTE-page index and entry index */
    address >>= 12;
    i1 = (address >> 9) & 0x7ff;
    i2 = address & 0x1ff;

    return &pte[i1][i2];
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
                         u64 base, u32 limit, u32 attr)
{
    seg->selector = selector;
    seg->attrib = attr;
    seg->limit = limit;
    seg->base = base;
}

static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
                          | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
                          | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    ctrl->asid = 1;
    save->cpl = 0;
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);

    if (npt_supported()) {
        ctrl->nested_ctl = 1;
        ctrl->nested_cr3 = (u64)pml4e;
    }
}
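/*
 * Generic test harness: each test provides a feature check, a prepare hook
 * that runs on the host, a guest_func that runs inside the guest, and
 * finished/succeeded hooks that drive and validate the vmexits.
 */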
struct test {
    const char *name;
    bool (*supported)(void);
    void (*prepare)(struct test *test);
    void (*guest_func)(struct test *test);
    bool (*finished)(struct test *test);
    bool (*succeeded)(struct test *test);
    struct vmcb *vmcb;
    int exits;
    ulong scratch;
};

static void test_thunk(struct test *test)
{
    test->guest_func(test);
    asm volatile ("vmmcall" : : : "memory");
}

static bool test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    u64 guest_stack[10000];
    bool success;

    test->vmcb = vmcb;
    test->prepare(test);
    vmcb->save.rip = (ulong)test_thunk;
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    do {
        tsc_start = rdtsc();
        /*
         * %rbp and the test pointer (%rdi) are preserved across the guest
         * run on the host stack; everything else is in the clobber list.
         */
        asm volatile (
            "clgi \n\t"
            "vmload \n\t"
            "push %%rbp \n\t"
            "push %1 \n\t"
            "vmrun \n\t"
            "pop %1 \n\t"
            "pop %%rbp \n\t"
            "vmsave \n\t"
            "stgi"
            : : "a"(vmcb_phys), "D"(test)
            : "rbx", "rcx", "rdx", "rsi",
              "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
              "memory");
        tsc_end = rdtsc();
        ++test->exits;
    } while (!test->finished(test));

    success = test->succeeded(test);

    printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");

    return success;
}

static bool smp_supported(void)
{
    return cpu_count() > 1;
}

static bool default_supported(void)
{
    return true;
}

static void default_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    cli();
}

static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}

static void null_test(struct test *test)
{
}

static bool null_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct test *test)
{
    test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}

static bool check_vmrun(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_cr3_intercept(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct test *test)
{
    asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct test *test)
{
    return null_check(test) && test->scratch == read_cr3();
}
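/*
 * SMP variant of the CR3 intercept test: a second CPU patches the guest's
 * MMIO instruction into a CR3 read while the guest is running, to verify
 * that the intercept still fires for the rewritten instruction.
 */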
static void corrupt_cr3_intercept_bypass(void *_test)
{
    struct test *test = _test;
    extern volatile u32 mmio_insn;

    while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
        pause();
    pause();
    pause();
    pause();
    mmio_insn = 0x90d8200f; /* 0f 20 d8 90: mov %cr3, %rax; nop */
}

static void prepare_cr3_intercept_bypass(struct test *test)
{
    default_prepare(test);
    test->vmcb->control.intercept_cr_read |= 1 << 3;
    on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct test *test)
{
    ulong a = 0xa0000;

    test->scratch = 1;
    while (test->scratch != 2)
        barrier();

    asm volatile ("mmio_insn: mov %0, (%0); nop"
                  : "+a"(a) : : "memory");
    test->scratch = a;
}

static bool next_rip_supported(void)
{
    return (cpuid(SVM_CPUID_FUNC).d & 8);
}

static void prepare_next_rip(struct test *test)
{
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct test *test)
{
    asm volatile ("rdtsc\n\t"
                  ".globl exp_next_rip\n\t"
                  "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct test *test)
{
    extern char exp_next_rip;
    unsigned long address = (unsigned long)&exp_next_rip;

    return address == test->vmcb->control.next_rip;
}

static void prepare_mode_switch(struct test *test)
{
    test->vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
                                             |  (1ULL << UD_VECTOR)
                                             |  (1ULL << DF_VECTOR)
                                             |  (1ULL << PF_VECTOR);
    test->scratch = 0;
}

static void test_mode_switch(struct test *test)
{
    asm volatile("  cli\n"
                 "  ljmp *1f\n" /* jump to 32-bit code segment */
                 "1:\n"
                 "  .long 2f\n"
                 "  .long 40\n"
                 ".code32\n"
                 "2:\n"
                 "  movl %%cr0, %%eax\n"
                 "  btcl $31, %%eax\n" /* clear PG */
                 "  movl %%eax, %%cr0\n"
                 "  movl $0xc0000080, %%ecx\n" /* EFER */
                 "  rdmsr\n"
                 "  btcl $8, %%eax\n" /* clear LME */
                 "  wrmsr\n"
                 "  movl %%cr4, %%eax\n"
                 "  btcl $5, %%eax\n" /* clear PAE */
                 "  movl %%eax, %%cr4\n"
                 "  movw $64, %%ax\n"
                 "  movw %%ax, %%ds\n"
                 "  ljmpl $56, $3f\n" /* jump to 16-bit protected mode */
                 ".code16\n"
                 "3:\n"
                 "  movl %%cr0, %%eax\n"
                 "  btcl $0, %%eax\n" /* clear PE */
                 "  movl %%eax, %%cr0\n"
                 "  ljmpl $0, $4f\n" /* jump to real mode */
                 "4:\n"
                 "  vmmcall\n"
                 "  movl %%cr0, %%eax\n"
                 "  btsl $0, %%eax\n" /* set PE */
                 "  movl %%eax, %%cr0\n"
                 "  ljmpl $40, $5f\n" /* back to protected mode */
                 ".code32\n"
                 "5:\n"
                 "  movl %%cr4, %%eax\n"
                 "  btsl $5, %%eax\n" /* set PAE */
                 "  movl %%eax, %%cr4\n"
                 "  movl $0xc0000080, %%ecx\n" /* EFER */
                 "  rdmsr\n"
                 "  btsl $8, %%eax\n" /* set LME */
                 "  wrmsr\n"
                 "  movl %%cr0, %%eax\n"
                 "  btsl $31, %%eax\n" /* set PG */
                 "  movl %%eax, %%cr0\n"
                 "  ljmpl $8, $6f\n" /* back to long mode */
                 ".code64\n"
                 "6:\n"
                 "  vmmcall\n"
                 ::: "rax", "rbx", "rcx", "rdx", "memory");
}
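/*
 * The guest above issues VMMCALL once from real mode and once after
 * returning to long mode; each exit is sanity-checked against the
 * CR0/CR4/EFER state expected for that mode before the guest is resumed.
 */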
static bool mode_switch_finished(struct test *test)
{
    u64 cr0, cr4, efer;

    cr0  = test->vmcb->save.cr0;
    cr4  = test->vmcb->save.cr4;
    efer = test->vmcb->save.efer;

    /* Only expect VMMCALL intercepts */
    if (test->vmcb->control.exit_code != SVM_EXIT_VMMCALL)
        return true;

    /* Jump over the VMMCALL instruction */
    test->vmcb->save.rip += 3;

    /* Do sanity checks */
    switch (test->scratch) {
    case 0:
        /* Test should be in real mode now - check for this */
        if ((cr0  & 0x80000001) || /* CR0.PG, CR0.PE */
            (cr4  & 0x00000020) || /* CR4.PAE */
            (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    case 2:
        /* Test should be back in long mode now - check for this */
        if (((cr0  & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */
            ((cr4  & 0x00000020) != 0x00000020) || /* CR4.PAE */
            ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
            return true;
        break;
    }

    /* one step forward */
    test->scratch += 1;

    return test->scratch == 2;
}

static bool check_mode_switch(struct test *test)
{
    return test->scratch == 2;
}

static void prepare_asid_zero(struct test *test)
{
    test->vmcb->control.asid = 0;
}

static void test_asid_zero(struct test *test)
{
    asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);
    test->vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct test *test)
{
    return true;
}

static void sel_cr0_bug_test(struct test *test)
{
    unsigned long cr0;

    /* read cr0, set CD, and write back */
    cr0 = read_cr0();
    cr0 |= (1UL << 30);
    write_cr0(cr0);

    /*
     * If we are here the test failed. Not sure what to do now, because we
     * are not in guest mode anymore, so we can't trigger an intercept.
     * Print an error and exit instead.
     */
    printf("sel_cr0 test failed. Cannot recover from this - exiting\n");
    exit(1);
}

static bool sel_cr0_bug_check(struct test *test)
{
    return test->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

/*
 * For nested page faults, exit_info_1 carries the page-fault error code of
 * the nested walk and exit_info_2 the faulting guest-physical address.
 */
static void npt_nx_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte((u64)null_test);

    test->vmcb->save.efer |= (1 << 11); /* enable EFER.NX for the guest */
    *pte |= (1ULL << 63); /* mark the guest's code page no-execute */
}

static bool npt_nx_check(struct test *test)
{
    u64 *pte = get_pte((u64)null_test);

    *pte &= ~(1ULL << 63);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x15);
}

static void npt_us_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte((u64)scratch_page);

    *pte &= ~(1ULL << 2); /* clear the user/supervisor bit */
}

static void npt_us_test(struct test *test)
{
    volatile u64 data;

    data = *scratch_page;
}

static bool npt_us_check(struct test *test)
{
    u64 *pte = get_pte((u64)scratch_page);

    *pte |= (1ULL << 2);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x05);
}

static void npt_rsvd_prepare(struct test *test)
{
    vmcb_ident(test->vmcb);

    pdpe[0] |= (1ULL << 8); /* set a reserved bit in the PDPE */
}

static bool npt_rsvd_check(struct test *test)
{
    pdpe[0] &= ~(1ULL << 8);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x0f);
}

static void npt_rw_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte(0x80000);

    *pte &= ~(1ULL << 1); /* make the page read-only */
}

static void npt_rw_test(struct test *test)
{
    u64 *data = (void *)(0x80000);

    *data = 0;
}

static bool npt_rw_check(struct test *test)
{
    u64 *pte = get_pte(0x80000);

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x07);
}
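/*
 * npt_pfwalk below write-protects the NPT mapping of the guest's own page
 * table, so the hardware page-table walker itself should fault when it
 * updates accessed/dirty bits; exit_info_2 should then hold the guest CR3
 * page.
 */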
static void npt_pfwalk_prepare(struct test *test)
{
    u64 *pte;

    vmcb_ident(test->vmcb);
    pte = get_pte(read_cr3());

    *pte &= ~(1ULL << 1);
}

static bool npt_pfwalk_check(struct test *test)
{
    u64 *pte = get_pte(read_cr3());

    *pte |= (1ULL << 1);

    return (test->vmcb->control.exit_code == SVM_EXIT_NPF)
        && (test->vmcb->control.exit_info_1 == 0x7)
        && (test->vmcb->control.exit_info_2 == read_cr3());
}

static void latency_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmrun_min = latvmexit_min = -1ULL;
    latvmrun_max = latvmexit_max = 0;
    vmrun_sum = vmexit_sum = 0;
}

static void latency_test(struct test *test)
{
    u64 cycles;

start:
    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmrun_max)
        latvmrun_max = cycles;

    if (cycles < latvmrun_min)
        latvmrun_min = cycles;

    vmrun_sum += cycles;

    tsc_start = rdtsc();

    asm volatile ("vmmcall" : : : "memory");
    goto start;
}

static bool latency_finished(struct test *test)
{
    u64 cycles;

    tsc_end = rdtsc();

    cycles = tsc_end - tsc_start;

    if (cycles > latvmexit_max)
        latvmexit_max = cycles;

    if (cycles < latvmexit_min)
        latvmexit_min = cycles;

    vmexit_sum += cycles;

    test->vmcb->save.rip += 3; /* skip over the 3-byte vmmcall */

    runs -= 1;

    return runs == 0;
}

static bool latency_check(struct test *test)
{
    printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
           latvmrun_min, vmrun_sum / LATENCY_RUNS);
    printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
           latvmexit_min, vmexit_sum / LATENCY_RUNS);
    return true;
}
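/*
 * The instruction-latency test times bare VMLOAD/VMSAVE/STGI/CLGI on the
 * host side; the whole measurement loop runs in finished(), so the guest
 * body is just null_test.
 */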
static void lat_svm_insn_prepare(struct test *test)
{
    default_prepare(test);
    runs = LATENCY_RUNS;
    latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
    latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
    vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct test *test)
{
    u64 vmcb_phys = virt_to_phys(test->vmcb);
    u64 cycles;

    for ( ; runs != 0; runs--) {
        tsc_start = rdtsc();
        asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmload_max)
            latvmload_max = cycles;
        if (cycles < latvmload_min)
            latvmload_min = cycles;
        vmload_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory");
        cycles = rdtsc() - tsc_start;
        if (cycles > latvmsave_max)
            latvmsave_max = cycles;
        if (cycles < latvmsave_min)
            latvmsave_min = cycles;
        vmsave_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("stgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latstgi_max)
            latstgi_max = cycles;
        if (cycles < latstgi_min)
            latstgi_min = cycles;
        stgi_sum += cycles;

        tsc_start = rdtsc();
        asm volatile("clgi\n\t");
        cycles = rdtsc() - tsc_start;
        if (cycles > latclgi_max)
            latclgi_max = cycles;
        if (cycles < latclgi_min)
            latclgi_min = cycles;
        clgi_sum += cycles;
    }

    return true;
}

static bool lat_svm_insn_check(struct test *test)
{
    printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
           latvmload_min, vmload_sum / LATENCY_RUNS);
    printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
           latvmsave_min, vmsave_sum / LATENCY_RUNS);
    printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
           latstgi_min, stgi_sum / LATENCY_RUNS);
    printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
           latclgi_min, clgi_sum / LATENCY_RUNS);
    return true;
}

static struct test tests[] = {
    { "null", default_supported, default_prepare, null_test,
      default_finished, null_check },
    { "vmrun", default_supported, default_prepare, test_vmrun,
      default_finished, check_vmrun },
    { "vmrun intercept check", default_supported, prepare_no_vmrun_int,
      null_test, default_finished, check_no_vmrun_int },
    { "cr3 read intercept", default_supported, prepare_cr3_intercept,
      test_cr3_intercept, default_finished, check_cr3_intercept },
    { "cr3 read nointercept", default_supported, default_prepare,
      test_cr3_intercept, default_finished, check_cr3_nointercept },
    { "cr3 read intercept emulate", smp_supported,
      prepare_cr3_intercept_bypass, test_cr3_intercept_bypass,
      default_finished, check_cr3_intercept },
    { "next_rip", next_rip_supported, prepare_next_rip, test_next_rip,
      default_finished, check_next_rip },
    { "mode_switch", default_supported, prepare_mode_switch, test_mode_switch,
      mode_switch_finished, check_mode_switch },
    { "asid_zero", default_supported, prepare_asid_zero, test_asid_zero,
      default_finished, check_asid_zero },
    { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, sel_cr0_bug_test,
      sel_cr0_bug_finished, sel_cr0_bug_check },
    { "npt_nx", npt_supported, npt_nx_prepare, null_test,
      default_finished, npt_nx_check },
    { "npt_us", npt_supported, npt_us_prepare, npt_us_test,
      default_finished, npt_us_check },
    { "npt_rsvd", npt_supported, npt_rsvd_prepare, null_test,
      default_finished, npt_rsvd_check },
    { "npt_rw", npt_supported, npt_rw_prepare, npt_rw_test,
      default_finished, npt_rw_check },
    { "npt_pfwalk", npt_supported, npt_pfwalk_prepare, null_test,
      default_finished, npt_pfwalk_check },
    { "latency_run_exit", default_supported, latency_prepare, latency_test,
      latency_finished, latency_check },
    { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
      lat_svm_insn_finished, lat_svm_insn_check },
};

int main(int ac, char **av)
{
    int i, nr, passed, done;
    struct vmcb *vmcb;

    setup_vm();
    smp_init();

    if (!(cpuid(0x80000001).c & 4)) { /* CPUID.80000001H:ECX.SVM[bit 2] */
        printf("SVM not available\n");
        return 0;
    }

    setup_svm();

    vmcb = alloc_page();

    nr = ARRAY_SIZE(tests);
    passed = done = 0;
    for (i = 0; i < nr; ++i) {
        if (!tests[i].supported())
            continue;
        done += 1;
        passed += test_run(&tests[i], vmcb);
    }

    printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done, (done - passed));
    return passed == done ? 0 : 1;
}