1 #include "svm.h" 2 #include "libcflat.h" 3 #include "processor.h" 4 #include "desc.h" 5 #include "msr.h" 6 #include "vm.h" 7 #include "smp.h" 8 #include "types.h" 9 #include "alloc_page.h" 10 #include "isr.h" 11 #include "apic.h" 12 #include "delay.h" 13 14 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f 15 16 #define LATENCY_RUNS 1000000 17 18 extern u16 cpu_online_count; 19 20 u64 tsc_start; 21 u64 tsc_end; 22 23 u64 vmrun_sum, vmexit_sum; 24 u64 vmsave_sum, vmload_sum; 25 u64 stgi_sum, clgi_sum; 26 u64 latvmrun_max; 27 u64 latvmrun_min; 28 u64 latvmexit_max; 29 u64 latvmexit_min; 30 u64 latvmload_max; 31 u64 latvmload_min; 32 u64 latvmsave_max; 33 u64 latvmsave_min; 34 u64 latstgi_max; 35 u64 latstgi_min; 36 u64 latclgi_max; 37 u64 latclgi_min; 38 u64 runs; 39 40 static void null_test(struct svm_test *test) 41 { 42 } 43 44 static bool null_check(struct svm_test *test) 45 { 46 return vmcb->control.exit_code == SVM_EXIT_VMMCALL; 47 } 48 49 static void prepare_no_vmrun_int(struct svm_test *test) 50 { 51 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN); 52 } 53 54 static bool check_no_vmrun_int(struct svm_test *test) 55 { 56 return vmcb->control.exit_code == SVM_EXIT_ERR; 57 } 58 59 static void test_vmrun(struct svm_test *test) 60 { 61 asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb))); 62 } 63 64 static bool check_vmrun(struct svm_test *test) 65 { 66 return vmcb->control.exit_code == SVM_EXIT_VMRUN; 67 } 68 69 static void prepare_rsm_intercept(struct svm_test *test) 70 { 71 default_prepare(test); 72 vmcb->control.intercept |= 1 << INTERCEPT_RSM; 73 vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR); 74 } 75 76 static void test_rsm_intercept(struct svm_test *test) 77 { 78 asm volatile ("rsm" : : : "memory"); 79 } 80 81 static bool check_rsm_intercept(struct svm_test *test) 82 { 83 return get_test_stage(test) == 2; 84 } 85 86 static bool finished_rsm_intercept(struct svm_test *test) 87 { 88 switch (get_test_stage(test)) { 89 case 0: 90 if (vmcb->control.exit_code != SVM_EXIT_RSM) { 91 report_fail("VMEXIT not due to rsm. Exit reason 0x%x", 92 vmcb->control.exit_code); 93 return true; 94 } 95 vmcb->control.intercept &= ~(1 << INTERCEPT_RSM); 96 inc_test_stage(test); 97 break; 98 99 case 1: 100 if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) { 101 report_fail("VMEXIT not due to #UD. 
Exit reason 0x%x", 102 vmcb->control.exit_code); 103 return true; 104 } 105 vmcb->save.rip += 2; 106 inc_test_stage(test); 107 break; 108 109 default: 110 return true; 111 } 112 return get_test_stage(test) == 2; 113 } 114 115 static void prepare_cr3_intercept(struct svm_test *test) 116 { 117 default_prepare(test); 118 vmcb->control.intercept_cr_read |= 1 << 3; 119 } 120 121 static void test_cr3_intercept(struct svm_test *test) 122 { 123 asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory"); 124 } 125 126 static bool check_cr3_intercept(struct svm_test *test) 127 { 128 return vmcb->control.exit_code == SVM_EXIT_READ_CR3; 129 } 130 131 static bool check_cr3_nointercept(struct svm_test *test) 132 { 133 return null_check(test) && test->scratch == read_cr3(); 134 } 135 136 static void corrupt_cr3_intercept_bypass(void *_test) 137 { 138 struct svm_test *test = _test; 139 extern volatile u32 mmio_insn; 140 141 while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2)) 142 pause(); 143 pause(); 144 pause(); 145 pause(); 146 mmio_insn = 0x90d8200f; // mov %cr3, %rax; nop 147 } 148 149 static void prepare_cr3_intercept_bypass(struct svm_test *test) 150 { 151 default_prepare(test); 152 vmcb->control.intercept_cr_read |= 1 << 3; 153 on_cpu_async(1, corrupt_cr3_intercept_bypass, test); 154 } 155 156 static void test_cr3_intercept_bypass(struct svm_test *test) 157 { 158 ulong a = 0xa0000; 159 160 test->scratch = 1; 161 while (test->scratch != 2) 162 barrier(); 163 164 asm volatile ("mmio_insn: mov %0, (%0); nop" 165 : "+a"(a) : : "memory"); 166 test->scratch = a; 167 } 168 169 static void prepare_dr_intercept(struct svm_test *test) 170 { 171 default_prepare(test); 172 vmcb->control.intercept_dr_read = 0xff; 173 vmcb->control.intercept_dr_write = 0xff; 174 } 175 176 static void test_dr_intercept(struct svm_test *test) 177 { 178 unsigned int i, failcnt = 0; 179 180 /* Loop testing debug register reads */ 181 for (i = 0; i < 8; i++) { 182 183 switch (i) { 184 case 0: 185 asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory"); 186 break; 187 case 1: 188 asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory"); 189 break; 190 case 2: 191 asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory"); 192 break; 193 case 3: 194 asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory"); 195 break; 196 case 4: 197 asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory"); 198 break; 199 case 5: 200 asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory"); 201 break; 202 case 6: 203 asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory"); 204 break; 205 case 7: 206 asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory"); 207 break; 208 } 209 210 if (test->scratch != i) { 211 report_fail("dr%u read intercept", i); 212 failcnt++; 213 } 214 } 215 216 /* Loop testing debug register writes */ 217 for (i = 0; i < 8; i++) { 218 219 switch (i) { 220 case 0: 221 asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory"); 222 break; 223 case 1: 224 asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory"); 225 break; 226 case 2: 227 asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory"); 228 break; 229 case 3: 230 asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory"); 231 break; 232 case 4: 233 asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory"); 234 break; 235 case 5: 236 asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory"); 237 break; 238 case 6: 239 asm volatile ("mov %0, %%dr6" : 
: "r"(test->scratch) : "memory"); 240 break; 241 case 7: 242 asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory"); 243 break; 244 } 245 246 if (test->scratch != i) { 247 report_fail("dr%u write intercept", i); 248 failcnt++; 249 } 250 } 251 252 test->scratch = failcnt; 253 } 254 255 static bool dr_intercept_finished(struct svm_test *test) 256 { 257 ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0); 258 259 /* Only expect DR intercepts */ 260 if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0)) 261 return true; 262 263 /* 264 * Compute debug register number. 265 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture 266 * Programmer's Manual Volume 2 - System Programming: 267 * http://support.amd.com/TechDocs/24593.pdf 268 * there are 16 VMEXIT codes each for DR read and write. 269 */ 270 test->scratch = (n % 16); 271 272 /* Jump over MOV instruction */ 273 vmcb->save.rip += 3; 274 275 return false; 276 } 277 278 static bool check_dr_intercept(struct svm_test *test) 279 { 280 return !test->scratch; 281 } 282 283 static bool next_rip_supported(void) 284 { 285 return this_cpu_has(X86_FEATURE_NRIPS); 286 } 287 288 static void prepare_next_rip(struct svm_test *test) 289 { 290 vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC); 291 } 292 293 294 static void test_next_rip(struct svm_test *test) 295 { 296 asm volatile ("rdtsc\n\t" 297 ".globl exp_next_rip\n\t" 298 "exp_next_rip:\n\t" ::: "eax", "edx"); 299 } 300 301 static bool check_next_rip(struct svm_test *test) 302 { 303 extern char exp_next_rip; 304 unsigned long address = (unsigned long)&exp_next_rip; 305 306 return address == vmcb->control.next_rip; 307 } 308 309 extern u8 *msr_bitmap; 310 311 static void prepare_msr_intercept(struct svm_test *test) 312 { 313 default_prepare(test); 314 vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT); 315 vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR); 316 memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE); 317 } 318 319 static void test_msr_intercept(struct svm_test *test) 320 { 321 unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */ 322 unsigned long msr_index; 323 324 for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) { 325 if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) { 326 /* 327 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture 328 * Programmer's Manual volume 2 - System Programming: 329 * http://support.amd.com/TechDocs/24593.pdf 330 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR. 331 */ 332 continue; 333 } 334 335 /* Skips gaps between supported MSR ranges */ 336 if (msr_index == 0x2000) 337 msr_index = 0xc0000000; 338 else if (msr_index == 0xc0002000) 339 msr_index = 0xc0010000; 340 341 test->scratch = -1; 342 343 rdmsr(msr_index); 344 345 /* Check that a read intercept occurred for MSR at msr_index */ 346 if (test->scratch != msr_index) 347 report_fail("MSR 0x%lx read intercept", msr_index); 348 349 /* 350 * Poor man approach to generate a value that 351 * seems arbitrary each time around the loop. 
352 */ 353 msr_value += (msr_value << 1); 354 355 wrmsr(msr_index, msr_value); 356 357 /* Check that a write intercept occurred for MSR with msr_value */ 358 if (test->scratch != msr_value) 359 report_fail("MSR 0x%lx write intercept", msr_index); 360 } 361 362 test->scratch = -2; 363 } 364 365 static bool msr_intercept_finished(struct svm_test *test) 366 { 367 u32 exit_code = vmcb->control.exit_code; 368 u64 exit_info_1; 369 u8 *opcode; 370 371 if (exit_code == SVM_EXIT_MSR) { 372 exit_info_1 = vmcb->control.exit_info_1; 373 } else { 374 /* 375 * If #GP exception occurs instead, check that it was 376 * for RDMSR/WRMSR and set exit_info_1 accordingly. 377 */ 378 379 if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR)) 380 return true; 381 382 opcode = (u8 *)vmcb->save.rip; 383 if (opcode[0] != 0x0f) 384 return true; 385 386 switch (opcode[1]) { 387 case 0x30: /* WRMSR */ 388 exit_info_1 = 1; 389 break; 390 case 0x32: /* RDMSR */ 391 exit_info_1 = 0; 392 break; 393 default: 394 return true; 395 } 396 397 /* 398 * Warn that #GP exception occured instead. 399 * RCX holds the MSR index. 400 */ 401 printf("%s 0x%lx #GP exception\n", 402 exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx); 403 } 404 405 /* Jump over RDMSR/WRMSR instruction */ 406 vmcb->save.rip += 2; 407 408 /* 409 * Test whether the intercept was for RDMSR/WRMSR. 410 * For RDMSR, test->scratch is set to the MSR index; 411 * RCX holds the MSR index. 412 * For WRMSR, test->scratch is set to the MSR value; 413 * RDX holds the upper 32 bits of the MSR value, 414 * while RAX hold its lower 32 bits. 415 */ 416 if (exit_info_1) 417 test->scratch = 418 ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff)); 419 else 420 test->scratch = get_regs().rcx; 421 422 return false; 423 } 424 425 static bool check_msr_intercept(struct svm_test *test) 426 { 427 memset(msr_bitmap, 0, MSR_BITMAP_SIZE); 428 return (test->scratch == -2); 429 } 430 431 static void prepare_mode_switch(struct svm_test *test) 432 { 433 vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR) 434 | (1ULL << UD_VECTOR) 435 | (1ULL << DF_VECTOR) 436 | (1ULL << PF_VECTOR); 437 test->scratch = 0; 438 } 439 440 static void test_mode_switch(struct svm_test *test) 441 { 442 asm volatile(" cli\n" 443 " ljmp *1f\n" /* jump to 32-bit code segment */ 444 "1:\n" 445 " .long 2f\n" 446 " .long " xstr(KERNEL_CS32) "\n" 447 ".code32\n" 448 "2:\n" 449 " movl %%cr0, %%eax\n" 450 " btcl $31, %%eax\n" /* clear PG */ 451 " movl %%eax, %%cr0\n" 452 " movl $0xc0000080, %%ecx\n" /* EFER */ 453 " rdmsr\n" 454 " btcl $8, %%eax\n" /* clear LME */ 455 " wrmsr\n" 456 " movl %%cr4, %%eax\n" 457 " btcl $5, %%eax\n" /* clear PAE */ 458 " movl %%eax, %%cr4\n" 459 " movw %[ds16], %%ax\n" 460 " movw %%ax, %%ds\n" 461 " ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */ 462 ".code16\n" 463 "3:\n" 464 " movl %%cr0, %%eax\n" 465 " btcl $0, %%eax\n" /* clear PE */ 466 " movl %%eax, %%cr0\n" 467 " ljmpl $0, $4f\n" /* jump to real-mode */ 468 "4:\n" 469 " vmmcall\n" 470 " movl %%cr0, %%eax\n" 471 " btsl $0, %%eax\n" /* set PE */ 472 " movl %%eax, %%cr0\n" 473 " ljmpl %[cs32], $5f\n" /* back to protected mode */ 474 ".code32\n" 475 "5:\n" 476 " movl %%cr4, %%eax\n" 477 " btsl $5, %%eax\n" /* set PAE */ 478 " movl %%eax, %%cr4\n" 479 " movl $0xc0000080, %%ecx\n" /* EFER */ 480 " rdmsr\n" 481 " btsl $8, %%eax\n" /* set LME */ 482 " wrmsr\n" 483 " movl %%cr0, %%eax\n" 484 " btsl $31, %%eax\n" /* set PG */ 485 " movl %%eax, %%cr0\n" 486 " ljmpl %[cs64], $6f\n" /* back to long mode */ 487 ".code64\n\t" 488 "6:\n" 
489 " vmmcall\n" 490 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16), 491 [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64) 492 : "rax", "rbx", "rcx", "rdx", "memory"); 493 } 494 495 static bool mode_switch_finished(struct svm_test *test) 496 { 497 u64 cr0, cr4, efer; 498 499 cr0 = vmcb->save.cr0; 500 cr4 = vmcb->save.cr4; 501 efer = vmcb->save.efer; 502 503 /* Only expect VMMCALL intercepts */ 504 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) 505 return true; 506 507 /* Jump over VMMCALL instruction */ 508 vmcb->save.rip += 3; 509 510 /* Do sanity checks */ 511 switch (test->scratch) { 512 case 0: 513 /* Test should be in real mode now - check for this */ 514 if ((cr0 & 0x80000001) || /* CR0.PG, CR0.PE */ 515 (cr4 & 0x00000020) || /* CR4.PAE */ 516 (efer & 0x00000500)) /* EFER.LMA, EFER.LME */ 517 return true; 518 break; 519 case 2: 520 /* Test should be back in long-mode now - check for this */ 521 if (((cr0 & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */ 522 ((cr4 & 0x00000020) != 0x00000020) || /* CR4.PAE */ 523 ((efer & 0x00000500) != 0x00000500)) /* EFER.LMA, EFER.LME */ 524 return true; 525 break; 526 } 527 528 /* one step forward */ 529 test->scratch += 1; 530 531 return test->scratch == 2; 532 } 533 534 static bool check_mode_switch(struct svm_test *test) 535 { 536 return test->scratch == 2; 537 } 538 539 extern u8 *io_bitmap; 540 541 static void prepare_ioio(struct svm_test *test) 542 { 543 vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT); 544 test->scratch = 0; 545 memset(io_bitmap, 0, 8192); 546 io_bitmap[8192] = 0xFF; 547 } 548 549 static void test_ioio(struct svm_test *test) 550 { 551 // stage 0, test IO pass 552 inb(0x5000); 553 outb(0x0, 0x5000); 554 if (get_test_stage(test) != 0) 555 goto fail; 556 557 // test IO width, in/out 558 io_bitmap[0] = 0xFF; 559 inc_test_stage(test); 560 inb(0x0); 561 if (get_test_stage(test) != 2) 562 goto fail; 563 564 outw(0x0, 0x0); 565 if (get_test_stage(test) != 3) 566 goto fail; 567 568 inl(0x0); 569 if (get_test_stage(test) != 4) 570 goto fail; 571 572 // test low/high IO port 573 io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 574 inb(0x5000); 575 if (get_test_stage(test) != 5) 576 goto fail; 577 578 io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8)); 579 inw(0x9000); 580 if (get_test_stage(test) != 6) 581 goto fail; 582 583 // test partial pass 584 io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 585 inl(0x4FFF); 586 if (get_test_stage(test) != 7) 587 goto fail; 588 589 // test across pages 590 inc_test_stage(test); 591 inl(0x7FFF); 592 if (get_test_stage(test) != 8) 593 goto fail; 594 595 inc_test_stage(test); 596 io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8); 597 inl(0x7FFF); 598 if (get_test_stage(test) != 10) 599 goto fail; 600 601 io_bitmap[0] = 0; 602 inl(0xFFFF); 603 if (get_test_stage(test) != 11) 604 goto fail; 605 606 io_bitmap[0] = 0xFF; 607 io_bitmap[8192] = 0; 608 inl(0xFFFF); 609 inc_test_stage(test); 610 if (get_test_stage(test) != 12) 611 goto fail; 612 613 return; 614 615 fail: 616 report_fail("stage %d", get_test_stage(test)); 617 test->scratch = -1; 618 } 619 620 static bool ioio_finished(struct svm_test *test) 621 { 622 unsigned port, size; 623 624 /* Only expect IOIO intercepts */ 625 if (vmcb->control.exit_code == SVM_EXIT_VMMCALL) 626 return true; 627 628 if (vmcb->control.exit_code != SVM_EXIT_IOIO) 629 return true; 630 631 /* one step forward */ 632 test->scratch += 1; 633 634 port = vmcb->control.exit_info_1 >> 16; 635 size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7; 636 637 while (size--) { 638 
io_bitmap[port / 8] &= ~(1 << (port & 7)); 639 port++; 640 } 641 642 return false; 643 } 644 645 static bool check_ioio(struct svm_test *test) 646 { 647 memset(io_bitmap, 0, 8193); 648 return test->scratch != -1; 649 } 650 651 static void prepare_asid_zero(struct svm_test *test) 652 { 653 vmcb->control.asid = 0; 654 } 655 656 static void test_asid_zero(struct svm_test *test) 657 { 658 asm volatile ("vmmcall\n\t"); 659 } 660 661 static bool check_asid_zero(struct svm_test *test) 662 { 663 return vmcb->control.exit_code == SVM_EXIT_ERR; 664 } 665 666 static void sel_cr0_bug_prepare(struct svm_test *test) 667 { 668 vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0); 669 } 670 671 static bool sel_cr0_bug_finished(struct svm_test *test) 672 { 673 return true; 674 } 675 676 static void sel_cr0_bug_test(struct svm_test *test) 677 { 678 unsigned long cr0; 679 680 /* read cr0, clear CD, and write back */ 681 cr0 = read_cr0(); 682 cr0 |= (1UL << 30); 683 write_cr0(cr0); 684 685 /* 686 * If we are here the test failed, not sure what to do now because we 687 * are not in guest-mode anymore so we can't trigger an intercept. 688 * Trigger a tripple-fault for now. 689 */ 690 report_fail("sel_cr0 test. Can not recover from this - exiting"); 691 exit(report_summary()); 692 } 693 694 static bool sel_cr0_bug_check(struct svm_test *test) 695 { 696 return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE; 697 } 698 699 #define TSC_ADJUST_VALUE (1ll << 32) 700 #define TSC_OFFSET_VALUE (~0ull << 48) 701 static bool ok; 702 703 static bool tsc_adjust_supported(void) 704 { 705 return this_cpu_has(X86_FEATURE_TSC_ADJUST); 706 } 707 708 static void tsc_adjust_prepare(struct svm_test *test) 709 { 710 default_prepare(test); 711 vmcb->control.tsc_offset = TSC_OFFSET_VALUE; 712 713 wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE); 714 int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 715 ok = adjust == -TSC_ADJUST_VALUE; 716 } 717 718 static void tsc_adjust_test(struct svm_test *test) 719 { 720 int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 721 ok &= adjust == -TSC_ADJUST_VALUE; 722 723 uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; 724 wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); 725 726 adjust = rdmsr(MSR_IA32_TSC_ADJUST); 727 ok &= adjust <= -2 * TSC_ADJUST_VALUE; 728 729 uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE; 730 ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE; 731 732 uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE; 733 ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE; 734 } 735 736 static bool tsc_adjust_check(struct svm_test *test) 737 { 738 int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST); 739 740 wrmsr(MSR_IA32_TSC_ADJUST, 0); 741 return ok && adjust <= -2 * TSC_ADJUST_VALUE; 742 } 743 744 745 static u64 guest_tsc_delay_value; 746 /* number of bits to shift tsc right for stable result */ 747 #define TSC_SHIFT 24 748 #define TSC_SCALE_ITERATIONS 10 749 750 static void svm_tsc_scale_guest(struct svm_test *test) 751 { 752 u64 start_tsc = rdtsc(); 753 754 while (rdtsc() - start_tsc < guest_tsc_delay_value) 755 cpu_relax(); 756 } 757 758 static void svm_tsc_scale_run_testcase(u64 duration, 759 double tsc_scale, u64 tsc_offset) 760 { 761 u64 start_tsc, actual_duration; 762 763 guest_tsc_delay_value = (duration << TSC_SHIFT) * tsc_scale; 764 765 test_set_guest(svm_tsc_scale_guest); 766 vmcb->control.tsc_offset = tsc_offset; 767 wrmsr(MSR_AMD64_TSC_RATIO, (u64)(tsc_scale * (1ULL << 32))); 768 769 start_tsc = rdtsc(); 770 771 if (svm_vmrun() != SVM_EXIT_VMMCALL) 
772 report_fail("unexpected vm exit code 0x%x", vmcb->control.exit_code); 773 774 actual_duration = (rdtsc() - start_tsc) >> TSC_SHIFT; 775 776 report(duration == actual_duration, "tsc delay (expected: %lu, actual: %lu)", 777 duration, actual_duration); 778 } 779 780 static void svm_tsc_scale_test(void) 781 { 782 int i; 783 784 if (!tsc_scale_supported()) { 785 report_skip("TSC scale not supported in the guest"); 786 return; 787 } 788 789 report(rdmsr(MSR_AMD64_TSC_RATIO) == TSC_RATIO_DEFAULT, 790 "initial TSC scale ratio"); 791 792 for (i = 0 ; i < TSC_SCALE_ITERATIONS; i++) { 793 794 double tsc_scale = (double)(rdrand() % 100 + 1) / 10; 795 int duration = rdrand() % 50 + 1; 796 u64 tsc_offset = rdrand(); 797 798 report_info("duration=%d, tsc_scale=%d, tsc_offset=%ld", 799 duration, (int)(tsc_scale * 100), tsc_offset); 800 801 svm_tsc_scale_run_testcase(duration, tsc_scale, tsc_offset); 802 } 803 804 svm_tsc_scale_run_testcase(50, 255, rdrand()); 805 svm_tsc_scale_run_testcase(50, 0.0001, rdrand()); 806 } 807 808 static void latency_prepare(struct svm_test *test) 809 { 810 default_prepare(test); 811 runs = LATENCY_RUNS; 812 latvmrun_min = latvmexit_min = -1ULL; 813 latvmrun_max = latvmexit_max = 0; 814 vmrun_sum = vmexit_sum = 0; 815 tsc_start = rdtsc(); 816 } 817 818 static void latency_test(struct svm_test *test) 819 { 820 u64 cycles; 821 822 start: 823 tsc_end = rdtsc(); 824 825 cycles = tsc_end - tsc_start; 826 827 if (cycles > latvmrun_max) 828 latvmrun_max = cycles; 829 830 if (cycles < latvmrun_min) 831 latvmrun_min = cycles; 832 833 vmrun_sum += cycles; 834 835 tsc_start = rdtsc(); 836 837 asm volatile ("vmmcall" : : : "memory"); 838 goto start; 839 } 840 841 static bool latency_finished(struct svm_test *test) 842 { 843 u64 cycles; 844 845 tsc_end = rdtsc(); 846 847 cycles = tsc_end - tsc_start; 848 849 if (cycles > latvmexit_max) 850 latvmexit_max = cycles; 851 852 if (cycles < latvmexit_min) 853 latvmexit_min = cycles; 854 855 vmexit_sum += cycles; 856 857 vmcb->save.rip += 3; 858 859 runs -= 1; 860 861 tsc_end = rdtsc(); 862 863 return runs == 0; 864 } 865 866 static bool latency_finished_clean(struct svm_test *test) 867 { 868 vmcb->control.clean = VMCB_CLEAN_ALL; 869 return latency_finished(test); 870 } 871 872 static bool latency_check(struct svm_test *test) 873 { 874 printf(" Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max, 875 latvmrun_min, vmrun_sum / LATENCY_RUNS); 876 printf(" Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max, 877 latvmexit_min, vmexit_sum / LATENCY_RUNS); 878 return true; 879 } 880 881 static void lat_svm_insn_prepare(struct svm_test *test) 882 { 883 default_prepare(test); 884 runs = LATENCY_RUNS; 885 latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL; 886 latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0; 887 vmload_sum = vmsave_sum = stgi_sum = clgi_sum; 888 } 889 890 static bool lat_svm_insn_finished(struct svm_test *test) 891 { 892 u64 vmcb_phys = virt_to_phys(vmcb); 893 u64 cycles; 894 895 for ( ; runs != 0; runs--) { 896 tsc_start = rdtsc(); 897 asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory"); 898 cycles = rdtsc() - tsc_start; 899 if (cycles > latvmload_max) 900 latvmload_max = cycles; 901 if (cycles < latvmload_min) 902 latvmload_min = cycles; 903 vmload_sum += cycles; 904 905 tsc_start = rdtsc(); 906 asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory"); 907 cycles = rdtsc() - tsc_start; 908 if (cycles > latvmsave_max) 909 latvmsave_max = cycles; 910 if (cycles < latvmsave_min) 
911 latvmsave_min = cycles; 912 vmsave_sum += cycles; 913 914 tsc_start = rdtsc(); 915 asm volatile("stgi\n\t"); 916 cycles = rdtsc() - tsc_start; 917 if (cycles > latstgi_max) 918 latstgi_max = cycles; 919 if (cycles < latstgi_min) 920 latstgi_min = cycles; 921 stgi_sum += cycles; 922 923 tsc_start = rdtsc(); 924 asm volatile("clgi\n\t"); 925 cycles = rdtsc() - tsc_start; 926 if (cycles > latclgi_max) 927 latclgi_max = cycles; 928 if (cycles < latclgi_min) 929 latclgi_min = cycles; 930 clgi_sum += cycles; 931 } 932 933 tsc_end = rdtsc(); 934 935 return true; 936 } 937 938 static bool lat_svm_insn_check(struct svm_test *test) 939 { 940 printf(" Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max, 941 latvmload_min, vmload_sum / LATENCY_RUNS); 942 printf(" Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max, 943 latvmsave_min, vmsave_sum / LATENCY_RUNS); 944 printf(" Latency STGI: max: %ld min: %ld avg: %ld\n", latstgi_max, 945 latstgi_min, stgi_sum / LATENCY_RUNS); 946 printf(" Latency CLGI: max: %ld min: %ld avg: %ld\n", latclgi_max, 947 latclgi_min, clgi_sum / LATENCY_RUNS); 948 return true; 949 } 950 951 bool pending_event_ipi_fired; 952 bool pending_event_guest_run; 953 954 static void pending_event_ipi_isr(isr_regs_t *regs) 955 { 956 pending_event_ipi_fired = true; 957 eoi(); 958 } 959 960 static void pending_event_prepare(struct svm_test *test) 961 { 962 int ipi_vector = 0xf1; 963 964 default_prepare(test); 965 966 pending_event_ipi_fired = false; 967 968 handle_irq(ipi_vector, pending_event_ipi_isr); 969 970 pending_event_guest_run = false; 971 972 vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 973 vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 974 975 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 976 APIC_DM_FIXED | ipi_vector, 0); 977 978 set_test_stage(test, 0); 979 } 980 981 static void pending_event_test(struct svm_test *test) 982 { 983 pending_event_guest_run = true; 984 } 985 986 static bool pending_event_finished(struct svm_test *test) 987 { 988 switch (get_test_stage(test)) { 989 case 0: 990 if (vmcb->control.exit_code != SVM_EXIT_INTR) { 991 report_fail("VMEXIT not due to pending interrupt. 
Exit reason 0x%x", 992 vmcb->control.exit_code); 993 return true; 994 } 995 996 vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 997 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 998 999 if (pending_event_guest_run) { 1000 report_fail("Guest ran before host received IPI\n"); 1001 return true; 1002 } 1003 1004 irq_enable(); 1005 asm volatile ("nop"); 1006 irq_disable(); 1007 1008 if (!pending_event_ipi_fired) { 1009 report_fail("Pending interrupt not dispatched after IRQ enabled\n"); 1010 return true; 1011 } 1012 break; 1013 1014 case 1: 1015 if (!pending_event_guest_run) { 1016 report_fail("Guest did not resume when no interrupt\n"); 1017 return true; 1018 } 1019 break; 1020 } 1021 1022 inc_test_stage(test); 1023 1024 return get_test_stage(test) == 2; 1025 } 1026 1027 static bool pending_event_check(struct svm_test *test) 1028 { 1029 return get_test_stage(test) == 2; 1030 } 1031 1032 static void pending_event_cli_prepare(struct svm_test *test) 1033 { 1034 default_prepare(test); 1035 1036 pending_event_ipi_fired = false; 1037 1038 handle_irq(0xf1, pending_event_ipi_isr); 1039 1040 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1041 APIC_DM_FIXED | 0xf1, 0); 1042 1043 set_test_stage(test, 0); 1044 } 1045 1046 static void pending_event_cli_prepare_gif_clear(struct svm_test *test) 1047 { 1048 asm("cli"); 1049 } 1050 1051 static void pending_event_cli_test(struct svm_test *test) 1052 { 1053 if (pending_event_ipi_fired == true) { 1054 set_test_stage(test, -1); 1055 report_fail("Interrupt preceeded guest"); 1056 vmmcall(); 1057 } 1058 1059 /* VINTR_MASKING is zero. This should cause the IPI to fire. */ 1060 irq_enable(); 1061 asm volatile ("nop"); 1062 irq_disable(); 1063 1064 if (pending_event_ipi_fired != true) { 1065 set_test_stage(test, -1); 1066 report_fail("Interrupt not triggered by guest"); 1067 } 1068 1069 vmmcall(); 1070 1071 /* 1072 * Now VINTR_MASKING=1, but no interrupt is pending so 1073 * the VINTR interception should be clear in VMCB02. Check 1074 * that L0 did not leave a stale VINTR in the VMCB. 1075 */ 1076 irq_enable(); 1077 asm volatile ("nop"); 1078 irq_disable(); 1079 } 1080 1081 static bool pending_event_cli_finished(struct svm_test *test) 1082 { 1083 if ( vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1084 report_fail("VM_EXIT return to host is not EXIT_VMMCALL exit reason 0x%x", 1085 vmcb->control.exit_code); 1086 return true; 1087 } 1088 1089 switch (get_test_stage(test)) { 1090 case 0: 1091 vmcb->save.rip += 3; 1092 1093 pending_event_ipi_fired = false; 1094 1095 vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1096 1097 /* Now entering again with VINTR_MASKING=1. 
*/ 1098 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1099 APIC_DM_FIXED | 0xf1, 0); 1100 1101 break; 1102 1103 case 1: 1104 if (pending_event_ipi_fired == true) { 1105 report_fail("Interrupt triggered by guest"); 1106 return true; 1107 } 1108 1109 irq_enable(); 1110 asm volatile ("nop"); 1111 irq_disable(); 1112 1113 if (pending_event_ipi_fired != true) { 1114 report_fail("Interrupt not triggered by host"); 1115 return true; 1116 } 1117 1118 break; 1119 1120 default: 1121 return true; 1122 } 1123 1124 inc_test_stage(test); 1125 1126 return get_test_stage(test) == 2; 1127 } 1128 1129 static bool pending_event_cli_check(struct svm_test *test) 1130 { 1131 return get_test_stage(test) == 2; 1132 } 1133 1134 #define TIMER_VECTOR 222 1135 1136 static volatile bool timer_fired; 1137 1138 static void timer_isr(isr_regs_t *regs) 1139 { 1140 timer_fired = true; 1141 apic_write(APIC_EOI, 0); 1142 } 1143 1144 static void interrupt_prepare(struct svm_test *test) 1145 { 1146 default_prepare(test); 1147 handle_irq(TIMER_VECTOR, timer_isr); 1148 timer_fired = false; 1149 set_test_stage(test, 0); 1150 } 1151 1152 static void interrupt_test(struct svm_test *test) 1153 { 1154 long long start, loops; 1155 1156 apic_write(APIC_LVTT, TIMER_VECTOR); 1157 irq_enable(); 1158 apic_write(APIC_TMICT, 1); //Timer Initial Count Register 0x380 one-shot 1159 for (loops = 0; loops < 10000000 && !timer_fired; loops++) 1160 asm volatile ("nop"); 1161 1162 report(timer_fired, "direct interrupt while running guest"); 1163 1164 if (!timer_fired) { 1165 set_test_stage(test, -1); 1166 vmmcall(); 1167 } 1168 1169 apic_write(APIC_TMICT, 0); 1170 irq_disable(); 1171 vmmcall(); 1172 1173 timer_fired = false; 1174 apic_write(APIC_TMICT, 1); 1175 for (loops = 0; loops < 10000000 && !timer_fired; loops++) 1176 asm volatile ("nop"); 1177 1178 report(timer_fired, "intercepted interrupt while running guest"); 1179 1180 if (!timer_fired) { 1181 set_test_stage(test, -1); 1182 vmmcall(); 1183 } 1184 1185 irq_enable(); 1186 apic_write(APIC_TMICT, 0); 1187 irq_disable(); 1188 1189 timer_fired = false; 1190 start = rdtsc(); 1191 apic_write(APIC_TMICT, 1000000); 1192 safe_halt(); 1193 1194 report(rdtsc() - start > 10000 && timer_fired, 1195 "direct interrupt + hlt"); 1196 1197 if (!timer_fired) { 1198 set_test_stage(test, -1); 1199 vmmcall(); 1200 } 1201 1202 apic_write(APIC_TMICT, 0); 1203 irq_disable(); 1204 vmmcall(); 1205 1206 timer_fired = false; 1207 start = rdtsc(); 1208 apic_write(APIC_TMICT, 1000000); 1209 asm volatile ("hlt"); 1210 1211 report(rdtsc() - start > 10000 && timer_fired, 1212 "intercepted interrupt + hlt"); 1213 1214 if (!timer_fired) { 1215 set_test_stage(test, -1); 1216 vmmcall(); 1217 } 1218 1219 apic_write(APIC_TMICT, 0); 1220 irq_disable(); 1221 } 1222 1223 static bool interrupt_finished(struct svm_test *test) 1224 { 1225 switch (get_test_stage(test)) { 1226 case 0: 1227 case 2: 1228 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1229 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1230 vmcb->control.exit_code); 1231 return true; 1232 } 1233 vmcb->save.rip += 3; 1234 1235 vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1236 vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1237 break; 1238 1239 case 1: 1240 case 3: 1241 if (vmcb->control.exit_code != SVM_EXIT_INTR) { 1242 report_fail("VMEXIT not due to intr intercept. 
Exit reason 0x%x", 1243 vmcb->control.exit_code); 1244 return true; 1245 } 1246 1247 irq_enable(); 1248 asm volatile ("nop"); 1249 irq_disable(); 1250 1251 vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1252 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 1253 break; 1254 1255 case 4: 1256 break; 1257 1258 default: 1259 return true; 1260 } 1261 1262 inc_test_stage(test); 1263 1264 return get_test_stage(test) == 5; 1265 } 1266 1267 static bool interrupt_check(struct svm_test *test) 1268 { 1269 return get_test_stage(test) == 5; 1270 } 1271 1272 static volatile bool nmi_fired; 1273 1274 static void nmi_handler(struct ex_regs *regs) 1275 { 1276 nmi_fired = true; 1277 } 1278 1279 static void nmi_prepare(struct svm_test *test) 1280 { 1281 default_prepare(test); 1282 nmi_fired = false; 1283 handle_exception(NMI_VECTOR, nmi_handler); 1284 set_test_stage(test, 0); 1285 } 1286 1287 static void nmi_test(struct svm_test *test) 1288 { 1289 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1290 1291 report(nmi_fired, "direct NMI while running guest"); 1292 1293 if (!nmi_fired) 1294 set_test_stage(test, -1); 1295 1296 vmmcall(); 1297 1298 nmi_fired = false; 1299 1300 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1301 1302 if (!nmi_fired) { 1303 report(nmi_fired, "intercepted pending NMI not dispatched"); 1304 set_test_stage(test, -1); 1305 } 1306 1307 } 1308 1309 static bool nmi_finished(struct svm_test *test) 1310 { 1311 switch (get_test_stage(test)) { 1312 case 0: 1313 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1314 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1315 vmcb->control.exit_code); 1316 return true; 1317 } 1318 vmcb->save.rip += 3; 1319 1320 vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 1321 break; 1322 1323 case 1: 1324 if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1325 report_fail("VMEXIT not due to NMI intercept. 
Exit reason 0x%x", 1326 vmcb->control.exit_code); 1327 return true; 1328 } 1329 1330 report_pass("NMI intercept while running guest"); 1331 break; 1332 1333 case 2: 1334 break; 1335 1336 default: 1337 return true; 1338 } 1339 1340 inc_test_stage(test); 1341 1342 return get_test_stage(test) == 3; 1343 } 1344 1345 static bool nmi_check(struct svm_test *test) 1346 { 1347 return get_test_stage(test) == 3; 1348 } 1349 1350 #define NMI_DELAY 100000000ULL 1351 1352 static void nmi_message_thread(void *_test) 1353 { 1354 struct svm_test *test = _test; 1355 1356 while (get_test_stage(test) != 1) 1357 pause(); 1358 1359 delay(NMI_DELAY); 1360 1361 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 1362 1363 while (get_test_stage(test) != 2) 1364 pause(); 1365 1366 delay(NMI_DELAY); 1367 1368 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 1369 } 1370 1371 static void nmi_hlt_test(struct svm_test *test) 1372 { 1373 long long start; 1374 1375 on_cpu_async(1, nmi_message_thread, test); 1376 1377 start = rdtsc(); 1378 1379 set_test_stage(test, 1); 1380 1381 asm volatile ("hlt"); 1382 1383 report((rdtsc() - start > NMI_DELAY) && nmi_fired, 1384 "direct NMI + hlt"); 1385 1386 if (!nmi_fired) 1387 set_test_stage(test, -1); 1388 1389 nmi_fired = false; 1390 1391 vmmcall(); 1392 1393 start = rdtsc(); 1394 1395 set_test_stage(test, 2); 1396 1397 asm volatile ("hlt"); 1398 1399 report((rdtsc() - start > NMI_DELAY) && nmi_fired, 1400 "intercepted NMI + hlt"); 1401 1402 if (!nmi_fired) { 1403 report(nmi_fired, "intercepted pending NMI not dispatched"); 1404 set_test_stage(test, -1); 1405 vmmcall(); 1406 } 1407 1408 set_test_stage(test, 3); 1409 } 1410 1411 static bool nmi_hlt_finished(struct svm_test *test) 1412 { 1413 switch (get_test_stage(test)) { 1414 case 1: 1415 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1416 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1417 vmcb->control.exit_code); 1418 return true; 1419 } 1420 vmcb->save.rip += 3; 1421 1422 vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 1423 break; 1424 1425 case 2: 1426 if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1427 report_fail("VMEXIT not due to NMI intercept. Exit reason 0x%x", 1428 vmcb->control.exit_code); 1429 return true; 1430 } 1431 1432 report_pass("NMI intercept while running guest"); 1433 break; 1434 1435 case 3: 1436 break; 1437 1438 default: 1439 return true; 1440 } 1441 1442 return get_test_stage(test) == 3; 1443 } 1444 1445 static bool nmi_hlt_check(struct svm_test *test) 1446 { 1447 return get_test_stage(test) == 3; 1448 } 1449 1450 static volatile int count_exc = 0; 1451 1452 static void my_isr(struct ex_regs *r) 1453 { 1454 count_exc++; 1455 } 1456 1457 static void exc_inject_prepare(struct svm_test *test) 1458 { 1459 default_prepare(test); 1460 handle_exception(DE_VECTOR, my_isr); 1461 handle_exception(NMI_VECTOR, my_isr); 1462 } 1463 1464 1465 static void exc_inject_test(struct svm_test *test) 1466 { 1467 asm volatile ("vmmcall\n\tvmmcall\n\t"); 1468 } 1469 1470 static bool exc_inject_finished(struct svm_test *test) 1471 { 1472 switch (get_test_stage(test)) { 1473 case 0: 1474 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1475 report_fail("VMEXIT not due to vmmcall. 
Exit reason 0x%x", 1476 vmcb->control.exit_code); 1477 return true; 1478 } 1479 vmcb->save.rip += 3; 1480 vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 1481 break; 1482 1483 case 1: 1484 if (vmcb->control.exit_code != SVM_EXIT_ERR) { 1485 report_fail("VMEXIT not due to error. Exit reason 0x%x", 1486 vmcb->control.exit_code); 1487 return true; 1488 } 1489 report(count_exc == 0, "exception with vector 2 not injected"); 1490 vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 1491 break; 1492 1493 case 2: 1494 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1495 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1496 vmcb->control.exit_code); 1497 return true; 1498 } 1499 vmcb->save.rip += 3; 1500 report(count_exc == 1, "divide overflow exception injected"); 1501 report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared"); 1502 break; 1503 1504 default: 1505 return true; 1506 } 1507 1508 inc_test_stage(test); 1509 1510 return get_test_stage(test) == 3; 1511 } 1512 1513 static bool exc_inject_check(struct svm_test *test) 1514 { 1515 return count_exc == 1 && get_test_stage(test) == 3; 1516 } 1517 1518 static volatile bool virq_fired; 1519 1520 static void virq_isr(isr_regs_t *regs) 1521 { 1522 virq_fired = true; 1523 } 1524 1525 static void virq_inject_prepare(struct svm_test *test) 1526 { 1527 handle_irq(0xf1, virq_isr); 1528 default_prepare(test); 1529 vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 1530 (0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority 1531 vmcb->control.int_vector = 0xf1; 1532 virq_fired = false; 1533 set_test_stage(test, 0); 1534 } 1535 1536 static void virq_inject_test(struct svm_test *test) 1537 { 1538 if (virq_fired) { 1539 report_fail("virtual interrupt fired before L2 sti"); 1540 set_test_stage(test, -1); 1541 vmmcall(); 1542 } 1543 1544 irq_enable(); 1545 asm volatile ("nop"); 1546 irq_disable(); 1547 1548 if (!virq_fired) { 1549 report_fail("virtual interrupt not fired after L2 sti"); 1550 set_test_stage(test, -1); 1551 } 1552 1553 vmmcall(); 1554 1555 if (virq_fired) { 1556 report_fail("virtual interrupt fired before L2 sti after VINTR intercept"); 1557 set_test_stage(test, -1); 1558 vmmcall(); 1559 } 1560 1561 irq_enable(); 1562 asm volatile ("nop"); 1563 irq_disable(); 1564 1565 if (!virq_fired) { 1566 report_fail("virtual interrupt not fired after return from VINTR intercept"); 1567 set_test_stage(test, -1); 1568 } 1569 1570 vmmcall(); 1571 1572 irq_enable(); 1573 asm volatile ("nop"); 1574 irq_disable(); 1575 1576 if (virq_fired) { 1577 report_fail("virtual interrupt fired when V_IRQ_PRIO less than V_TPR"); 1578 set_test_stage(test, -1); 1579 } 1580 1581 vmmcall(); 1582 vmmcall(); 1583 } 1584 1585 static bool virq_inject_finished(struct svm_test *test) 1586 { 1587 vmcb->save.rip += 3; 1588 1589 switch (get_test_stage(test)) { 1590 case 0: 1591 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1592 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1593 vmcb->control.exit_code); 1594 return true; 1595 } 1596 if (vmcb->control.int_ctl & V_IRQ_MASK) { 1597 report_fail("V_IRQ not cleared on VMEXIT after firing"); 1598 return true; 1599 } 1600 virq_fired = false; 1601 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 1602 vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 1603 (0x0f << V_INTR_PRIO_SHIFT); 1604 break; 1605 1606 case 1: 1607 if (vmcb->control.exit_code != SVM_EXIT_VINTR) { 1608 report_fail("VMEXIT not due to vintr. 
Exit reason 0x%x", 1609 vmcb->control.exit_code); 1610 return true; 1611 } 1612 if (virq_fired) { 1613 report_fail("V_IRQ fired before SVM_EXIT_VINTR"); 1614 return true; 1615 } 1616 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); 1617 break; 1618 1619 case 2: 1620 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1621 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1622 vmcb->control.exit_code); 1623 return true; 1624 } 1625 virq_fired = false; 1626 // Set irq to lower priority 1627 vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 1628 (0x08 << V_INTR_PRIO_SHIFT); 1629 // Raise guest TPR 1630 vmcb->control.int_ctl |= 0x0a & V_TPR_MASK; 1631 break; 1632 1633 case 3: 1634 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1635 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1636 vmcb->control.exit_code); 1637 return true; 1638 } 1639 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 1640 break; 1641 1642 case 4: 1643 // INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR 1644 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1645 report_fail("VMEXIT not due to vmmcall. Exit reason 0x%x", 1646 vmcb->control.exit_code); 1647 return true; 1648 } 1649 break; 1650 1651 default: 1652 return true; 1653 } 1654 1655 inc_test_stage(test); 1656 1657 return get_test_stage(test) == 5; 1658 } 1659 1660 static bool virq_inject_check(struct svm_test *test) 1661 { 1662 return get_test_stage(test) == 5; 1663 } 1664 1665 /* 1666 * Detect nested guest RIP corruption as explained in kernel commit 1667 * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73 1668 * 1669 * In the assembly loop below 'ins' is executed while IO instructions 1670 * are not intercepted; the instruction is emulated by L0. 1671 * 1672 * At the same time we are getting interrupts from the local APIC timer, 1673 * and we do intercept them in L1 1674 * 1675 * If the interrupt happens on the insb instruction, L0 will VMexit, emulate 1676 * the insb instruction and then it will inject the interrupt to L1 through 1677 * a nested VMexit. Due to a bug, it would leave pre-emulation values of RIP, 1678 * RAX and RSP in the VMCB. 1679 * 1680 * In our intercept handler we detect the bug by checking that RIP is that of 1681 * the insb instruction, but its memory operand has already been written. 1682 * This means that insb was already executed. 
1683 */ 1684 1685 static volatile int isr_cnt = 0; 1686 static volatile uint8_t io_port_var = 0xAA; 1687 extern const char insb_instruction_label[]; 1688 1689 static void reg_corruption_isr(isr_regs_t *regs) 1690 { 1691 isr_cnt++; 1692 apic_write(APIC_EOI, 0); 1693 } 1694 1695 static void reg_corruption_prepare(struct svm_test *test) 1696 { 1697 default_prepare(test); 1698 set_test_stage(test, 0); 1699 1700 vmcb->control.int_ctl = V_INTR_MASKING_MASK; 1701 vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1702 1703 handle_irq(TIMER_VECTOR, reg_corruption_isr); 1704 1705 /* set local APIC to inject external interrupts */ 1706 apic_write(APIC_TMICT, 0); 1707 apic_write(APIC_TDCR, 0); 1708 apic_write(APIC_LVTT, TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC); 1709 apic_write(APIC_TMICT, 1000); 1710 } 1711 1712 static void reg_corruption_test(struct svm_test *test) 1713 { 1714 /* this is endless loop, which is interrupted by the timer interrupt */ 1715 asm volatile ( 1716 "1:\n\t" 1717 "movw $0x4d0, %%dx\n\t" // IO port 1718 "lea %[io_port_var], %%rdi\n\t" 1719 "movb $0xAA, %[io_port_var]\n\t" 1720 "insb_instruction_label:\n\t" 1721 "insb\n\t" 1722 "jmp 1b\n\t" 1723 1724 : [io_port_var] "=m" (io_port_var) 1725 : /* no inputs*/ 1726 : "rdx", "rdi" 1727 ); 1728 } 1729 1730 static bool reg_corruption_finished(struct svm_test *test) 1731 { 1732 if (isr_cnt == 10000) { 1733 report_pass("No RIP corruption detected after %d timer interrupts", 1734 isr_cnt); 1735 set_test_stage(test, 1); 1736 goto cleanup; 1737 } 1738 1739 if (vmcb->control.exit_code == SVM_EXIT_INTR) { 1740 1741 void* guest_rip = (void*)vmcb->save.rip; 1742 1743 irq_enable(); 1744 asm volatile ("nop"); 1745 irq_disable(); 1746 1747 if (guest_rip == insb_instruction_label && io_port_var != 0xAA) { 1748 report_fail("RIP corruption detected after %d timer interrupts", 1749 isr_cnt); 1750 goto cleanup; 1751 } 1752 1753 } 1754 return false; 1755 cleanup: 1756 apic_write(APIC_LVTT, APIC_LVT_TIMER_MASK); 1757 apic_write(APIC_TMICT, 0); 1758 return true; 1759 1760 } 1761 1762 static bool reg_corruption_check(struct svm_test *test) 1763 { 1764 return get_test_stage(test) == 1; 1765 } 1766 1767 static void get_tss_entry(void *data) 1768 { 1769 *((gdt_entry_t **)data) = get_tss_descr(); 1770 } 1771 1772 static int orig_cpu_count; 1773 1774 static void init_startup_prepare(struct svm_test *test) 1775 { 1776 gdt_entry_t *tss_entry; 1777 int i; 1778 1779 on_cpu(1, get_tss_entry, &tss_entry); 1780 1781 orig_cpu_count = cpu_online_count; 1782 1783 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 1784 id_map[1]); 1785 1786 delay(100000000ULL); 1787 1788 --cpu_online_count; 1789 1790 tss_entry->type &= ~DESC_BUSY; 1791 1792 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_STARTUP, id_map[1]); 1793 1794 for (i = 0; i < 5 && cpu_online_count < orig_cpu_count; i++) 1795 delay(100000000ULL); 1796 } 1797 1798 static bool init_startup_finished(struct svm_test *test) 1799 { 1800 return true; 1801 } 1802 1803 static bool init_startup_check(struct svm_test *test) 1804 { 1805 return cpu_online_count == orig_cpu_count; 1806 } 1807 1808 static volatile bool init_intercept; 1809 1810 static void init_intercept_prepare(struct svm_test *test) 1811 { 1812 init_intercept = false; 1813 vmcb->control.intercept |= (1ULL << INTERCEPT_INIT); 1814 } 1815 1816 static void init_intercept_test(struct svm_test *test) 1817 { 1818 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT, 0); 1819 } 1820 1821 static bool 
init_intercept_finished(struct svm_test *test) 1822 { 1823 vmcb->save.rip += 3; 1824 1825 if (vmcb->control.exit_code != SVM_EXIT_INIT) { 1826 report_fail("VMEXIT not due to init intercept. Exit reason 0x%x", 1827 vmcb->control.exit_code); 1828 1829 return true; 1830 } 1831 1832 init_intercept = true; 1833 1834 report_pass("INIT to vcpu intercepted"); 1835 1836 return true; 1837 } 1838 1839 static bool init_intercept_check(struct svm_test *test) 1840 { 1841 return init_intercept; 1842 } 1843 1844 /* 1845 * Setting host EFLAGS.TF causes a #DB trap after the VMRUN completes on the 1846 * host side (i.e., after the #VMEXIT from the guest). 1847 * 1848 * Setting host EFLAGS.RF suppresses any potential instruction breakpoint 1849 * match on the VMRUN and completion of the VMRUN instruction clears the 1850 * host EFLAGS.RF bit. 1851 * 1852 * [AMD APM] 1853 */ 1854 static volatile u8 host_rflags_guest_main_flag = 0; 1855 static volatile u8 host_rflags_db_handler_flag = 0; 1856 static volatile bool host_rflags_ss_on_vmrun = false; 1857 static volatile bool host_rflags_vmrun_reached = false; 1858 static volatile bool host_rflags_set_tf = false; 1859 static volatile bool host_rflags_set_rf = false; 1860 static u64 rip_detected; 1861 1862 extern u64 *vmrun_rip; 1863 1864 static void host_rflags_db_handler(struct ex_regs *r) 1865 { 1866 if (host_rflags_ss_on_vmrun) { 1867 if (host_rflags_vmrun_reached) { 1868 if (!host_rflags_set_rf) { 1869 r->rflags &= ~X86_EFLAGS_TF; 1870 rip_detected = r->rip; 1871 } else { 1872 r->rflags |= X86_EFLAGS_RF; 1873 ++host_rflags_db_handler_flag; 1874 } 1875 } else { 1876 if (r->rip == (u64)&vmrun_rip) { 1877 host_rflags_vmrun_reached = true; 1878 1879 if (host_rflags_set_rf) { 1880 host_rflags_guest_main_flag = 0; 1881 rip_detected = r->rip; 1882 r->rflags &= ~X86_EFLAGS_TF; 1883 1884 /* Trigger #DB via debug registers */ 1885 write_dr0((void *)&vmrun_rip); 1886 write_dr7(0x403); 1887 } 1888 } 1889 } 1890 } else { 1891 r->rflags &= ~X86_EFLAGS_TF; 1892 } 1893 } 1894 1895 static void host_rflags_prepare(struct svm_test *test) 1896 { 1897 default_prepare(test); 1898 handle_exception(DB_VECTOR, host_rflags_db_handler); 1899 set_test_stage(test, 0); 1900 } 1901 1902 static void host_rflags_prepare_gif_clear(struct svm_test *test) 1903 { 1904 if (host_rflags_set_tf) 1905 write_rflags(read_rflags() | X86_EFLAGS_TF); 1906 } 1907 1908 static void host_rflags_test(struct svm_test *test) 1909 { 1910 while (1) { 1911 if (get_test_stage(test) > 0) { 1912 if ((host_rflags_set_tf && !host_rflags_ss_on_vmrun && !host_rflags_db_handler_flag) || 1913 (host_rflags_set_rf && host_rflags_db_handler_flag == 1)) 1914 host_rflags_guest_main_flag = 1; 1915 } 1916 1917 if (get_test_stage(test) == 4) 1918 break; 1919 vmmcall(); 1920 } 1921 } 1922 1923 static bool host_rflags_finished(struct svm_test *test) 1924 { 1925 switch (get_test_stage(test)) { 1926 case 0: 1927 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1928 report_fail("Unexpected VMEXIT. Exit reason 0x%x", 1929 vmcb->control.exit_code); 1930 return true; 1931 } 1932 vmcb->save.rip += 3; 1933 /* 1934 * Setting host EFLAGS.TF not immediately before VMRUN, causes 1935 * #DB trap before first guest instruction is executed 1936 */ 1937 host_rflags_set_tf = true; 1938 break; 1939 case 1: 1940 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 1941 host_rflags_guest_main_flag != 1) { 1942 report_fail("Unexpected VMEXIT or #DB handler" 1943 " invoked before guest main. 
Exit reason 0x%x", 1944 vmcb->control.exit_code); 1945 return true; 1946 } 1947 vmcb->save.rip += 3; 1948 /* 1949 * Setting host EFLAGS.TF immediately before VMRUN, causes #DB 1950 * trap after VMRUN completes on the host side (i.e., after 1951 * VMEXIT from guest). 1952 */ 1953 host_rflags_ss_on_vmrun = true; 1954 break; 1955 case 2: 1956 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 1957 rip_detected != (u64)&vmrun_rip + 3) { 1958 report_fail("Unexpected VMEXIT or RIP mismatch." 1959 " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 1960 "%lx", vmcb->control.exit_code, 1961 (u64)&vmrun_rip + 3, rip_detected); 1962 return true; 1963 } 1964 host_rflags_set_rf = true; 1965 host_rflags_guest_main_flag = 0; 1966 host_rflags_vmrun_reached = false; 1967 vmcb->save.rip += 3; 1968 break; 1969 case 3: 1970 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL || 1971 rip_detected != (u64)&vmrun_rip || 1972 host_rflags_guest_main_flag != 1 || 1973 host_rflags_db_handler_flag > 1 || 1974 read_rflags() & X86_EFLAGS_RF) { 1975 report_fail("Unexpected VMEXIT or RIP mismatch or " 1976 "EFLAGS.RF not cleared." 1977 " Exit reason 0x%x, RIP actual: %lx, RIP expected: " 1978 "%lx", vmcb->control.exit_code, 1979 (u64)&vmrun_rip, rip_detected); 1980 return true; 1981 } 1982 host_rflags_set_tf = false; 1983 host_rflags_set_rf = false; 1984 vmcb->save.rip += 3; 1985 break; 1986 default: 1987 return true; 1988 } 1989 inc_test_stage(test); 1990 return get_test_stage(test) == 5; 1991 } 1992 1993 static bool host_rflags_check(struct svm_test *test) 1994 { 1995 return get_test_stage(test) == 4; 1996 } 1997 1998 #define TEST(name) { #name, .v2 = name } 1999 2000 /* 2001 * v2 tests 2002 */ 2003 2004 /* 2005 * Ensure that kvm recalculates the L1 guest's CPUID.01H:ECX.OSXSAVE 2006 * after VM-exit from an L2 guest that sets CR4.OSXSAVE to a different 2007 * value than in L1. 
2008 */ 2009 2010 static void svm_cr4_osxsave_test_guest(struct svm_test *test) 2011 { 2012 write_cr4(read_cr4() & ~X86_CR4_OSXSAVE); 2013 } 2014 2015 static void svm_cr4_osxsave_test(void) 2016 { 2017 if (!this_cpu_has(X86_FEATURE_XSAVE)) { 2018 report_skip("XSAVE not detected"); 2019 return; 2020 } 2021 2022 if (!(read_cr4() & X86_CR4_OSXSAVE)) { 2023 unsigned long cr4 = read_cr4() | X86_CR4_OSXSAVE; 2024 2025 write_cr4(cr4); 2026 vmcb->save.cr4 = cr4; 2027 } 2028 2029 report(cpuid_osxsave(), "CPUID.01H:ECX.XSAVE set before VMRUN"); 2030 2031 test_set_guest(svm_cr4_osxsave_test_guest); 2032 report(svm_vmrun() == SVM_EXIT_VMMCALL, 2033 "svm_cr4_osxsave_test_guest finished with VMMCALL"); 2034 2035 report(cpuid_osxsave(), "CPUID.01H:ECX.XSAVE set after VMRUN"); 2036 } 2037 2038 static void basic_guest_main(struct svm_test *test) 2039 { 2040 } 2041 2042 2043 #define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val, \ 2044 resv_mask) \ 2045 { \ 2046 u64 tmp, mask; \ 2047 int i; \ 2048 \ 2049 for (i = start; i <= end; i = i + inc) { \ 2050 mask = 1ull << i; \ 2051 if (!(mask & resv_mask)) \ 2052 continue; \ 2053 tmp = val | mask; \ 2054 reg = tmp; \ 2055 report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx", \ 2056 str_name, end, start, tmp); \ 2057 } \ 2058 } 2059 2060 #define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask, \ 2061 exit_code, test_name) \ 2062 { \ 2063 u64 tmp, mask; \ 2064 u32 r; \ 2065 int i; \ 2066 \ 2067 for (i = start; i <= end; i = i + inc) { \ 2068 mask = 1ull << i; \ 2069 if (!(mask & resv_mask)) \ 2070 continue; \ 2071 tmp = val | mask; \ 2072 switch (cr) { \ 2073 case 0: \ 2074 vmcb->save.cr0 = tmp; \ 2075 break; \ 2076 case 3: \ 2077 vmcb->save.cr3 = tmp; \ 2078 break; \ 2079 case 4: \ 2080 vmcb->save.cr4 = tmp; \ 2081 } \ 2082 r = svm_vmrun(); \ 2083 report(r == exit_code, "Test CR%d %s%d:%d: %lx, wanted exit 0x%x, got 0x%x", \ 2084 cr, test_name, end, start, tmp, exit_code, r); \ 2085 } \ 2086 } 2087 2088 static void test_efer(void) 2089 { 2090 /* 2091 * Un-setting EFER.SVME is illegal 2092 */ 2093 u64 efer_saved = vmcb->save.efer; 2094 u64 efer = efer_saved; 2095 2096 report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer); 2097 efer &= ~EFER_SVME; 2098 vmcb->save.efer = efer; 2099 report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer); 2100 vmcb->save.efer = efer_saved; 2101 2102 /* 2103 * EFER MBZ bits: 63:16, 9 2104 */ 2105 efer_saved = vmcb->save.efer; 2106 2107 SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer, 2108 efer_saved, SVM_EFER_RESERVED_MASK); 2109 SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer, 2110 efer_saved, SVM_EFER_RESERVED_MASK); 2111 2112 /* 2113 * EFER.LME and CR0.PG are both set and CR4.PAE is zero. 2114 */ 2115 u64 cr0_saved = vmcb->save.cr0; 2116 u64 cr0; 2117 u64 cr4_saved = vmcb->save.cr4; 2118 u64 cr4; 2119 2120 efer = efer_saved | EFER_LME; 2121 vmcb->save.efer = efer; 2122 cr0 = cr0_saved | X86_CR0_PG | X86_CR0_PE; 2123 vmcb->save.cr0 = cr0; 2124 cr4 = cr4_saved & ~X86_CR4_PAE; 2125 vmcb->save.cr4 = cr4; 2126 report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 2127 "CR0.PG=1 (%lx) and CR4.PAE=0 (%lx)", efer, cr0, cr4); 2128 2129 /* 2130 * EFER.LME and CR0.PG are both set and CR0.PE is zero. 2131 * CR4.PAE needs to be set as we otherwise cannot 2132 * determine if CR4.PAE=0 or CR0.PE=0 triggered the 2133 * SVM_EXIT_ERR. 
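	 * (Both combinations, CR4.PAE=0 and CR0.PE=0 with EFER.LME=1 and
	 * CR0.PG=1, are illegal guest state per the consistency checks VMRUN
	 * performs, so either one alone already yields SVM_EXIT_ERR.)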
2134 */ 2135 cr4 = cr4_saved | X86_CR4_PAE; 2136 vmcb->save.cr4 = cr4; 2137 cr0 &= ~X86_CR0_PE; 2138 vmcb->save.cr0 = cr0; 2139 report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 2140 "CR0.PG=1 and CR0.PE=0 (%lx)", efer, cr0); 2141 2142 /* 2143 * EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero. 2144 */ 2145 u32 cs_attrib_saved = vmcb->save.cs.attrib; 2146 u32 cs_attrib; 2147 2148 cr0 |= X86_CR0_PE; 2149 vmcb->save.cr0 = cr0; 2150 cs_attrib = cs_attrib_saved | SVM_SELECTOR_L_MASK | 2151 SVM_SELECTOR_DB_MASK; 2152 vmcb->save.cs.attrib = cs_attrib; 2153 report(svm_vmrun() == SVM_EXIT_ERR, "EFER.LME=1 (%lx), " 2154 "CR0.PG=1 (%lx), CR4.PAE=1 (%lx), CS.L=1 and CS.D=1 (%x)", 2155 efer, cr0, cr4, cs_attrib); 2156 2157 vmcb->save.cr0 = cr0_saved; 2158 vmcb->save.cr4 = cr4_saved; 2159 vmcb->save.efer = efer_saved; 2160 vmcb->save.cs.attrib = cs_attrib_saved; 2161 } 2162 2163 static void test_cr0(void) 2164 { 2165 /* 2166 * Un-setting CR0.CD and setting CR0.NW is illegal combination 2167 */ 2168 u64 cr0_saved = vmcb->save.cr0; 2169 u64 cr0 = cr0_saved; 2170 2171 cr0 |= X86_CR0_CD; 2172 cr0 &= ~X86_CR0_NW; 2173 vmcb->save.cr0 = cr0; 2174 report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx", 2175 cr0); 2176 cr0 |= X86_CR0_NW; 2177 vmcb->save.cr0 = cr0; 2178 report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx", 2179 cr0); 2180 cr0 &= ~X86_CR0_NW; 2181 cr0 &= ~X86_CR0_CD; 2182 vmcb->save.cr0 = cr0; 2183 report (svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx", 2184 cr0); 2185 cr0 |= X86_CR0_NW; 2186 vmcb->save.cr0 = cr0; 2187 report (svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx", 2188 cr0); 2189 vmcb->save.cr0 = cr0_saved; 2190 2191 /* 2192 * CR0[63:32] are not zero 2193 */ 2194 cr0 = cr0_saved; 2195 2196 SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved, 2197 SVM_CR0_RESERVED_MASK); 2198 vmcb->save.cr0 = cr0_saved; 2199 } 2200 2201 static void test_cr3(void) 2202 { 2203 /* 2204 * CR3 MBZ bits based on different modes: 2205 * [63:52] - long mode 2206 */ 2207 u64 cr3_saved = vmcb->save.cr3; 2208 2209 SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved, 2210 SVM_CR3_LONG_MBZ_MASK, SVM_EXIT_ERR, ""); 2211 2212 vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_MBZ_MASK; 2213 report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 2214 vmcb->save.cr3); 2215 2216 /* 2217 * CR3 non-MBZ reserved bits based on different modes: 2218 * [11:5] [2:0] - long mode (PCIDE=0) 2219 * [2:0] - PAE legacy mode 2220 */ 2221 u64 cr4_saved = vmcb->save.cr4; 2222 u64 *pdpe = npt_get_pml4e(); 2223 2224 /* 2225 * Long mode 2226 */ 2227 if (this_cpu_has(X86_FEATURE_PCID)) { 2228 vmcb->save.cr4 = cr4_saved | X86_CR4_PCIDE; 2229 SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2230 SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_VMMCALL, "(PCIDE=1) "); 2231 2232 vmcb->save.cr3 = cr3_saved & ~SVM_CR3_LONG_RESERVED_MASK; 2233 report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR3 63:0: %lx", 2234 vmcb->save.cr3); 2235 } 2236 2237 vmcb->save.cr4 = cr4_saved & ~X86_CR4_PCIDE; 2238 2239 if (!npt_supported()) 2240 goto skip_npt_only; 2241 2242 /* Clear P (Present) bit in NPT in order to trigger #NPF */ 2243 pdpe[0] &= ~1ULL; 2244 2245 SVM_TEST_CR_RESERVED_BITS(0, 11, 1, 3, cr3_saved, 2246 SVM_CR3_LONG_RESERVED_MASK, SVM_EXIT_NPF, "(PCIDE=0) "); 2247 2248 pdpe[0] |= 1ULL; 2249 vmcb->save.cr3 = cr3_saved; 2250 2251 /* 2252 * PAE legacy 2253 */ 2254 pdpe[0] &= ~1ULL; 2255 vmcb->save.cr4 = cr4_saved | X86_CR4_PAE; 2256 SVM_TEST_CR_RESERVED_BITS(0, 2, 1, 3, cr3_saved, 2257 
SVM_CR3_PAE_LEGACY_RESERVED_MASK, SVM_EXIT_NPF, "(PAE) "); 2258 2259 pdpe[0] |= 1ULL; 2260 2261 skip_npt_only: 2262 vmcb->save.cr3 = cr3_saved; 2263 vmcb->save.cr4 = cr4_saved; 2264 } 2265 2266 /* Test CR4 MBZ bits based on legacy or long modes */ 2267 static void test_cr4(void) 2268 { 2269 u64 cr4_saved = vmcb->save.cr4; 2270 u64 efer_saved = vmcb->save.efer; 2271 u64 efer = efer_saved; 2272 2273 efer &= ~EFER_LME; 2274 vmcb->save.efer = efer; 2275 SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2276 SVM_CR4_LEGACY_RESERVED_MASK, SVM_EXIT_ERR, ""); 2277 2278 efer |= EFER_LME; 2279 vmcb->save.efer = efer; 2280 SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved, 2281 SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2282 SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved, 2283 SVM_CR4_RESERVED_MASK, SVM_EXIT_ERR, ""); 2284 2285 vmcb->save.cr4 = cr4_saved; 2286 vmcb->save.efer = efer_saved; 2287 } 2288 2289 static void test_dr(void) 2290 { 2291 /* 2292 * DR6[63:32] and DR7[63:32] are MBZ 2293 */ 2294 u64 dr_saved = vmcb->save.dr6; 2295 2296 SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved, 2297 SVM_DR6_RESERVED_MASK); 2298 vmcb->save.dr6 = dr_saved; 2299 2300 dr_saved = vmcb->save.dr7; 2301 SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved, 2302 SVM_DR7_RESERVED_MASK); 2303 2304 vmcb->save.dr7 = dr_saved; 2305 } 2306 2307 /* TODO: verify if high 32-bits are sign- or zero-extended on bare metal */ 2308 #define TEST_BITMAP_ADDR(save_intercept, type, addr, exit_code, \ 2309 msg) { \ 2310 vmcb->control.intercept = saved_intercept | 1ULL << type; \ 2311 if (type == INTERCEPT_MSR_PROT) \ 2312 vmcb->control.msrpm_base_pa = addr; \ 2313 else \ 2314 vmcb->control.iopm_base_pa = addr; \ 2315 report(svm_vmrun() == exit_code, \ 2316 "Test %s address: %lx", msg, addr); \ 2317 } 2318 2319 /* 2320 * If the MSR or IOIO intercept table extends to a physical address that 2321 * is greater than or equal to the maximum supported physical address, the 2322 * guest state is illegal. 2323 * 2324 * The VMRUN instruction ignores the lower 12 bits of the address specified 2325 * in the VMCB. 2326 * 2327 * MSRPM spans 2 contiguous 4KB pages while IOPM spans 2 contiguous 4KB 2328 * pages + 1 byte. 2329 * 2330 * [APM vol 2] 2331 * 2332 * Note: Unallocated MSRPM addresses conforming to consistency checks, generate 2333 * #NPF. 
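 *
 * The tests below therefore expect SVM_EXIT_ERR whenever the bitmap base is
 * placed so close to the maximum physical address that the bitmap would
 * reach it, and the usual SVM_EXIT_VMMCALL when a properly allocated bitmap
 * is used.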
2334 */ 2335 static void test_msrpm_iopm_bitmap_addrs(void) 2336 { 2337 u64 saved_intercept = vmcb->control.intercept; 2338 u64 addr_beyond_limit = 1ull << cpuid_maxphyaddr(); 2339 u64 addr = virt_to_phys(msr_bitmap) & (~((1ull << 12) - 1)); 2340 2341 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2342 addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2343 "MSRPM"); 2344 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2345 addr_beyond_limit - 2 * PAGE_SIZE + 1, SVM_EXIT_ERR, 2346 "MSRPM"); 2347 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, 2348 addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2349 "MSRPM"); 2350 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2351 SVM_EXIT_VMMCALL, "MSRPM"); 2352 addr |= (1ull << 12) - 1; 2353 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_MSR_PROT, addr, 2354 SVM_EXIT_VMMCALL, "MSRPM"); 2355 2356 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2357 addr_beyond_limit - 4 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2358 "IOPM"); 2359 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2360 addr_beyond_limit - 3 * PAGE_SIZE, SVM_EXIT_VMMCALL, 2361 "IOPM"); 2362 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2363 addr_beyond_limit - 2 * PAGE_SIZE - 2, SVM_EXIT_VMMCALL, 2364 "IOPM"); 2365 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2366 addr_beyond_limit - 2 * PAGE_SIZE, SVM_EXIT_ERR, 2367 "IOPM"); 2368 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, 2369 addr_beyond_limit - PAGE_SIZE, SVM_EXIT_ERR, 2370 "IOPM"); 2371 addr = virt_to_phys(io_bitmap) & (~((1ull << 11) - 1)); 2372 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2373 SVM_EXIT_VMMCALL, "IOPM"); 2374 addr |= (1ull << 12) - 1; 2375 TEST_BITMAP_ADDR(saved_intercept, INTERCEPT_IOIO_PROT, addr, 2376 SVM_EXIT_VMMCALL, "IOPM"); 2377 2378 vmcb->control.intercept = saved_intercept; 2379 } 2380 2381 /* 2382 * Unlike VMSAVE, VMRUN seems not to update the value of noncanonical 2383 * segment bases in the VMCB. However, VMENTRY succeeds as documented. 2384 */ 2385 #define TEST_CANONICAL_VMRUN(seg_base, msg) \ 2386 saved_addr = seg_base; \ 2387 seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2388 return_value = svm_vmrun(); \ 2389 report(return_value == SVM_EXIT_VMMCALL, \ 2390 "Successful VMRUN with noncanonical %s.base", msg); \ 2391 seg_base = saved_addr; 2392 2393 2394 #define TEST_CANONICAL_VMLOAD(seg_base, msg) \ 2395 saved_addr = seg_base; \ 2396 seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ 2397 asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory"); \ 2398 asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory"); \ 2399 report(is_canonical(seg_base), \ 2400 "Test %s.base for canonical form: %lx", msg, seg_base); \ 2401 seg_base = saved_addr; 2402 2403 static void test_canonicalization(void) 2404 { 2405 u64 saved_addr; 2406 u64 return_value; 2407 u64 addr_limit; 2408 u64 vmcb_phys = virt_to_phys(vmcb); 2409 2410 addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 
57 : 48; 2411 u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1); 2412 2413 TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS"); 2414 TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS"); 2415 TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR"); 2416 TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR"); 2417 TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS"); 2418 TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES"); 2419 TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS"); 2420 TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS"); 2421 TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS"); 2422 TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR"); 2423 TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR"); 2424 } 2425 2426 /* 2427 * When VMRUN loads a guest value of 1 in EFLAGS.TF, that value does not 2428 * cause a trace trap between the VMRUN and the first guest instruction, but 2429 * rather after completion of the first guest instruction. 2430 * 2431 * [APM vol 2] 2432 */ 2433 u64 guest_rflags_test_trap_rip; 2434 2435 static void guest_rflags_test_db_handler(struct ex_regs *r) 2436 { 2437 guest_rflags_test_trap_rip = r->rip; 2438 r->rflags &= ~X86_EFLAGS_TF; 2439 } 2440 2441 static void svm_guest_state_test(void) 2442 { 2443 test_set_guest(basic_guest_main); 2444 test_efer(); 2445 test_cr0(); 2446 test_cr3(); 2447 test_cr4(); 2448 test_dr(); 2449 test_msrpm_iopm_bitmap_addrs(); 2450 test_canonicalization(); 2451 } 2452 2453 extern void guest_rflags_test_guest(struct svm_test *test); 2454 extern u64 *insn2; 2455 extern u64 *guest_end; 2456 2457 asm("guest_rflags_test_guest:\n\t" 2458 "push %rbp\n\t" 2459 ".global insn2\n\t" 2460 "insn2:\n\t" 2461 "mov %rsp,%rbp\n\t" 2462 "vmmcall\n\t" 2463 "vmmcall\n\t" 2464 ".global guest_end\n\t" 2465 "guest_end:\n\t" 2466 "vmmcall\n\t" 2467 "pop %rbp\n\t" 2468 "ret"); 2469 2470 static void svm_test_singlestep(void) 2471 { 2472 handle_exception(DB_VECTOR, guest_rflags_test_db_handler); 2473 2474 /* 2475 * Trap expected after completion of first guest instruction 2476 */ 2477 vmcb->save.rflags |= X86_EFLAGS_TF; 2478 report (__svm_vmrun((u64)guest_rflags_test_guest) == SVM_EXIT_VMMCALL && 2479 guest_rflags_test_trap_rip == (u64)&insn2, 2480 "Test EFLAGS.TF on VMRUN: trap expected after completion of first guest instruction"); 2481 /* 2482 * No trap expected 2483 */ 2484 guest_rflags_test_trap_rip = 0; 2485 vmcb->save.rip += 3; 2486 vmcb->save.rflags |= X86_EFLAGS_TF; 2487 report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2488 guest_rflags_test_trap_rip == 0, "Test EFLAGS.TF on VMRUN: trap not expected"); 2489 2490 /* 2491 * Let guest finish execution 2492 */ 2493 vmcb->save.rip += 3; 2494 report (__svm_vmrun(vmcb->save.rip) == SVM_EXIT_VMMCALL && 2495 vmcb->save.rip == (u64)&guest_end, "Test EFLAGS.TF on VMRUN: guest execution completion"); 2496 } 2497 2498 static bool volatile svm_errata_reproduced = false; 2499 static unsigned long volatile physical = 0; 2500 2501 2502 /* 2503 * 2504 * Test the following errata: 2505 * If the VMRUN/VMSAVE/VMLOAD are attempted by the nested guest, 2506 * the CPU would first check the EAX against host reserved memory 2507 * regions (so far only SMM_ADDR/SMM_MASK are known to cause it), 2508 * and only then signal #VMexit 2509 * 2510 * Try to reproduce this by trying vmsave on each possible 4K aligned memory 2511 * address in the low 4G where the SMM area has to reside. 
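 * A #GP raised by VMSAVE (rather than the instruction completing normally)
 * means the errata was hit; gp_isr() below records this and the test reports
 * the offending physical address.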
2512 */ 2513 2514 static void gp_isr(struct ex_regs *r) 2515 { 2516 svm_errata_reproduced = true; 2517 /* skip over the vmsave instruction*/ 2518 r->rip += 3; 2519 } 2520 2521 static void svm_vmrun_errata_test(void) 2522 { 2523 unsigned long *last_page = NULL; 2524 2525 handle_exception(GP_VECTOR, gp_isr); 2526 2527 while (!svm_errata_reproduced) { 2528 2529 unsigned long *page = alloc_pages(1); 2530 2531 if (!page) { 2532 report_pass("All guest memory tested, no bug found"); 2533 break; 2534 } 2535 2536 physical = virt_to_phys(page); 2537 2538 asm volatile ( 2539 "mov %[_physical], %%rax\n\t" 2540 "vmsave %%rax\n\t" 2541 2542 : [_physical] "=m" (physical) 2543 : /* no inputs*/ 2544 : "rax" /*clobbers*/ 2545 ); 2546 2547 if (svm_errata_reproduced) { 2548 report_fail("Got #GP exception - svm errata reproduced at 0x%lx", 2549 physical); 2550 break; 2551 } 2552 2553 *page = (unsigned long)last_page; 2554 last_page = page; 2555 } 2556 2557 while (last_page) { 2558 unsigned long *page = last_page; 2559 last_page = (unsigned long *)*last_page; 2560 free_pages_by_order(page, 1); 2561 } 2562 } 2563 2564 static void vmload_vmsave_guest_main(struct svm_test *test) 2565 { 2566 u64 vmcb_phys = virt_to_phys(vmcb); 2567 2568 asm volatile ("vmload %0" : : "a"(vmcb_phys)); 2569 asm volatile ("vmsave %0" : : "a"(vmcb_phys)); 2570 } 2571 2572 static void svm_vmload_vmsave(void) 2573 { 2574 u32 intercept_saved = vmcb->control.intercept; 2575 2576 test_set_guest(vmload_vmsave_guest_main); 2577 2578 /* 2579 * Disabling intercept for VMLOAD and VMSAVE doesn't cause 2580 * respective #VMEXIT to host 2581 */ 2582 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 2583 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 2584 svm_vmrun(); 2585 report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 2586 "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 2587 2588 /* 2589 * Enabling intercept for VMLOAD and VMSAVE causes respective 2590 * #VMEXIT to host 2591 */ 2592 vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD); 2593 svm_vmrun(); 2594 report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test " 2595 "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT"); 2596 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 2597 vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE); 2598 svm_vmrun(); 2599 report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test " 2600 "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT"); 2601 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 2602 svm_vmrun(); 2603 report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 2604 "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 2605 2606 vmcb->control.intercept |= (1ULL << INTERCEPT_VMLOAD); 2607 svm_vmrun(); 2608 report(vmcb->control.exit_code == SVM_EXIT_VMLOAD, "Test " 2609 "VMLOAD/VMSAVE intercept: Expected VMLOAD #VMEXIT"); 2610 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMLOAD); 2611 svm_vmrun(); 2612 report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 2613 "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 2614 2615 vmcb->control.intercept |= (1ULL << INTERCEPT_VMSAVE); 2616 svm_vmrun(); 2617 report(vmcb->control.exit_code == SVM_EXIT_VMSAVE, "Test " 2618 "VMLOAD/VMSAVE intercept: Expected VMSAVE #VMEXIT"); 2619 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMSAVE); 2620 svm_vmrun(); 2621 report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "Test " 2622 "VMLOAD/VMSAVE intercept: Expected VMMCALL #VMEXIT"); 2623 2624 vmcb->control.intercept = intercept_saved; 2625 } 2626 2627 static void 
prepare_vgif_enabled(struct svm_test *test)
{
	default_prepare(test);
}

static void test_vgif(struct svm_test *test)
{
	asm volatile ("vmmcall\n\tstgi\n\tvmmcall\n\tclgi\n\tvmmcall\n\t");
}

static bool vgif_finished(struct svm_test *test)
{
	switch (get_test_stage(test))
	{
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report_fail("VMEXIT not due to vmmcall.");
			return true;
		}
		vmcb->control.int_ctl |= V_GIF_ENABLED_MASK;
		vmcb->save.rip += 3;
		inc_test_stage(test);
		break;
	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report_fail("VMEXIT not due to vmmcall.");
			return true;
		}
		if (!(vmcb->control.int_ctl & V_GIF_MASK)) {
			report_fail("Failed to set VGIF when executing STGI.");
			vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
			return true;
		}
		report_pass("STGI set VGIF bit.");
		vmcb->save.rip += 3;
		inc_test_stage(test);
		break;
	case 2:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report_fail("VMEXIT not due to vmmcall.");
			return true;
		}
		if (vmcb->control.int_ctl & V_GIF_MASK) {
			report_fail("Failed to clear VGIF when executing CLGI.");
			vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
			return true;
		}
		report_pass("CLGI cleared VGIF bit.");
		vmcb->save.rip += 3;
		inc_test_stage(test);
		vmcb->control.int_ctl &= ~V_GIF_ENABLED_MASK;
		break;
	default:
		return true;
		break;
	}

	return get_test_stage(test) == 3;
}

static bool vgif_check(struct svm_test *test)
{
	return get_test_stage(test) == 3;
}


static int pause_test_counter;
static int wait_counter;

static void pause_filter_test_guest_main(struct svm_test *test)
{
	int i;

	for (i = 0; i < pause_test_counter; i++)
		pause();

	if (!wait_counter)
		return;

	for (i = 0; i < wait_counter; i++)
		;

	for (i = 0; i < pause_test_counter; i++)
		pause();
}

static void pause_filter_run_test(int pause_iterations, int filter_value, int wait_iterations, int threshold)
{
	test_set_guest(pause_filter_test_guest_main);

	pause_test_counter = pause_iterations;
	wait_counter = wait_iterations;

	vmcb->control.pause_filter_count = filter_value;
	vmcb->control.pause_filter_thresh = threshold;
	svm_vmrun();

	if (filter_value <= pause_iterations || wait_iterations < threshold)
		report(vmcb->control.exit_code == SVM_EXIT_PAUSE, "expected PAUSE vmexit");
	else
		report(vmcb->control.exit_code == SVM_EXIT_VMMCALL, "no PAUSE vmexit expected");
}

static void pause_filter_test(void)
{
	if (!pause_filter_supported()) {
		report_skip("PAUSE filter not supported in the guest");
		return;
	}

	vmcb->control.intercept |= (1 << INTERCEPT_PAUSE);

	// filter count smaller than the number of PAUSEs - expect a PAUSE VMexit
	pause_filter_run_test(10, 9, 0, 0);

	// filter count larger than the number of PAUSEs - no VMexit
	pause_filter_run_test(20, 21, 0, 0);


	if (pause_threshold_supported()) {
		// filter count larger than each burst of PAUSEs, and the wait
		// between the bursts exceeds the threshold so the filter
		// counter resets - no VMexit
		pause_filter_run_test(20, 21, 1000, 10);

		// filter count larger than each burst of PAUSEs, but the wait
		// stays below the threshold so the counter is not reset and
		// the second burst exhausts it - expect a PAUSE VMexit
		pause_filter_run_test(20, 21, 10, 1000);
	} else {
		report_skip("PAUSE threshold not supported in the guest");
		return;
	}
}


static int of_test_counter;

static void guest_test_of_handler(struct ex_regs *r)
{
	of_test_counter++;
}

static void svm_of_test_guest(struct svm_test *test)
{
	struct far_pointer32 fp = {
		.offset = (uintptr_t)&&into,
		.selector = KERNEL_CS32,
	};
	uintptr_t rsp;

	asm volatile ("mov %%rsp, %0" : "=r"(rsp));

	if (fp.offset != (uintptr_t)&&into) {
		printf("Code address too high.\n");
		return;
	}

	if ((u32)rsp != rsp) {
		printf("Stack address too high.\n");
		return;
	}

	asm goto("lcall *%0" : : "m" (fp) : "rax" : into);
	return;
into:

	asm volatile (".code32;"
		      "movl $0x7fffffff, %eax;"
		      "addl %eax, %eax;"
		      "into;"
		      "lret;"
		      ".code64");
	__builtin_unreachable();
}

static void svm_into_test(void)
{
	handle_exception(OF_VECTOR, guest_test_of_handler);
	test_set_guest(svm_of_test_guest);
	report(svm_vmrun() == SVM_EXIT_VMMCALL && of_test_counter == 1,
	       "#OF is generated in L2 exception handler");
}

static int bp_test_counter;

static void guest_test_bp_handler(struct ex_regs *r)
{
	bp_test_counter++;
}

static void svm_bp_test_guest(struct svm_test *test)
{
	asm volatile("int3");
}

static void svm_int3_test(void)
{
	handle_exception(BP_VECTOR, guest_test_bp_handler);
	test_set_guest(svm_bp_test_guest);
	report(svm_vmrun() == SVM_EXIT_VMMCALL && bp_test_counter == 1,
	       "#BP is handled in L2 exception handler");
}

static int nm_test_counter;

static void guest_test_nm_handler(struct ex_regs *r)
{
	nm_test_counter++;
	write_cr0(read_cr0() & ~X86_CR0_TS);
	write_cr0(read_cr0() & ~X86_CR0_EM);
}

static void svm_nm_test_guest(struct svm_test *test)
{
	asm volatile("fnop");
}

/* This test checks that:
 *
 * (a) If CR0.TS is set in L2, #NM is handled by L2 when
 *     just an L2 handler is registered.
 *
 * (b) If CR0.TS is cleared and CR0.EM is set, #NM is handled
 *     by L2 when just an L2 handler is registered.
 *
 * (c) If CR0.TS and CR0.EM are cleared in L2, no exception
 *     is generated.
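 *
 * The L2 #NM handler (guest_test_nm_handler) clears CR0.TS and CR0.EM so
 * that the faulting fnop can complete, and nm_test_counter tracks how many
 * times #NM was actually delivered.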
 */

static void svm_nm_test(void)
{
	handle_exception(NM_VECTOR, guest_test_nm_handler);
	write_cr0(read_cr0() & ~X86_CR0_TS);
	test_set_guest(svm_nm_test_guest);

	vmcb->save.cr0 = vmcb->save.cr0 | X86_CR0_TS;
	report(svm_vmrun() == SVM_EXIT_VMMCALL && nm_test_counter == 1,
	       "fnop with CR0.TS set in L2, #NM is triggered");

	vmcb->save.cr0 = (vmcb->save.cr0 & ~X86_CR0_TS) | X86_CR0_EM;
	report(svm_vmrun() == SVM_EXIT_VMMCALL && nm_test_counter == 2,
	       "fnop with CR0.EM set in L2, #NM is triggered");

	vmcb->save.cr0 = vmcb->save.cr0 & ~(X86_CR0_TS | X86_CR0_EM);
	report(svm_vmrun() == SVM_EXIT_VMMCALL && nm_test_counter == 2,
	       "fnop with CR0.TS and CR0.EM unset, no #NM exception");
}

static bool check_lbr(u64 *from_expected, u64 *to_expected)
{
	u64 from = rdmsr(MSR_IA32_LASTBRANCHFROMIP);
	u64 to = rdmsr(MSR_IA32_LASTBRANCHTOIP);

	if ((u64)from_expected != from) {
		report(false, "MSR_IA32_LASTBRANCHFROMIP, expected=0x%lx, actual=0x%lx",
		       (u64)from_expected, from);
		return false;
	}

	if ((u64)to_expected != to) {
		report(false, "MSR_IA32_LASTBRANCHTOIP, expected=0x%lx, actual=0x%lx",
		       (u64)to_expected, to);
		return false;
	}

	return true;
}

static bool check_dbgctl(u64 dbgctl, u64 dbgctl_expected)
{
	if (dbgctl != dbgctl_expected) {
		report(false, "Unexpected MSR_IA32_DEBUGCTLMSR value 0x%lx", dbgctl);
		return false;
	}
	return true;
}


#define DO_BRANCH(branch_name) \
	asm volatile ( \
		# branch_name "_from:" \
		"jmp " # branch_name "_to\n" \
		"nop\n" \
		"nop\n" \
		# branch_name "_to:" \
		"nop\n" \
	)


extern u64 guest_branch0_from, guest_branch0_to;
extern u64 guest_branch2_from, guest_branch2_to;

extern u64 host_branch0_from, host_branch0_to;
extern u64 host_branch2_from, host_branch2_to;
extern u64 host_branch3_from, host_branch3_to;
extern u64 host_branch4_from, host_branch4_to;

u64 dbgctl;

static void svm_lbrv_test_guest1(void)
{
	/*
	 * This guest expects the LBR to be already enabled when it starts,
	 * does a branch, then disables the LBR and checks the recorded values.
	 */

	DO_BRANCH(guest_branch0);

	dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0);

	if (dbgctl != DEBUGCTLMSR_LBR)
		asm volatile("ud2\n");
	if (rdmsr(MSR_IA32_DEBUGCTLMSR) != 0)
		asm volatile("ud2\n");
	if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&guest_branch0_from)
		asm volatile("ud2\n");
	if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&guest_branch0_to)
		asm volatile("ud2\n");

	asm volatile ("vmmcall\n");
}

static void svm_lbrv_test_guest2(void)
{
	/*
	 * This guest expects the LBR to be disabled when it starts,
	 * enables it, does a branch, disables it and then checks.
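	 * It first verifies that the LBR MSRs still hold the host_branch2
	 * addresses (either leaked in from the host or loaded from the VMCB
	 * save area, depending on whether LBR virtualization is enabled for
	 * the particular test).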
2953 */ 2954 2955 DO_BRANCH(guest_branch1); 2956 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2957 2958 if (dbgctl != 0) 2959 asm volatile("ud2\n"); 2960 2961 if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&host_branch2_from) 2962 asm volatile("ud2\n"); 2963 if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&host_branch2_to) 2964 asm volatile("ud2\n"); 2965 2966 2967 wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2968 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2969 DO_BRANCH(guest_branch2); 2970 wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2971 2972 if (dbgctl != DEBUGCTLMSR_LBR) 2973 asm volatile("ud2\n"); 2974 if (rdmsr(MSR_IA32_LASTBRANCHFROMIP) != (u64)&guest_branch2_from) 2975 asm volatile("ud2\n"); 2976 if (rdmsr(MSR_IA32_LASTBRANCHTOIP) != (u64)&guest_branch2_to) 2977 asm volatile("ud2\n"); 2978 2979 asm volatile ("vmmcall\n"); 2980 } 2981 2982 static void svm_lbrv_test0(void) 2983 { 2984 report(true, "Basic LBR test"); 2985 wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 2986 DO_BRANCH(host_branch0); 2987 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2988 wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 2989 2990 check_dbgctl(dbgctl, DEBUGCTLMSR_LBR); 2991 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 2992 check_dbgctl(dbgctl, 0); 2993 2994 check_lbr(&host_branch0_from, &host_branch0_to); 2995 } 2996 2997 static void svm_lbrv_test1(void) 2998 { 2999 report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(1)"); 3000 3001 vmcb->save.rip = (ulong)svm_lbrv_test_guest1; 3002 vmcb->control.virt_ext = 0; 3003 3004 wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3005 DO_BRANCH(host_branch1); 3006 SVM_BARE_VMRUN; 3007 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3008 3009 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3010 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 3011 vmcb->control.exit_code); 3012 return; 3013 } 3014 3015 check_dbgctl(dbgctl, 0); 3016 check_lbr(&guest_branch0_from, &guest_branch0_to); 3017 } 3018 3019 static void svm_lbrv_test2(void) 3020 { 3021 report(true, "Test that without LBRV enabled, guest LBR state does 'leak' to the host(2)"); 3022 3023 vmcb->save.rip = (ulong)svm_lbrv_test_guest2; 3024 vmcb->control.virt_ext = 0; 3025 3026 wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3027 DO_BRANCH(host_branch2); 3028 wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3029 SVM_BARE_VMRUN; 3030 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3031 wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3032 3033 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3034 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 3035 vmcb->control.exit_code); 3036 return; 3037 } 3038 3039 check_dbgctl(dbgctl, 0); 3040 check_lbr(&guest_branch2_from, &guest_branch2_to); 3041 } 3042 3043 static void svm_lbrv_nested_test1(void) 3044 { 3045 if (!lbrv_supported()) { 3046 report_skip("LBRV not supported in the guest"); 3047 return; 3048 } 3049 3050 report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (1)"); 3051 vmcb->save.rip = (ulong)svm_lbrv_test_guest1; 3052 vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 3053 vmcb->save.dbgctl = DEBUGCTLMSR_LBR; 3054 3055 wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3056 DO_BRANCH(host_branch3); 3057 SVM_BARE_VMRUN; 3058 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3059 wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3060 3061 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3062 report(false, "VMEXIT not due to vmmcall. 
Exit reason 0x%x", 3063 vmcb->control.exit_code); 3064 return; 3065 } 3066 3067 if (vmcb->save.dbgctl != 0) { 3068 report(false, "unexpected virtual guest MSR_IA32_DEBUGCTLMSR value 0x%lx", vmcb->save.dbgctl); 3069 return; 3070 } 3071 3072 check_dbgctl(dbgctl, DEBUGCTLMSR_LBR); 3073 check_lbr(&host_branch3_from, &host_branch3_to); 3074 } 3075 3076 static void svm_lbrv_nested_test2(void) 3077 { 3078 if (!lbrv_supported()) { 3079 report_skip("LBRV not supported in the guest"); 3080 return; 3081 } 3082 3083 report(true, "Test that with LBRV enabled, guest LBR state doesn't leak (2)"); 3084 vmcb->save.rip = (ulong)svm_lbrv_test_guest2; 3085 vmcb->control.virt_ext = LBR_CTL_ENABLE_MASK; 3086 3087 vmcb->save.dbgctl = 0; 3088 vmcb->save.br_from = (u64)&host_branch2_from; 3089 vmcb->save.br_to = (u64)&host_branch2_to; 3090 3091 wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); 3092 DO_BRANCH(host_branch4); 3093 SVM_BARE_VMRUN; 3094 dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 3095 wrmsr(MSR_IA32_DEBUGCTLMSR, 0); 3096 3097 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 3098 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 3099 vmcb->control.exit_code); 3100 return; 3101 } 3102 3103 check_dbgctl(dbgctl, DEBUGCTLMSR_LBR); 3104 check_lbr(&host_branch4_from, &host_branch4_to); 3105 } 3106 3107 3108 // test that a nested guest which does enable INTR interception 3109 // but doesn't enable virtual interrupt masking works 3110 3111 static volatile int dummy_isr_recevied; 3112 static void dummy_isr(isr_regs_t *regs) 3113 { 3114 dummy_isr_recevied++; 3115 eoi(); 3116 } 3117 3118 3119 static volatile int nmi_recevied; 3120 static void dummy_nmi_handler(struct ex_regs *regs) 3121 { 3122 nmi_recevied++; 3123 } 3124 3125 3126 static void svm_intr_intercept_mix_run_guest(volatile int *counter, int expected_vmexit) 3127 { 3128 if (counter) 3129 *counter = 0; 3130 3131 sti(); // host IF value should not matter 3132 clgi(); // vmrun will set back GI to 1 3133 3134 svm_vmrun(); 3135 3136 if (counter) 3137 report(!*counter, "No interrupt expected"); 3138 3139 stgi(); 3140 3141 if (counter) 3142 report(*counter == 1, "Interrupt is expected"); 3143 3144 report (vmcb->control.exit_code == expected_vmexit, "Test expected VM exit"); 3145 report(vmcb->save.rflags & X86_EFLAGS_IF, "Guest should have EFLAGS.IF set now"); 3146 cli(); 3147 } 3148 3149 3150 // subtest: test that enabling EFLAGS.IF is enought to trigger an interrupt 3151 static void svm_intr_intercept_mix_if_guest(struct svm_test *test) 3152 { 3153 asm volatile("nop;nop;nop;nop"); 3154 report(!dummy_isr_recevied, "No interrupt expected"); 3155 sti(); 3156 asm volatile("nop"); 3157 report(0, "must not reach here"); 3158 } 3159 3160 static void svm_intr_intercept_mix_if(void) 3161 { 3162 // make a physical interrupt to be pending 3163 handle_irq(0x55, dummy_isr); 3164 3165 vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3166 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3167 vmcb->save.rflags &= ~X86_EFLAGS_IF; 3168 3169 test_set_guest(svm_intr_intercept_mix_if_guest); 3170 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3171 svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3172 } 3173 3174 3175 // subtest: test that a clever guest can trigger an interrupt by setting GIF 3176 // if GIF is not intercepted 3177 static void svm_intr_intercept_mix_gif_guest(struct svm_test *test) 3178 { 3179 3180 asm volatile("nop;nop;nop;nop"); 3181 report(!dummy_isr_recevied, "No interrupt expected"); 3182 3183 // clear GIF 
and enable IF 3184 // that should still not cause VM exit 3185 clgi(); 3186 sti(); 3187 asm volatile("nop"); 3188 report(!dummy_isr_recevied, "No interrupt expected"); 3189 3190 stgi(); 3191 asm volatile("nop"); 3192 report(0, "must not reach here"); 3193 } 3194 3195 static void svm_intr_intercept_mix_gif(void) 3196 { 3197 handle_irq(0x55, dummy_isr); 3198 3199 vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3200 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3201 vmcb->save.rflags &= ~X86_EFLAGS_IF; 3202 3203 test_set_guest(svm_intr_intercept_mix_gif_guest); 3204 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3205 svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3206 } 3207 3208 // subtest: test that a clever guest can trigger an interrupt by setting GIF 3209 // if GIF is not intercepted and interrupt comes after guest 3210 // started running 3211 static void svm_intr_intercept_mix_gif_guest2(struct svm_test *test) 3212 { 3213 asm volatile("nop;nop;nop;nop"); 3214 report(!dummy_isr_recevied, "No interrupt expected"); 3215 3216 clgi(); 3217 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED | 0x55, 0); 3218 report(!dummy_isr_recevied, "No interrupt expected"); 3219 3220 stgi(); 3221 asm volatile("nop"); 3222 report(0, "must not reach here"); 3223 } 3224 3225 static void svm_intr_intercept_mix_gif2(void) 3226 { 3227 handle_irq(0x55, dummy_isr); 3228 3229 vmcb->control.intercept |= (1 << INTERCEPT_INTR); 3230 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3231 vmcb->save.rflags |= X86_EFLAGS_IF; 3232 3233 test_set_guest(svm_intr_intercept_mix_gif_guest2); 3234 svm_intr_intercept_mix_run_guest(&dummy_isr_recevied, SVM_EXIT_INTR); 3235 } 3236 3237 3238 // subtest: test that pending NMI will be handled when guest enables GIF 3239 static void svm_intr_intercept_mix_nmi_guest(struct svm_test *test) 3240 { 3241 asm volatile("nop;nop;nop;nop"); 3242 report(!nmi_recevied, "No NMI expected"); 3243 cli(); // should have no effect 3244 3245 clgi(); 3246 asm volatile("nop"); 3247 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI, 0); 3248 sti(); // should have no effect 3249 asm volatile("nop"); 3250 report(!nmi_recevied, "No NMI expected"); 3251 3252 stgi(); 3253 asm volatile("nop"); 3254 report(0, "must not reach here"); 3255 } 3256 3257 static void svm_intr_intercept_mix_nmi(void) 3258 { 3259 handle_exception(2, dummy_nmi_handler); 3260 3261 vmcb->control.intercept |= (1 << INTERCEPT_NMI); 3262 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3263 vmcb->save.rflags |= X86_EFLAGS_IF; 3264 3265 test_set_guest(svm_intr_intercept_mix_nmi_guest); 3266 svm_intr_intercept_mix_run_guest(&nmi_recevied, SVM_EXIT_NMI); 3267 } 3268 3269 // test that pending SMI will be handled when guest enables GIF 3270 // TODO: can't really count #SMIs so just test that guest doesn't hang 3271 // and VMexits on SMI 3272 static void svm_intr_intercept_mix_smi_guest(struct svm_test *test) 3273 { 3274 asm volatile("nop;nop;nop;nop"); 3275 3276 clgi(); 3277 asm volatile("nop"); 3278 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_SMI, 0); 3279 sti(); // should have no effect 3280 asm volatile("nop"); 3281 stgi(); 3282 asm volatile("nop"); 3283 report(0, "must not reach here"); 3284 } 3285 3286 static void svm_intr_intercept_mix_smi(void) 3287 { 3288 vmcb->control.intercept |= (1 << INTERCEPT_SMI); 3289 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 3290 test_set_guest(svm_intr_intercept_mix_smi_guest); 3291 
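	// counter is NULL: SMIs can't be counted here, so only the SVM_EXIT_SMI
	// exit code (and the fact that the guest doesn't hang) is checked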
svm_intr_intercept_mix_run_guest(NULL, SVM_EXIT_SMI); 3292 } 3293 3294 struct svm_test svm_tests[] = { 3295 { "null", default_supported, default_prepare, 3296 default_prepare_gif_clear, null_test, 3297 default_finished, null_check }, 3298 { "vmrun", default_supported, default_prepare, 3299 default_prepare_gif_clear, test_vmrun, 3300 default_finished, check_vmrun }, 3301 { "ioio", default_supported, prepare_ioio, 3302 default_prepare_gif_clear, test_ioio, 3303 ioio_finished, check_ioio }, 3304 { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 3305 default_prepare_gif_clear, null_test, default_finished, 3306 check_no_vmrun_int }, 3307 { "rsm", default_supported, 3308 prepare_rsm_intercept, default_prepare_gif_clear, 3309 test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept }, 3310 { "cr3 read intercept", default_supported, 3311 prepare_cr3_intercept, default_prepare_gif_clear, 3312 test_cr3_intercept, default_finished, check_cr3_intercept }, 3313 { "cr3 read nointercept", default_supported, default_prepare, 3314 default_prepare_gif_clear, test_cr3_intercept, default_finished, 3315 check_cr3_nointercept }, 3316 { "cr3 read intercept emulate", smp_supported, 3317 prepare_cr3_intercept_bypass, default_prepare_gif_clear, 3318 test_cr3_intercept_bypass, default_finished, check_cr3_intercept }, 3319 { "dr intercept check", default_supported, prepare_dr_intercept, 3320 default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished, 3321 check_dr_intercept }, 3322 { "next_rip", next_rip_supported, prepare_next_rip, 3323 default_prepare_gif_clear, test_next_rip, 3324 default_finished, check_next_rip }, 3325 { "msr intercept check", default_supported, prepare_msr_intercept, 3326 default_prepare_gif_clear, test_msr_intercept, 3327 msr_intercept_finished, check_msr_intercept }, 3328 { "mode_switch", default_supported, prepare_mode_switch, 3329 default_prepare_gif_clear, test_mode_switch, 3330 mode_switch_finished, check_mode_switch }, 3331 { "asid_zero", default_supported, prepare_asid_zero, 3332 default_prepare_gif_clear, test_asid_zero, 3333 default_finished, check_asid_zero }, 3334 { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, 3335 default_prepare_gif_clear, sel_cr0_bug_test, 3336 sel_cr0_bug_finished, sel_cr0_bug_check }, 3337 { "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare, 3338 default_prepare_gif_clear, tsc_adjust_test, 3339 default_finished, tsc_adjust_check }, 3340 { "latency_run_exit", default_supported, latency_prepare, 3341 default_prepare_gif_clear, latency_test, 3342 latency_finished, latency_check }, 3343 { "latency_run_exit_clean", default_supported, latency_prepare, 3344 default_prepare_gif_clear, latency_test, 3345 latency_finished_clean, latency_check }, 3346 { "latency_svm_insn", default_supported, lat_svm_insn_prepare, 3347 default_prepare_gif_clear, null_test, 3348 lat_svm_insn_finished, lat_svm_insn_check }, 3349 { "exc_inject", default_supported, exc_inject_prepare, 3350 default_prepare_gif_clear, exc_inject_test, 3351 exc_inject_finished, exc_inject_check }, 3352 { "pending_event", default_supported, pending_event_prepare, 3353 default_prepare_gif_clear, 3354 pending_event_test, pending_event_finished, pending_event_check }, 3355 { "pending_event_cli", default_supported, pending_event_cli_prepare, 3356 pending_event_cli_prepare_gif_clear, 3357 pending_event_cli_test, pending_event_cli_finished, 3358 pending_event_cli_check }, 3359 { "interrupt", default_supported, interrupt_prepare, 3360 default_prepare_gif_clear, 
interrupt_test, 3361 interrupt_finished, interrupt_check }, 3362 { "nmi", default_supported, nmi_prepare, 3363 default_prepare_gif_clear, nmi_test, 3364 nmi_finished, nmi_check }, 3365 { "nmi_hlt", smp_supported, nmi_prepare, 3366 default_prepare_gif_clear, nmi_hlt_test, 3367 nmi_hlt_finished, nmi_hlt_check }, 3368 { "virq_inject", default_supported, virq_inject_prepare, 3369 default_prepare_gif_clear, virq_inject_test, 3370 virq_inject_finished, virq_inject_check }, 3371 { "reg_corruption", default_supported, reg_corruption_prepare, 3372 default_prepare_gif_clear, reg_corruption_test, 3373 reg_corruption_finished, reg_corruption_check }, 3374 { "svm_init_startup_test", smp_supported, init_startup_prepare, 3375 default_prepare_gif_clear, null_test, 3376 init_startup_finished, init_startup_check }, 3377 { "svm_init_intercept_test", smp_supported, init_intercept_prepare, 3378 default_prepare_gif_clear, init_intercept_test, 3379 init_intercept_finished, init_intercept_check, .on_vcpu = 2 }, 3380 { "host_rflags", default_supported, host_rflags_prepare, 3381 host_rflags_prepare_gif_clear, host_rflags_test, 3382 host_rflags_finished, host_rflags_check }, 3383 { "vgif", vgif_supported, prepare_vgif_enabled, 3384 default_prepare_gif_clear, test_vgif, vgif_finished, 3385 vgif_check }, 3386 TEST(svm_cr4_osxsave_test), 3387 TEST(svm_guest_state_test), 3388 TEST(svm_vmrun_errata_test), 3389 TEST(svm_vmload_vmsave), 3390 TEST(svm_test_singlestep), 3391 TEST(svm_nm_test), 3392 TEST(svm_int3_test), 3393 TEST(svm_into_test), 3394 TEST(svm_lbrv_test0), 3395 TEST(svm_lbrv_test1), 3396 TEST(svm_lbrv_test2), 3397 TEST(svm_lbrv_nested_test1), 3398 TEST(svm_lbrv_nested_test2), 3399 TEST(svm_intr_intercept_mix_if), 3400 TEST(svm_intr_intercept_mix_gif), 3401 TEST(svm_intr_intercept_mix_gif2), 3402 TEST(svm_intr_intercept_mix_nmi), 3403 TEST(svm_intr_intercept_mix_smi), 3404 TEST(svm_tsc_scale_test), 3405 TEST(pause_filter_test), 3406 { NULL, NULL, NULL, NULL, NULL, NULL, NULL } 3407 }; 3408 3409 int main(int ac, char **av) 3410 { 3411 setup_vm(); 3412 return run_svm_tests(ac, av, svm_tests); 3413 } 3414