#include "svm.h"
#include "libcflat.h"
#include "processor.h"
#include "desc.h"
#include "msr.h"
#include "vm.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
#include "isr.h"
#include "apic.h"
#include "delay.h"

#define SVM_EXIT_MAX_DR_INTERCEPT 0x3f

static void *scratch_page;

#define LATENCY_RUNS 1000000

u64 tsc_start;
u64 tsc_end;

u64 vmrun_sum, vmexit_sum;
u64 vmsave_sum, vmload_sum;
u64 stgi_sum, clgi_sum;
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
u64 latvmload_max;
u64 latvmload_min;
u64 latvmsave_max;
u64 latvmsave_min;
u64 latstgi_max;
u64 latstgi_min;
u64 latclgi_max;
u64 latclgi_min;
u64 runs;

static void null_test(struct svm_test *test)
{
}

static bool null_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMMCALL;
}

static void prepare_no_vmrun_int(struct svm_test *test)
{
	vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN);
}

static bool check_no_vmrun_int(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void test_vmrun(struct svm_test *test)
{
	asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb)));
}

static bool check_vmrun(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_VMRUN;
}

static void prepare_rsm_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= 1 << INTERCEPT_RSM;
	vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR);
}

static void test_rsm_intercept(struct svm_test *test)
{
	asm volatile ("rsm" : : : "memory");
}

static bool check_rsm_intercept(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

static bool finished_rsm_intercept(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_RSM) {
			report(false, "VMEXIT not due to rsm. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->control.intercept &= ~(1 << INTERCEPT_RSM);
		inc_test_stage(test);
		break;

	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) {
			report(false, "VMEXIT not due to #UD. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 2;
		inc_test_stage(test);
		break;

	default:
		return true;
	}
	return get_test_stage(test) == 2;
}

static void prepare_cr3_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
}

static void test_cr3_intercept(struct svm_test *test)
{
	asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory");
}

static bool check_cr3_intercept(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_READ_CR3;
}

static bool check_cr3_nointercept(struct svm_test *test)
{
	return null_check(test) && test->scratch == read_cr3();
}

static void corrupt_cr3_intercept_bypass(void *_test)
{
	struct svm_test *test = _test;
	extern volatile u32 mmio_insn;

	while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2))
		pause();
	pause();
	pause();
	pause();
	mmio_insn = 0x90d8200f;  // mov %cr3, %rax; nop
}

static void prepare_cr3_intercept_bypass(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_cr_read |= 1 << 3;
	on_cpu_async(1, corrupt_cr3_intercept_bypass, test);
}

static void test_cr3_intercept_bypass(struct svm_test *test)
{
	ulong a = 0xa0000;

	test->scratch = 1;
	while (test->scratch != 2)
		barrier();

	asm volatile ("mmio_insn: mov %0, (%0); nop"
		      : "+a"(a) : : "memory");
	test->scratch = a;
}

static void prepare_dr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept_dr_read = 0xff;
	vmcb->control.intercept_dr_write = 0xff;
}

static void test_dr_intercept(struct svm_test *test)
{
	unsigned int i, failcnt = 0;

	/* Loop testing debug register reads */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 1:
			asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 2:
			asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 3:
			asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 4:
			asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 5:
			asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 6:
			asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory");
			break;
		case 7:
			asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory");
			break;
		}

		if (test->scratch != i) {
			report(false, "dr%u read intercept", i);
			failcnt++;
		}
	}

	/* Loop testing debug register writes */
	for (i = 0; i < 8; i++) {

		switch (i) {
		case 0:
			asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory");
			break;
		case 1:
			asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory");
			break;
		case 2:
			asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory");
			break;
		case 3:
			asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory");
			break;
		case 4:
			asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory");
			break;
		case 5:
			asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory");
			break;
		case 6:
			asm volatile ("mov %0, %%dr6" : : "r"(test->scratch) : "memory");
			break;
		case 7:
			asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory");
			break;
		}

		if (test->scratch != i) {
			report(false, "dr%u write intercept", i);
			failcnt++;
		}
	}

	test->scratch = failcnt;
}

static bool dr_intercept_finished(struct svm_test *test)
{
	ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0);

	/* Only expect DR intercepts */
	if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0))
		return true;

	/*
	 * Compute debug register number.
	 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture
	 * Programmer's Manual Volume 2 - System Programming:
	 * http://support.amd.com/TechDocs/24593.pdf
	 * there are 16 VMEXIT codes each for DR read and write.
	 */
	test->scratch = (n % 16);

	/* Jump over MOV instruction */
	vmcb->save.rip += 3;

	return false;
}

static bool check_dr_intercept(struct svm_test *test)
{
	return !test->scratch;
}

static bool next_rip_supported(void)
{
	return this_cpu_has(X86_FEATURE_NRIPS);
}

static void prepare_next_rip(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC);
}

static void test_next_rip(struct svm_test *test)
{
	asm volatile ("rdtsc\n\t"
		      ".globl exp_next_rip\n\t"
		      "exp_next_rip:\n\t" ::: "eax", "edx");
}

static bool check_next_rip(struct svm_test *test)
{
	extern char exp_next_rip;
	unsigned long address = (unsigned long)&exp_next_rip;

	return address == vmcb->control.next_rip;
}

extern u8 *msr_bitmap;

static void prepare_msr_intercept(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT);
	vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR);
	memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE);
}

static void test_msr_intercept(struct svm_test *test)
{
	unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */
	unsigned long msr_index;

	for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) {
		if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) {
			/*
			 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture
			 * Programmer's Manual volume 2 - System Programming:
			 * http://support.amd.com/TechDocs/24593.pdf
			 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR.
			 */
			continue;
		}

		/* Skip gaps between supported MSR ranges */
		if (msr_index == 0x2000)
			msr_index = 0xc0000000;
		else if (msr_index == 0xc0002000)
			msr_index = 0xc0010000;

		test->scratch = -1;

		rdmsr(msr_index);

		/* Check that a read intercept occurred for MSR at msr_index */
		if (test->scratch != msr_index)
			report(false, "MSR 0x%lx read intercept", msr_index);

		/*
		 * Poor man's approach to generate a value that
		 * seems arbitrary each time around the loop.
		 */
		msr_value += (msr_value << 1);

		wrmsr(msr_index, msr_value);

		/* Check that a write intercept occurred for MSR with msr_value */
		if (test->scratch != msr_value)
			report(false, "MSR 0x%lx write intercept", msr_index);
	}

	test->scratch = -2;
}

static bool msr_intercept_finished(struct svm_test *test)
{
	u32 exit_code = vmcb->control.exit_code;
	u64 exit_info_1;
	u8 *opcode;

	if (exit_code == SVM_EXIT_MSR) {
		exit_info_1 = vmcb->control.exit_info_1;
	} else {
		/*
		 * If #GP exception occurs instead, check that it was
		 * for RDMSR/WRMSR and set exit_info_1 accordingly.
		 */

		if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR))
			return true;

		opcode = (u8 *)vmcb->save.rip;
		if (opcode[0] != 0x0f)
			return true;

		switch (opcode[1]) {
		case 0x30: /* WRMSR */
			exit_info_1 = 1;
			break;
		case 0x32: /* RDMSR */
			exit_info_1 = 0;
			break;
		default:
			return true;
		}

		/*
		 * Warn that a #GP exception occurred instead.
		 * RCX holds the MSR index.
		 */
		printf("%s 0x%lx #GP exception\n",
		       exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx);
	}

	/* Jump over RDMSR/WRMSR instruction */
	vmcb->save.rip += 2;

	/*
	 * Test whether the intercept was for RDMSR/WRMSR.
	 * For RDMSR, test->scratch is set to the MSR index;
	 * RCX holds the MSR index.
	 * For WRMSR, test->scratch is set to the MSR value;
	 * RDX holds the upper 32 bits of the MSR value,
	 * while RAX holds its lower 32 bits.
	 */
	if (exit_info_1)
		test->scratch =
			((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff));
	else
		test->scratch = get_regs().rcx;

	return false;
}

static bool check_msr_intercept(struct svm_test *test)
{
	memset(msr_bitmap, 0, MSR_BITMAP_SIZE);
	return (test->scratch == -2);
}

static void prepare_mode_switch(struct svm_test *test)
{
	vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR)
					      | (1ULL << UD_VECTOR)
					      | (1ULL << DF_VECTOR)
					      | (1ULL << PF_VECTOR);
	test->scratch = 0;
}

static void test_mode_switch(struct svm_test *test)
{
	asm volatile(" cli\n"
		     " ljmp *1f\n" /* jump to 32-bit code segment */
		     "1:\n"
		     " .long 2f\n"
		     " .long " xstr(KERNEL_CS32) "\n"
		     ".code32\n"
		     "2:\n"
		     " movl %%cr0, %%eax\n"
		     " btcl $31, %%eax\n" /* clear PG */
		     " movl %%eax, %%cr0\n"
		     " movl $0xc0000080, %%ecx\n" /* EFER */
		     " rdmsr\n"
		     " btcl $8, %%eax\n" /* clear LME */
		     " wrmsr\n"
		     " movl %%cr4, %%eax\n"
		     " btcl $5, %%eax\n" /* clear PAE */
		     " movl %%eax, %%cr4\n"
		     " movw %[ds16], %%ax\n"
		     " movw %%ax, %%ds\n"
		     " ljmpl %[cs16], $3f\n" /* jump to 16-bit protected mode */
		     ".code16\n"
		     "3:\n"
		     " movl %%cr0, %%eax\n"
		     " btcl $0, %%eax\n" /* clear PE */
		     " movl %%eax, %%cr0\n"
		     " ljmpl $0, $4f\n" /* jump to real-mode */
		     "4:\n"
		     " vmmcall\n"
		     " movl %%cr0, %%eax\n"
		     " btsl $0, %%eax\n" /* set PE */
		     " movl %%eax, %%cr0\n"
		     " ljmpl %[cs32], $5f\n" /* back to protected mode */
		     ".code32\n"
		     "5:\n"
		     " movl %%cr4, %%eax\n"
		     " btsl $5, %%eax\n" /* set PAE */
		     " movl %%eax, %%cr4\n"
		     " movl $0xc0000080, %%ecx\n" /* EFER */
		     " rdmsr\n"
		     " btsl $8, %%eax\n" /* set LME */
		     " wrmsr\n"
		     " movl %%cr0, %%eax\n"
		     " btsl $31, %%eax\n" /* set PG */
		     " movl %%eax, %%cr0\n"
		     " ljmpl %[cs64], $6f\n" /* back to long mode */
		     ".code64\n\t"
		     "6:\n"
		     " vmmcall\n"
		     :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16),
			[cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64)
		     : "rax", "rbx", "rcx", "rdx", "memory");
}

static bool mode_switch_finished(struct svm_test *test)
{
	u64 cr0, cr4, efer;

	cr0 = vmcb->save.cr0;
	cr4 = vmcb->save.cr4;
	efer = vmcb->save.efer;

	/* Only expect VMMCALL intercepts */
	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL)
		return true;

	/* Jump over VMMCALL instruction */
	vmcb->save.rip += 3;

	/* Do sanity checks */
	switch (test->scratch) {
	case 0:
		/* Test should be in real mode now - check for this */
		if ((cr0 & 0x80000001) ||  /* CR0.PG, CR0.PE */
		    (cr4 & 0x00000020) ||  /* CR4.PAE */
		    (efer & 0x00000500))   /* EFER.LMA, EFER.LME */
			return true;
		break;
	case 2:
		/* Test should be back in long-mode now - check for this */
		if (((cr0 & 0x80000001) != 0x80000001) ||  /* CR0.PG, CR0.PE */
		    ((cr4 & 0x00000020) != 0x00000020) ||  /* CR4.PAE */
		    ((efer & 0x00000500) != 0x00000500))   /* EFER.LMA, EFER.LME */
			return true;
		break;
	}

	/* one step forward */
	test->scratch += 1;

	return test->scratch == 2;
}

static bool check_mode_switch(struct svm_test *test)
{
	return test->scratch == 2;
}

extern u8 *io_bitmap;

static void prepare_ioio(struct svm_test *test)
{
	vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT);
	test->scratch = 0;
	memset(io_bitmap, 0, 8192);
	io_bitmap[8192] = 0xFF;
}

static void test_ioio(struct svm_test *test)
{
	// stage 0, test IO pass
	inb(0x5000);
	outb(0x0, 0x5000);
	if (get_test_stage(test) != 0)
		goto fail;

	// test IO width, in/out
	io_bitmap[0] = 0xFF;
	inc_test_stage(test);
	inb(0x0);
	if (get_test_stage(test) != 2)
		goto fail;

	outw(0x0, 0x0);
	if (get_test_stage(test) != 3)
		goto fail;

	inl(0x0);
	if (get_test_stage(test) != 4)
		goto fail;

	// test low/high IO port
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	if (get_test_stage(test) != 5)
		goto fail;

	io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8));
	inw(0x9000);
	if (get_test_stage(test) != 6)
		goto fail;

	// test partial pass
	io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8));
	inl(0x4FFF);
	if (get_test_stage(test) != 7)
		goto fail;

	// test across pages
	inc_test_stage(test);
	inl(0x7FFF);
	if (get_test_stage(test) != 8)
		goto fail;

	inc_test_stage(test);
	io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8);
	inl(0x7FFF);
	if (get_test_stage(test) != 10)
		goto fail;

	io_bitmap[0] = 0;
	inl(0xFFFF);
	if (get_test_stage(test) != 11)
		goto fail;

	io_bitmap[0] = 0xFF;
	io_bitmap[8192] = 0;
	inl(0xFFFF);
	inc_test_stage(test);
	if (get_test_stage(test) != 12)
		goto fail;

	return;

fail:
	report(false, "stage %d", get_test_stage(test));
	test->scratch = -1;
}

static bool ioio_finished(struct svm_test *test)
{
	unsigned port, size;

	/* Only expect IOIO intercepts */
	if (vmcb->control.exit_code == SVM_EXIT_VMMCALL)
		return true;

	if (vmcb->control.exit_code != SVM_EXIT_IOIO)
		return true;

	/* one step forward */
	test->scratch += 1;

	port = vmcb->control.exit_info_1 >> 16;
	size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7;

	while (size--) {
		io_bitmap[port / 8] &= ~(1 << (port & 7));
		port++;
	}

	return false;
}

static bool check_ioio(struct svm_test *test)
{
	memset(io_bitmap, 0, 8193);
	return test->scratch != -1;
}

static void prepare_asid_zero(struct svm_test *test)
{
	vmcb->control.asid = 0;
}

static void test_asid_zero(struct svm_test *test)
{
	asm volatile ("vmmcall\n\t");
}

static bool check_asid_zero(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_ERR;
}

static void sel_cr0_bug_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
	vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0);
}

static bool sel_cr0_bug_finished(struct svm_test *test)
{
	return true;
}

static void sel_cr0_bug_test(struct svm_test *test)
{
	unsigned long cr0;

	/* read cr0, clear CD, and write back */
	cr0 = read_cr0();
	cr0 |= (1UL << 30);
	write_cr0(cr0);

	/*
	 * If we are here the test failed, not sure what to do now because we
	 * are not in guest-mode anymore so we can't trigger an intercept.
	 * Trigger a triple fault for now.
	 */
	report(false, "sel_cr0 test. Cannot recover from this - exiting");
	exit(report_summary());
}

static bool sel_cr0_bug_check(struct svm_test *test)
{
	return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE;
}

static void npt_nx_prepare(struct svm_test *test)
{
	u64 *pte;

	vmcb_ident(vmcb);
	pte = npt_get_pte((u64)null_test);

	*pte |= (1ULL << 63);
}

static bool npt_nx_check(struct svm_test *test)
{
	u64 *pte = npt_get_pte((u64)null_test);

	*pte &= ~(1ULL << 63);

	vmcb->save.efer |= (1 << 11);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (vmcb->control.exit_info_1 == 0x100000015ULL);
}

static void npt_us_prepare(struct svm_test *test)
{
	u64 *pte;

	scratch_page = alloc_page();
	vmcb_ident(vmcb);
	pte = npt_get_pte((u64)scratch_page);

	*pte &= ~(1ULL << 2);
}

static void npt_us_test(struct svm_test *test)
{
	(void) *(volatile u64 *)scratch_page;
}

static bool npt_us_check(struct svm_test *test)
{
	u64 *pte = npt_get_pte((u64)scratch_page);

	*pte |= (1ULL << 2);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (vmcb->control.exit_info_1 == 0x100000005ULL);
}

u64 save_pde;

static void npt_rsvd_prepare(struct svm_test *test)
{
	u64 *pde;

	vmcb_ident(vmcb);
	pde = npt_get_pde((u64) null_test);

	save_pde = *pde;
	*pde = (1ULL << 19) | (1ULL << 7) | 0x27;
}

static bool npt_rsvd_check(struct svm_test *test)
{
	u64 *pde = npt_get_pde((u64) null_test);

	*pde = save_pde;

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (vmcb->control.exit_info_1 == 0x10000001dULL);
}

static void npt_rw_prepare(struct svm_test *test)
{
	u64 *pte;

	vmcb_ident(vmcb);
	pte = npt_get_pte(0x80000);

	*pte &= ~(1ULL << 1);
}

static void npt_rw_test(struct svm_test *test)
{
	u64 *data = (void*)(0x80000);

	*data = 0;
}

static bool npt_rw_check(struct svm_test *test)
{
	u64 *pte = npt_get_pte(0x80000);

	*pte |= (1ULL << 1);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (vmcb->control.exit_info_1 == 0x100000007ULL);
}
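
/*
 * The *_pfwalk tests below expect exit_info_1 values with bit 33 set:
 * the nested page fault is taken while walking the guest's own page
 * tables (e.g. the page holding CR3), as opposed to bit 32, which the
 * NPT tests above expect for faults on the final guest-physical
 * translation.
 */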

static void npt_rw_pfwalk_prepare(struct svm_test *test)
{
	u64 *pte;

	vmcb_ident(vmcb);
	pte = npt_get_pte(read_cr3());

	*pte &= ~(1ULL << 1);
}

static bool npt_rw_pfwalk_check(struct svm_test *test)
{
	u64 *pte = npt_get_pte(read_cr3());

	*pte |= (1ULL << 1);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (vmcb->control.exit_info_1 == 0x200000007ULL)
	       && (vmcb->control.exit_info_2 == read_cr3());
}

static void npt_rsvd_pfwalk_prepare(struct svm_test *test)
{
	u64 *pdpe;

	vmcb_ident(vmcb);

	pdpe = npt_get_pml4e();
	pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_pfwalk_check(struct svm_test *test)
{
	u64 *pdpe = npt_get_pml4e();

	pdpe[0] &= ~(1ULL << 8);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (vmcb->control.exit_info_1 == 0x20000000fULL);
}

static void npt_l1mmio_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct svm_test *test)
{
	volatile u32 *data = (volatile void*)(0xfee00030UL);

	nested_apic_version1 = *data;
	nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct svm_test *test)
{
	volatile u32 *data = (volatile void*)(0xfee00030);
	u32 lvr = *data;

	return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void npt_rw_l1mmio_prepare(struct svm_test *test)
{
	u64 *pte;

	vmcb_ident(vmcb);
	pte = npt_get_pte(0xfee00080);

	*pte &= ~(1ULL << 1);
}

static void npt_rw_l1mmio_test(struct svm_test *test)
{
	volatile u32 *data = (volatile void*)(0xfee00080);

	*data = *data;
}

static bool npt_rw_l1mmio_check(struct svm_test *test)
{
	u64 *pte = npt_get_pte(0xfee00080);

	*pte |= (1ULL << 1);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	       && (vmcb->control.exit_info_1 == 0x100000007ULL);
}

#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE (~0ull << 48)
static bool ok;

static bool tsc_adjust_supported(void)
{
	return this_cpu_has(X86_FEATURE_TSC_ADJUST);
}

static void tsc_adjust_prepare(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.tsc_offset = TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok = adjust == -TSC_ADJUST_VALUE;
}

static void tsc_adjust_test(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust == -TSC_ADJUST_VALUE;

	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust <= -2 * TSC_ADJUST_VALUE;

	uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;

	uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
}

static bool tsc_adjust_check(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

	wrmsr(MSR_IA32_TSC_ADJUST, 0);
	return ok && adjust <= -2 * TSC_ADJUST_VALUE;
}
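
/*
 * latency_run_exit: measure VMRUN and #VMEXIT round-trip costs in TSC
 * cycles.  The host timestamps before VMRUN, the guest timestamps on entry
 * and again before each vmmcall, and min/max/average are accumulated over
 * LATENCY_RUNS iterations.
 */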

static void latency_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmrun_min = latvmexit_min = -1ULL;
	latvmrun_max = latvmexit_max = 0;
	vmrun_sum = vmexit_sum = 0;
	tsc_start = rdtsc();
}

static void latency_test(struct svm_test *test)
{
	u64 cycles;

start:
	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmrun_max)
		latvmrun_max = cycles;

	if (cycles < latvmrun_min)
		latvmrun_min = cycles;

	vmrun_sum += cycles;

	tsc_start = rdtsc();

	asm volatile ("vmmcall" : : : "memory");
	goto start;
}

static bool latency_finished(struct svm_test *test)
{
	u64 cycles;

	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmexit_max)
		latvmexit_max = cycles;

	if (cycles < latvmexit_min)
		latvmexit_min = cycles;

	vmexit_sum += cycles;

	vmcb->save.rip += 3;

	runs -= 1;

	tsc_end = rdtsc();

	return runs == 0;
}

static bool latency_check(struct svm_test *test)
{
	printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
	       latvmrun_min, vmrun_sum / LATENCY_RUNS);
	printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
	       latvmexit_min, vmexit_sum / LATENCY_RUNS);
	return true;
}

static void lat_svm_insn_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
	latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
	vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	u64 cycles;

	for ( ; runs != 0; runs--) {
		tsc_start = rdtsc();
		asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmload_max)
			latvmload_max = cycles;
		if (cycles < latvmload_min)
			latvmload_min = cycles;
		vmload_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmsave_max)
			latvmsave_max = cycles;
		if (cycles < latvmsave_min)
			latvmsave_min = cycles;
		vmsave_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("stgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latstgi_max)
			latstgi_max = cycles;
		if (cycles < latstgi_min)
			latstgi_min = cycles;
		stgi_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("clgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latclgi_max)
			latclgi_max = cycles;
		if (cycles < latclgi_min)
			latclgi_min = cycles;
		clgi_sum += cycles;
	}

	tsc_end = rdtsc();

	return true;
}

static bool lat_svm_insn_check(struct svm_test *test)
{
	printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
	       latvmload_min, vmload_sum / LATENCY_RUNS);
	printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
	       latvmsave_min, vmsave_sum / LATENCY_RUNS);
	printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
	       latstgi_min, stgi_sum / LATENCY_RUNS);
	printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
	       latclgi_min, clgi_sum / LATENCY_RUNS);
	return true;
}

bool pending_event_ipi_fired;
bool pending_event_guest_run;
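
/*
 * pending_event: a self-IPI is queued before VMRUN, at a point where the
 * host cannot yet take it.  With INTR intercepted and V_INTR_MASKING set,
 * the first VMRUN must exit with SVM_EXIT_INTR before the guest body runs;
 * only the second run, with nothing pending, should reach the guest.
 */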

static void pending_event_ipi_isr(isr_regs_t *regs)
{
	pending_event_ipi_fired = true;
	eoi();
}

static void pending_event_prepare(struct svm_test *test)
{
	int ipi_vector = 0xf1;

	default_prepare(test);

	pending_event_ipi_fired = false;

	handle_irq(ipi_vector, pending_event_ipi_isr);

	pending_event_guest_run = false;

	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
	vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
		       APIC_DM_FIXED | ipi_vector, 0);

	set_test_stage(test, 0);
}

static void pending_event_test(struct svm_test *test)
{
	pending_event_guest_run = true;
}

static bool pending_event_finished(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
			report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}

		vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
		vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

		if (pending_event_guest_run) {
			report(false, "Guest ran before host received IPI\n");
			return true;
		}

		irq_enable();
		asm volatile ("nop");
		irq_disable();

		if (!pending_event_ipi_fired) {
			report(false, "Pending interrupt not dispatched after IRQ enabled\n");
			return true;
		}
		break;

	case 1:
		if (!pending_event_guest_run) {
			report(false, "Guest did not resume when no interrupt\n");
			return true;
		}
		break;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 2;
}

static bool pending_event_check(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

static void pending_event_cli_prepare(struct svm_test *test)
{
	default_prepare(test);

	pending_event_ipi_fired = false;

	handle_irq(0xf1, pending_event_ipi_isr);

	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
		       APIC_DM_FIXED | 0xf1, 0);

	set_test_stage(test, 0);
}

static void pending_event_cli_prepare_gif_clear(struct svm_test *test)
{
	asm("cli");
}

static void pending_event_cli_test(struct svm_test *test)
{
	if (pending_event_ipi_fired == true) {
		set_test_stage(test, -1);
		report(false, "Interrupt preceded guest");
		vmmcall();
	}

	/* VINTR_MASKING is zero.  This should cause the IPI to fire. */
	irq_enable();
	asm volatile ("nop");
	irq_disable();

	if (pending_event_ipi_fired != true) {
		set_test_stage(test, -1);
		report(false, "Interrupt not triggered by guest");
	}

	vmmcall();

	/*
	 * Now VINTR_MASKING=1, but no interrupt is pending so
	 * the VINTR interception should be clear in VMCB02.  Check
	 * that L0 did not leave a stale VINTR in the VMCB.
	 */
	irq_enable();
	asm volatile ("nop");
	irq_disable();
}

static bool pending_event_cli_finished(struct svm_test *test)
{
	if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
		report(false, "VM_EXIT return to host is not EXIT_VMMCALL, exit reason 0x%x",
		       vmcb->control.exit_code);
		return true;
	}

	switch (get_test_stage(test)) {
	case 0:
		vmcb->save.rip += 3;

		pending_event_ipi_fired = false;

		vmcb->control.int_ctl |= V_INTR_MASKING_MASK;

		/* Now entering again with VINTR_MASKING=1. */
		apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
			       APIC_DM_FIXED | 0xf1, 0);

		break;

	case 1:
		if (pending_event_ipi_fired == true) {
			report(false, "Interrupt triggered by guest");
			return true;
		}

		irq_enable();
		asm volatile ("nop");
		irq_disable();

		if (pending_event_ipi_fired != true) {
			report(false, "Interrupt not triggered by host");
			return true;
		}

		break;

	default:
		return true;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 2;
}

static bool pending_event_cli_check(struct svm_test *test)
{
	return get_test_stage(test) == 2;
}

#define TIMER_VECTOR 222

static volatile bool timer_fired;

static void timer_isr(isr_regs_t *regs)
{
	timer_fired = true;
	apic_write(APIC_EOI, 0);
}

static void interrupt_prepare(struct svm_test *test)
{
	default_prepare(test);
	handle_irq(TIMER_VECTOR, timer_isr);
	timer_fired = false;
	set_test_stage(test, 0);
}

static void interrupt_test(struct svm_test *test)
{
	long long start, loops;

	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();
	apic_write(APIC_TMICT, 1); // Timer Initial Count Register 0x380, one-shot
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");

	report(timer_fired, "direct interrupt while running guest");

	if (!timer_fired) {
		set_test_stage(test, -1);
		vmmcall();
	}

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmmcall();

	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");

	report(timer_fired, "intercepted interrupt while running guest");

	if (!timer_fired) {
		set_test_stage(test, -1);
		vmmcall();
	}

	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();

	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);
	asm volatile ("sti; hlt");

	report(rdtsc() - start > 10000 && timer_fired,
	       "direct interrupt + hlt");

	if (!timer_fired) {
		set_test_stage(test, -1);
		vmmcall();
	}

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmmcall();

	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);
	asm volatile ("hlt");

	report(rdtsc() - start > 10000 && timer_fired,
	       "intercepted interrupt + hlt");

	if (!timer_fired) {
		set_test_stage(test, -1);
		vmmcall();
	}

	apic_write(APIC_TMICT, 0);
	irq_disable();
}
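
/*
 * Stages 0 and 2 end with a vmmcall exit and arm the INTR intercept plus
 * V_INTR_MASKING for the next run; stages 1 and 3 expect the pending timer
 * interrupt to force SVM_EXIT_INTR, after which the interrupt is taken in
 * the host and the intercept is disarmed again.
 */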

static bool interrupt_finished(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
	case 2:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 3;

		vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
		vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
		break;

	case 1:
	case 3:
		if (vmcb->control.exit_code != SVM_EXIT_INTR) {
			report(false, "VMEXIT not due to intr intercept. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}

		irq_enable();
		asm volatile ("nop");
		irq_disable();

		vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
		vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
		break;

	case 4:
		break;

	default:
		return true;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 5;
}

static bool interrupt_check(struct svm_test *test)
{
	return get_test_stage(test) == 5;
}

static volatile bool nmi_fired;

static void nmi_handler(isr_regs_t *regs)
{
	nmi_fired = true;
	apic_write(APIC_EOI, 0);
}

static void nmi_prepare(struct svm_test *test)
{
	default_prepare(test);
	nmi_fired = false;
	handle_irq(NMI_VECTOR, nmi_handler);
	set_test_stage(test, 0);
}

static void nmi_test(struct svm_test *test)
{
	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);

	report(nmi_fired, "direct NMI while running guest");

	if (!nmi_fired)
		set_test_stage(test, -1);

	vmmcall();

	nmi_fired = false;

	apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0);

	if (!nmi_fired) {
		report(nmi_fired, "intercepted pending NMI not dispatched");
		set_test_stage(test, -1);
	}
}

static bool nmi_finished(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 3;

		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
		break;

	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_NMI) {
			report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}

		report(true, "NMI intercept while running guest");
		break;

	case 2:
		break;

	default:
		return true;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 3;
}

static bool nmi_check(struct svm_test *test)
{
	return get_test_stage(test) == 3;
}

#define NMI_DELAY 100000000ULL

static void nmi_message_thread(void *_test)
{
	struct svm_test *test = _test;

	while (get_test_stage(test) != 1)
		pause();

	delay(NMI_DELAY);

	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);

	while (get_test_stage(test) != 2)
		pause();

	delay(NMI_DELAY);

	apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]);
}

static void nmi_hlt_test(struct svm_test *test)
{
	long long start;

	on_cpu_async(1, nmi_message_thread, test);

	start = rdtsc();

	set_test_stage(test, 1);

	asm volatile ("hlt");

	report((rdtsc() - start > NMI_DELAY) && nmi_fired,
	       "direct NMI + hlt");

	if (!nmi_fired)
		set_test_stage(test, -1);

	nmi_fired = false;

	vmmcall();

	start = rdtsc();

	set_test_stage(test, 2);

	asm volatile ("hlt");

	report((rdtsc() - start > NMI_DELAY) && nmi_fired,
	       "intercepted NMI + hlt");

	if (!nmi_fired) {
		report(nmi_fired, "intercepted pending NMI not dispatched");
		set_test_stage(test, -1);
		vmmcall();
	}

	set_test_stage(test, 3);
}

static bool nmi_hlt_finished(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 3;

		vmcb->control.intercept |= (1ULL << INTERCEPT_NMI);
		break;

	case 2:
		if (vmcb->control.exit_code != SVM_EXIT_NMI) {
			report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}

		report(true, "NMI intercept while running guest");
		break;

	case 3:
		break;

	default:
		return true;
	}

	return get_test_stage(test) == 3;
}

static bool nmi_hlt_check(struct svm_test *test)
{
	return get_test_stage(test) == 3;
}

static volatile int count_exc = 0;

static void my_isr(struct ex_regs *r)
{
	count_exc++;
}

static void exc_inject_prepare(struct svm_test *test)
{
	default_prepare(test);
	handle_exception(DE_VECTOR, my_isr);
	handle_exception(NMI_VECTOR, my_isr);
}

static void exc_inject_test(struct svm_test *test)
{
	asm volatile ("vmmcall\n\tvmmcall\n\t");
}

static bool exc_inject_finished(struct svm_test *test)
{
	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 3;
		vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
		break;

	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_ERR) {
			report(false, "VMEXIT not due to error. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		report(count_exc == 0, "exception with vector 2 not injected");
		vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;
		break;

	case 2:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->save.rip += 3;
		report(count_exc == 1, "divide overflow exception injected");
		report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared");
		break;

	default:
		return true;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 3;
}

static bool exc_inject_check(struct svm_test *test)
{
	return count_exc == 1 && get_test_stage(test) == 3;
}

static volatile bool virq_fired;

static void virq_isr(isr_regs_t *regs)
{
	virq_fired = true;
}

static void virq_inject_prepare(struct svm_test *test)
{
	handle_irq(0xf1, virq_isr);
	default_prepare(test);
	vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
				(0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority
	vmcb->control.int_vector = 0xf1;
	virq_fired = false;
	set_test_stage(test, 0);
}

static void virq_inject_test(struct svm_test *test)
{
	if (virq_fired) {
		report(false, "virtual interrupt fired before L2 sti");
		set_test_stage(test, -1);
		vmmcall();
	}

	irq_enable();
	asm volatile ("nop");
	irq_disable();

	if (!virq_fired) {
		report(false, "virtual interrupt not fired after L2 sti");
		set_test_stage(test, -1);
	}

	vmmcall();

	if (virq_fired) {
		report(false, "virtual interrupt fired before L2 sti after VINTR intercept");
		set_test_stage(test, -1);
		vmmcall();
	}

	irq_enable();
	asm volatile ("nop");
	irq_disable();

	if (!virq_fired) {
		report(false, "virtual interrupt not fired after return from VINTR intercept");
		set_test_stage(test, -1);
	}

	vmmcall();

	irq_enable();
	asm volatile ("nop");
	irq_disable();

	if (virq_fired) {
		report(false, "virtual interrupt fired when V_IRQ_PRIO less than V_TPR");
		set_test_stage(test, -1);
	}

	vmmcall();
	vmmcall();
}

static bool virq_inject_finished(struct svm_test *test)
{
	vmcb->save.rip += 3;

	switch (get_test_stage(test)) {
	case 0:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		if (vmcb->control.int_ctl & V_IRQ_MASK) {
			report(false, "V_IRQ not cleared on VMEXIT after firing");
			return true;
		}
		virq_fired = false;
		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
		vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
					(0x0f << V_INTR_PRIO_SHIFT);
		break;

	case 1:
		if (vmcb->control.exit_code != SVM_EXIT_VINTR) {
			report(false, "VMEXIT not due to vintr. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		if (virq_fired) {
			report(false, "V_IRQ fired before SVM_EXIT_VINTR");
			return true;
		}
		vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
		break;

	case 2:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		virq_fired = false;
		// Set irq to lower priority
		vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK |
					(0x08 << V_INTR_PRIO_SHIFT);
		// Raise guest TPR
		vmcb->control.int_ctl |= 0x0a & V_TPR_MASK;
		break;

	case 3:
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
		break;

	case 4:
		// INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR
		if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) {
			report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x",
			       vmcb->control.exit_code);
			return true;
		}
		break;

	default:
		return true;
	}

	inc_test_stage(test);

	return get_test_stage(test) == 5;
}

static bool virq_inject_check(struct svm_test *test)
{
	return get_test_stage(test) == 5;
}

/*
 * Detect nested guest RIP corruption as explained in kernel commit
 * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73
 *
 * In the assembly loop below 'ins' is executed while IO instructions
 * are not intercepted; the instruction is emulated by L0.
 *
 * At the same time we are getting interrupts from the local APIC timer,
 * and we do intercept them in L1.
 *
 * If the interrupt happens on the insb instruction, L0 will VMexit, emulate
 * the insb instruction and then it will inject the interrupt to L1 through
 * a nested VMexit.  Due to a bug, it would leave pre-emulation values of RIP,
 * RAX and RSP in the VMCB.
 *
 * In our intercept handler we detect the bug by checking that RIP is that of
 * the insb instruction, but its memory operand has already been written.
 * This means that insb was already executed.
 */

static volatile int isr_cnt = 0;
static volatile uint8_t io_port_var = 0xAA;
extern const char insb_instruction_label[];

static void reg_corruption_isr(isr_regs_t *regs)
{
	isr_cnt++;
	apic_write(APIC_EOI, 0);
}

static void reg_corruption_prepare(struct svm_test *test)
{
	default_prepare(test);
	set_test_stage(test, 0);

	vmcb->control.int_ctl = V_INTR_MASKING_MASK;
	vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);

	handle_irq(TIMER_VECTOR, reg_corruption_isr);

	/* set local APIC to inject external interrupts */
	apic_write(APIC_TMICT, 0);
	apic_write(APIC_TDCR, 0);
	apic_write(APIC_LVTT, TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC);
	apic_write(APIC_TMICT, 1000);
}

static void reg_corruption_test(struct svm_test *test)
{
	/* this is an endless loop, which is interrupted by the timer interrupt */
	asm volatile (
		      "1:\n\t"
		      "movw $0x4d0, %%dx\n\t" // IO port
		      "lea %[io_port_var], %%rdi\n\t"
		      "movb $0xAA, %[io_port_var]\n\t"
		      "insb_instruction_label:\n\t"
		      "insb\n\t"
		      "jmp 1b\n\t"

		      : [io_port_var] "=m" (io_port_var)
		      : /* no inputs */
		      : "rdx", "rdi"
	);
}

static bool reg_corruption_finished(struct svm_test *test)
{
	if (isr_cnt == 10000) {
		report(true,
		       "No RIP corruption detected after %d timer interrupts",
		       isr_cnt);
		set_test_stage(test, 1);
		return true;
	}

	if (vmcb->control.exit_code == SVM_EXIT_INTR) {

		void* guest_rip = (void*)vmcb->save.rip;

		irq_enable();
		asm volatile ("nop");
		irq_disable();

		if (guest_rip == insb_instruction_label && io_port_var != 0xAA) {
			report(false,
			       "RIP corruption detected after %d timer interrupts",
			       isr_cnt);
			return true;
		}

	}
	return false;
}

static bool reg_corruption_check(struct svm_test *test)
{
	return get_test_stage(test) == 1;
}

#define TEST(name) { #name, .v2 = name }

/*
 * v2 tests
 */

static void basic_guest_main(struct svm_test *test)
{
}

#define SVM_TEST_REG_RESERVED_BITS(start, end, inc, str_name, reg, val,	\
				   resv_mask)				\
{									\
	u64 tmp, mask;							\
	int i;								\
									\
	for (i = start; i <= end; i = i + inc) {			\
		mask = 1ull << i;					\
		if (!(mask & resv_mask))				\
			continue;					\
		tmp = val | mask;					\
		reg = tmp;						\
		report(svm_vmrun() == SVM_EXIT_ERR, "Test %s %d:%d: %lx",\
		       str_name, end, start, tmp);			\
	}								\
}

#define SVM_TEST_CR_RESERVED_BITS(start, end, inc, cr, val, resv_mask)	\
{									\
	u64 tmp, mask;							\
	int i;								\
									\
	for (i = start; i <= end; i = i + inc) {			\
		mask = 1ull << i;					\
		if (!(mask & resv_mask))				\
			continue;					\
		tmp = val | mask;					\
		switch (cr) {						\
		case 0:							\
			vmcb->save.cr0 = tmp;				\
			break;						\
		case 3:							\
			vmcb->save.cr3 = tmp;				\
			break;						\
		case 4:							\
			vmcb->save.cr4 = tmp;				\
		}							\
		report(svm_vmrun() == SVM_EXIT_ERR, "Test CR%d %d:%d: %lx",\
		       cr, end, start, tmp);				\
	}								\
}

static void test_efer(void)
{
	/*
	 * Un-setting EFER.SVME is illegal
	 */
	u64 efer_saved = vmcb->save.efer;
	u64 efer = efer_saved;

	report(svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer);
	efer &= ~EFER_SVME;
	vmcb->save.efer = efer;
	report(svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer);
	vmcb->save.efer = efer_saved;

	/*
	 * EFER MBZ bits: 63:16, 9
	 */
	efer_saved = vmcb->save.efer;

	SVM_TEST_REG_RESERVED_BITS(8, 9, 1, "EFER", vmcb->save.efer,
				   efer_saved, SVM_EFER_RESERVED_MASK);
	SVM_TEST_REG_RESERVED_BITS(16, 63, 4, "EFER", vmcb->save.efer,
				   efer_saved, SVM_EFER_RESERVED_MASK);

	vmcb->save.efer = efer_saved;
}

static void test_cr0(void)
{
	/*
	 * Un-setting CR0.CD and setting CR0.NW is an illegal combination
	 */
	u64 cr0_saved = vmcb->save.cr0;
	u64 cr0 = cr0_saved;

	cr0 |= X86_CR0_CD;
	cr0 &= ~X86_CR0_NW;
	vmcb->save.cr0 = cr0;
	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=0: %lx",
	       cr0);
	cr0 |= X86_CR0_NW;
	vmcb->save.cr0 = cr0;
	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=1,NW=1: %lx",
	       cr0);
	cr0 &= ~X86_CR0_NW;
	cr0 &= ~X86_CR0_CD;
	vmcb->save.cr0 = cr0;
	report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test CR0 CD=0,NW=0: %lx",
	       cr0);
	cr0 |= X86_CR0_NW;
	vmcb->save.cr0 = cr0;
	report(svm_vmrun() == SVM_EXIT_ERR, "Test CR0 CD=0,NW=1: %lx",
	       cr0);
	vmcb->save.cr0 = cr0_saved;

	/*
	 * CR0[63:32] are MBZ
	 */
	cr0 = cr0_saved;

	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "CR0", vmcb->save.cr0, cr0_saved,
				   SVM_CR0_RESERVED_MASK);
	vmcb->save.cr0 = cr0_saved;
}

static void test_cr3(void)
{
	/*
	 * CR3 MBZ bits based on different modes:
	 *   [63:52] - long mode
	 */
	u64 cr3_saved = vmcb->save.cr3;

	SVM_TEST_CR_RESERVED_BITS(0, 63, 1, 3, cr3_saved,
				  SVM_CR3_LONG_RESERVED_MASK);

	vmcb->save.cr3 = cr3_saved;
}

static void test_cr4(void)
{
	/*
	 * CR4 MBZ bits based on different modes:
	 *   [15:12], 17, 19, [31:22] - legacy mode
	 *   [15:12], 17, 19, [63:22] - long mode
	 */
	u64 cr4_saved = vmcb->save.cr4;
	u64 efer_saved = vmcb->save.efer;
	u64 efer = efer_saved;

	efer &= ~EFER_LME;
	vmcb->save.efer = efer;
	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
				  SVM_CR4_LEGACY_RESERVED_MASK);

	efer |= EFER_LME;
	vmcb->save.efer = efer;
	SVM_TEST_CR_RESERVED_BITS(12, 31, 1, 4, cr4_saved,
				  SVM_CR4_RESERVED_MASK);
	SVM_TEST_CR_RESERVED_BITS(32, 63, 4, 4, cr4_saved,
				  SVM_CR4_RESERVED_MASK);

	vmcb->save.cr4 = cr4_saved;
	vmcb->save.efer = efer_saved;
}

static void test_dr(void)
{
	/*
	 * DR6[63:32] and DR7[63:32] are MBZ
	 */
	u64 dr_saved = vmcb->save.dr6;

	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR6", vmcb->save.dr6, dr_saved,
				   SVM_DR6_RESERVED_MASK);
	vmcb->save.dr6 = dr_saved;

	dr_saved = vmcb->save.dr7;
	SVM_TEST_REG_RESERVED_BITS(32, 63, 4, "DR7", vmcb->save.dr7, dr_saved,
				   SVM_DR7_RESERVED_MASK);

	vmcb->save.dr7 = dr_saved;
}

static void svm_guest_state_test(void)
{
	test_set_guest(basic_guest_main);

	test_efer();
	test_cr0();
	test_cr3();
	test_cr4();
	test_dr();
}
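
/*
 * Test table.  Each legacy entry lists, in order: name, supported(),
 * prepare(), prepare_gif_clear(), the guest main body, finished() and the
 * final check callback; TEST() entries only set the .v2 handler.
 */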

struct svm_test svm_tests[] = {
	{ "null", default_supported, default_prepare,
	  default_prepare_gif_clear, null_test,
	  default_finished, null_check },
	{ "vmrun", default_supported, default_prepare,
	  default_prepare_gif_clear, test_vmrun,
	  default_finished, check_vmrun },
	{ "ioio", default_supported, prepare_ioio,
	  default_prepare_gif_clear, test_ioio,
	  ioio_finished, check_ioio },
	{ "vmrun intercept check", default_supported, prepare_no_vmrun_int,
	  default_prepare_gif_clear, null_test, default_finished,
	  check_no_vmrun_int },
	{ "rsm", default_supported,
	  prepare_rsm_intercept, default_prepare_gif_clear,
	  test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept },
	{ "cr3 read intercept", default_supported,
	  prepare_cr3_intercept, default_prepare_gif_clear,
	  test_cr3_intercept, default_finished, check_cr3_intercept },
	{ "cr3 read nointercept", default_supported, default_prepare,
	  default_prepare_gif_clear, test_cr3_intercept, default_finished,
	  check_cr3_nointercept },
	{ "cr3 read intercept emulate", smp_supported,
	  prepare_cr3_intercept_bypass, default_prepare_gif_clear,
	  test_cr3_intercept_bypass, default_finished, check_cr3_intercept },
	{ "dr intercept check", default_supported, prepare_dr_intercept,
	  default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished,
	  check_dr_intercept },
	{ "next_rip", next_rip_supported, prepare_next_rip,
	  default_prepare_gif_clear, test_next_rip,
	  default_finished, check_next_rip },
	{ "msr intercept check", default_supported, prepare_msr_intercept,
	  default_prepare_gif_clear, test_msr_intercept,
	  msr_intercept_finished, check_msr_intercept },
	{ "mode_switch", default_supported, prepare_mode_switch,
	  default_prepare_gif_clear, test_mode_switch,
	  mode_switch_finished, check_mode_switch },
	{ "asid_zero", default_supported, prepare_asid_zero,
	  default_prepare_gif_clear, test_asid_zero,
	  default_finished, check_asid_zero },
	{ "sel_cr0_bug", default_supported, sel_cr0_bug_prepare,
	  default_prepare_gif_clear, sel_cr0_bug_test,
	  sel_cr0_bug_finished, sel_cr0_bug_check },
	{ "npt_nx", npt_supported, npt_nx_prepare,
	  default_prepare_gif_clear, null_test,
	  default_finished, npt_nx_check },
	{ "npt_us", npt_supported, npt_us_prepare,
	  default_prepare_gif_clear, npt_us_test,
	  default_finished, npt_us_check },
	{ "npt_rsvd", npt_supported, npt_rsvd_prepare,
	  default_prepare_gif_clear, null_test,
	  default_finished, npt_rsvd_check },
	{ "npt_rw", npt_supported, npt_rw_prepare,
	  default_prepare_gif_clear, npt_rw_test,
	  default_finished, npt_rw_check },
	{ "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare,
	  default_prepare_gif_clear, null_test,
	  default_finished, npt_rsvd_pfwalk_check },
	{ "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare,
	  default_prepare_gif_clear, null_test,
	  default_finished, npt_rw_pfwalk_check },
	{ "npt_l1mmio", npt_supported, npt_l1mmio_prepare,
	  default_prepare_gif_clear, npt_l1mmio_test,
	  default_finished, npt_l1mmio_check },
	{ "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare,
	  default_prepare_gif_clear, npt_rw_l1mmio_test,
	  default_finished, npt_rw_l1mmio_check },
	{ "tsc_adjust", tsc_adjust_supported, tsc_adjust_prepare,
	  default_prepare_gif_clear, tsc_adjust_test,
	  default_finished, tsc_adjust_check },
	{ "latency_run_exit", default_supported, latency_prepare,
	  default_prepare_gif_clear, latency_test,
	  latency_finished, latency_check },
	{ "latency_svm_insn", default_supported, lat_svm_insn_prepare,
	  default_prepare_gif_clear, null_test,
	  lat_svm_insn_finished, lat_svm_insn_check },
	{ "exc_inject", default_supported, exc_inject_prepare,
	  default_prepare_gif_clear, exc_inject_test,
	  exc_inject_finished, exc_inject_check },
	{ "pending_event", default_supported, pending_event_prepare,
	  default_prepare_gif_clear,
	  pending_event_test, pending_event_finished, pending_event_check },
	{ "pending_event_cli", default_supported, pending_event_cli_prepare,
	  pending_event_cli_prepare_gif_clear,
	  pending_event_cli_test, pending_event_cli_finished,
	  pending_event_cli_check },
	{ "interrupt", default_supported, interrupt_prepare,
	  default_prepare_gif_clear, interrupt_test,
	  interrupt_finished, interrupt_check },
	{ "nmi", default_supported, nmi_prepare,
	  default_prepare_gif_clear, nmi_test,
	  nmi_finished, nmi_check },
	{ "nmi_hlt", smp_supported, nmi_prepare,
	  default_prepare_gif_clear, nmi_hlt_test,
	  nmi_hlt_finished, nmi_hlt_check },
	{ "virq_inject", default_supported, virq_inject_prepare,
	  default_prepare_gif_clear, virq_inject_test,
	  virq_inject_finished, virq_inject_check },
	{ "reg_corruption", default_supported, reg_corruption_prepare,
	  default_prepare_gif_clear, reg_corruption_test,
	  reg_corruption_finished, reg_corruption_check },
	TEST(svm_guest_state_test),
	{ NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};