1 #include "svm.h" 2 #include "libcflat.h" 3 #include "processor.h" 4 #include "desc.h" 5 #include "msr.h" 6 #include "vm.h" 7 #include "smp.h" 8 #include "types.h" 9 #include "alloc_page.h" 10 #include "isr.h" 11 #include "apic.h" 12 #include "delay.h" 13 14 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f 15 16 static void *scratch_page; 17 18 #define LATENCY_RUNS 1000000 19 20 u64 tsc_start; 21 u64 tsc_end; 22 23 u64 vmrun_sum, vmexit_sum; 24 u64 vmsave_sum, vmload_sum; 25 u64 stgi_sum, clgi_sum; 26 u64 latvmrun_max; 27 u64 latvmrun_min; 28 u64 latvmexit_max; 29 u64 latvmexit_min; 30 u64 latvmload_max; 31 u64 latvmload_min; 32 u64 latvmsave_max; 33 u64 latvmsave_min; 34 u64 latstgi_max; 35 u64 latstgi_min; 36 u64 latclgi_max; 37 u64 latclgi_min; 38 u64 runs; 39 40 static void null_test(struct svm_test *test) 41 { 42 } 43 44 static bool null_check(struct svm_test *test) 45 { 46 return vmcb->control.exit_code == SVM_EXIT_VMMCALL; 47 } 48 49 static void prepare_no_vmrun_int(struct svm_test *test) 50 { 51 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMRUN); 52 } 53 54 static bool check_no_vmrun_int(struct svm_test *test) 55 { 56 return vmcb->control.exit_code == SVM_EXIT_ERR; 57 } 58 59 static void test_vmrun(struct svm_test *test) 60 { 61 asm volatile ("vmrun %0" : : "a"(virt_to_phys(vmcb))); 62 } 63 64 static bool check_vmrun(struct svm_test *test) 65 { 66 return vmcb->control.exit_code == SVM_EXIT_VMRUN; 67 } 68 69 static void prepare_rsm_intercept(struct svm_test *test) 70 { 71 default_prepare(test); 72 vmcb->control.intercept |= 1 << INTERCEPT_RSM; 73 vmcb->control.intercept_exceptions |= (1ULL << UD_VECTOR); 74 } 75 76 static void test_rsm_intercept(struct svm_test *test) 77 { 78 asm volatile ("rsm" : : : "memory"); 79 } 80 81 static bool check_rsm_intercept(struct svm_test *test) 82 { 83 return get_test_stage(test) == 2; 84 } 85 86 static bool finished_rsm_intercept(struct svm_test *test) 87 { 88 switch (get_test_stage(test)) { 89 case 0: 90 if (vmcb->control.exit_code != SVM_EXIT_RSM) { 91 report(false, "VMEXIT not due to rsm. Exit reason 0x%x", 92 vmcb->control.exit_code); 93 return true; 94 } 95 vmcb->control.intercept &= ~(1 << INTERCEPT_RSM); 96 inc_test_stage(test); 97 break; 98 99 case 1: 100 if (vmcb->control.exit_code != SVM_EXIT_EXCP_BASE + UD_VECTOR) { 101 report(false, "VMEXIT not due to #UD. 
Exit reason 0x%x", 102 vmcb->control.exit_code); 103 return true; 104 } 105 vmcb->save.rip += 2; 106 inc_test_stage(test); 107 break; 108 109 default: 110 return true; 111 } 112 return get_test_stage(test) == 2; 113 } 114 115 static void prepare_cr3_intercept(struct svm_test *test) 116 { 117 default_prepare(test); 118 vmcb->control.intercept_cr_read |= 1 << 3; 119 } 120 121 static void test_cr3_intercept(struct svm_test *test) 122 { 123 asm volatile ("mov %%cr3, %0" : "=r"(test->scratch) : : "memory"); 124 } 125 126 static bool check_cr3_intercept(struct svm_test *test) 127 { 128 return vmcb->control.exit_code == SVM_EXIT_READ_CR3; 129 } 130 131 static bool check_cr3_nointercept(struct svm_test *test) 132 { 133 return null_check(test) && test->scratch == read_cr3(); 134 } 135 136 static void corrupt_cr3_intercept_bypass(void *_test) 137 { 138 struct svm_test *test = _test; 139 extern volatile u32 mmio_insn; 140 141 while (!__sync_bool_compare_and_swap(&test->scratch, 1, 2)) 142 pause(); 143 pause(); 144 pause(); 145 pause(); 146 mmio_insn = 0x90d8200f; // mov %cr3, %rax; nop 147 } 148 149 static void prepare_cr3_intercept_bypass(struct svm_test *test) 150 { 151 default_prepare(test); 152 vmcb->control.intercept_cr_read |= 1 << 3; 153 on_cpu_async(1, corrupt_cr3_intercept_bypass, test); 154 } 155 156 static void test_cr3_intercept_bypass(struct svm_test *test) 157 { 158 ulong a = 0xa0000; 159 160 test->scratch = 1; 161 while (test->scratch != 2) 162 barrier(); 163 164 asm volatile ("mmio_insn: mov %0, (%0); nop" 165 : "+a"(a) : : "memory"); 166 test->scratch = a; 167 } 168 169 static void prepare_dr_intercept(struct svm_test *test) 170 { 171 default_prepare(test); 172 vmcb->control.intercept_dr_read = 0xff; 173 vmcb->control.intercept_dr_write = 0xff; 174 } 175 176 static void test_dr_intercept(struct svm_test *test) 177 { 178 unsigned int i, failcnt = 0; 179 180 /* Loop testing debug register reads */ 181 for (i = 0; i < 8; i++) { 182 183 switch (i) { 184 case 0: 185 asm volatile ("mov %%dr0, %0" : "=r"(test->scratch) : : "memory"); 186 break; 187 case 1: 188 asm volatile ("mov %%dr1, %0" : "=r"(test->scratch) : : "memory"); 189 break; 190 case 2: 191 asm volatile ("mov %%dr2, %0" : "=r"(test->scratch) : : "memory"); 192 break; 193 case 3: 194 asm volatile ("mov %%dr3, %0" : "=r"(test->scratch) : : "memory"); 195 break; 196 case 4: 197 asm volatile ("mov %%dr4, %0" : "=r"(test->scratch) : : "memory"); 198 break; 199 case 5: 200 asm volatile ("mov %%dr5, %0" : "=r"(test->scratch) : : "memory"); 201 break; 202 case 6: 203 asm volatile ("mov %%dr6, %0" : "=r"(test->scratch) : : "memory"); 204 break; 205 case 7: 206 asm volatile ("mov %%dr7, %0" : "=r"(test->scratch) : : "memory"); 207 break; 208 } 209 210 if (test->scratch != i) { 211 report(false, "dr%u read intercept", i); 212 failcnt++; 213 } 214 } 215 216 /* Loop testing debug register writes */ 217 for (i = 0; i < 8; i++) { 218 219 switch (i) { 220 case 0: 221 asm volatile ("mov %0, %%dr0" : : "r"(test->scratch) : "memory"); 222 break; 223 case 1: 224 asm volatile ("mov %0, %%dr1" : : "r"(test->scratch) : "memory"); 225 break; 226 case 2: 227 asm volatile ("mov %0, %%dr2" : : "r"(test->scratch) : "memory"); 228 break; 229 case 3: 230 asm volatile ("mov %0, %%dr3" : : "r"(test->scratch) : "memory"); 231 break; 232 case 4: 233 asm volatile ("mov %0, %%dr4" : : "r"(test->scratch) : "memory"); 234 break; 235 case 5: 236 asm volatile ("mov %0, %%dr5" : : "r"(test->scratch) : "memory"); 237 break; 238 case 6: 239 asm volatile ("mov %0, %%dr6" 
: : "r"(test->scratch) : "memory"); 240 break; 241 case 7: 242 asm volatile ("mov %0, %%dr7" : : "r"(test->scratch) : "memory"); 243 break; 244 } 245 246 if (test->scratch != i) { 247 report(false, "dr%u write intercept", i); 248 failcnt++; 249 } 250 } 251 252 test->scratch = failcnt; 253 } 254 255 static bool dr_intercept_finished(struct svm_test *test) 256 { 257 ulong n = (vmcb->control.exit_code - SVM_EXIT_READ_DR0); 258 259 /* Only expect DR intercepts */ 260 if (n > (SVM_EXIT_MAX_DR_INTERCEPT - SVM_EXIT_READ_DR0)) 261 return true; 262 263 /* 264 * Compute debug register number. 265 * Per Appendix C "SVM Intercept Exit Codes" of AMD64 Architecture 266 * Programmer's Manual Volume 2 - System Programming: 267 * http://support.amd.com/TechDocs/24593.pdf 268 * there are 16 VMEXIT codes each for DR read and write. 269 */ 270 test->scratch = (n % 16); 271 272 /* Jump over MOV instruction */ 273 vmcb->save.rip += 3; 274 275 return false; 276 } 277 278 static bool check_dr_intercept(struct svm_test *test) 279 { 280 return !test->scratch; 281 } 282 283 static bool next_rip_supported(void) 284 { 285 return this_cpu_has(X86_FEATURE_NRIPS); 286 } 287 288 static void prepare_next_rip(struct svm_test *test) 289 { 290 vmcb->control.intercept |= (1ULL << INTERCEPT_RDTSC); 291 } 292 293 294 static void test_next_rip(struct svm_test *test) 295 { 296 asm volatile ("rdtsc\n\t" 297 ".globl exp_next_rip\n\t" 298 "exp_next_rip:\n\t" ::: "eax", "edx"); 299 } 300 301 static bool check_next_rip(struct svm_test *test) 302 { 303 extern char exp_next_rip; 304 unsigned long address = (unsigned long)&exp_next_rip; 305 306 return address == vmcb->control.next_rip; 307 } 308 309 extern u8 *msr_bitmap; 310 311 static void prepare_msr_intercept(struct svm_test *test) 312 { 313 default_prepare(test); 314 vmcb->control.intercept |= (1ULL << INTERCEPT_MSR_PROT); 315 vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR); 316 memset(msr_bitmap, 0xff, MSR_BITMAP_SIZE); 317 } 318 319 static void test_msr_intercept(struct svm_test *test) 320 { 321 unsigned long msr_value = 0xef8056791234abcd; /* Arbitrary value */ 322 unsigned long msr_index; 323 324 for (msr_index = 0; msr_index <= 0xc0011fff; msr_index++) { 325 if (msr_index == 0xC0010131 /* MSR_SEV_STATUS */) { 326 /* 327 * Per section 15.34.10 "SEV_STATUS MSR" of AMD64 Architecture 328 * Programmer's Manual volume 2 - System Programming: 329 * http://support.amd.com/TechDocs/24593.pdf 330 * SEV_STATUS MSR (C001_0131) is a non-interceptable MSR. 331 */ 332 continue; 333 } 334 335 /* Skips gaps between supported MSR ranges */ 336 if (msr_index == 0x2000) 337 msr_index = 0xc0000000; 338 else if (msr_index == 0xc0002000) 339 msr_index = 0xc0010000; 340 341 test->scratch = -1; 342 343 rdmsr(msr_index); 344 345 /* Check that a read intercept occurred for MSR at msr_index */ 346 if (test->scratch != msr_index) 347 report(false, "MSR 0x%lx read intercept", msr_index); 348 349 /* 350 * Poor man approach to generate a value that 351 * seems arbitrary each time around the loop. 
352 */ 353 msr_value += (msr_value << 1); 354 355 wrmsr(msr_index, msr_value); 356 357 /* Check that a write intercept occurred for MSR with msr_value */ 358 if (test->scratch != msr_value) 359 report(false, "MSR 0x%lx write intercept", msr_index); 360 } 361 362 test->scratch = -2; 363 } 364 365 static bool msr_intercept_finished(struct svm_test *test) 366 { 367 u32 exit_code = vmcb->control.exit_code; 368 u64 exit_info_1; 369 u8 *opcode; 370 371 if (exit_code == SVM_EXIT_MSR) { 372 exit_info_1 = vmcb->control.exit_info_1; 373 } else { 374 /* 375 * If #GP exception occurs instead, check that it was 376 * for RDMSR/WRMSR and set exit_info_1 accordingly. 377 */ 378 379 if (exit_code != (SVM_EXIT_EXCP_BASE + GP_VECTOR)) 380 return true; 381 382 opcode = (u8 *)vmcb->save.rip; 383 if (opcode[0] != 0x0f) 384 return true; 385 386 switch (opcode[1]) { 387 case 0x30: /* WRMSR */ 388 exit_info_1 = 1; 389 break; 390 case 0x32: /* RDMSR */ 391 exit_info_1 = 0; 392 break; 393 default: 394 return true; 395 } 396 397 /* 398 * Warn that #GP exception occured instead. 399 * RCX holds the MSR index. 400 */ 401 printf("%s 0x%lx #GP exception\n", 402 exit_info_1 ? "WRMSR" : "RDMSR", get_regs().rcx); 403 } 404 405 /* Jump over RDMSR/WRMSR instruction */ 406 vmcb->save.rip += 2; 407 408 /* 409 * Test whether the intercept was for RDMSR/WRMSR. 410 * For RDMSR, test->scratch is set to the MSR index; 411 * RCX holds the MSR index. 412 * For WRMSR, test->scratch is set to the MSR value; 413 * RDX holds the upper 32 bits of the MSR value, 414 * while RAX hold its lower 32 bits. 415 */ 416 if (exit_info_1) 417 test->scratch = 418 ((get_regs().rdx << 32) | (vmcb->save.rax & 0xffffffff)); 419 else 420 test->scratch = get_regs().rcx; 421 422 return false; 423 } 424 425 static bool check_msr_intercept(struct svm_test *test) 426 { 427 memset(msr_bitmap, 0, MSR_BITMAP_SIZE); 428 return (test->scratch == -2); 429 } 430 431 static void prepare_mode_switch(struct svm_test *test) 432 { 433 vmcb->control.intercept_exceptions |= (1ULL << GP_VECTOR) 434 | (1ULL << UD_VECTOR) 435 | (1ULL << DF_VECTOR) 436 | (1ULL << PF_VECTOR); 437 test->scratch = 0; 438 } 439 440 static void test_mode_switch(struct svm_test *test) 441 { 442 asm volatile(" cli\n" 443 " ljmp *1f\n" /* jump to 32-bit code segment */ 444 "1:\n" 445 " .long 2f\n" 446 " .long " xstr(KERNEL_CS32) "\n" 447 ".code32\n" 448 "2:\n" 449 " movl %%cr0, %%eax\n" 450 " btcl $31, %%eax\n" /* clear PG */ 451 " movl %%eax, %%cr0\n" 452 " movl $0xc0000080, %%ecx\n" /* EFER */ 453 " rdmsr\n" 454 " btcl $8, %%eax\n" /* clear LME */ 455 " wrmsr\n" 456 " movl %%cr4, %%eax\n" 457 " btcl $5, %%eax\n" /* clear PAE */ 458 " movl %%eax, %%cr4\n" 459 " movw %[ds16], %%ax\n" 460 " movw %%ax, %%ds\n" 461 " ljmpl %[cs16], $3f\n" /* jump to 16 bit protected-mode */ 462 ".code16\n" 463 "3:\n" 464 " movl %%cr0, %%eax\n" 465 " btcl $0, %%eax\n" /* clear PE */ 466 " movl %%eax, %%cr0\n" 467 " ljmpl $0, $4f\n" /* jump to real-mode */ 468 "4:\n" 469 " vmmcall\n" 470 " movl %%cr0, %%eax\n" 471 " btsl $0, %%eax\n" /* set PE */ 472 " movl %%eax, %%cr0\n" 473 " ljmpl %[cs32], $5f\n" /* back to protected mode */ 474 ".code32\n" 475 "5:\n" 476 " movl %%cr4, %%eax\n" 477 " btsl $5, %%eax\n" /* set PAE */ 478 " movl %%eax, %%cr4\n" 479 " movl $0xc0000080, %%ecx\n" /* EFER */ 480 " rdmsr\n" 481 " btsl $8, %%eax\n" /* set LME */ 482 " wrmsr\n" 483 " movl %%cr0, %%eax\n" 484 " btsl $31, %%eax\n" /* set PG */ 485 " movl %%eax, %%cr0\n" 486 " ljmpl %[cs64], $6f\n" /* back to long mode */ 487 ".code64\n\t" 488 
"6:\n" 489 " vmmcall\n" 490 :: [cs16] "i"(KERNEL_CS16), [ds16] "i"(KERNEL_DS16), 491 [cs32] "i"(KERNEL_CS32), [cs64] "i"(KERNEL_CS64) 492 : "rax", "rbx", "rcx", "rdx", "memory"); 493 } 494 495 static bool mode_switch_finished(struct svm_test *test) 496 { 497 u64 cr0, cr4, efer; 498 499 cr0 = vmcb->save.cr0; 500 cr4 = vmcb->save.cr4; 501 efer = vmcb->save.efer; 502 503 /* Only expect VMMCALL intercepts */ 504 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) 505 return true; 506 507 /* Jump over VMMCALL instruction */ 508 vmcb->save.rip += 3; 509 510 /* Do sanity checks */ 511 switch (test->scratch) { 512 case 0: 513 /* Test should be in real mode now - check for this */ 514 if ((cr0 & 0x80000001) || /* CR0.PG, CR0.PE */ 515 (cr4 & 0x00000020) || /* CR4.PAE */ 516 (efer & 0x00000500)) /* EFER.LMA, EFER.LME */ 517 return true; 518 break; 519 case 2: 520 /* Test should be back in long-mode now - check for this */ 521 if (((cr0 & 0x80000001) != 0x80000001) || /* CR0.PG, CR0.PE */ 522 ((cr4 & 0x00000020) != 0x00000020) || /* CR4.PAE */ 523 ((efer & 0x00000500) != 0x00000500)) /* EFER.LMA, EFER.LME */ 524 return true; 525 break; 526 } 527 528 /* one step forward */ 529 test->scratch += 1; 530 531 return test->scratch == 2; 532 } 533 534 static bool check_mode_switch(struct svm_test *test) 535 { 536 return test->scratch == 2; 537 } 538 539 extern u8 *io_bitmap; 540 541 static void prepare_ioio(struct svm_test *test) 542 { 543 vmcb->control.intercept |= (1ULL << INTERCEPT_IOIO_PROT); 544 test->scratch = 0; 545 memset(io_bitmap, 0, 8192); 546 io_bitmap[8192] = 0xFF; 547 } 548 549 static void test_ioio(struct svm_test *test) 550 { 551 // stage 0, test IO pass 552 inb(0x5000); 553 outb(0x0, 0x5000); 554 if (get_test_stage(test) != 0) 555 goto fail; 556 557 // test IO width, in/out 558 io_bitmap[0] = 0xFF; 559 inc_test_stage(test); 560 inb(0x0); 561 if (get_test_stage(test) != 2) 562 goto fail; 563 564 outw(0x0, 0x0); 565 if (get_test_stage(test) != 3) 566 goto fail; 567 568 inl(0x0); 569 if (get_test_stage(test) != 4) 570 goto fail; 571 572 // test low/high IO port 573 io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 574 inb(0x5000); 575 if (get_test_stage(test) != 5) 576 goto fail; 577 578 io_bitmap[0x9000 / 8] = (1 << (0x9000 % 8)); 579 inw(0x9000); 580 if (get_test_stage(test) != 6) 581 goto fail; 582 583 // test partial pass 584 io_bitmap[0x5000 / 8] = (1 << (0x5000 % 8)); 585 inl(0x4FFF); 586 if (get_test_stage(test) != 7) 587 goto fail; 588 589 // test across pages 590 inc_test_stage(test); 591 inl(0x7FFF); 592 if (get_test_stage(test) != 8) 593 goto fail; 594 595 inc_test_stage(test); 596 io_bitmap[0x8000 / 8] = 1 << (0x8000 % 8); 597 inl(0x7FFF); 598 if (get_test_stage(test) != 10) 599 goto fail; 600 601 io_bitmap[0] = 0; 602 inl(0xFFFF); 603 if (get_test_stage(test) != 11) 604 goto fail; 605 606 io_bitmap[0] = 0xFF; 607 io_bitmap[8192] = 0; 608 inl(0xFFFF); 609 inc_test_stage(test); 610 if (get_test_stage(test) != 12) 611 goto fail; 612 613 return; 614 615 fail: 616 report(false, "stage %d", get_test_stage(test)); 617 test->scratch = -1; 618 } 619 620 static bool ioio_finished(struct svm_test *test) 621 { 622 unsigned port, size; 623 624 /* Only expect IOIO intercepts */ 625 if (vmcb->control.exit_code == SVM_EXIT_VMMCALL) 626 return true; 627 628 if (vmcb->control.exit_code != SVM_EXIT_IOIO) 629 return true; 630 631 /* one step forward */ 632 test->scratch += 1; 633 634 port = vmcb->control.exit_info_1 >> 16; 635 size = (vmcb->control.exit_info_1 >> SVM_IOIO_SIZE_SHIFT) & 7; 636 637 while 
(size--) { 638 io_bitmap[port / 8] &= ~(1 << (port & 7)); 639 port++; 640 } 641 642 return false; 643 } 644 645 static bool check_ioio(struct svm_test *test) 646 { 647 memset(io_bitmap, 0, 8193); 648 return test->scratch != -1; 649 } 650 651 static void prepare_asid_zero(struct svm_test *test) 652 { 653 vmcb->control.asid = 0; 654 } 655 656 static void test_asid_zero(struct svm_test *test) 657 { 658 asm volatile ("vmmcall\n\t"); 659 } 660 661 static bool check_asid_zero(struct svm_test *test) 662 { 663 return vmcb->control.exit_code == SVM_EXIT_ERR; 664 } 665 666 static void sel_cr0_bug_prepare(struct svm_test *test) 667 { 668 vmcb_ident(vmcb); 669 vmcb->control.intercept |= (1ULL << INTERCEPT_SELECTIVE_CR0); 670 } 671 672 static bool sel_cr0_bug_finished(struct svm_test *test) 673 { 674 return true; 675 } 676 677 static void sel_cr0_bug_test(struct svm_test *test) 678 { 679 unsigned long cr0; 680 681 /* read cr0, clear CD, and write back */ 682 cr0 = read_cr0(); 683 cr0 |= (1UL << 30); 684 write_cr0(cr0); 685 686 /* 687 * If we are here the test failed, not sure what to do now because we 688 * are not in guest-mode anymore so we can't trigger an intercept. 689 * Trigger a tripple-fault for now. 690 */ 691 report(false, "sel_cr0 test. Can not recover from this - exiting"); 692 exit(report_summary()); 693 } 694 695 static bool sel_cr0_bug_check(struct svm_test *test) 696 { 697 return vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE; 698 } 699 700 static void npt_nx_prepare(struct svm_test *test) 701 { 702 703 u64 *pte; 704 705 vmcb_ident(vmcb); 706 pte = npt_get_pte((u64)null_test); 707 708 *pte |= (1ULL << 63); 709 } 710 711 static bool npt_nx_check(struct svm_test *test) 712 { 713 u64 *pte = npt_get_pte((u64)null_test); 714 715 *pte &= ~(1ULL << 63); 716 717 vmcb->save.efer |= (1 << 11); 718 719 return (vmcb->control.exit_code == SVM_EXIT_NPF) 720 && (vmcb->control.exit_info_1 == 0x100000015ULL); 721 } 722 723 static void npt_us_prepare(struct svm_test *test) 724 { 725 u64 *pte; 726 727 scratch_page = alloc_page(); 728 vmcb_ident(vmcb); 729 pte = npt_get_pte((u64)scratch_page); 730 731 *pte &= ~(1ULL << 2); 732 } 733 734 static void npt_us_test(struct svm_test *test) 735 { 736 (void) *(volatile u64 *)scratch_page; 737 } 738 739 static bool npt_us_check(struct svm_test *test) 740 { 741 u64 *pte = npt_get_pte((u64)scratch_page); 742 743 *pte |= (1ULL << 2); 744 745 return (vmcb->control.exit_code == SVM_EXIT_NPF) 746 && (vmcb->control.exit_info_1 == 0x100000005ULL); 747 } 748 749 u64 save_pde; 750 751 static void npt_rsvd_prepare(struct svm_test *test) 752 { 753 u64 *pde; 754 755 vmcb_ident(vmcb); 756 pde = npt_get_pde((u64) null_test); 757 758 save_pde = *pde; 759 *pde = (1ULL << 19) | (1ULL << 7) | 0x27; 760 } 761 762 static bool npt_rsvd_check(struct svm_test *test) 763 { 764 u64 *pde = npt_get_pde((u64) null_test); 765 766 *pde = save_pde; 767 768 return (vmcb->control.exit_code == SVM_EXIT_NPF) 769 && (vmcb->control.exit_info_1 == 0x10000001dULL); 770 } 771 772 static void npt_rw_prepare(struct svm_test *test) 773 { 774 775 u64 *pte; 776 777 vmcb_ident(vmcb); 778 pte = npt_get_pte(0x80000); 779 780 *pte &= ~(1ULL << 1); 781 } 782 783 static void npt_rw_test(struct svm_test *test) 784 { 785 u64 *data = (void*)(0x80000); 786 787 *data = 0; 788 } 789 790 static bool npt_rw_check(struct svm_test *test) 791 { 792 u64 *pte = npt_get_pte(0x80000); 793 794 *pte |= (1ULL << 1); 795 796 return (vmcb->control.exit_code == SVM_EXIT_NPF) 797 && (vmcb->control.exit_info_1 == 0x100000007ULL); 798 } 
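/*
 * Note on the NPF checks above and below: on SVM_EXIT_NPF, exit_info_1
 * carries the nested page-fault error code.  Per the AMD64 Architecture
 * Programmer's Manual Volume 2 - System Programming
 * (http://support.amd.com/TechDocs/24593.pdf), the low bits use the usual
 * page-fault error code layout (bit 0 = present, bit 1 = write,
 * bit 2 = user, bit 3 = reserved bit set, bit 4 = instruction fetch),
 * bit 32 is set when the fault occurred on the final guest-physical
 * translation, and bit 33 when it occurred while walking the guest page
 * tables.  For example, npt_rw_check expects 0x100000007 (present, write,
 * user, final translation) while npt_rw_pfwalk_check expects 0x200000006
 * (write, user, during the guest table walk).
 */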

static void npt_rw_pfwalk_prepare(struct svm_test *test)
{

	u64 *pte;

	vmcb_ident(vmcb);
	pte = npt_get_pte(read_cr3());

	*pte &= ~(1ULL << 1);
}

static bool npt_rw_pfwalk_check(struct svm_test *test)
{
	u64 *pte = npt_get_pte(read_cr3());

	*pte |= (1ULL << 1);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	    && (vmcb->control.exit_info_1 == 0x200000006ULL)
	    && (vmcb->control.exit_info_2 == read_cr3());
}

static void npt_rsvd_pfwalk_prepare(struct svm_test *test)
{
	u64 *pdpe;
	vmcb_ident(vmcb);

	pdpe = npt_get_pdpe();
	pdpe[0] |= (1ULL << 8);
}

static bool npt_rsvd_pfwalk_check(struct svm_test *test)
{
	u64 *pdpe = npt_get_pdpe();
	pdpe[0] &= ~(1ULL << 8);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	    && (vmcb->control.exit_info_1 == 0x20000000eULL);
}

static void npt_l1mmio_prepare(struct svm_test *test)
{
	vmcb_ident(vmcb);
}

u32 nested_apic_version1;
u32 nested_apic_version2;

static void npt_l1mmio_test(struct svm_test *test)
{
	volatile u32 *data = (volatile void*)(0xfee00030UL);

	nested_apic_version1 = *data;
	nested_apic_version2 = *data;
}

static bool npt_l1mmio_check(struct svm_test *test)
{
	volatile u32 *data = (volatile void*)(0xfee00030);
	u32 lvr = *data;

	return nested_apic_version1 == lvr && nested_apic_version2 == lvr;
}

static void npt_rw_l1mmio_prepare(struct svm_test *test)
{

	u64 *pte;

	vmcb_ident(vmcb);
	pte = npt_get_pte(0xfee00080);

	*pte &= ~(1ULL << 1);
}

static void npt_rw_l1mmio_test(struct svm_test *test)
{
	volatile u32 *data = (volatile void*)(0xfee00080);

	*data = *data;
}

static bool npt_rw_l1mmio_check(struct svm_test *test)
{
	u64 *pte = npt_get_pte(0xfee00080);

	*pte |= (1ULL << 1);

	return (vmcb->control.exit_code == SVM_EXIT_NPF)
	    && (vmcb->control.exit_info_1 == 0x100000007ULL);
}

#define TSC_ADJUST_VALUE    (1ll << 32)
#define TSC_OFFSET_VALUE    (-1ll << 48)
static bool ok;

static void tsc_adjust_prepare(struct svm_test *test)
{
	default_prepare(test);
	vmcb->control.tsc_offset = TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC_ADJUST, -TSC_ADJUST_VALUE);
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok = adjust == -TSC_ADJUST_VALUE;
}

static void tsc_adjust_test(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust == -TSC_ADJUST_VALUE;

	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	ok &= adjust <= -2 * TSC_ADJUST_VALUE;

	uint64_t l1_tsc_end = rdtsc() - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_end + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;

	uint64_t l1_tsc_msr = rdmsr(MSR_IA32_TSC) - TSC_OFFSET_VALUE;
	ok &= (l1_tsc_msr + TSC_ADJUST_VALUE - l1_tsc) < TSC_ADJUST_VALUE;
}

static bool tsc_adjust_check(struct svm_test *test)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

	wrmsr(MSR_IA32_TSC_ADJUST, 0);
	return ok && adjust <= -2 * TSC_ADJUST_VALUE;
}

static void latency_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmrun_min = latvmexit_min = -1ULL;
	latvmrun_max = latvmexit_max = 0;
	vmrun_sum = vmexit_sum = 0;
	tsc_start = rdtsc();
}

static void latency_test(struct svm_test *test)
{
	u64 cycles;

start:
	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmrun_max)
		latvmrun_max = cycles;

	if (cycles < latvmrun_min)
		latvmrun_min = cycles;

	vmrun_sum += cycles;

	tsc_start = rdtsc();

	asm volatile ("vmmcall" : : : "memory");
	goto start;
}

static bool latency_finished(struct svm_test *test)
{
	u64 cycles;

	tsc_end = rdtsc();

	cycles = tsc_end - tsc_start;

	if (cycles > latvmexit_max)
		latvmexit_max = cycles;

	if (cycles < latvmexit_min)
		latvmexit_min = cycles;

	vmexit_sum += cycles;

	vmcb->save.rip += 3;

	runs -= 1;

	tsc_end = rdtsc();

	return runs == 0;
}

static bool latency_check(struct svm_test *test)
{
	printf("    Latency VMRUN : max: %ld min: %ld avg: %ld\n", latvmrun_max,
	       latvmrun_min, vmrun_sum / LATENCY_RUNS);
	printf("    Latency VMEXIT: max: %ld min: %ld avg: %ld\n", latvmexit_max,
	       latvmexit_min, vmexit_sum / LATENCY_RUNS);
	return true;
}

static void lat_svm_insn_prepare(struct svm_test *test)
{
	default_prepare(test);
	runs = LATENCY_RUNS;
	latvmload_min = latvmsave_min = latstgi_min = latclgi_min = -1ULL;
	latvmload_max = latvmsave_max = latstgi_max = latclgi_max = 0;
	vmload_sum = vmsave_sum = stgi_sum = clgi_sum = 0;
}

static bool lat_svm_insn_finished(struct svm_test *test)
{
	u64 vmcb_phys = virt_to_phys(vmcb);
	u64 cycles;

	for ( ; runs != 0; runs--) {
		tsc_start = rdtsc();
		asm volatile("vmload %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmload_max)
			latvmload_max = cycles;
		if (cycles < latvmload_min)
			latvmload_min = cycles;
		vmload_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("vmsave %0\n\t" : : "a"(vmcb_phys) : "memory");
		cycles = rdtsc() - tsc_start;
		if (cycles > latvmsave_max)
			latvmsave_max = cycles;
		if (cycles < latvmsave_min)
			latvmsave_min = cycles;
		vmsave_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("stgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latstgi_max)
			latstgi_max = cycles;
		if (cycles < latstgi_min)
			latstgi_min = cycles;
		stgi_sum += cycles;

		tsc_start = rdtsc();
		asm volatile("clgi\n\t");
		cycles = rdtsc() - tsc_start;
		if (cycles > latclgi_max)
			latclgi_max = cycles;
		if (cycles < latclgi_min)
			latclgi_min = cycles;
		clgi_sum += cycles;
	}

	tsc_end = rdtsc();

	return true;
}

static bool lat_svm_insn_check(struct svm_test *test)
{
	printf("    Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
	       latvmload_min, vmload_sum / LATENCY_RUNS);
	printf("    Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
	       latvmsave_min, vmsave_sum / LATENCY_RUNS);
	printf("    Latency STGI:   max: %ld min: %ld avg: %ld\n", latstgi_max,
	       latstgi_min, stgi_sum / LATENCY_RUNS);
	printf("    Latency CLGI:   max: %ld min: %ld avg: %ld\n", latclgi_max,
	       latclgi_min, clgi_sum / LATENCY_RUNS);
	return true;
}

bool pending_event_ipi_fired;
bool pending_event_guest_run;

static void pending_event_ipi_isr(isr_regs_t *regs)
{
	pending_event_ipi_fired = true;
	eoi();
}

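/*
 * The pending_event test below exercises delivery of a physical interrupt
 * that is already pending at VMRUN: prepare() self-sends an IPI that is
 * left pending, and enables the INTR intercept with V_INTR_MASKING set.
 * Stage 0 therefore expects an immediate SVM_EXIT_INTR before the guest
 * body runs; the host then opens an interrupt window (sti; nop; cli) to
 * take the IPI, re-enters, and stage 1 checks that the guest ran normally
 * once no interrupt was pending.
 */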
1078 static void pending_event_prepare(struct svm_test *test) 1079 { 1080 int ipi_vector = 0xf1; 1081 1082 default_prepare(test); 1083 1084 pending_event_ipi_fired = false; 1085 1086 handle_irq(ipi_vector, pending_event_ipi_isr); 1087 1088 pending_event_guest_run = false; 1089 1090 vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1091 vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1092 1093 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1094 APIC_DM_FIXED | ipi_vector, 0); 1095 1096 set_test_stage(test, 0); 1097 } 1098 1099 static void pending_event_test(struct svm_test *test) 1100 { 1101 pending_event_guest_run = true; 1102 } 1103 1104 static bool pending_event_finished(struct svm_test *test) 1105 { 1106 switch (get_test_stage(test)) { 1107 case 0: 1108 if (vmcb->control.exit_code != SVM_EXIT_INTR) { 1109 report(false, "VMEXIT not due to pending interrupt. Exit reason 0x%x", 1110 vmcb->control.exit_code); 1111 return true; 1112 } 1113 1114 vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1115 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 1116 1117 if (pending_event_guest_run) { 1118 report(false, "Guest ran before host received IPI\n"); 1119 return true; 1120 } 1121 1122 irq_enable(); 1123 asm volatile ("nop"); 1124 irq_disable(); 1125 1126 if (!pending_event_ipi_fired) { 1127 report(false, "Pending interrupt not dispatched after IRQ enabled\n"); 1128 return true; 1129 } 1130 break; 1131 1132 case 1: 1133 if (!pending_event_guest_run) { 1134 report(false, "Guest did not resume when no interrupt\n"); 1135 return true; 1136 } 1137 break; 1138 } 1139 1140 inc_test_stage(test); 1141 1142 return get_test_stage(test) == 2; 1143 } 1144 1145 static bool pending_event_check(struct svm_test *test) 1146 { 1147 return get_test_stage(test) == 2; 1148 } 1149 1150 static void pending_event_cli_prepare(struct svm_test *test) 1151 { 1152 default_prepare(test); 1153 1154 pending_event_ipi_fired = false; 1155 1156 handle_irq(0xf1, pending_event_ipi_isr); 1157 1158 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1159 APIC_DM_FIXED | 0xf1, 0); 1160 1161 set_test_stage(test, 0); 1162 } 1163 1164 static void pending_event_cli_prepare_gif_clear(struct svm_test *test) 1165 { 1166 asm("cli"); 1167 } 1168 1169 static void pending_event_cli_test(struct svm_test *test) 1170 { 1171 if (pending_event_ipi_fired == true) { 1172 set_test_stage(test, -1); 1173 report(false, "Interrupt preceeded guest"); 1174 vmmcall(); 1175 } 1176 1177 /* VINTR_MASKING is zero. This should cause the IPI to fire. */ 1178 irq_enable(); 1179 asm volatile ("nop"); 1180 irq_disable(); 1181 1182 if (pending_event_ipi_fired != true) { 1183 set_test_stage(test, -1); 1184 report(false, "Interrupt not triggered by guest"); 1185 } 1186 1187 vmmcall(); 1188 1189 /* 1190 * Now VINTR_MASKING=1, but no interrupt is pending so 1191 * the VINTR interception should be clear in VMCB02. Check 1192 * that L0 did not leave a stale VINTR in the VMCB. 1193 */ 1194 irq_enable(); 1195 asm volatile ("nop"); 1196 irq_disable(); 1197 } 1198 1199 static bool pending_event_cli_finished(struct svm_test *test) 1200 { 1201 if ( vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1202 report(false, "VM_EXIT return to host is not EXIT_VMMCALL exit reason 0x%x", 1203 vmcb->control.exit_code); 1204 return true; 1205 } 1206 1207 switch (get_test_stage(test)) { 1208 case 0: 1209 vmcb->save.rip += 3; 1210 1211 pending_event_ipi_fired = false; 1212 1213 vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1214 1215 /* Now entering again with VINTR_MASKING=1. 
*/ 1216 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 1217 APIC_DM_FIXED | 0xf1, 0); 1218 1219 break; 1220 1221 case 1: 1222 if (pending_event_ipi_fired == true) { 1223 report(false, "Interrupt triggered by guest"); 1224 return true; 1225 } 1226 1227 irq_enable(); 1228 asm volatile ("nop"); 1229 irq_disable(); 1230 1231 if (pending_event_ipi_fired != true) { 1232 report(false, "Interrupt not triggered by host"); 1233 return true; 1234 } 1235 1236 break; 1237 1238 default: 1239 return true; 1240 } 1241 1242 inc_test_stage(test); 1243 1244 return get_test_stage(test) == 2; 1245 } 1246 1247 static bool pending_event_cli_check(struct svm_test *test) 1248 { 1249 return get_test_stage(test) == 2; 1250 } 1251 1252 #define TIMER_VECTOR 222 1253 1254 static volatile bool timer_fired; 1255 1256 static void timer_isr(isr_regs_t *regs) 1257 { 1258 timer_fired = true; 1259 apic_write(APIC_EOI, 0); 1260 } 1261 1262 static void interrupt_prepare(struct svm_test *test) 1263 { 1264 default_prepare(test); 1265 handle_irq(TIMER_VECTOR, timer_isr); 1266 timer_fired = false; 1267 set_test_stage(test, 0); 1268 } 1269 1270 static void interrupt_test(struct svm_test *test) 1271 { 1272 long long start, loops; 1273 1274 apic_write(APIC_LVTT, TIMER_VECTOR); 1275 irq_enable(); 1276 apic_write(APIC_TMICT, 1); //Timer Initial Count Register 0x380 one-shot 1277 for (loops = 0; loops < 10000000 && !timer_fired; loops++) 1278 asm volatile ("nop"); 1279 1280 report(timer_fired, "direct interrupt while running guest"); 1281 1282 if (!timer_fired) { 1283 set_test_stage(test, -1); 1284 vmmcall(); 1285 } 1286 1287 apic_write(APIC_TMICT, 0); 1288 irq_disable(); 1289 vmmcall(); 1290 1291 timer_fired = false; 1292 apic_write(APIC_TMICT, 1); 1293 for (loops = 0; loops < 10000000 && !timer_fired; loops++) 1294 asm volatile ("nop"); 1295 1296 report(timer_fired, "intercepted interrupt while running guest"); 1297 1298 if (!timer_fired) { 1299 set_test_stage(test, -1); 1300 vmmcall(); 1301 } 1302 1303 irq_enable(); 1304 apic_write(APIC_TMICT, 0); 1305 irq_disable(); 1306 1307 timer_fired = false; 1308 start = rdtsc(); 1309 apic_write(APIC_TMICT, 1000000); 1310 asm volatile ("sti; hlt"); 1311 1312 report(rdtsc() - start > 10000 && timer_fired, 1313 "direct interrupt + hlt"); 1314 1315 if (!timer_fired) { 1316 set_test_stage(test, -1); 1317 vmmcall(); 1318 } 1319 1320 apic_write(APIC_TMICT, 0); 1321 irq_disable(); 1322 vmmcall(); 1323 1324 timer_fired = false; 1325 start = rdtsc(); 1326 apic_write(APIC_TMICT, 1000000); 1327 asm volatile ("hlt"); 1328 1329 report(rdtsc() - start > 10000 && timer_fired, 1330 "intercepted interrupt + hlt"); 1331 1332 if (!timer_fired) { 1333 set_test_stage(test, -1); 1334 vmmcall(); 1335 } 1336 1337 apic_write(APIC_TMICT, 0); 1338 irq_disable(); 1339 } 1340 1341 static bool interrupt_finished(struct svm_test *test) 1342 { 1343 switch (get_test_stage(test)) { 1344 case 0: 1345 case 2: 1346 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1347 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1348 vmcb->control.exit_code); 1349 return true; 1350 } 1351 vmcb->save.rip += 3; 1352 1353 vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1354 vmcb->control.int_ctl |= V_INTR_MASKING_MASK; 1355 break; 1356 1357 case 1: 1358 case 3: 1359 if (vmcb->control.exit_code != SVM_EXIT_INTR) { 1360 report(false, "VMEXIT not due to intr intercept. Exit reason 0x%x", 1361 vmcb->control.exit_code); 1362 return true; 1363 } 1364 1365 /* The guest is not woken up from HLT and RIP still points to it. 
*/ 1366 if (get_test_stage(test) == 3) { 1367 vmcb->save.rip++; 1368 } 1369 1370 irq_enable(); 1371 asm volatile ("nop"); 1372 irq_disable(); 1373 1374 vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR); 1375 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 1376 break; 1377 1378 case 4: 1379 break; 1380 1381 default: 1382 return true; 1383 } 1384 1385 inc_test_stage(test); 1386 1387 return get_test_stage(test) == 5; 1388 } 1389 1390 static bool interrupt_check(struct svm_test *test) 1391 { 1392 return get_test_stage(test) == 5; 1393 } 1394 1395 static volatile bool nmi_fired; 1396 1397 static void nmi_handler(isr_regs_t *regs) 1398 { 1399 nmi_fired = true; 1400 apic_write(APIC_EOI, 0); 1401 } 1402 1403 static void nmi_prepare(struct svm_test *test) 1404 { 1405 default_prepare(test); 1406 nmi_fired = false; 1407 handle_irq(NMI_VECTOR, nmi_handler); 1408 set_test_stage(test, 0); 1409 } 1410 1411 static void nmi_test(struct svm_test *test) 1412 { 1413 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1414 1415 report(nmi_fired, "direct NMI while running guest"); 1416 1417 if (!nmi_fired) 1418 set_test_stage(test, -1); 1419 1420 vmmcall(); 1421 1422 nmi_fired = false; 1423 1424 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, 0); 1425 1426 if (!nmi_fired) { 1427 report(nmi_fired, "intercepted pending NMI not dispatched"); 1428 set_test_stage(test, -1); 1429 } 1430 1431 } 1432 1433 static bool nmi_finished(struct svm_test *test) 1434 { 1435 switch (get_test_stage(test)) { 1436 case 0: 1437 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1438 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1439 vmcb->control.exit_code); 1440 return true; 1441 } 1442 vmcb->save.rip += 3; 1443 1444 vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 1445 break; 1446 1447 case 1: 1448 if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1449 report(false, "VMEXIT not due to NMI intercept. 
Exit reason 0x%x", 1450 vmcb->control.exit_code); 1451 return true; 1452 } 1453 1454 report(true, "NMI intercept while running guest"); 1455 break; 1456 1457 case 2: 1458 break; 1459 1460 default: 1461 return true; 1462 } 1463 1464 inc_test_stage(test); 1465 1466 return get_test_stage(test) == 3; 1467 } 1468 1469 static bool nmi_check(struct svm_test *test) 1470 { 1471 return get_test_stage(test) == 3; 1472 } 1473 1474 #define NMI_DELAY 100000000ULL 1475 1476 static void nmi_message_thread(void *_test) 1477 { 1478 struct svm_test *test = _test; 1479 1480 while (get_test_stage(test) != 1) 1481 pause(); 1482 1483 delay(NMI_DELAY); 1484 1485 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 1486 1487 while (get_test_stage(test) != 2) 1488 pause(); 1489 1490 delay(NMI_DELAY); 1491 1492 apic_icr_write(APIC_DEST_PHYSICAL | APIC_DM_NMI | APIC_INT_ASSERT, id_map[0]); 1493 } 1494 1495 static void nmi_hlt_test(struct svm_test *test) 1496 { 1497 long long start; 1498 1499 on_cpu_async(1, nmi_message_thread, test); 1500 1501 start = rdtsc(); 1502 1503 set_test_stage(test, 1); 1504 1505 asm volatile ("hlt"); 1506 1507 report((rdtsc() - start > NMI_DELAY) && nmi_fired, 1508 "direct NMI + hlt"); 1509 1510 if (!nmi_fired) 1511 set_test_stage(test, -1); 1512 1513 nmi_fired = false; 1514 1515 vmmcall(); 1516 1517 start = rdtsc(); 1518 1519 set_test_stage(test, 2); 1520 1521 asm volatile ("hlt"); 1522 1523 report((rdtsc() - start > NMI_DELAY) && nmi_fired, 1524 "intercepted NMI + hlt"); 1525 1526 if (!nmi_fired) { 1527 report(nmi_fired, "intercepted pending NMI not dispatched"); 1528 set_test_stage(test, -1); 1529 vmmcall(); 1530 } 1531 1532 set_test_stage(test, 3); 1533 } 1534 1535 static bool nmi_hlt_finished(struct svm_test *test) 1536 { 1537 switch (get_test_stage(test)) { 1538 case 1: 1539 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1540 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1541 vmcb->control.exit_code); 1542 return true; 1543 } 1544 vmcb->save.rip += 3; 1545 1546 vmcb->control.intercept |= (1ULL << INTERCEPT_NMI); 1547 break; 1548 1549 case 2: 1550 if (vmcb->control.exit_code != SVM_EXIT_NMI) { 1551 report(false, "VMEXIT not due to NMI intercept. Exit reason 0x%x", 1552 vmcb->control.exit_code); 1553 return true; 1554 } 1555 1556 /* The guest is not woken up from HLT and RIP still points to it. */ 1557 vmcb->save.rip++; 1558 1559 report(true, "NMI intercept while running guest"); 1560 break; 1561 1562 case 3: 1563 break; 1564 1565 default: 1566 return true; 1567 } 1568 1569 return get_test_stage(test) == 3; 1570 } 1571 1572 static bool nmi_hlt_check(struct svm_test *test) 1573 { 1574 return get_test_stage(test) == 3; 1575 } 1576 1577 static volatile int count_exc = 0; 1578 1579 static void my_isr(struct ex_regs *r) 1580 { 1581 count_exc++; 1582 } 1583 1584 static void exc_inject_prepare(struct svm_test *test) 1585 { 1586 default_prepare(test); 1587 handle_exception(DE_VECTOR, my_isr); 1588 handle_exception(NMI_VECTOR, my_isr); 1589 } 1590 1591 1592 static void exc_inject_test(struct svm_test *test) 1593 { 1594 asm volatile ("vmmcall\n\tvmmcall\n\t"); 1595 } 1596 1597 static bool exc_inject_finished(struct svm_test *test) 1598 { 1599 vmcb->save.rip += 3; 1600 1601 switch (get_test_stage(test)) { 1602 case 0: 1603 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1604 report(false, "VMEXIT not due to vmmcall. 
Exit reason 0x%x", 1605 vmcb->control.exit_code); 1606 return true; 1607 } 1608 vmcb->control.event_inj = NMI_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 1609 break; 1610 1611 case 1: 1612 if (vmcb->control.exit_code != SVM_EXIT_ERR) { 1613 report(false, "VMEXIT not due to error. Exit reason 0x%x", 1614 vmcb->control.exit_code); 1615 return true; 1616 } 1617 report(count_exc == 0, "exception with vector 2 not injected"); 1618 vmcb->control.event_inj = DE_VECTOR | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID; 1619 break; 1620 1621 case 2: 1622 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1623 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1624 vmcb->control.exit_code); 1625 return true; 1626 } 1627 report(count_exc == 1, "divide overflow exception injected"); 1628 report(!(vmcb->control.event_inj & SVM_EVTINJ_VALID), "eventinj.VALID cleared"); 1629 break; 1630 1631 default: 1632 return true; 1633 } 1634 1635 inc_test_stage(test); 1636 1637 return get_test_stage(test) == 3; 1638 } 1639 1640 static bool exc_inject_check(struct svm_test *test) 1641 { 1642 return count_exc == 1 && get_test_stage(test) == 3; 1643 } 1644 1645 static volatile bool virq_fired; 1646 1647 static void virq_isr(isr_regs_t *regs) 1648 { 1649 virq_fired = true; 1650 } 1651 1652 static void virq_inject_prepare(struct svm_test *test) 1653 { 1654 handle_irq(0xf1, virq_isr); 1655 default_prepare(test); 1656 vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 1657 (0x0f << V_INTR_PRIO_SHIFT); // Set to the highest priority 1658 vmcb->control.int_vector = 0xf1; 1659 virq_fired = false; 1660 set_test_stage(test, 0); 1661 } 1662 1663 static void virq_inject_test(struct svm_test *test) 1664 { 1665 if (virq_fired) { 1666 report(false, "virtual interrupt fired before L2 sti"); 1667 set_test_stage(test, -1); 1668 vmmcall(); 1669 } 1670 1671 irq_enable(); 1672 asm volatile ("nop"); 1673 irq_disable(); 1674 1675 if (!virq_fired) { 1676 report(false, "virtual interrupt not fired after L2 sti"); 1677 set_test_stage(test, -1); 1678 } 1679 1680 vmmcall(); 1681 1682 if (virq_fired) { 1683 report(false, "virtual interrupt fired before L2 sti after VINTR intercept"); 1684 set_test_stage(test, -1); 1685 vmmcall(); 1686 } 1687 1688 irq_enable(); 1689 asm volatile ("nop"); 1690 irq_disable(); 1691 1692 if (!virq_fired) { 1693 report(false, "virtual interrupt not fired after return from VINTR intercept"); 1694 set_test_stage(test, -1); 1695 } 1696 1697 vmmcall(); 1698 1699 irq_enable(); 1700 asm volatile ("nop"); 1701 irq_disable(); 1702 1703 if (virq_fired) { 1704 report(false, "virtual interrupt fired when V_IRQ_PRIO less than V_TPR"); 1705 set_test_stage(test, -1); 1706 } 1707 1708 vmmcall(); 1709 vmmcall(); 1710 } 1711 1712 static bool virq_inject_finished(struct svm_test *test) 1713 { 1714 vmcb->save.rip += 3; 1715 1716 switch (get_test_stage(test)) { 1717 case 0: 1718 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1719 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1720 vmcb->control.exit_code); 1721 return true; 1722 } 1723 if (vmcb->control.int_ctl & V_IRQ_MASK) { 1724 report(false, "V_IRQ not cleared on VMEXIT after firing"); 1725 return true; 1726 } 1727 virq_fired = false; 1728 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 1729 vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 1730 (0x0f << V_INTR_PRIO_SHIFT); 1731 break; 1732 1733 case 1: 1734 if (vmcb->control.exit_code != SVM_EXIT_VINTR) { 1735 report(false, "VMEXIT not due to vintr. 
Exit reason 0x%x", 1736 vmcb->control.exit_code); 1737 return true; 1738 } 1739 if (virq_fired) { 1740 report(false, "V_IRQ fired before SVM_EXIT_VINTR"); 1741 return true; 1742 } 1743 vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); 1744 break; 1745 1746 case 2: 1747 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1748 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1749 vmcb->control.exit_code); 1750 return true; 1751 } 1752 virq_fired = false; 1753 // Set irq to lower priority 1754 vmcb->control.int_ctl = V_INTR_MASKING_MASK | V_IRQ_MASK | 1755 (0x08 << V_INTR_PRIO_SHIFT); 1756 // Raise guest TPR 1757 vmcb->control.int_ctl |= 0x0a & V_TPR_MASK; 1758 break; 1759 1760 case 3: 1761 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1762 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1763 vmcb->control.exit_code); 1764 return true; 1765 } 1766 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); 1767 break; 1768 1769 case 4: 1770 // INTERCEPT_VINTR should be ignored because V_INTR_PRIO < V_TPR 1771 if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { 1772 report(false, "VMEXIT not due to vmmcall. Exit reason 0x%x", 1773 vmcb->control.exit_code); 1774 return true; 1775 } 1776 break; 1777 1778 default: 1779 return true; 1780 } 1781 1782 inc_test_stage(test); 1783 1784 return get_test_stage(test) == 5; 1785 } 1786 1787 static bool virq_inject_check(struct svm_test *test) 1788 { 1789 return get_test_stage(test) == 5; 1790 } 1791 1792 /* 1793 * Detect nested guest RIP corruption as explained in kernel commit 1794 * b6162e82aef19fee9c32cb3fe9ac30d9116a8c73 1795 * 1796 * In the assembly loop below 'ins' is executed while IO instructions 1797 * are not intercepted; the instruction is emulated by L0. 1798 * 1799 * At the same time we are getting interrupts from the local APIC timer, 1800 * and we do intercept them in L1 1801 * 1802 * If the interrupt happens on the insb instruction, L0 will VMexit, emulate 1803 * the insb instruction and then it will inject the interrupt to L1 through 1804 * a nested VMexit. Due to a bug, it would leave pre-emulation values of RIP, 1805 * RAX and RSP in the VMCB. 1806 * 1807 * In our intercept handler we detect the bug by checking that RIP is that of 1808 * the insb instruction, but its memory operand has already been written. 1809 * This means that insb was already executed. 
1810 */ 1811 1812 static volatile int isr_cnt = 0; 1813 static volatile uint8_t io_port_var = 0xAA; 1814 extern const char insb_instruction_label[]; 1815 1816 static void reg_corruption_isr(isr_regs_t *regs) 1817 { 1818 isr_cnt++; 1819 apic_write(APIC_EOI, 0); 1820 } 1821 1822 static void reg_corruption_prepare(struct svm_test *test) 1823 { 1824 default_prepare(test); 1825 set_test_stage(test, 0); 1826 1827 vmcb->control.int_ctl = V_INTR_MASKING_MASK; 1828 vmcb->control.intercept |= (1ULL << INTERCEPT_INTR); 1829 1830 handle_irq(TIMER_VECTOR, reg_corruption_isr); 1831 1832 /* set local APIC to inject external interrupts */ 1833 apic_write(APIC_TMICT, 0); 1834 apic_write(APIC_TDCR, 0); 1835 apic_write(APIC_LVTT, TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC); 1836 apic_write(APIC_TMICT, 1000); 1837 } 1838 1839 static void reg_corruption_test(struct svm_test *test) 1840 { 1841 /* this is endless loop, which is interrupted by the timer interrupt */ 1842 asm volatile ( 1843 "1:\n\t" 1844 "movw $0x4d0, %%dx\n\t" // IO port 1845 "lea %[io_port_var], %%rdi\n\t" 1846 "movb $0xAA, %[io_port_var]\n\t" 1847 "insb_instruction_label:\n\t" 1848 "insb\n\t" 1849 "jmp 1b\n\t" 1850 1851 : [io_port_var] "=m" (io_port_var) 1852 : /* no inputs*/ 1853 : "rdx", "rdi" 1854 ); 1855 } 1856 1857 static bool reg_corruption_finished(struct svm_test *test) 1858 { 1859 if (isr_cnt == 10000) { 1860 report(true, 1861 "No RIP corruption detected after %d timer interrupts", 1862 isr_cnt); 1863 set_test_stage(test, 1); 1864 return true; 1865 } 1866 1867 if (vmcb->control.exit_code == SVM_EXIT_INTR) { 1868 1869 void* guest_rip = (void*)vmcb->save.rip; 1870 1871 irq_enable(); 1872 asm volatile ("nop"); 1873 irq_disable(); 1874 1875 if (guest_rip == insb_instruction_label && io_port_var != 0xAA) { 1876 report(false, 1877 "RIP corruption detected after %d timer interrupts", 1878 isr_cnt); 1879 return true; 1880 } 1881 1882 } 1883 return false; 1884 } 1885 1886 static bool reg_corruption_check(struct svm_test *test) 1887 { 1888 return get_test_stage(test) == 1; 1889 } 1890 1891 #define TEST(name) { #name, .v2 = name } 1892 1893 /* 1894 * v2 tests 1895 */ 1896 1897 static void basic_guest_main(struct svm_test *test) 1898 { 1899 } 1900 1901 static void svm_guest_state_test(void) 1902 { 1903 test_set_guest(basic_guest_main); 1904 1905 /* 1906 * Un-setting EFER.SVME is illegal 1907 */ 1908 u64 efer_saved = vmcb->save.efer; 1909 u64 efer = efer_saved; 1910 1911 report (svm_vmrun() == SVM_EXIT_VMMCALL, "EFER.SVME: %lx", efer); 1912 efer &= ~EFER_SVME; 1913 vmcb->save.efer = efer; 1914 report (svm_vmrun() == SVM_EXIT_ERR, "EFER.SVME: %lx", efer); 1915 vmcb->save.efer = efer_saved; 1916 1917 /* 1918 * Un-setting CR0.CD and setting CR0.NW is illegal combination 1919 */ 1920 u64 cr0_saved = vmcb->save.cr0; 1921 u64 cr0 = cr0_saved; 1922 1923 cr0 |= X86_CR0_CD; 1924 cr0 &= ~X86_CR0_NW; 1925 vmcb->save.cr0 = cr0; 1926 report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0); 1927 cr0 |= X86_CR0_NW; 1928 vmcb->save.cr0 = cr0; 1929 report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0); 1930 cr0 &= ~X86_CR0_NW; 1931 cr0 &= ~X86_CR0_CD; 1932 vmcb->save.cr0 = cr0; 1933 report (svm_vmrun() == SVM_EXIT_VMMCALL, "CR0: %lx", cr0); 1934 cr0 |= X86_CR0_NW; 1935 vmcb->save.cr0 = cr0; 1936 report (svm_vmrun() == SVM_EXIT_ERR, "CR0: %lx", cr0); 1937 vmcb->save.cr0 = cr0_saved; 1938 1939 /* 1940 * CR0[63:32] are not zero 1941 */ 1942 int i; 1943 1944 cr0 = cr0_saved; 1945 for (i = 32; i < 63; i = i + 4) { 1946 cr0 = cr0_saved | (1ull << i); 1947 
vmcb->save.cr0 = cr0; 1948 report (svm_vmrun() == SVM_EXIT_ERR, "CR0[63:32]: %lx", 1949 cr0 >> 32); 1950 } 1951 vmcb->save.cr0 = cr0_saved; 1952 } 1953 1954 struct svm_test svm_tests[] = { 1955 { "null", default_supported, default_prepare, 1956 default_prepare_gif_clear, null_test, 1957 default_finished, null_check }, 1958 { "vmrun", default_supported, default_prepare, 1959 default_prepare_gif_clear, test_vmrun, 1960 default_finished, check_vmrun }, 1961 { "ioio", default_supported, prepare_ioio, 1962 default_prepare_gif_clear, test_ioio, 1963 ioio_finished, check_ioio }, 1964 { "vmrun intercept check", default_supported, prepare_no_vmrun_int, 1965 default_prepare_gif_clear, null_test, default_finished, 1966 check_no_vmrun_int }, 1967 { "rsm", default_supported, 1968 prepare_rsm_intercept, default_prepare_gif_clear, 1969 test_rsm_intercept, finished_rsm_intercept, check_rsm_intercept }, 1970 { "cr3 read intercept", default_supported, 1971 prepare_cr3_intercept, default_prepare_gif_clear, 1972 test_cr3_intercept, default_finished, check_cr3_intercept }, 1973 { "cr3 read nointercept", default_supported, default_prepare, 1974 default_prepare_gif_clear, test_cr3_intercept, default_finished, 1975 check_cr3_nointercept }, 1976 { "cr3 read intercept emulate", smp_supported, 1977 prepare_cr3_intercept_bypass, default_prepare_gif_clear, 1978 test_cr3_intercept_bypass, default_finished, check_cr3_intercept }, 1979 { "dr intercept check", default_supported, prepare_dr_intercept, 1980 default_prepare_gif_clear, test_dr_intercept, dr_intercept_finished, 1981 check_dr_intercept }, 1982 { "next_rip", next_rip_supported, prepare_next_rip, 1983 default_prepare_gif_clear, test_next_rip, 1984 default_finished, check_next_rip }, 1985 { "msr intercept check", default_supported, prepare_msr_intercept, 1986 default_prepare_gif_clear, test_msr_intercept, 1987 msr_intercept_finished, check_msr_intercept }, 1988 { "mode_switch", default_supported, prepare_mode_switch, 1989 default_prepare_gif_clear, test_mode_switch, 1990 mode_switch_finished, check_mode_switch }, 1991 { "asid_zero", default_supported, prepare_asid_zero, 1992 default_prepare_gif_clear, test_asid_zero, 1993 default_finished, check_asid_zero }, 1994 { "sel_cr0_bug", default_supported, sel_cr0_bug_prepare, 1995 default_prepare_gif_clear, sel_cr0_bug_test, 1996 sel_cr0_bug_finished, sel_cr0_bug_check }, 1997 { "npt_nx", npt_supported, npt_nx_prepare, 1998 default_prepare_gif_clear, null_test, 1999 default_finished, npt_nx_check }, 2000 { "npt_us", npt_supported, npt_us_prepare, 2001 default_prepare_gif_clear, npt_us_test, 2002 default_finished, npt_us_check }, 2003 { "npt_rsvd", npt_supported, npt_rsvd_prepare, 2004 default_prepare_gif_clear, null_test, 2005 default_finished, npt_rsvd_check }, 2006 { "npt_rw", npt_supported, npt_rw_prepare, 2007 default_prepare_gif_clear, npt_rw_test, 2008 default_finished, npt_rw_check }, 2009 { "npt_rsvd_pfwalk", npt_supported, npt_rsvd_pfwalk_prepare, 2010 default_prepare_gif_clear, null_test, 2011 default_finished, npt_rsvd_pfwalk_check }, 2012 { "npt_rw_pfwalk", npt_supported, npt_rw_pfwalk_prepare, 2013 default_prepare_gif_clear, null_test, 2014 default_finished, npt_rw_pfwalk_check }, 2015 { "npt_l1mmio", npt_supported, npt_l1mmio_prepare, 2016 default_prepare_gif_clear, npt_l1mmio_test, 2017 default_finished, npt_l1mmio_check }, 2018 { "npt_rw_l1mmio", npt_supported, npt_rw_l1mmio_prepare, 2019 default_prepare_gif_clear, npt_rw_l1mmio_test, 2020 default_finished, npt_rw_l1mmio_check }, 2021 { "tsc_adjust", 
default_supported, tsc_adjust_prepare, 2022 default_prepare_gif_clear, tsc_adjust_test, 2023 default_finished, tsc_adjust_check }, 2024 { "latency_run_exit", default_supported, latency_prepare, 2025 default_prepare_gif_clear, latency_test, 2026 latency_finished, latency_check }, 2027 { "latency_svm_insn", default_supported, lat_svm_insn_prepare, 2028 default_prepare_gif_clear, null_test, 2029 lat_svm_insn_finished, lat_svm_insn_check }, 2030 { "exc_inject", default_supported, exc_inject_prepare, 2031 default_prepare_gif_clear, exc_inject_test, 2032 exc_inject_finished, exc_inject_check }, 2033 { "pending_event", default_supported, pending_event_prepare, 2034 default_prepare_gif_clear, 2035 pending_event_test, pending_event_finished, pending_event_check }, 2036 { "pending_event_cli", default_supported, pending_event_cli_prepare, 2037 pending_event_cli_prepare_gif_clear, 2038 pending_event_cli_test, pending_event_cli_finished, 2039 pending_event_cli_check }, 2040 { "interrupt", default_supported, interrupt_prepare, 2041 default_prepare_gif_clear, interrupt_test, 2042 interrupt_finished, interrupt_check }, 2043 { "nmi", default_supported, nmi_prepare, 2044 default_prepare_gif_clear, nmi_test, 2045 nmi_finished, nmi_check }, 2046 { "nmi_hlt", smp_supported, nmi_prepare, 2047 default_prepare_gif_clear, nmi_hlt_test, 2048 nmi_hlt_finished, nmi_hlt_check }, 2049 { "virq_inject", default_supported, virq_inject_prepare, 2050 default_prepare_gif_clear, virq_inject_test, 2051 virq_inject_finished, virq_inject_check }, 2052 { "reg_corruption", default_supported, reg_corruption_prepare, 2053 default_prepare_gif_clear, reg_corruption_test, 2054 reg_corruption_finished, reg_corruption_check }, 2055 TEST(svm_guest_state_test), 2056 { NULL, NULL, NULL, NULL, NULL, NULL, NULL } 2057 }; 2058
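/*
 * Illustrative sketch only (not part of the test matrix above): a v2-style
 * test is a plain void(void) function driven by the TEST() wrapper, e.g. a
 * hypothetical svm_example_test could look like
 *
 *	static void svm_example_test(void)
 *	{
 *		test_set_guest(basic_guest_main);
 *		report(svm_vmrun() == SVM_EXIT_VMMCALL, "vmmcall exit from guest");
 *	}
 *
 * and would be registered by adding TEST(svm_example_test) before the
 * all-NULL terminator of svm_tests[].
 */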