/*
 * All test cases of nested virtualization should be in this file
 *
 * Author : Arthur Chunqi Li <yzt356@gmail.com>
 */

#include <asm/debugreg.h>

#include "vmx.h"
#include "msr.h"
#include "processor.h"
#include "vm.h"
#include "pci.h"
#include "fwcfg.h"
#include "isr.h"
#include "desc.h"
#include "apic.h"
#include "types.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "smp.h"
#include "delay.h"

#define NONCANONICAL 0xaaaaaaaaaaaaaaaaull

#define VPID_CAP_INVVPID_TYPES_SHIFT 40

u64 ia32_pat;
u64 ia32_efer;
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;

phys_addr_t pci_physaddr;

void *pml_log;
#define PML_INDEX 512

static inline unsigned ffs(unsigned x)
{
	int pos = -1;

	__asm__ __volatile__("bsf %1, %%eax; cmovnz %%eax, %0"
			     : "+r"(pos) : "rm"(x) : "eax");
	return pos + 1;
}

static inline void vmcall(void)
{
	asm volatile("vmcall");
}

static void basic_guest_main(void)
{
	report("Basic VMX test", 1);
}

static int basic_exit_handler(void)
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}

static void vmenter_main(void)
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}

static int vmenter_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		regs.rax = 0xFFFF;
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

u32 preempt_scale;
volatile unsigned long long tsc_val;
volatile u32 preempt_val;
u64 saved_rip;

static int preemption_timer_init(struct vmcs *vmcs)
{
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return VMX_TEST_EXIT;
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
	preempt_val = 10000000;
	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
		printf("\tSave preemption value is not supported\n");

	return VMX_TEST_START;
}

static void preemption_timer_main(void)
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		vmx_set_test_stage(0);
		vmcall();
		if (vmx_get_test_stage() == 1)
			vmcall();
	}
	vmx_set_test_stage(1);
	while (vmx_get_test_stage() == 1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			vmx_set_test_stage(2);
			vmcall();
		}
	}
	tsc_val = rdtsc();
	asm volatile ("hlt");
	vmcall();
	vmx_set_test_stage(5);
	vmcall();
}

static int preemption_timer_exit_handler(void)
{
	bool guest_halted;
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			report("busy-wait for preemption timer",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			guest_halted =
				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
			report("preemption timer during hlt",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val && guest_halted);
			vmx_set_test_stage(4);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
			vmcs_write(EXI_CONTROLS,
				   vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_PREEMPT);
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
			return VMX_TEST_RESUME;
		case 4:
			report("preemption timer with 0 value",
			       saved_rip == guest_rip);
			break;
		default:
			report("Invalid stage.", false);
			print_vmexit_info();
			break;
		}
		break;
	case VMX_VMCALL:
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		switch (vmx_get_test_stage()) {
		case 0:
			report("Keep preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
			vmx_set_test_stage(1);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
				     EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
			vmcs_write(EXI_CONTROLS, ctrl_exit);
			return VMX_TEST_RESUME;
		case 1:
			report("Save preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
			return VMX_TEST_RESUME;
		case 2:
			report("busy-wait for preemption timer", 0);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			report("preemption timer during hlt", 0);
			vmx_set_test_stage(4);
			/* fall through */
		case 4:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
			vmcs_write(PREEMPT_TIMER_VALUE, 0);
			saved_rip = guest_rip + insn_len;
			return VMX_TEST_RESUME;
		case 5:
			report("preemption timer with 0 value (vmcall stage 5)", 0);
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}
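/*
 * A minimal sketch (illustrative only, not used by the tests) of the
 * conversion the shifts above rely on: the VMX-preemption timer counts
 * down at the TSC rate divided by 2^preempt_scale, where preempt_scale
 * is bits 4:0 of MSR_IA32_VMX_MISC.
 */
static inline u64 tsc_delta_to_preempt_ticks(u64 tsc_delta)
{
	/* Elapsed TSC cycles expressed in preemption-timer ticks. */
	return tsc_delta >> preempt_scale;
}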
static void msr_bmp_init(void)
{
	void *msr_bitmap;
	u32 ctrl_cpu0;

	msr_bitmap = alloc_page();
	memset(msr_bitmap, 0x0, PAGE_SIZE);
	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu0 |= CPU_MSR_BITMAP;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
}

static void *get_msr_bitmap(void)
{
	void *msr_bitmap;

	if (vmcs_read(CPU_EXEC_CTRL0) & CPU_MSR_BITMAP) {
		msr_bitmap = (void *)vmcs_read(MSR_BITMAP);
	} else {
		msr_bitmap = alloc_page();
		memset(msr_bitmap, 0xff, PAGE_SIZE);
		vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
		vmcs_set_bits(CPU_EXEC_CTRL0, CPU_MSR_BITMAP);
	}

	return msr_bitmap;
}

static void disable_intercept_for_x2apic_msrs(void)
{
	unsigned long *msr_bitmap = (unsigned long *)get_msr_bitmap();
	u32 msr;

	for (msr = APIC_BASE_MSR;
	     msr < (APIC_BASE_MSR+0xff);
	     msr += BITS_PER_LONG) {
		unsigned int word = msr / BITS_PER_LONG;

		msr_bitmap[word] = 0;
		msr_bitmap[word + (0x800 / sizeof(long))] = 0;
	}
}
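/*
 * For reference (Intel SDM, MSR-bitmap layout): within the 4K bitmap,
 * bytes 0x000-0x3ff control reads of MSRs 0x00000000-0x00001fff and
 * bytes 0x800-0xbff control writes of the same range, one bit per MSR.
 * That is why the loop above clears both msr_bitmap[word] and the word
 * 0x800 bytes further in.  An illustrative, unused helper for a
 * low-range MSR:
 */
static inline bool msr_read_intercepted(unsigned long *bitmap, u32 msr)
{
	return bitmap[msr / BITS_PER_LONG] & (1ul << (msr % BITS_PER_LONG));
}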
static int test_ctrl_pat_init(struct vmcs *vmcs)
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) &&
	    !(ctrl_exit_rev.clr & EXI_LOAD_PAT) &&
	    !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
		printf("\tSave/load PAT is not supported\n");
		return 1;
	}

	ctrl_ent = vmcs_read(ENT_CONTROLS);
	ctrl_exi = vmcs_read(EXI_CONTROLS);
	ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
	ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT);
	vmcs_write(ENT_CONTROLS, ctrl_ent);
	vmcs_write(EXI_CONTROLS, ctrl_exi);
	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	vmcs_write(GUEST_PAT, 0x0);
	vmcs_write(HOST_PAT, ia32_pat);
	return VMX_TEST_START;
}

static void test_ctrl_pat_main(void)
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
		report("Entry load PAT", guest_ia32_pat == ia32_pat);
}

static int test_ctrl_pat_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			report("Exit save PAT", guest_pat == 0x6);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else
			report("Exit load PAT", rdmsr(MSR_IA32_CR_PAT) == ia32_pat);
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

static int test_ctrl_efer_init(struct vmcs *vmcs)
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
	ia32_efer = rdmsr(MSR_EFER);
	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
	return VMX_TEST_START;
}

static void test_ctrl_efer_main(void)
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
		report("Entry load EFER", guest_ia32_efer == ia32_efer);
}

static int test_ctrl_efer_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u64 guest_efer;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_efer = vmcs_read(GUEST_EFER);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
			printf("\tEXI_SAVE_EFER is not supported\n");
			vmcs_write(GUEST_EFER, ia32_efer);
		} else {
			report("Exit save EFER", guest_efer == ia32_efer);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
			printf("\tEXI_LOAD_EFER is not supported\n");
			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
		} else {
			report("Exit load EFER", rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX));
		}
		vmcs_write(GUEST_EFER, ia32_efer);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
u32 guest_cr0, guest_cr4;

static void cr_shadowing_main(void)
{
	u32 cr0, cr4, tmp;

	// Test read through
	vmx_set_test_stage(0);
	guest_cr0 = read_cr0();
	if (vmx_get_test_stage() == 1)
		report("Read through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(1);
	guest_cr4 = read_cr4();
	if (vmx_get_test_stage() == 2)
		report("Read through CR4", 0);
	else
		vmcall();
	// Test write through
	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
	vmx_set_test_stage(2);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 3)
		report("Write through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(3);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 4)
		report("Write through CR4", 0);
	else
		vmcall();
	// Test read shadow
	vmx_set_test_stage(4);
	vmcall();
	cr0 = read_cr0();
	if (vmx_get_test_stage() != 5)
		report("Read shadowing CR0", cr0 == guest_cr0);
	vmx_set_test_stage(5);
	cr4 = read_cr4();
	if (vmx_get_test_stage() != 6)
		report("Read shadowing CR4", cr4 == guest_cr4);
	// Test write shadow (same value with shadow)
	vmx_set_test_stage(6);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 7)
		report("Write shadowing CR0 (same value with shadow)", 0);
	else
		vmcall();
	vmx_set_test_stage(7);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 8)
		report("Write shadowing CR4 (same value with shadow)", 0);
	else
		vmcall();
	// Test write shadow (different value)
	vmx_set_test_stage(8);
	tmp = guest_cr0 ^ X86_CR0_TS;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr0\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_TS", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	tmp = guest_cr0 ^ X86_CR0_MP;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr0\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_MP", vmx_get_test_stage() == 10);
	vmx_set_test_stage(10);
	tmp = guest_cr4 ^ X86_CR4_TSD;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr4\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_TSD", vmx_get_test_stage() == 11);
	vmx_set_test_stage(11);
	tmp = guest_cr4 ^ X86_CR4_DE;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr4\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_DE", vmx_get_test_stage() == 12);
}

static int cr_shadowing_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("Read through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 1:
			report("Read through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 2:
			report("Write through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 3:
			report("Write through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 4:
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			report("Write shadowing CR0 (same value)",
			       guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)));
			break;
		case 7:
			report("Write shadowing CR4 (same value)",
			       guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)));
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (vmx_get_test_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			vmx_inc_test_stage();
			break;
		case 5:
			report("Read shadowing CR4", 0);
			vmx_inc_test_stage();
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				vmx_inc_test_stage();
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
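/*
 * For reference (Intel SDM, exit qualification for MOV-CR exits): bits
 * 3:0 hold the control register number, bits 5:4 the access type
 * (0 = MOV to CR), and bits 11:8 the general-purpose register.  With
 * %rsi encoded as register 6, "mov %rsi, %cr0" yields 0x600 and
 * "mov %rsi, %cr4" yields 0x604, the two values checked above.
 */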
static int iobmp_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu0;

	io_bitmap_a = alloc_page();
	io_bitmap_b = alloc_page();
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu0 |= CPU_IO_BITMAP;
	ctrl_cpu0 &= (~CPU_IO);
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
	return VMX_TEST_START;
}

static void iobmp_main(void)
{
	// stage 0, test IO pass
	vmx_set_test_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	report("I/O bitmap - I/O pass", vmx_get_test_stage() == 0);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	vmx_set_test_stage(2);
	inb(0x0);
	report("I/O bitmap - trap in", vmx_get_test_stage() == 3);
	vmx_set_test_stage(3);
	outw(0x0, 0x0);
	report("I/O bitmap - trap out", vmx_get_test_stage() == 4);
	vmx_set_test_stage(4);
	inl(0x0);
	report("I/O bitmap - I/O width, long", vmx_get_test_stage() == 5);
	// test low/high IO port
	vmx_set_test_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	report("I/O bitmap - I/O port, low part", vmx_get_test_stage() == 6);
	vmx_set_test_stage(6);
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	report("I/O bitmap - I/O port, high part", vmx_get_test_stage() == 7);
	// test partial pass
	vmx_set_test_stage(7);
	inl(0x4FFF);
	report("I/O bitmap - partial pass", vmx_get_test_stage() == 8);
	// test overrun
	vmx_set_test_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	report("I/O bitmap - overrun", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting",
	       vmx_get_test_stage() == 9);
	vmx_set_test_stage(10);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting",
	       vmx_get_test_stage() == 11);
}
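/*
 * For reference (Intel SDM, I/O bitmaps): bitmap A covers ports
 * 0x0000-0x7fff and bitmap B covers ports 0x8000-0xffff, one bit per
 * port.  That is why port 0x9000 above is armed by setting bit
 * (0x9000 - 0x8000) = 0x1000 in io_bitmap_b, and why the wide accesses
 * at 0x4FFF ("partial pass") and 0xFFFF ("overrun") straddle a bitmap
 * boundary.
 */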
static int iobmp_exit_handler(void)
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (vmx_get_test_stage()) {
		case 0:
		case 1:
			vmx_inc_test_stage();
			break;
		case 2:
			report("I/O bitmap - I/O width, byte",
			       (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_BYTE);
			report("I/O bitmap - I/O direction, in", exit_qual & VMX_IO_IN);
			vmx_inc_test_stage();
			break;
		case 3:
			report("I/O bitmap - I/O width, word",
			       (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_WORD);
			report("I/O bitmap - I/O direction, out",
			       !(exit_qual & VMX_IO_IN));
			vmx_inc_test_stage();
			break;
		case 4:
			report("I/O bitmap - I/O width, long",
			       (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_LONG);
			vmx_inc_test_stage();
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				vmx_inc_test_stage();
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				vmx_inc_test_stage();
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				vmx_inc_test_stage();
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				vmx_inc_test_stage();
			break;
		case 9:
		case 10:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 9:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			report("unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = %#lx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}
#define INSN_CPU0		0
#define INSN_CPU1		1
#define INSN_ALWAYS_TRAP	2

#define FIELD_EXIT_QUAL		(1 << 0)
#define FIELD_INSN_INFO		(1 << 1)

asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: xor %eax, %eax; xor %ecx, %ecx; mwait;ret\n\t"
	"insn_rdpmc: xor %ecx, %ecx; rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t"
	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
#ifdef __x86_64__
	"insn_cr8_load: xor %eax, %eax; mov %rax,%cr8;ret\n\t"
	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
#endif
	"insn_monitor: xor %eax, %eax; xor %ecx, %ecx; xor %edx, %edx; monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: mov $10, %eax; cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
	"insn_sgdt: sgdt gdt64_desc;ret\n\t"
	"insn_lgdt: lgdt gdt64_desc;ret\n\t"
	"insn_sidt: sidt idt_descr;ret\n\t"
	"insn_lidt: lidt idt_descr;ret\n\t"
	"insn_sldt: sldt %ax;ret\n\t"
	"insn_lldt: xor %eax, %eax; lldt %ax;ret\n\t"
	"insn_str: str %ax;ret\n\t"
	"insn_rdrand: rdrand %rax;ret\n\t"
	"insn_rdseed: rdseed %rax;ret\n\t"
);
extern void insn_hlt(void);
extern void insn_invlpg(void);
extern void insn_mwait(void);
extern void insn_rdpmc(void);
extern void insn_rdtsc(void);
extern void insn_cr3_load(void);
extern void insn_cr3_store(void);
#ifdef __x86_64__
extern void insn_cr8_load(void);
extern void insn_cr8_store(void);
#endif
extern void insn_monitor(void);
extern void insn_pause(void);
extern void insn_wbinvd(void);
extern void insn_sgdt(void);
extern void insn_lgdt(void);
extern void insn_sidt(void);
extern void insn_lidt(void);
extern void insn_sldt(void);
extern void insn_lldt(void);
extern void insn_str(void);
extern void insn_cpuid(void);
extern void insn_invd(void);
extern void insn_rdrand(void);
extern void insn_rdseed(void);

u32 cur_insn;
u64 cr3;

struct insn_table {
	const char *name;
	u32 flag;
	void (*insn_func)(void);
	u32 type;
	u32 reason;
	ulong exit_qual;
	u32 insn_info;
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define
	// which fields need to be tested; reason is always tested
	u32 test_field;
};

/*
 * Add more test cases of instruction intercept here. Elements in this
 * table are:
 *	name/control flag/insn function/type/exit reason/exit qualification/
 *	instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in the exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT", CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
	 0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
	 FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
	 FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
	 FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
	 FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	{"DESC_TABLE (SGDT)", CPU_DESC_TABLE, insn_sgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LGDT)", CPU_DESC_TABLE, insn_lgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SIDT)", CPU_DESC_TABLE, insn_sidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LIDT)", CPU_DESC_TABLE, insn_lidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SLDT)", CPU_DESC_TABLE, insn_sldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (LLDT)", CPU_DESC_TABLE, insn_lldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (STR)", CPU_DESC_TABLE, insn_str, INSN_CPU1, 47, 0, 0, 0},
	/* LTR causes a #GP if done with a busy selector, so it is not tested. */
	{"RDRAND", CPU_RDRAND, insn_rdrand, INSN_CPU1, VMX_RDRAND, 0, 0, 0},
	{"RDSEED", CPU_RDSEED, insn_rdseed, INSN_CPU1, VMX_RDSEED, 0, 0, 0},
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};

static int insn_intercept_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu;

	ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY;
	ctrl_cpu &= ctrl_cpu_rev[0].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set);
	cr3 = read_cr3();
	return VMX_TEST_START;
}
static void insn_intercept_main(void)
{
	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
		vmx_set_test_stage(cur_insn * 2);
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
			printf("\tCPU_CTRL%d.CPU_%s is not supported.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);
			continue;
		}

		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) {
			/* skip hlt, it stalls the guest and is tested below */
			if (insn_table[cur_insn].insn_func != insn_hlt)
				insn_table[cur_insn].insn_func();
			report("execute %s", vmx_get_test_stage() == cur_insn * 2,
			       insn_table[cur_insn].name);
		} else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP)
			printf("\tCPU_CTRL%d.CPU_%s always traps.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);

		vmcall();

		insn_table[cur_insn].insn_func();
		report("intercept %s", vmx_get_test_stage() == cur_insn * 2 + 1,
		       insn_table[cur_insn].name);

		vmx_set_test_stage(cur_insn * 2 + 1);
		vmcall();
	}
}

static int insn_intercept_exit_handler(void)
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);

	if (reason == VMX_VMCALL) {
		u32 val = 0;

		if (insn_table[cur_insn].type == INSN_CPU0)
			val = vmcs_read(CPU_EXEC_CTRL0);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			val = vmcs_read(CPU_EXEC_CTRL1);

		if (vmx_get_test_stage() & 1)
			val &= ~insn_table[cur_insn].flag;
		else
			val |= insn_table[cur_insn].flag;

		if (insn_table[cur_insn].type == INSN_CPU0)
			vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set);
	} else {
		pass = (cur_insn * 2 == vmx_get_test_stage()) &&
			insn_table[cur_insn].reason == reason;
		if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL &&
		    insn_table[cur_insn].exit_qual != exit_qual)
			pass = false;
		if (insn_table[cur_insn].test_field & FIELD_INSN_INFO &&
		    insn_table[cur_insn].insn_info != insn_info)
			pass = false;
		if (pass)
			vmx_inc_test_stage();
	}
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}

/* Enables EPT and sets up the identity map. */
static int setup_ept(bool enable_ad)
{
	unsigned long end_of_memory;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported\n");
		return 1;
	}

	if (!(ept_vpid.val & EPT_CAP_UC) &&
	    !(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory types "
		       "UC and WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	if (enable_ad)
		eptp |= EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	/*
	 * Cannot use large EPT pages if we need to track EPT
	 * accessed/dirty bits at 4K granularity.
	 */
	setup_ept_range(pml4, 0, end_of_memory, 0,
			!enable_ad && ept_2m_supported(),
			EPT_WA | EPT_RA | EPT_EA);
	return 0;
}
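/*
 * A minimal sketch of the EPTP packing performed above (Intel SDM
 * layout; this helper is illustrative and unused by the tests): bits
 * 2:0 hold the paging-structure memory type, bits 5:3 hold (page-walk
 * length - 1), hence the value 3 for a 4-level walk, bit 6 enables the
 * accessed/dirty flags, and the upper bits hold the physical address of
 * the PML4 table.
 */
static inline u64 build_eptp(u64 mem_type, bool enable_ad,
			     unsigned long *pml4_table)
{
	u64 val = mem_type | (3ull << EPTP_PG_WALK_LEN_SHIFT);

	if (enable_ad)
		val |= EPTP_AD_FLAG;
	return val | virt_to_phys(pml4_table);
}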
static void ept_enable_ad_bits(void)
{
	eptp |= EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
}

static void ept_disable_ad_bits(void)
{
	eptp &= ~EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
}

static void ept_enable_ad_bits_or_skip_test(void)
{
	if (!ept_ad_bits_supported())
		test_skip("EPT AD bits not supported.");
	ept_enable_ad_bits();
}

static int apic_version;

static int ept_init_common(bool have_ad)
{
	int ret;
	struct pci_dev pcidev;

	if (setup_ept(have_ad))
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
		    EPT_RA | EPT_WA | EPT_EA);

	apic_version = apic_read(APIC_LVR);

	ret = pci_find_dev(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_TEST);
	if (ret != PCIDEVADDR_INVALID) {
		pci_dev_init(&pcidev, ret);
		pci_physaddr = pcidev.resource[PCI_TESTDEV_BAR_MEM];
	}

	return VMX_TEST_START;
}

static int ept_init(struct vmcs *vmcs)
{
	return ept_init_common(false);
}

static void ept_common(void)
{
	vmx_set_test_stage(0);
	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
	    *((u32 *)data_page1) != MAGIC_VAL_1)
		report("EPT basic framework - read", 0);
	else {
		*((u32 *)data_page2) = MAGIC_VAL_3;
		vmcall();
		if (vmx_get_test_stage() == 1) {
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
			    *((u32 *)data_page2) == MAGIC_VAL_2)
				report("EPT basic framework", 1);
			else
				report("EPT basic framework - remap", 1);
		}
	}
	// Test EPT Misconfigurations
	vmx_set_test_stage(1);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	if (vmx_get_test_stage() != 2) {
		report("EPT misconfigurations", 0);
		goto t1;
	}
	vmx_set_test_stage(2);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT misconfigurations", vmx_get_test_stage() == 3);
t1:
	// Test EPT violation
	vmx_set_test_stage(3);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT violation - page permission", vmx_get_test_stage() == 4);
	// Violation caused by EPT paging structure
	vmx_set_test_stage(4);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_2;
	report("EPT violation - paging structure", vmx_get_test_stage() == 5);

	// MMIO Read/Write
	vmx_set_test_stage(5);
	vmcall();

	*(u32 volatile *)pci_physaddr;
	report("MMIO EPT violation - read", vmx_get_test_stage() == 6);

	*(u32 volatile *)pci_physaddr = MAGIC_VAL_1;
	report("MMIO EPT violation - write", vmx_get_test_stage() == 7);
}

static void ept_main(void)
{
	ept_common();

	// Test EPT access to L1 MMIO
	vmx_set_test_stage(7);
	report("EPT - MMIO access", *((u32 *)0xfee00030UL) == apic_version);

	// Test invalid operand for INVEPT
	vmcall();
	report("EPT - unsupported INVEPT", vmx_get_test_stage() == 8);
}

static bool invept_test(int type, u64 eptp)
{
	bool ret, supported;

	supported = ept_vpid.val &
		(EPT_CAP_INVEPT_SINGLE >> INVEPT_SINGLE << type);
	ret = invept(type, eptp);

	if (ret == !supported)
		return false;

	if (!supported)
		printf("WARNING: unsupported invept passed!\n");
	else
		printf("WARNING: invept failed!\n");

	return true;
}
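/*
 * For reference: the shift above relocates the single-context INVEPT
 * capability bit (EPT_CAP_INVEPT_SINGLE) so that the bit tested
 * corresponds to "type"; invvpid_test() later in this file plays the
 * same trick with VPID_CAP_INVVPID_ADDR.  The test only warns when the
 * instruction's outcome disagrees with what the capability MSR
 * advertises.
 */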
static int pml_exit_handler(void)
{
	u16 index, count;
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u64 *pmlbuf = pml_log;
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u64 guest_cr3 = vmcs_read(GUEST_CR3);
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			index = vmcs_read(GUEST_PML_INDEX);
			for (count = index + 1; count < PML_INDEX; count++) {
				if (pmlbuf[count] == (u64)data_page2) {
					vmx_inc_test_stage();
					clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
					break;
				}
			}
			break;
		case 1:
			index = vmcs_read(GUEST_PML_INDEX);
			/* Keep clearing the dirty bit until the log overflows */
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			break;
		default:
			report("unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_PML_FULL:
		vmx_inc_test_stage();
		vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
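/*
 * For reference (Intel SDM, page-modification logging): with PML
 * enabled, the CPU appends the GPA of each page it marks dirty to the
 * 512-entry log page and decrements GUEST_PML_INDEX from 511 toward 0;
 * once the index underflows, the next dirtying access causes a
 * VMX_PML_FULL exit, which the handler above acknowledges by resetting
 * the index to PML_INDEX - 1.
 */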
static int ept_exit_handler_common(bool have_ad)
{
	u64 guest_rip;
	u64 guest_cr3;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	static unsigned long data_page1_pte, data_page1_pte_pte, memaddr_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	guest_cr3 = vmcs_read(GUEST_CR3);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page1,
				     have_ad ? EPT_ACCESS_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page2,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			if (have_ad)
				ept_sync(INVEPT_SINGLE, eptp);
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
			    *((u32 *)data_page2) == MAGIC_VAL_2) {
				vmx_inc_test_stage();
				install_ept(pml4, (unsigned long)data_page2,
					    (unsigned long)data_page2,
					    EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write", 0);
			break;
		case 1:
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1, EPT_WA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 2:
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1,
				    EPT_RA | EPT_WA | EPT_EA |
				    (2 << EPT_MEM_TYPE_SHIFT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 3:
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
						1, &data_page1_pte));
			set_ept_pte(pml4, (unsigned long)data_page1,
				    1, data_page1_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
						2, &data_page1_pte));
			data_page1_pte &= PAGE_MASK;
			TEST_ASSERT(get_ept_pte(pml4, data_page1_pte,
						2, &data_page1_pte_pte));
			set_ept_pte(pml4, data_page1_pte, 2,
				    data_page1_pte_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 5:
			install_ept(pml4, (unsigned long)pci_physaddr,
				    (unsigned long)pci_physaddr, 0);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 7:
			if (!invept_test(0, eptp))
				vmx_inc_test_stage();
			break;
		// Should not reach here
		default:
			report("ERROR - unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			vmx_inc_test_stage();
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1,
				    EPT_RA | EPT_WA | EPT_EA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			report("ERROR - unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch (vmx_get_test_stage()) {
		case 3:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					  EPT_VLT_PADDR))
				vmx_inc_test_stage();
			set_ept_pte(pml4, (unsigned long)data_page1,
				    1, data_page1_pte | (EPT_PRESENT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_RD |
					  (have_ad ? EPT_VLT_WR : 0) |
					  EPT_VLT_LADDR_VLD))
				vmx_inc_test_stage();
			set_ept_pte(pml4, data_page1_pte, 2,
				    data_page1_pte_pte | (EPT_PRESENT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 5:
			if (exit_qual & EPT_VLT_RD)
				vmx_inc_test_stage();
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)pci_physaddr,
						1, &memaddr_pte));
			set_ept_pte(pml4, memaddr_pte, 1, memaddr_pte | EPT_RA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 6:
			if (exit_qual & EPT_VLT_WR)
				vmx_inc_test_stage();
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)pci_physaddr,
						1, &memaddr_pte));
			set_ept_pte(pml4, memaddr_pte, 1, memaddr_pte | EPT_RA | EPT_WA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			report("ERROR : unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}
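/*
 * For reference (Intel SDM, EPT-violation exit qualification):
 * EPT_VLT_RD, EPT_VLT_WR and EPT_VLT_FETCH describe the access that
 * faulted, EPT_VLT_LADDR_VLD says a guest-linear address was involved,
 * and EPT_VLT_PADDR says the violation was on the access itself rather
 * than during the page walk.  That is why the paging-structure case
 * above (stage 4) expects EPT_VLT_LADDR_VLD without EPT_VLT_PADDR.
 */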
static int ept_exit_handler(void)
{
	return ept_exit_handler_common(false);
}

static int eptad_init(struct vmcs *vmcs)
{
	int r = ept_init_common(true);

	if (r == VMX_TEST_EXIT)
		return r;

	if ((rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & EPT_CAP_AD_FLAG) == 0) {
		printf("\tEPT A/D bits are not supported\n");
		return VMX_TEST_EXIT;
	}

	return r;
}

static int pml_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu;
	int r = eptad_init(vmcs);

	if (r == VMX_TEST_EXIT)
		return r;

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_PML)) {
		printf("\tPML is not supported\n");
		return VMX_TEST_EXIT;
	}

	pml_log = alloc_page();
	memset(pml_log, 0x0, PAGE_SIZE);
	vmcs_write(PMLADDR, (u64)pml_log);
	vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);

	ctrl_cpu = vmcs_read(CPU_EXEC_CTRL1) | CPU_PML;
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu);

	return VMX_TEST_START;
}

static void pml_main(void)
{
	int count = 0;

	vmx_set_test_stage(0);
	*((u32 *)data_page2) = 0x1;
	vmcall();
	report("PML - Dirty GPA Logging", vmx_get_test_stage() == 1);

	while (vmx_get_test_stage() == 1) {
		vmcall();
		*((u32 *)data_page2) = 0x1;
		if (count++ > PML_INDEX)
			break;
	}
	report("PML Full Event", vmx_get_test_stage() == 2);
}

static void eptad_main(void)
{
	ept_common();
}

static int eptad_exit_handler(void)
{
	return ept_exit_handler_common(true);
}

static bool invvpid_test(int type, u16 vpid)
{
	bool ret, supported;

	supported = ept_vpid.val &
		(VPID_CAP_INVVPID_ADDR >> INVVPID_ADDR << type);
	ret = invvpid(type, vpid, 0);

	if (ret == !supported)
		return false;

	if (!supported)
		printf("WARNING: unsupported invvpid passed!\n");
	else
		printf("WARNING: invvpid failed!\n");

	return true;
}

static int vpid_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu1;

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_VPID)) {
		printf("\tVPID is not supported\n");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu1 |= CPU_VPID;
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
	return VMX_TEST_START;
}
static void vpid_main(void)
{
	vmx_set_test_stage(0);
	vmcall();
	report("INVVPID SINGLE ADDRESS", vmx_get_test_stage() == 1);
	vmx_set_test_stage(2);
	vmcall();
	report("INVVPID SINGLE", vmx_get_test_stage() == 3);
	vmx_set_test_stage(4);
	vmcall();
	report("INVVPID ALL", vmx_get_test_stage() == 5);
}

static int vpid_exit_handler(void)
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			if (!invvpid_test(INVVPID_ADDR, 1))
				vmx_inc_test_stage();
			break;
		case 2:
			if (!invvpid_test(INVVPID_CONTEXT_GLOBAL, 1))
				vmx_inc_test_stage();
			break;
		case 4:
			if (!invvpid_test(INVVPID_ALL, 1))
				vmx_inc_test_stage();
			break;
		default:
			report("ERROR: unexpected stage, %d", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

#define TIMER_VECTOR	222

static volatile bool timer_fired;

static void timer_isr(isr_regs_t *regs)
{
	timer_fired = true;
	apic_write(APIC_EOI, 0);
}

static int interrupt_init(struct vmcs *vmcs)
{
	msr_bmp_init();
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
	handle_irq(TIMER_VECTOR, timer_isr);
	return VMX_TEST_START;
}

static void interrupt_main(void)
{
	long long start, loops;

	vmx_set_test_stage(0);

	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmx_set_test_stage(7);
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("running a guest with interrupt acknowledgement set", timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_enable();
	timer_fired = false;
	vmcall();
	report("Inject an event to a halted guest", timer_fired);
}
static int interrupt_exit_handler(void)
{
	u64 guest_rip = vmcs_read(GUEST_RIP);
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
		case 2:
		case 5:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 7:
			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 1:
		case 3:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
			break;
		case 4:
		case 6:
			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
			break;

		case 8:
			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
			vmcs_write(ENT_INTR_INFO,
				   TIMER_VECTOR |
				   (VMX_INTR_TYPE_EXT_INTR << INTR_INFO_INTR_TYPE_SHIFT) |
				   INTR_INFO_VALID_MASK);
			break;
		}
		vmx_inc_test_stage();
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EXTINT:
		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
			handle_external_interrupt(vector);
		} else {
			irq_enable();
			asm volatile ("nop");
			irq_disable();
		}
		if (vmx_get_test_stage() >= 2)
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}

	return VMX_TEST_VMEXIT;
}
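/*
 * For reference: with EXI_INTA (acknowledge interrupt on exit) set, the
 * CPU acknowledges the external interrupt during the VM exit and stores
 * its vector in EXI_INTR_INFO, so the handler above must dispatch it by
 * hand; without EXI_INTA, it simply opens an interrupt window with
 * sti/nop so the pending interrupt is delivered through the host IDT.
 * Stage 8 shows the opposite direction: an event is injected into a
 * halted guest by writing the vector, type and valid bit into
 * ENT_INTR_INFO.
 */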
always loaded/saved\n"); 1781 return; 1782 } 1783 vmx_set_test_stage(2); 1784 vmcall(); 1785 1786 asm volatile("mov %%dr7,%0" : "=r" (dr7)); 1787 debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 1788 /* Commented out: KVM does not support DEBUGCTL so far */ 1789 (void)debugctl; 1790 report("Guest=host debug controls", dr7 == 0x402 /* && debugctl == 0x1 */); 1791 1792 dr7 = 0x408; 1793 asm volatile("mov %0,%%dr7" : : "r" (dr7)); 1794 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3); 1795 1796 vmx_set_test_stage(3); 1797 vmcall(); 1798 report("Don't save debug controls", vmx_get_test_stage() == 4); 1799 } 1800 1801 static int dbgctls_exit_handler(void) 1802 { 1803 unsigned int reason = vmcs_read(EXI_REASON) & 0xff; 1804 u32 insn_len = vmcs_read(EXI_INST_LEN); 1805 u64 guest_rip = vmcs_read(GUEST_RIP); 1806 u64 dr7, debugctl; 1807 1808 asm volatile("mov %%dr7,%0" : "=r" (dr7)); 1809 debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 1810 1811 switch (reason) { 1812 case VMX_VMCALL: 1813 switch (vmx_get_test_stage()) { 1814 case 0: 1815 if (dr7 == 0x400 && debugctl == 0 && 1816 vmcs_read(GUEST_DR7) == 0x408 /* && 1817 Commented out: KVM does not support DEBUGCTL so far 1818 vmcs_read(GUEST_DEBUGCTL) == 0x3 */) 1819 vmx_inc_test_stage(); 1820 break; 1821 case 2: 1822 dr7 = 0x402; 1823 asm volatile("mov %0,%%dr7" : : "r" (dr7)); 1824 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1); 1825 vmcs_write(GUEST_DR7, 0x404); 1826 vmcs_write(GUEST_DEBUGCTL, 0x2); 1827 1828 vmcs_write(ENT_CONTROLS, 1829 vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS); 1830 vmcs_write(EXI_CONTROLS, 1831 vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS); 1832 break; 1833 case 3: 1834 if (dr7 == 0x400 && debugctl == 0 && 1835 vmcs_read(GUEST_DR7) == 0x404 /* && 1836 Commented out: KVM does not support DEBUGCTL so far 1837 vmcs_read(GUEST_DEBUGCTL) == 0x2 */) 1838 vmx_inc_test_stage(); 1839 break; 1840 } 1841 vmcs_write(GUEST_RIP, guest_rip + insn_len); 1842 return VMX_TEST_RESUME; 1843 default: 1844 report("Unknown exit reason, %d", false, reason); 1845 print_vmexit_info(); 1846 } 1847 return VMX_TEST_VMEXIT; 1848 } 1849 1850 struct vmx_msr_entry { 1851 u32 index; 1852 u32 reserved; 1853 u64 value; 1854 } __attribute__((packed)); 1855 1856 #define MSR_MAGIC 0x31415926 1857 struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load; 1858 1859 static int msr_switch_init(struct vmcs *vmcs) 1860 { 1861 msr_bmp_init(); 1862 exit_msr_store = alloc_page(); 1863 exit_msr_load = alloc_page(); 1864 entry_msr_load = alloc_page(); 1865 memset(exit_msr_store, 0, PAGE_SIZE); 1866 memset(exit_msr_load, 0, PAGE_SIZE); 1867 memset(entry_msr_load, 0, PAGE_SIZE); 1868 entry_msr_load[0].index = MSR_KERNEL_GS_BASE; 1869 entry_msr_load[0].value = MSR_MAGIC; 1870 1871 vmx_set_test_stage(1); 1872 vmcs_write(ENT_MSR_LD_CNT, 1); 1873 vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load); 1874 vmcs_write(EXI_MSR_ST_CNT, 1); 1875 vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store); 1876 vmcs_write(EXI_MSR_LD_CNT, 1); 1877 vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load); 1878 return VMX_TEST_START; 1879 } 1880 1881 static void msr_switch_main(void) 1882 { 1883 if (vmx_get_test_stage() == 1) { 1884 report("VM entry MSR load", 1885 rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC); 1886 vmx_set_test_stage(2); 1887 wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1); 1888 exit_msr_store[0].index = MSR_KERNEL_GS_BASE; 1889 exit_msr_load[0].index = MSR_KERNEL_GS_BASE; 1890 exit_msr_load[0].value = MSR_MAGIC + 2; 1891 } 1892 vmcall(); 1893 } 1894 1895 static int msr_switch_exit_handler(void) 1896 { 1897 ulong reason; 1898 1899 
struct vmx_msr_entry {
	u32 index;
	u32 reserved;
	u64 value;
} __attribute__((packed));

#define MSR_MAGIC 0x31415926
struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load;

static int msr_switch_init(struct vmcs *vmcs)
{
	msr_bmp_init();
	exit_msr_store = alloc_page();
	exit_msr_load = alloc_page();
	entry_msr_load = alloc_page();
	memset(exit_msr_store, 0, PAGE_SIZE);
	memset(exit_msr_load, 0, PAGE_SIZE);
	memset(entry_msr_load, 0, PAGE_SIZE);
	entry_msr_load[0].index = MSR_KERNEL_GS_BASE;
	entry_msr_load[0].value = MSR_MAGIC;

	vmx_set_test_stage(1);
	vmcs_write(ENT_MSR_LD_CNT, 1);
	vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load);
	vmcs_write(EXI_MSR_ST_CNT, 1);
	vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store);
	vmcs_write(EXI_MSR_LD_CNT, 1);
	vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load);
	return VMX_TEST_START;
}

static void msr_switch_main(void)
{
	if (vmx_get_test_stage() == 1) {
		report("VM entry MSR load",
		       rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC);
		vmx_set_test_stage(2);
		wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1);
		exit_msr_store[0].index = MSR_KERNEL_GS_BASE;
		exit_msr_load[0].index = MSR_KERNEL_GS_BASE;
		exit_msr_load[0].value = MSR_MAGIC + 2;
	}
	vmcall();
}

static int msr_switch_exit_handler(void)
{
	ulong reason;

	reason = vmcs_read(EXI_REASON);
	if (reason == VMX_VMCALL && vmx_get_test_stage() == 2) {
		report("VM exit MSR store",
		       exit_msr_store[0].value == MSR_MAGIC + 1);
		report("VM exit MSR load",
		       rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC + 2);
		vmx_set_test_stage(3);
		entry_msr_load[0].index = MSR_FS_BASE;
		return VMX_TEST_RESUME;
	}
	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
	       __func__, vmx_get_test_stage(), reason);
	return VMX_TEST_EXIT;
}

static int msr_switch_entry_failure(struct vmentry_failure *failure)
{
	ulong reason;

	if (failure->early) {
		printf("ERROR %s: early exit\n", __func__);
		return VMX_TEST_EXIT;
	}

	reason = vmcs_read(EXI_REASON);
	if (reason == (VMX_ENTRY_FAILURE | VMX_FAIL_MSR) &&
	    vmx_get_test_stage() == 3) {
		report("VM entry MSR load: try to load FS_BASE",
		       vmcs_read(EXI_QUALIFICATION) == 1);
		return VMX_TEST_VMEXIT;
	}
	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
	       __func__, vmx_get_test_stage(), reason);
	return VMX_TEST_EXIT;
}
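/*
 * For reference (Intel SDM, VM-entry/VM-exit MSR areas): each area is an
 * array of the 16-byte vmx_msr_entry records defined above, with the
 * *_CNT fields telling the CPU how many records to process.  When VM
 * entry fails while loading MSRs, the exit qualification holds the
 * 1-based index of the offending record, which is why
 * msr_switch_entry_failure() checks EXI_QUALIFICATION == 1 after
 * entry_msr_load[0] is pointed at MSR_FS_BASE, an MSR that may not be
 * loaded this way.
 */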
static void disable_rdtscp_main(void)
{
	/* Test that #UD is properly injected in L2. */
	handle_exception(UD_VECTOR, disable_rdtscp_ud_handler);

	vmx_set_test_stage(0);
	asm volatile("rdtscp" : : : "eax", "ecx", "edx");
	vmcall();
	asm volatile(".byte 0xf3, 0x0f, 0xc7, 0xf8" : : : "eax");

	handle_exception(UD_VECTOR, 0);
	vmcall();
}

static int disable_rdtscp_exit_handler(void)
{
	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("RDTSCP triggers #UD", false);
			vmx_inc_test_stage();
			/* fallthrough */
		case 1:
			vmx_inc_test_stage();
			vmcs_write(GUEST_RIP, vmcs_read(GUEST_RIP) + 3);
			return VMX_TEST_RESUME;
		case 2:
			report("RDPID triggers #UD", false);
			break;
		}
		break;

	default:
		report("Unknown exit reason, %d", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

static int int3_init(struct vmcs *vmcs)
{
	vmcs_write(EXC_BITMAP, ~0u);
	return VMX_TEST_START;
}

static void int3_guest_main(void)
{
	asm volatile ("int3");
}

static int int3_exit_handler(void)
{
	u32 reason = vmcs_read(EXI_REASON);
	u32 intr_info = vmcs_read(EXI_INTR_INFO);

	report("L1 intercepts #BP", reason == VMX_EXC_NMI &&
	       (intr_info & INTR_INFO_VALID_MASK) &&
	       (intr_info & INTR_INFO_VECTOR_MASK) == BP_VECTOR &&
	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
		INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION);

	return VMX_TEST_VMEXIT;
}

static int into_init(struct vmcs *vmcs)
{
	vmcs_write(EXC_BITMAP, ~0u);
	return VMX_TEST_START;
}

static void into_guest_main(void)
{
	struct far_pointer32 fp = {
		.offset = (uintptr_t)&&into,
		.selector = KERNEL_CS32,
	};
	register uintptr_t rsp asm("rsp");

	if (fp.offset != (uintptr_t)&&into) {
		printf("Code address too high.\n");
		return;
	}
	if ((u32)rsp != rsp) {
		printf("Stack address too high.\n");
		return;
	}

	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
	return;
into:
	asm volatile (".code32;"
		      "movl $0x7fffffff, %eax;"
		      "addl %eax, %eax;"
		      "into;"
		      "lret;"
		      ".code64");
	__builtin_unreachable();
}

static int into_exit_handler(void)
{
	u32 reason = vmcs_read(EXI_REASON);
	u32 intr_info = vmcs_read(EXI_INTR_INFO);

	report("L1 intercepts #OF", reason == VMX_EXC_NMI &&
	       (intr_info & INTR_INFO_VALID_MASK) &&
	       (intr_info & INTR_INFO_VECTOR_MASK) == OF_VECTOR &&
	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
		INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION);

	return VMX_TEST_VMEXIT;
}

static void exit_monitor_from_l2_main(void)
{
	printf("Calling exit(0) from l2...\n");
	exit(0);
}

static int exit_monitor_from_l2_handler(void)
{
	report("The guest should have killed the VMM", false);
	return VMX_TEST_EXIT;
}

static void assert_exit_reason(u64 expected)
{
	u64 actual = vmcs_read(EXI_REASON);

	TEST_ASSERT_EQ_MSG(expected, actual, "Expected %s, got %s.",
			   exit_reason_description(expected),
			   exit_reason_description(actual));
}
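
/*
 * Advance the guest past the instruction that triggered the current
 * VM exit, using the instruction length reported in the exit info.
 */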
static void skip_exit_insn(void)
{
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	vmcs_write(GUEST_RIP, guest_rip + insn_len);
}

static void skip_exit_vmcall(void)
{
	assert_exit_reason(VMX_VMCALL);
	skip_exit_insn();
}

static void v2_null_test_guest(void)
{
}

static void v2_null_test(void)
{
	test_set_guest(v2_null_test_guest);
	enter_guest();
	report(__func__, 1);
}

static void v2_multiple_entries_test_guest(void)
{
	vmx_set_test_stage(1);
	vmcall();
	vmx_set_test_stage(2);
}

static void v2_multiple_entries_test(void)
{
	test_set_guest(v2_multiple_entries_test_guest);
	enter_guest();
	TEST_ASSERT_EQ(vmx_get_test_stage(), 1);
	skip_exit_vmcall();
	enter_guest();
	TEST_ASSERT_EQ(vmx_get_test_stage(), 2);
	report(__func__, 1);
}

static int fixture_test_data = 1;

static void fixture_test_teardown(void *data)
{
	*((int *) data) = 1;
}

static void fixture_test_guest(void)
{
	fixture_test_data++;
}

static void fixture_test_setup(void)
{
	TEST_ASSERT_EQ_MSG(1, fixture_test_data,
			   "fixture_test_teardown didn't run?!");
	fixture_test_data = 2;
	test_add_teardown(fixture_test_teardown, &fixture_test_data);
	test_set_guest(fixture_test_guest);
}

static void fixture_test_case1(void)
{
	fixture_test_setup();
	TEST_ASSERT_EQ(2, fixture_test_data);
	enter_guest();
	TEST_ASSERT_EQ(3, fixture_test_data);
	report(__func__, 1);
}

static void fixture_test_case2(void)
{
	fixture_test_setup();
	TEST_ASSERT_EQ(2, fixture_test_data);
	enter_guest();
	TEST_ASSERT_EQ(3, fixture_test_data);
	report(__func__, 1);
}

enum ept_access_op {
	OP_READ,
	OP_WRITE,
	OP_EXEC,
	OP_FLUSH_TLB,
	OP_EXIT,
};

static struct ept_access_test_data {
	unsigned long gpa;
	unsigned long *gva;
	unsigned long hpa;
	unsigned long *hva;
	enum ept_access_op op;
} ept_access_test_data;

extern unsigned char ret42_start;
extern unsigned char ret42_end;

/* Returns 42. */
asm(
	".align 64\n"
	"ret42_start:\n"
	"mov $42, %eax\n"
	"ret\n"
	"ret42_end:\n"
);

static void
diagnose_ept_violation_qual(u64 expected, u64 actual)
{

#define DIAGNOSE(flag) \
do { \
	if ((expected & flag) != (actual & flag)) \
		printf(#flag " %sexpected\n", \
		       (expected & flag) ? "" : "un"); \
} while (0)

	DIAGNOSE(EPT_VLT_RD);
	DIAGNOSE(EPT_VLT_WR);
	DIAGNOSE(EPT_VLT_FETCH);
	DIAGNOSE(EPT_VLT_PERM_RD);
	DIAGNOSE(EPT_VLT_PERM_WR);
	DIAGNOSE(EPT_VLT_PERM_EX);
	DIAGNOSE(EPT_VLT_LADDR_VLD);
	DIAGNOSE(EPT_VLT_PADDR);

#undef DIAGNOSE
}

static void do_ept_access_op(enum ept_access_op op)
{
	ept_access_test_data.op = op;
	enter_guest();
}

/*
 * Force the guest to flush its TLB (i.e., flush gva -> gpa mappings). Only
 * needed by tests that modify guest PTEs.
 */
static void ept_access_test_guest_flush_tlb(void)
{
	do_ept_access_op(OP_FLUSH_TLB);
	skip_exit_vmcall();
}

/*
 * Modifies the EPT entry at @level in the mapping of @gpa. First clears the
 * bits in @clear then sets the bits in @set. @mkhuge transforms the entry into
 * a huge page.
 */
static unsigned long ept_twiddle(unsigned long gpa, bool mkhuge, int level,
				 unsigned long clear, unsigned long set)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long orig_pte;
	unsigned long pte;

	/* Screw with the mapping at the requested level. */
	TEST_ASSERT(get_ept_pte(pml4, gpa, level, &orig_pte));
	pte = orig_pte;
	if (mkhuge)
		pte = (orig_pte & ~EPT_ADDR_MASK) | data->hpa | EPT_LARGE_PAGE;
	pte = (pte & ~clear) | set;
	set_ept_pte(pml4, gpa, level, pte);
	ept_sync(INVEPT_SINGLE, eptp);

	return orig_pte;
}

static void ept_untwiddle(unsigned long gpa, int level, unsigned long orig_pte)
{
	set_ept_pte(pml4, gpa, level, orig_pte);
}
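
/*
 * Typical usage of the twiddle/untwiddle pair (illustrative sketch):
 *
 *	orig = ept_twiddle(gpa, false, 1, EPT_PRESENT, 0);
 *	do_ept_access_op(OP_READ);	// provoke and observe the violation
 *	ept_untwiddle(gpa, 1, orig);	// restore the original entry
 */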

static void do_ept_violation(bool leaf, enum ept_access_op op,
			     u64 expected_qual, u64 expected_paddr)
{
	u64 qual;

	/* Try the access and observe the violation. */
	do_ept_access_op(op);

	assert_exit_reason(VMX_EPT_VIOLATION);

	qual = vmcs_read(EXI_QUALIFICATION);

	diagnose_ept_violation_qual(expected_qual, qual);
	TEST_EXPECT_EQ(expected_qual, qual);

#if 0
	/* Disable for now otherwise every test will fail */
	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
		       (unsigned long) (
			       op == OP_EXEC ? data->gva + 1 : data->gva));
#endif
	/*
	 * TODO: tests that probe expected_paddr in pages other than the one at
	 * the beginning of the 1g region.
	 */
	TEST_EXPECT_EQ(vmcs_read(INFO_PHYS_ADDR), expected_paddr);
}

static void
ept_violation_at_level_mkhuge(bool mkhuge, int level, unsigned long clear,
			      unsigned long set, enum ept_access_op op,
			      u64 expected_qual)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long orig_pte;

	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);

	do_ept_violation(level == 1 || mkhuge, op, expected_qual,
			 op == OP_EXEC ? data->gpa + sizeof(unsigned long) :
					 data->gpa);

	/* Fix the violation and resume the op loop. */
	ept_untwiddle(data->gpa, level, orig_pte);
	enter_guest();
	skip_exit_vmcall();
}

static void
ept_violation_at_level(int level, unsigned long clear, unsigned long set,
		       enum ept_access_op op, u64 expected_qual)
{
	ept_violation_at_level_mkhuge(false, level, clear, set, op,
				      expected_qual);
	if (ept_huge_pages_supported(level))
		ept_violation_at_level_mkhuge(true, level, clear, set, op,
					      expected_qual);
}

static void ept_violation(unsigned long clear, unsigned long set,
			  enum ept_access_op op, u64 expected_qual)
{
	ept_violation_at_level(1, clear, set, op, expected_qual);
	ept_violation_at_level(2, clear, set, op, expected_qual);
	ept_violation_at_level(3, clear, set, op, expected_qual);
	ept_violation_at_level(4, clear, set, op, expected_qual);
}

static void ept_access_violation(unsigned long access, enum ept_access_op op,
				 u64 expected_qual)
{
	ept_violation(EPT_PRESENT, access, op,
		      expected_qual | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}

/*
 * For translations that don't involve a GVA, that is physical address (paddr)
 * accesses, EPT violations don't set the flag EPT_VLT_PADDR. For a typical
 * guest memory access, the hardware does GVA -> GPA -> HPA. However, certain
 * translations don't involve GVAs, such as when the hardware does the guest
 * page table walk. For example, in translating GVA_1 -> GPA_1, the guest MMU
 * might try to set an A bit on a guest PTE. If the GPA_2 that the PTE resides
 * on isn't present in the EPT, then the EPT violation will be for GPA_2 and
 * the EPT_VLT_PADDR bit will be clear in the exit qualification.
 *
 * Note that paddr violations can also be triggered by loading PAE page tables
 * with wonky addresses. We don't test that yet.
 *
 * This function modifies the EPT entry that maps the GPA that the guest page
 * table entry mapping ept_access_data.gva resides on.
 *
 * @ept_access	EPT permissions to set. Other permissions are cleared.
 *
 * @pte_ad	Set the A/D bits on the guest PTE accordingly.
 *
 * @op		Guest operation to perform with ept_access_data.gva.
 *
 * @expect_violation
 *		Is a violation expected during the paddr access?
 *
 * @expected_qual
 *		Expected qualification for the EPT violation.
 *		EPT_VLT_PADDR should be clear.
 */
static void ept_access_paddr(unsigned long ept_access, unsigned long pte_ad,
			     enum ept_access_op op, bool expect_violation,
			     u64 expected_qual)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long *ptep;
	unsigned long gpa;
	unsigned long orig_epte;

	/* Modify the guest PTE mapping data->gva according to @pte_ad. */
	ptep = get_pte_level(current_page_table(), data->gva, /*level=*/1);
	TEST_ASSERT(ptep);
	TEST_ASSERT_EQ(*ptep & PT_ADDR_MASK, data->gpa);
	*ptep = (*ptep & ~PT_AD_MASK) | pte_ad;
	ept_access_test_guest_flush_tlb();

	/*
	 * Now modify the access bits on the EPT entry for the GPA that the
	 * guest PTE resides on. Note that by modifying a single EPT entry,
	 * we're potentially affecting 512 guest PTEs. However, we've carefully
	 * constructed our test such that those other 511 PTEs aren't used by
	 * the guest: data->gva is at the beginning of a 1G huge page, thus the
	 * PTE we're modifying is at the beginning of a 4K page and the
	 * following 511 entries are also under our control (and not touched by
	 * the guest).
	 */
	gpa = virt_to_phys(ptep);
	TEST_ASSERT_EQ(gpa & ~PAGE_MASK, 0);
	/*
	 * Make sure the guest page table page is mapped with a 4K EPT entry,
	 * otherwise our level=1 twiddling below will fail. We use the
	 * identity map (gpa = gpa) since page tables are shared with the host.
	 */
	install_ept(pml4, gpa, gpa, EPT_PRESENT);
	orig_epte = ept_twiddle(gpa, /*mkhuge=*/0, /*level=*/1,
				/*clear=*/EPT_PRESENT, /*set=*/ept_access);

	if (expect_violation) {
		do_ept_violation(/*leaf=*/true, op,
				 expected_qual | EPT_VLT_LADDR_VLD, gpa);
		ept_untwiddle(gpa, /*level=*/1, orig_epte);
		do_ept_access_op(op);
	} else {
		do_ept_access_op(op);
		ept_untwiddle(gpa, /*level=*/1, orig_epte);
	}

	TEST_ASSERT(*ptep & PT_ACCESSED_MASK);
	if ((pte_ad & PT_DIRTY_MASK) || op == OP_WRITE)
		TEST_ASSERT(*ptep & PT_DIRTY_MASK);

	skip_exit_vmcall();
}
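
/*
 * Example: ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual) leaves
 * the guest PTE with A=D=0 while the page containing that PTE is mapped
 * read-only in the EPT. The guest read then forces the MMU to set the A
 * bit, and that paging-structure write is what trips the EPT violation.
 */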
2456 */ 2457 install_ept(pml4, gpa, gpa, EPT_PRESENT); 2458 orig_epte = ept_twiddle(gpa, /*mkhuge=*/0, /*level=*/1, 2459 /*clear=*/EPT_PRESENT, /*set=*/ept_access); 2460 2461 if (expect_violation) { 2462 do_ept_violation(/*leaf=*/true, op, 2463 expected_qual | EPT_VLT_LADDR_VLD, gpa); 2464 ept_untwiddle(gpa, /*level=*/1, orig_epte); 2465 do_ept_access_op(op); 2466 } else { 2467 do_ept_access_op(op); 2468 ept_untwiddle(gpa, /*level=*/1, orig_epte); 2469 } 2470 2471 TEST_ASSERT(*ptep & PT_ACCESSED_MASK); 2472 if ((pte_ad & PT_DIRTY_MASK) || op == OP_WRITE) 2473 TEST_ASSERT(*ptep & PT_DIRTY_MASK); 2474 2475 skip_exit_vmcall(); 2476 } 2477 2478 static void ept_access_allowed_paddr(unsigned long ept_access, 2479 unsigned long pte_ad, 2480 enum ept_access_op op) 2481 { 2482 ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/false, 2483 /*expected_qual=*/-1); 2484 } 2485 2486 static void ept_access_violation_paddr(unsigned long ept_access, 2487 unsigned long pte_ad, 2488 enum ept_access_op op, 2489 u64 expected_qual) 2490 { 2491 ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/true, 2492 expected_qual); 2493 } 2494 2495 2496 static void ept_allowed_at_level_mkhuge(bool mkhuge, int level, 2497 unsigned long clear, 2498 unsigned long set, 2499 enum ept_access_op op) 2500 { 2501 struct ept_access_test_data *data = &ept_access_test_data; 2502 unsigned long orig_pte; 2503 2504 orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set); 2505 2506 /* No violation. Should proceed to vmcall. */ 2507 do_ept_access_op(op); 2508 skip_exit_vmcall(); 2509 2510 ept_untwiddle(data->gpa, level, orig_pte); 2511 } 2512 2513 static void ept_allowed_at_level(int level, unsigned long clear, 2514 unsigned long set, enum ept_access_op op) 2515 { 2516 ept_allowed_at_level_mkhuge(false, level, clear, set, op); 2517 if (ept_huge_pages_supported(level)) 2518 ept_allowed_at_level_mkhuge(true, level, clear, set, op); 2519 } 2520 2521 static void ept_allowed(unsigned long clear, unsigned long set, 2522 enum ept_access_op op) 2523 { 2524 ept_allowed_at_level(1, clear, set, op); 2525 ept_allowed_at_level(2, clear, set, op); 2526 ept_allowed_at_level(3, clear, set, op); 2527 ept_allowed_at_level(4, clear, set, op); 2528 } 2529 2530 static void ept_ignored_bit(int bit) 2531 { 2532 /* Set the bit. */ 2533 ept_allowed(0, 1ul << bit, OP_READ); 2534 ept_allowed(0, 1ul << bit, OP_WRITE); 2535 ept_allowed(0, 1ul << bit, OP_EXEC); 2536 2537 /* Clear the bit. */ 2538 ept_allowed(1ul << bit, 0, OP_READ); 2539 ept_allowed(1ul << bit, 0, OP_WRITE); 2540 ept_allowed(1ul << bit, 0, OP_EXEC); 2541 } 2542 2543 static void ept_access_allowed(unsigned long access, enum ept_access_op op) 2544 { 2545 ept_allowed(EPT_PRESENT, access, op); 2546 } 2547 2548 2549 static void ept_misconfig_at_level_mkhuge_op(bool mkhuge, int level, 2550 unsigned long clear, 2551 unsigned long set, 2552 enum ept_access_op op) 2553 { 2554 struct ept_access_test_data *data = &ept_access_test_data; 2555 unsigned long orig_pte; 2556 2557 orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set); 2558 2559 do_ept_access_op(op); 2560 assert_exit_reason(VMX_EPT_MISCONFIG); 2561 2562 /* Intel 27.2.1, "For all other VM exits, this field is cleared." */ 2563 #if 0 2564 /* broken: */ 2565 TEST_EXPECT_EQ_MSG(vmcs_read(EXI_QUALIFICATION), 0); 2566 #endif 2567 #if 0 2568 /* 2569 * broken: 2570 * According to description of exit qual for EPT violation, 2571 * EPT_VLT_LADDR_VLD indicates if GUEST_LINEAR_ADDRESS is valid. 
static void ept_misconfig_at_level_mkhuge_op(bool mkhuge, int level,
					     unsigned long clear,
					     unsigned long set,
					     enum ept_access_op op)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long orig_pte;

	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);

	do_ept_access_op(op);
	assert_exit_reason(VMX_EPT_MISCONFIG);

	/* Intel 27.2.1, "For all other VM exits, this field is cleared." */
#if 0
	/* broken: */
	TEST_EXPECT_EQ_MSG(vmcs_read(EXI_QUALIFICATION), 0);
#endif
#if 0
	/*
	 * broken:
	 * According to description of exit qual for EPT violation,
	 * EPT_VLT_LADDR_VLD indicates if GUEST_LINEAR_ADDRESS is valid.
	 * However, I can't find anything that says GUEST_LINEAR_ADDRESS ought
	 * to be set for misconfig.
	 */
	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
		       (unsigned long) (
			       op == OP_EXEC ? data->gva + 1 : data->gva));
#endif

	/* Fix the violation and resume the op loop. */
	ept_untwiddle(data->gpa, level, orig_pte);
	enter_guest();
	skip_exit_vmcall();
}

static void ept_misconfig_at_level_mkhuge(bool mkhuge, int level,
					  unsigned long clear,
					  unsigned long set)
{
	/* The op shouldn't matter (read, write, exec), so try them all! */
	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_READ);
	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_WRITE);
	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_EXEC);
}

static void ept_misconfig_at_level(int level, unsigned long clear,
				   unsigned long set)
{
	ept_misconfig_at_level_mkhuge(false, level, clear, set);
	if (ept_huge_pages_supported(level))
		ept_misconfig_at_level_mkhuge(true, level, clear, set);
}

static void ept_misconfig(unsigned long clear, unsigned long set)
{
	ept_misconfig_at_level(1, clear, set);
	ept_misconfig_at_level(2, clear, set);
	ept_misconfig_at_level(3, clear, set);
	ept_misconfig_at_level(4, clear, set);
}

static void ept_access_misconfig(unsigned long access)
{
	ept_misconfig(EPT_PRESENT, access);
}

static void ept_reserved_bit_at_level_nohuge(int level, int bit)
{
	/* Setting the bit causes a misconfig. */
	ept_misconfig_at_level_mkhuge(false, level, 0, 1ul << bit);

	/* Making the entry non-present turns reserved bits into ignored. */
	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}

static void ept_reserved_bit_at_level_huge(int level, int bit)
{
	/* Setting the bit causes a misconfig. */
	ept_misconfig_at_level_mkhuge(true, level, 0, 1ul << bit);

	/* Making the entry non-present turns reserved bits into ignored. */
	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}

static void ept_reserved_bit_at_level(int level, int bit)
{
	/* Setting the bit causes a misconfig. */
	ept_misconfig_at_level(level, 0, 1ul << bit);

	/* Making the entry non-present turns reserved bits into ignored. */
	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}

static void ept_reserved_bit(int bit)
{
	ept_reserved_bit_at_level(1, bit);
	ept_reserved_bit_at_level(2, bit);
	ept_reserved_bit_at_level(3, bit);
	ept_reserved_bit_at_level(4, bit);
}

#define PAGE_2M_ORDER 9
#define PAGE_1G_ORDER 18

static void *get_1g_page(void)
{
	static void *alloc;

	if (!alloc)
		alloc = alloc_pages(PAGE_1G_ORDER);
	return alloc;
}
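
/*
 * Note: PAGE_1G_ORDER is 18 because 2^18 4KiB pages make 1GiB. The 1G
 * backing page is allocated once and then shared by every EPT access
 * test in this file.
 */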

static void ept_access_test_teardown(void *unused)
{
	/* Exit the guest cleanly. */
	do_ept_access_op(OP_EXIT);
}
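
/*
 * Guest-side op loop: perform the op requested by the host, then VMCALL
 * so the host can queue up the next one. OP_EXIT breaks out of the loop
 * and lets the guest run to completion.
 */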
2734 */ 2735 TEST_ASSERT(get_ept_pte(pml4, data->gpa, 4, &pte) && pte == 0); 2736 TEST_ASSERT(get_ept_pte(pml4, data->gpa + size - 1, 4, &pte) && pte == 0); 2737 install_ept(pml4, data->hpa, data->gpa, EPT_PRESENT); 2738 2739 data->hva[0] = MAGIC_VAL_1; 2740 memcpy(&data->hva[1], &ret42_start, &ret42_end - &ret42_start); 2741 } 2742 2743 static void ept_access_test_not_present(void) 2744 { 2745 ept_access_test_setup(); 2746 /* --- */ 2747 ept_access_violation(0, OP_READ, EPT_VLT_RD); 2748 ept_access_violation(0, OP_WRITE, EPT_VLT_WR); 2749 ept_access_violation(0, OP_EXEC, EPT_VLT_FETCH); 2750 } 2751 2752 static void ept_access_test_read_only(void) 2753 { 2754 ept_access_test_setup(); 2755 2756 /* r-- */ 2757 ept_access_allowed(EPT_RA, OP_READ); 2758 ept_access_violation(EPT_RA, OP_WRITE, EPT_VLT_WR | EPT_VLT_PERM_RD); 2759 ept_access_violation(EPT_RA, OP_EXEC, EPT_VLT_FETCH | EPT_VLT_PERM_RD); 2760 } 2761 2762 static void ept_access_test_write_only(void) 2763 { 2764 ept_access_test_setup(); 2765 /* -w- */ 2766 ept_access_misconfig(EPT_WA); 2767 } 2768 2769 static void ept_access_test_read_write(void) 2770 { 2771 ept_access_test_setup(); 2772 /* rw- */ 2773 ept_access_allowed(EPT_RA | EPT_WA, OP_READ); 2774 ept_access_allowed(EPT_RA | EPT_WA, OP_WRITE); 2775 ept_access_violation(EPT_RA | EPT_WA, OP_EXEC, 2776 EPT_VLT_FETCH | EPT_VLT_PERM_RD | EPT_VLT_PERM_WR); 2777 } 2778 2779 2780 static void ept_access_test_execute_only(void) 2781 { 2782 ept_access_test_setup(); 2783 /* --x */ 2784 if (ept_execute_only_supported()) { 2785 ept_access_violation(EPT_EA, OP_READ, 2786 EPT_VLT_RD | EPT_VLT_PERM_EX); 2787 ept_access_violation(EPT_EA, OP_WRITE, 2788 EPT_VLT_WR | EPT_VLT_PERM_EX); 2789 ept_access_allowed(EPT_EA, OP_EXEC); 2790 } else { 2791 ept_access_misconfig(EPT_EA); 2792 } 2793 } 2794 2795 static void ept_access_test_read_execute(void) 2796 { 2797 ept_access_test_setup(); 2798 /* r-x */ 2799 ept_access_allowed(EPT_RA | EPT_EA, OP_READ); 2800 ept_access_violation(EPT_RA | EPT_EA, OP_WRITE, 2801 EPT_VLT_WR | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX); 2802 ept_access_allowed(EPT_RA | EPT_EA, OP_EXEC); 2803 } 2804 2805 static void ept_access_test_write_execute(void) 2806 { 2807 ept_access_test_setup(); 2808 /* -wx */ 2809 ept_access_misconfig(EPT_WA | EPT_EA); 2810 } 2811 2812 static void ept_access_test_read_write_execute(void) 2813 { 2814 ept_access_test_setup(); 2815 /* rwx */ 2816 ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_READ); 2817 ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_WRITE); 2818 ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_EXEC); 2819 } 2820 2821 static void ept_access_test_reserved_bits(void) 2822 { 2823 int i; 2824 int maxphyaddr; 2825 2826 ept_access_test_setup(); 2827 2828 /* Reserved bits above maxphyaddr. */ 2829 maxphyaddr = cpuid_maxphyaddr(); 2830 for (i = maxphyaddr; i <= 51; i++) { 2831 report_prefix_pushf("reserved_bit=%d", i); 2832 ept_reserved_bit(i); 2833 report_prefix_pop(); 2834 } 2835 2836 /* Level-specific reserved bits. */ 2837 ept_reserved_bit_at_level_nohuge(2, 3); 2838 ept_reserved_bit_at_level_nohuge(2, 4); 2839 ept_reserved_bit_at_level_nohuge(2, 5); 2840 ept_reserved_bit_at_level_nohuge(2, 6); 2841 /* 2M alignment. 
static void ept_access_test_reserved_bits(void)
{
	int i;
	int maxphyaddr;

	ept_access_test_setup();

	/* Reserved bits above maxphyaddr. */
	maxphyaddr = cpuid_maxphyaddr();
	for (i = maxphyaddr; i <= 51; i++) {
		report_prefix_pushf("reserved_bit=%d", i);
		ept_reserved_bit(i);
		report_prefix_pop();
	}

	/* Level-specific reserved bits. */
	ept_reserved_bit_at_level_nohuge(2, 3);
	ept_reserved_bit_at_level_nohuge(2, 4);
	ept_reserved_bit_at_level_nohuge(2, 5);
	ept_reserved_bit_at_level_nohuge(2, 6);
	/* 2M alignment. */
	for (i = 12; i < 20; i++) {
		report_prefix_pushf("reserved_bit=%d", i);
		ept_reserved_bit_at_level_huge(2, i);
		report_prefix_pop();
	}
	ept_reserved_bit_at_level_nohuge(3, 3);
	ept_reserved_bit_at_level_nohuge(3, 4);
	ept_reserved_bit_at_level_nohuge(3, 5);
	ept_reserved_bit_at_level_nohuge(3, 6);
	/* 1G alignment. */
	for (i = 12; i < 29; i++) {
		report_prefix_pushf("reserved_bit=%d", i);
		ept_reserved_bit_at_level_huge(3, i);
		report_prefix_pop();
	}
	ept_reserved_bit_at_level(4, 3);
	ept_reserved_bit_at_level(4, 4);
	ept_reserved_bit_at_level(4, 5);
	ept_reserved_bit_at_level(4, 6);
	ept_reserved_bit_at_level(4, 7);
}

static void ept_access_test_ignored_bits(void)
{
	ept_access_test_setup();
	/*
	 * Bits ignored at every level. Bits 8 and 9 (A and D) are ignored as
	 * far as translation is concerned even if AD bits are enabled in the
	 * EPTP. Bit 63 is ignored because "EPT-violation #VE" VM-execution
	 * control is 0.
	 */
	ept_ignored_bit(8);
	ept_ignored_bit(9);
	ept_ignored_bit(10);
	ept_ignored_bit(11);
	ept_ignored_bit(52);
	ept_ignored_bit(53);
	ept_ignored_bit(54);
	ept_ignored_bit(55);
	ept_ignored_bit(56);
	ept_ignored_bit(57);
	ept_ignored_bit(58);
	ept_ignored_bit(59);
	ept_ignored_bit(60);
	ept_ignored_bit(61);
	ept_ignored_bit(62);
	ept_ignored_bit(63);
}

static void ept_access_test_paddr_not_present_ad_disabled(void)
{
	ept_access_test_setup();
	ept_disable_ad_bits();

	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, EPT_VLT_RD);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, EPT_VLT_RD);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, EPT_VLT_RD);
}
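
/*
 * With EPT A/D bits enabled, every access the CPU makes to guest paging
 * structures during a walk is treated as a write, so the expected
 * qualification includes EPT_VLT_WR even for guest reads.
 */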
static void ept_access_test_paddr_not_present_ad_enabled(void)
{
	u64 qual = EPT_VLT_RD | EPT_VLT_WR;

	ept_access_test_setup();
	ept_enable_ad_bits_or_skip_test();

	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, qual);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, qual);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, qual);
}

static void ept_access_test_paddr_read_only_ad_disabled(void)
{
	/*
	 * When EPT AD bits are disabled, all accesses to guest paging
	 * structures are reported separately as a read and (after
	 * translation of the GPA to host physical address) a read+write
	 * if the A/D bits have to be set.
	 */
	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;

	ept_access_test_setup();
	ept_disable_ad_bits();

	/* Can't update A bit, so all accesses fail. */
	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
	/* AD bits disabled, so only writes try to update the D bit. */
	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC);
	/* Both A and D already set, so read-only is OK. */
	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_READ);
	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_WRITE);
	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_EXEC);
}

static void ept_access_test_paddr_read_only_ad_enabled(void)
{
	/*
	 * When EPT AD bits are enabled, all accesses to guest paging
	 * structures are considered writes as far as EPT translation
	 * is concerned.
	 */
	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;

	ept_access_test_setup();
	ept_enable_ad_bits_or_skip_test();

	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC, qual);
	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_EXEC, qual);
}

static void ept_access_test_paddr_read_write(void)
{
	ept_access_test_setup();
	/* Read-write access to paging structure. */
	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_READ);
	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_WRITE);
	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_EXEC);
}

static void ept_access_test_paddr_read_write_execute(void)
{
	ept_access_test_setup();
	/* RWX access to paging structure. */
	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_READ);
	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_WRITE);
	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_EXEC);
}

static void ept_access_test_paddr_read_execute_ad_disabled(void)
{
	/*
	 * When EPT AD bits are disabled, all accesses to guest paging
	 * structures are reported separately as a read and (after
	 * translation of the GPA to host physical address) a read+write
	 * if the A/D bits have to be set.
	 */
	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX;

	ept_access_test_setup();
	ept_disable_ad_bits();

	/* Can't update A bit, so all accesses fail. */
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual);
	/* AD bits disabled, so only writes try to update the D bit. */
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual);
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC);
	/* Both A and D already set, so read-only is OK. */
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ);
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE);
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC);
}
3014 */ 3015 u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX; 3016 3017 ept_access_test_setup(); 3018 ept_enable_ad_bits_or_skip_test(); 3019 3020 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual); 3021 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual); 3022 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual); 3023 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ, qual); 3024 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual); 3025 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC, qual); 3026 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ, qual); 3027 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE, qual); 3028 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC, qual); 3029 } 3030 3031 static void ept_access_test_paddr_not_present_page_fault(void) 3032 { 3033 ept_access_test_setup(); 3034 /* 3035 * TODO: test no EPT violation as long as guest PF occurs. e.g., GPA is 3036 * page is read-only in EPT but GVA is also mapped read only in PT. 3037 * Thus guest page fault before host takes EPT violation for trying to 3038 * update A bit. 3039 */ 3040 } 3041 3042 static void ept_access_test_force_2m_page(void) 3043 { 3044 ept_access_test_setup(); 3045 3046 TEST_ASSERT_EQ(ept_2m_supported(), true); 3047 ept_allowed_at_level_mkhuge(true, 2, 0, 0, OP_READ); 3048 ept_violation_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_RA, OP_WRITE, 3049 EPT_VLT_WR | EPT_VLT_PERM_RD | 3050 EPT_VLT_LADDR_VLD | EPT_VLT_PADDR); 3051 ept_misconfig_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_WA); 3052 } 3053 3054 static bool invvpid_valid(u64 type, u64 vpid, u64 gla) 3055 { 3056 u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3057 3058 TEST_ASSERT(msr & VPID_CAP_INVVPID); 3059 3060 if (type < INVVPID_ADDR || type > INVVPID_CONTEXT_LOCAL) 3061 return false; 3062 3063 if (!(msr & (1ull << (type + VPID_CAP_INVVPID_TYPES_SHIFT)))) 3064 return false; 3065 3066 if (vpid >> 16) 3067 return false; 3068 3069 if (type != INVVPID_ALL && !vpid) 3070 return false; 3071 3072 if (type == INVVPID_ADDR && !is_canonical(gla)) 3073 return false; 3074 3075 return true; 3076 } 3077 3078 static void try_invvpid(u64 type, u64 vpid, u64 gla) 3079 { 3080 int rc; 3081 bool valid = invvpid_valid(type, vpid, gla); 3082 u64 expected = valid ? VMXERR_UNSUPPORTED_VMCS_COMPONENT 3083 : VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID; 3084 /* 3085 * Set VMX_INST_ERROR to VMXERR_UNVALID_VMCS_COMPONENT, so 3086 * that we can tell if it is updated by INVVPID. 3087 */ 3088 vmcs_read(~0); 3089 rc = invvpid(type, vpid, gla); 3090 report("INVVPID type %ld VPID %lx GLA %lx %s", 3091 !rc == valid, type, vpid, gla, 3092 valid ? "passes" : "fails"); 3093 report("After %s INVVPID, VMX_INST_ERR is %ld (actual %ld)", 3094 vmcs_read(VMX_INST_ERROR) == expected, 3095 rc ? "failed" : "successful", 3096 expected, vmcs_read(VMX_INST_ERROR)); 3097 } 3098 3099 static void ds_invvpid(void *data) 3100 { 3101 u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3102 u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1; 3103 3104 TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL); 3105 asm volatile("invvpid %0, %1" 3106 : 3107 : "m"(*(struct invvpid_operand *)data), 3108 "r"(type)); 3109 } 3110 3111 /* 3112 * The SS override is ignored in 64-bit mode, so we use an addressing 3113 * mode with %rsp as the base register to generate an implicit SS 3114 * reference. 
3115 */ 3116 static void ss_invvpid(void *data) 3117 { 3118 u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3119 u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1; 3120 3121 TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL); 3122 asm volatile("sub %%rsp,%0; invvpid (%%rsp,%0,1), %1" 3123 : "+r"(data) 3124 : "r"(type)); 3125 } 3126 3127 static void invvpid_test_gp(void) 3128 { 3129 bool fault; 3130 3131 fault = test_for_exception(GP_VECTOR, &ds_invvpid, 3132 (void *)NONCANONICAL); 3133 report("INVVPID with non-canonical DS operand raises #GP", fault); 3134 } 3135 3136 static void invvpid_test_ss(void) 3137 { 3138 bool fault; 3139 3140 fault = test_for_exception(SS_VECTOR, &ss_invvpid, 3141 (void *)NONCANONICAL); 3142 report("INVVPID with non-canonical SS operand raises #SS", fault); 3143 } 3144 3145 static void invvpid_test_pf(void) 3146 { 3147 void *vpage = alloc_vpage(); 3148 bool fault; 3149 3150 fault = test_for_exception(PF_VECTOR, &ds_invvpid, vpage); 3151 report("INVVPID with unmapped operand raises #PF", fault); 3152 } 3153 3154 static void try_compat_invvpid(void *unused) 3155 { 3156 struct far_pointer32 fp = { 3157 .offset = (uintptr_t)&&invvpid, 3158 .selector = KERNEL_CS32, 3159 }; 3160 register uintptr_t rsp asm("rsp"); 3161 3162 TEST_ASSERT_MSG(fp.offset == (uintptr_t)&&invvpid, 3163 "Code address too high."); 3164 TEST_ASSERT_MSG(rsp == (u32)rsp, "Stack address too high."); 3165 3166 asm goto ("lcall *%0" : : "m" (fp) : "rax" : invvpid); 3167 return; 3168 invvpid: 3169 asm volatile (".code32;" 3170 "invvpid (%eax), %eax;" 3171 "lret;" 3172 ".code64"); 3173 __builtin_unreachable(); 3174 } 3175 3176 static void invvpid_test_compatibility_mode(void) 3177 { 3178 bool fault; 3179 3180 fault = test_for_exception(UD_VECTOR, &try_compat_invvpid, NULL); 3181 report("Compatibility mode INVVPID raises #UD", fault); 3182 } 3183 3184 static void invvpid_test_not_in_vmx_operation(void) 3185 { 3186 bool fault; 3187 3188 TEST_ASSERT(!vmx_off()); 3189 fault = test_for_exception(UD_VECTOR, &ds_invvpid, NULL); 3190 report("INVVPID outside of VMX operation raises #UD", fault); 3191 TEST_ASSERT(!vmx_on()); 3192 } 3193 3194 /* 3195 * This does not test real-address mode, virtual-8086 mode, protected mode, 3196 * or CPL > 0. 3197 */ 3198 static void invvpid_test_v2(void) 3199 { 3200 u64 msr; 3201 int i; 3202 unsigned types = 0; 3203 unsigned type; 3204 3205 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || 3206 !(ctrl_cpu_rev[1].clr & CPU_VPID)) 3207 test_skip("VPID not supported"); 3208 3209 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3210 3211 if (!(msr & VPID_CAP_INVVPID)) 3212 test_skip("INVVPID not supported.\n"); 3213 3214 if (msr & VPID_CAP_INVVPID_ADDR) 3215 types |= 1u << INVVPID_ADDR; 3216 if (msr & VPID_CAP_INVVPID_CXTGLB) 3217 types |= 1u << INVVPID_CONTEXT_GLOBAL; 3218 if (msr & VPID_CAP_INVVPID_ALL) 3219 types |= 1u << INVVPID_ALL; 3220 if (msr & VPID_CAP_INVVPID_CXTLOC) 3221 types |= 1u << INVVPID_CONTEXT_LOCAL; 3222 3223 if (!types) 3224 test_skip("No INVVPID types supported.\n"); 3225 3226 for (i = -127; i < 128; i++) 3227 try_invvpid(i, 0xffff, 0); 3228 3229 /* 3230 * VPID must not be more than 16 bits. 3231 */ 3232 for (i = 0; i < 64; i++) 3233 for (type = 0; type < 4; type++) 3234 if (types & (1u << type)) 3235 try_invvpid(type, 1ul << i, 0); 3236 3237 /* 3238 * VPID must not be zero, except for "all contexts." 
3239 */ 3240 for (type = 0; type < 4; type++) 3241 if (types & (1u << type)) 3242 try_invvpid(type, 0, 0); 3243 3244 /* 3245 * The gla operand is only validated for single-address INVVPID. 3246 */ 3247 if (types & (1u << INVVPID_ADDR)) 3248 try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL); 3249 3250 invvpid_test_gp(); 3251 invvpid_test_ss(); 3252 invvpid_test_pf(); 3253 invvpid_test_compatibility_mode(); 3254 invvpid_test_not_in_vmx_operation(); 3255 } 3256 3257 /* 3258 * Test for early VMLAUNCH failure. Returns true if VMLAUNCH makes it 3259 * at least as far as the guest-state checks. Returns false if the 3260 * VMLAUNCH fails early and execution falls through to the next 3261 * instruction. 3262 */ 3263 static bool vmlaunch_succeeds(void) 3264 { 3265 /* 3266 * Indirectly set VMX_INST_ERR to 12 ("VMREAD/VMWRITE from/to 3267 * unsupported VMCS component"). The caller can then check 3268 * to see if a failed VM-entry sets VMX_INST_ERR as expected. 3269 */ 3270 vmcs_write(~0u, 0); 3271 3272 vmcs_write(HOST_RIP, (uintptr_t)&&success); 3273 __asm__ __volatile__ goto ("vmwrite %%rsp, %0; vmlaunch" 3274 : 3275 : "r" ((u64)HOST_RSP) 3276 : "cc", "memory" 3277 : success); 3278 return false; 3279 success: 3280 TEST_ASSERT(vmcs_read(EXI_REASON) == 3281 (VMX_FAIL_STATE | VMX_ENTRY_FAILURE)); 3282 return true; 3283 } 3284 3285 /* 3286 * Try to launch the current VMCS. 3287 */ 3288 static void test_vmx_controls(bool controls_valid, bool xfail) 3289 { 3290 bool success = vmlaunch_succeeds(); 3291 u32 vmx_inst_err; 3292 3293 report_xfail("vmlaunch %s", xfail, success == controls_valid, 3294 controls_valid ? "succeeds" : "fails"); 3295 if (!success) { 3296 vmx_inst_err = vmcs_read(VMX_INST_ERROR); 3297 report("VMX inst error is %d (actual %d)", 3298 vmx_inst_err == VMXERR_ENTRY_INVALID_CONTROL_FIELD, 3299 VMXERR_ENTRY_INVALID_CONTROL_FIELD, vmx_inst_err); 3300 } 3301 } 3302 3303 /* 3304 * Test a particular value of a VM-execution control bit, if the value 3305 * is required or if the value is zero. 3306 */ 3307 static void test_rsvd_ctl_bit_value(const char *name, union vmx_ctrl_msr msr, 3308 enum Encoding encoding, unsigned bit, 3309 unsigned val) 3310 { 3311 u32 mask = 1u << bit; 3312 bool expected; 3313 u32 controls; 3314 3315 if (msr.set & mask) 3316 TEST_ASSERT(msr.clr & mask); 3317 3318 /* 3319 * We can't arbitrarily turn on a control bit, because it may 3320 * introduce dependencies on other VMCS fields. So, we only 3321 * test turning on bits that have a required setting. 3322 */ 3323 if (val && (msr.clr & mask) && !(msr.set & mask)) 3324 return; 3325 3326 report_prefix_pushf("%s %s bit %d", 3327 val ? "Set" : "Clear", name, bit); 3328 3329 controls = vmcs_read(encoding); 3330 if (val) { 3331 vmcs_write(encoding, msr.set | mask); 3332 expected = (msr.clr & mask); 3333 } else { 3334 vmcs_write(encoding, msr.set & ~mask); 3335 expected = !(msr.set & mask); 3336 } 3337 test_vmx_controls(expected, false); 3338 vmcs_write(encoding, controls); 3339 report_prefix_pop(); 3340 } 3341 3342 /* 3343 * Test reserved values of a VM-execution control bit, based on the 3344 * allowed bit settings from the corresponding VMX capability MSR. 3345 */ 3346 static void test_rsvd_ctl_bit(const char *name, union vmx_ctrl_msr msr, 3347 enum Encoding encoding, unsigned bit) 3348 { 3349 test_rsvd_ctl_bit_value(name, msr, encoding, bit, 0); 3350 test_rsvd_ctl_bit_value(name, msr, encoding, bit, 1); 3351 } 3352 3353 /* 3354 * Reserved bits in the pin-based VM-execution controls must be set 3355 * properly. 

/*
 * Reserved bits in the pin-based VM-execution controls must be set
 * properly. Software may consult the VMX capability MSRs to determine
 * the proper settings.
 * [Intel SDM]
 */
static void test_pin_based_ctls(void)
{
	unsigned bit;

	printf("%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PIN" :
	       "MSR_IA32_VMX_PINBASED_CTLS", ctrl_pin_rev.val);
	for (bit = 0; bit < 32; bit++)
		test_rsvd_ctl_bit("pin-based controls",
				  ctrl_pin_rev, PIN_CONTROLS, bit);
}

/*
 * Reserved bits in the primary processor-based VM-execution controls
 * must be set properly. Software may consult the VMX capability MSRs
 * to determine the proper settings.
 * [Intel SDM]
 */
static void test_primary_processor_based_ctls(void)
{
	unsigned bit;

	printf("\n%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PROC" :
	       "MSR_IA32_VMX_PROCBASED_CTLS", ctrl_cpu_rev[0].val);
	for (bit = 0; bit < 32; bit++)
		test_rsvd_ctl_bit("primary processor-based controls",
				  ctrl_cpu_rev[0], CPU_EXEC_CTRL0, bit);
}

/*
 * If the "activate secondary controls" primary processor-based
 * VM-execution control is 1, reserved bits in the secondary
 * processor-based VM-execution controls must be cleared. Software may
 * consult the VMX capability MSRs to determine which bits are
 * reserved.
 * If the "activate secondary controls" primary processor-based
 * VM-execution control is 0 (or if the processor does not support the
 * 1-setting of that control), no checks are performed on the
 * secondary processor-based VM-execution controls.
 * [Intel SDM]
 */
static void test_secondary_processor_based_ctls(void)
{
	u32 primary;
	u32 secondary;
	unsigned bit;

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY))
		return;

	primary = vmcs_read(CPU_EXEC_CTRL0);
	secondary = vmcs_read(CPU_EXEC_CTRL1);

	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY);
	printf("\nMSR_IA32_VMX_PROCBASED_CTLS2: %lx\n", ctrl_cpu_rev[1].val);
	for (bit = 0; bit < 32; bit++)
		test_rsvd_ctl_bit("secondary processor-based controls",
				  ctrl_cpu_rev[1], CPU_EXEC_CTRL1, bit);

	/*
	 * When the "activate secondary controls" VM-execution control
	 * is clear, there are no checks on the secondary controls.
	 */
	vmcs_write(CPU_EXEC_CTRL0, primary & ~CPU_SECONDARY);
	vmcs_write(CPU_EXEC_CTRL1, ~0);
	report("Secondary processor-based controls ignored",
	       vmlaunch_succeeds());
	vmcs_write(CPU_EXEC_CTRL1, secondary);
	vmcs_write(CPU_EXEC_CTRL0, primary);
}

static void try_cr3_target_count(unsigned i, unsigned max)
{
	report_prefix_pushf("CR3 target count 0x%x", i);
	vmcs_write(CR3_TARGET_COUNT, i);
	test_vmx_controls(i <= max, false);
	report_prefix_pop();
}
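
/*
 * Bits 24:16 of IA32_VMX_MISC report how many CR3-target values the
 * processor supports; test_cr3_targets reads that limit and probes
 * counts on both sides of it, plus some extreme values.
 */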

/*
 * The CR3-target count must not be greater than 4. Future processors
 * may support a different number of CR3-target values. Software
 * should read the VMX capability MSR IA32_VMX_MISC to determine the
 * number of values supported.
 * [Intel SDM]
 */
static void test_cr3_targets(void)
{
	unsigned supported_targets = (rdmsr(MSR_IA32_VMX_MISC) >> 16) & 0x1ff;
	u32 cr3_targets = vmcs_read(CR3_TARGET_COUNT);
	unsigned i;

	printf("\nSupported CR3 targets: %u\n", supported_targets);
	TEST_ASSERT(supported_targets <= 256);

	try_cr3_target_count(-1u, supported_targets);
	try_cr3_target_count(0x80000000, supported_targets);
	try_cr3_target_count(0x7fffffff, supported_targets);
	for (i = 0; i <= supported_targets + 1; i++)
		try_cr3_target_count(i, supported_targets);
	vmcs_write(CR3_TARGET_COUNT, cr3_targets);
}

/*
 * Test a particular address setting in the VMCS
 */
static void test_vmcs_addr(const char *name,
			   enum Encoding encoding,
			   u64 align,
			   bool ignored,
			   bool xfail_beyond_mapped_ram,
			   u64 addr)
{
	bool xfail =
		(xfail_beyond_mapped_ram &&
		 addr > fwcfg_get_u64(FW_CFG_RAM_SIZE) - align &&
		 addr < (1ul << cpuid_maxphyaddr()));

	report_prefix_pushf("%s = %lx", name, addr);
	vmcs_write(encoding, addr);
	test_vmx_controls(ignored || (IS_ALIGNED(addr, align) &&
				      addr < (1ul << cpuid_maxphyaddr())),
			  xfail);
	report_prefix_pop();
}

/*
 * Test interesting values for a VMCS address
 */
static void test_vmcs_addr_values(const char *name,
				  enum Encoding encoding,
				  u64 align,
				  bool ignored,
				  bool xfail_beyond_mapped_ram,
				  u32 bit_start, u32 bit_end)
{
	unsigned i;
	u64 orig_val = vmcs_read(encoding);

	for (i = bit_start; i <= bit_end; i++)
		test_vmcs_addr(name, encoding, align, ignored,
			       xfail_beyond_mapped_ram, 1ul << i);

	test_vmcs_addr(name, encoding, align, ignored,
		       xfail_beyond_mapped_ram, PAGE_SIZE - 1);
	test_vmcs_addr(name, encoding, align, ignored,
		       xfail_beyond_mapped_ram, PAGE_SIZE);
	test_vmcs_addr(name, encoding, align, ignored,
		       xfail_beyond_mapped_ram,
		       (1ul << cpuid_maxphyaddr()) - PAGE_SIZE);
	test_vmcs_addr(name, encoding, align, ignored,
		       xfail_beyond_mapped_ram, -1ul);

	vmcs_write(encoding, orig_val);
}
3518 */ 3519 static void test_vmcs_addr_reference(u32 control_bit, enum Encoding field, 3520 const char *field_name, 3521 const char *control_name, u64 align, 3522 bool xfail_beyond_mapped_ram, 3523 bool control_primary) 3524 { 3525 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 3526 u32 secondary = vmcs_read(CPU_EXEC_CTRL1); 3527 u64 page_addr; 3528 3529 if (control_primary) { 3530 if (!(ctrl_cpu_rev[0].clr & control_bit)) 3531 return; 3532 } else { 3533 if (!(ctrl_cpu_rev[1].clr & control_bit)) 3534 return; 3535 } 3536 3537 page_addr = vmcs_read(field); 3538 3539 report_prefix_pushf("%s enabled", control_name); 3540 if (control_primary) { 3541 vmcs_write(CPU_EXEC_CTRL0, primary | control_bit); 3542 } else { 3543 vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY); 3544 vmcs_write(CPU_EXEC_CTRL1, secondary | control_bit); 3545 } 3546 3547 test_vmcs_addr_values(field_name, field, align, false, 3548 xfail_beyond_mapped_ram, 0, 63); 3549 report_prefix_pop(); 3550 3551 report_prefix_pushf("%s disabled", control_name); 3552 if (control_primary) { 3553 vmcs_write(CPU_EXEC_CTRL0, primary & ~control_bit); 3554 } else { 3555 vmcs_write(CPU_EXEC_CTRL0, primary & ~CPU_SECONDARY); 3556 vmcs_write(CPU_EXEC_CTRL1, secondary & ~control_bit); 3557 } 3558 3559 test_vmcs_addr_values(field_name, field, align, true, false, 0, 63); 3560 report_prefix_pop(); 3561 3562 vmcs_write(field, page_addr); 3563 vmcs_write(CPU_EXEC_CTRL0, primary); 3564 } 3565 3566 /* 3567 * If the "use I/O bitmaps" VM-execution control is 1, bits 11:0 of 3568 * each I/O-bitmap address must be 0. Neither address should set any 3569 * bits beyond the processor's physical-address width. 3570 * [Intel SDM] 3571 */ 3572 static void test_io_bitmaps(void) 3573 { 3574 test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_A, 3575 "I/O bitmap A", "Use I/O bitmaps", 3576 PAGE_SIZE, false, true); 3577 test_vmcs_addr_reference(CPU_IO_BITMAP, IO_BITMAP_B, 3578 "I/O bitmap B", "Use I/O bitmaps", 3579 PAGE_SIZE, false, true); 3580 } 3581 3582 /* 3583 * If the "use MSR bitmaps" VM-execution control is 1, bits 11:0 of 3584 * the MSR-bitmap address must be 0. The address should not set any 3585 * bits beyond the processor's physical-address width. 3586 * [Intel SDM] 3587 */ 3588 static void test_msr_bitmap(void) 3589 { 3590 test_vmcs_addr_reference(CPU_MSR_BITMAP, MSR_BITMAP, 3591 "MSR bitmap", "Use MSR bitmaps", 3592 PAGE_SIZE, false, true); 3593 } 3594 3595 /* 3596 * If the "use TPR shadow" VM-execution control is 1, the virtual-APIC 3597 * address must satisfy the following checks: 3598 * - Bits 11:0 of the address must be 0. 3599 * - The address should not set any bits beyond the processor's 3600 * physical-address width. 3601 * [Intel SDM] 3602 */ 3603 static void test_apic_virt_addr(void) 3604 { 3605 test_vmcs_addr_reference(CPU_TPR_SHADOW, APIC_VIRT_ADDR, 3606 "virtual-APIC address", "Use TPR shadow", 3607 PAGE_SIZE, true, true); 3608 } 3609 3610 /* 3611 * If the "virtualize APIC-accesses" VM-execution control is 1, the 3612 * APIC-access address must satisfy the following checks: 3613 * - Bits 11:0 of the address must be 0. 3614 * - The address should not set any bits beyond the processor's 3615 * physical-address width. 
static bool set_bit_pattern(u8 mask, u32 *secondary)
{
	u8 i;
	bool flag = false;
	u32 test_bits[3] = {
		CPU_VIRT_X2APIC,
		CPU_APIC_REG_VIRT,
		CPU_VINTD
	};

	for (i = 0; i < ARRAY_SIZE(test_bits); i++) {
		if ((mask & (1u << i)) &&
		    (ctrl_cpu_rev[1].clr & test_bits[i])) {
			*secondary |= test_bits[i];
			flag = true;
		}
	}

	return (flag);
}

/*
 * If the "use TPR shadow" VM-execution control is 0, the following
 * VM-execution controls must also be 0:
 * - virtualize x2APIC mode
 * - APIC-register virtualization
 * - virtual-interrupt delivery
 * [Intel SDM]
 *
 * 2. If the "virtualize x2APIC mode" VM-execution control is 1, the
 * "virtualize APIC accesses" VM-execution control must be 0.
 * [Intel SDM]
 */
static void test_apic_virtual_ctls(void)
{
	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
	u32 primary = saved_primary;
	u32 secondary = saved_secondary;
	bool ctrl = false;
	char str[10] = "disabled";
	u8 i = 0, j;

	/*
	 * First test
	 */
	if (!((ctrl_cpu_rev[0].clr & (CPU_SECONDARY | CPU_TPR_SHADOW)) ==
	      (CPU_SECONDARY | CPU_TPR_SHADOW)))
		return;

	primary |= CPU_SECONDARY;
	primary &= ~CPU_TPR_SHADOW;
	vmcs_write(CPU_EXEC_CTRL0, primary);

	while (1) {
		for (j = 1; j < 8; j++) {
			secondary &= ~(CPU_VIRT_X2APIC | CPU_APIC_REG_VIRT |
				       CPU_VINTD);
			if (primary & CPU_TPR_SHADOW)
				ctrl = true;
			else
				ctrl = !set_bit_pattern(j, &secondary);

			vmcs_write(CPU_EXEC_CTRL1, secondary);
			report_prefix_pushf("Use TPR shadow %s, virtualize x2APIC mode %s, APIC-register virtualization %s, virtual-interrupt delivery %s",
					    str,
					    (secondary & CPU_VIRT_X2APIC) ? "enabled" : "disabled",
					    (secondary & CPU_APIC_REG_VIRT) ? "enabled" : "disabled",
					    (secondary & CPU_VINTD) ? "enabled" : "disabled");
"enabled" : "disabled"); 3699 test_vmx_controls(ctrl, false); 3700 report_prefix_pop(); 3701 } 3702 3703 if (i == 1) 3704 break; 3705 i++; 3706 3707 primary |= CPU_TPR_SHADOW; 3708 vmcs_write(CPU_EXEC_CTRL0, primary); 3709 strcpy(str, "enabled"); 3710 } 3711 3712 /* 3713 * Second test 3714 */ 3715 u32 apic_virt_ctls = (CPU_VIRT_X2APIC | CPU_VIRT_APIC_ACCESSES); 3716 3717 primary = saved_primary; 3718 secondary = saved_secondary; 3719 if (!((ctrl_cpu_rev[1].clr & apic_virt_ctls) == apic_virt_ctls)) 3720 return; 3721 3722 vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY); 3723 secondary &= ~CPU_VIRT_APIC_ACCESSES; 3724 vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VIRT_X2APIC); 3725 report_prefix_pushf("Virtualize x2APIC mode disabled; virtualize APIC access disabled"); 3726 test_vmx_controls(true, false); 3727 report_prefix_pop(); 3728 3729 vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VIRT_APIC_ACCESSES); 3730 report_prefix_pushf("Virtualize x2APIC mode disabled; virtualize APIC access enabled"); 3731 test_vmx_controls(true, false); 3732 report_prefix_pop(); 3733 3734 vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VIRT_X2APIC); 3735 report_prefix_pushf("Virtualize x2APIC mode enabled; virtualize APIC access enabled"); 3736 test_vmx_controls(false, false); 3737 report_prefix_pop(); 3738 3739 vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VIRT_APIC_ACCESSES); 3740 report_prefix_pushf("Virtualize x2APIC mode enabled; virtualize APIC access disabled"); 3741 test_vmx_controls(true, false); 3742 report_prefix_pop(); 3743 3744 vmcs_write(CPU_EXEC_CTRL0, saved_primary); 3745 vmcs_write(CPU_EXEC_CTRL1, saved_secondary); 3746 } 3747 3748 /* 3749 * If the "virtual-interrupt delivery" VM-execution control is 1, the 3750 * "external-interrupt exiting" VM-execution control must be 1. 

/*
 * If the "virtual-interrupt delivery" VM-execution control is 1, the
 * "external-interrupt exiting" VM-execution control must be 1.
 * [Intel SDM]
 */
static void test_virtual_intr_ctls(void)
{
	u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0);
	u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1);
	u32 saved_pin = vmcs_read(PIN_CONTROLS);
	u32 primary = saved_primary;
	u32 secondary = saved_secondary;
	u32 pin = saved_pin;

	if (!((ctrl_cpu_rev[1].clr & CPU_VINTD) &&
	      (ctrl_pin_rev.clr & PIN_EXTINT)))
		return;

	vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW);
	vmcs_write(CPU_EXEC_CTRL1, secondary & ~CPU_VINTD);
	vmcs_write(PIN_CONTROLS, pin & ~PIN_EXTINT);
	report_prefix_pushf("Virtualize interrupt-delivery disabled; external-interrupt exiting disabled");
	test_vmx_controls(true, false);
	report_prefix_pop();

	vmcs_write(CPU_EXEC_CTRL1, secondary | CPU_VINTD);
	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting disabled");
	test_vmx_controls(false, false);
	report_prefix_pop();

	vmcs_write(PIN_CONTROLS, pin | PIN_EXTINT);
	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting enabled");
	test_vmx_controls(true, false);
	report_prefix_pop();

	vmcs_write(PIN_CONTROLS, pin & ~PIN_EXTINT);
	report_prefix_pushf("Virtualize interrupt-delivery enabled; external-interrupt exiting disabled");
	test_vmx_controls(false, false);
	report_prefix_pop();

	vmcs_write(CPU_EXEC_CTRL0, saved_primary);
	vmcs_write(CPU_EXEC_CTRL1, saved_secondary);
	vmcs_write(PIN_CONTROLS, saved_pin);
}

static void test_pi_desc_addr(u64 addr, bool ctrl)
{
	vmcs_write(POSTED_INTR_DESC_ADDR, addr);
	report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-descriptor-address 0x%lx", addr);
	test_vmx_controls(ctrl, false);
	report_prefix_pop();
}

/*
 * If the "process posted interrupts" VM-execution control is 1, the
 * following must be true:
 *
 * - The "virtual-interrupt delivery" VM-execution control is 1.
 * - The "acknowledge interrupt on exit" VM-exit control is 1.
 * - The posted-interrupt notification vector has a value in the
 *   range 0-255 (bits 15:8 are all 0).
 * - Bits 5:0 of the posted-interrupt descriptor address are all 0.
 * - The posted-interrupt descriptor address does not set any bits
 *   beyond the processor's physical-address width.
 * [Intel SDM]
 */
3812 * [Intel SDM] 3813 */ 3814 static void test_posted_intr(void) 3815 { 3816 u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0); 3817 u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1); 3818 u32 saved_pin = vmcs_read(PIN_CONTROLS); 3819 u32 exit_ctl_saved = vmcs_read(EXI_CONTROLS); 3820 u32 primary = saved_primary; 3821 u32 secondary = saved_secondary; 3822 u32 pin = saved_pin; 3823 u32 exit_ctl = exit_ctl_saved; 3824 u16 vec; 3825 int i; 3826 3827 if (!((ctrl_pin_rev.clr & PIN_POST_INTR) && 3828 (ctrl_cpu_rev[1].clr & CPU_VINTD) && 3829 (ctrl_exit_rev.clr & EXI_INTA))) 3830 return; 3831 3832 vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY | CPU_TPR_SHADOW); 3833 3834 /* 3835 * Test virtual-interrupt-delivery and acknowledge-interrupt-on-exit 3836 */ 3837 pin |= PIN_POST_INTR; 3838 vmcs_write(PIN_CONTROLS, pin); 3839 secondary &= ~CPU_VINTD; 3840 vmcs_write(CPU_EXEC_CTRL1, secondary); 3841 report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery disabled"); 3842 test_vmx_controls(false, false); 3843 report_prefix_pop(); 3844 3845 secondary |= CPU_VINTD; 3846 vmcs_write(CPU_EXEC_CTRL1, secondary); 3847 report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled"); 3848 test_vmx_controls(false, false); 3849 report_prefix_pop(); 3850 3851 exit_ctl &= ~EXI_INTA; 3852 vmcs_write(EXI_CONTROLS, exit_ctl); 3853 report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit disabled"); 3854 test_vmx_controls(false, false); 3855 report_prefix_pop(); 3856 3857 exit_ctl |= EXI_INTA; 3858 vmcs_write(EXI_CONTROLS, exit_ctl); 3859 report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit enabled"); 3860 test_vmx_controls(true, false); 3861 report_prefix_pop(); 3862 3863 secondary &= ~CPU_VINTD; 3864 vmcs_write(CPU_EXEC_CTRL1, secondary); 3865 report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery disabled; acknowledge-interrupt-on-exit enabled"); 3866 test_vmx_controls(false, false); 3867 report_prefix_pop(); 3868 3869 secondary |= CPU_VINTD; 3870 vmcs_write(CPU_EXEC_CTRL1, secondary); 3871 report_prefix_pushf("Process-posted-interrupts enabled; virtual-interrupt-delivery enabled; acknowledge-interrupt-on-exit enabled"); 3872 test_vmx_controls(true, false); 3873 report_prefix_pop(); 3874 3875 /* 3876 * Test posted-interrupt notification vector 3877 */ 3878 for (i = 0; i < 8; i++) { 3879 vec = (1ul << i); 3880 vmcs_write(PINV, vec); 3881 report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec); 3882 test_vmx_controls(true, false); 3883 report_prefix_pop(); 3884 } 3885 for (i = 8; i < 16; i++) { 3886 vec = (1ul << i); 3887 vmcs_write(PINV, vec); 3888 report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec); 3889 test_vmx_controls(false, false); 3890 report_prefix_pop(); 3891 } 3892 3893 vec &= ~(0xff << 8); 3894 vmcs_write(PINV, vec); 3895 report_prefix_pushf("Process-posted-interrupts enabled; posted-interrupt-notification-vector %u", vec); 3896 test_vmx_controls(true, false); 3897 report_prefix_pop(); 3898 3899 /* 3900 * Test posted-interrupt descriptor address 3901 */ 3902 for (i = 0; i < 6; i++) { 3903 test_pi_desc_addr(1ul << i, false); 3904 } 3905 3906 test_pi_desc_addr(0xf0, false); 3907 test_pi_desc_addr(0xff, false); 3908 test_pi_desc_addr(0x0f, false); 3909 test_pi_desc_addr(0x8000, true); 3910
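/* Like 0x8000 above, 0x00 and 0xc000 below keep bits 5:0 clear and lie below the physical-address width, so they are valid. */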
test_pi_desc_addr(0x00, true); 3911 test_pi_desc_addr(0xc000, true); 3912 3913 test_vmcs_addr_values("process-posted interrupts", 3914 POSTED_INTR_DESC_ADDR, PAGE_SIZE, 3915 false, false, 0, 63); 3916 3917 vmcs_write(CPU_EXEC_CTRL0, saved_primary); 3918 vmcs_write(CPU_EXEC_CTRL1, saved_secondary); 3919 vmcs_write(PIN_CONTROLS, saved_pin); 3920 } 3921 3922 static void test_apic_ctls(void) 3923 { 3924 test_apic_virt_addr(); 3925 test_apic_access_addr(); 3926 test_apic_virtual_ctls(); 3927 test_virtual_intr_ctls(); 3928 test_posted_intr(); 3929 } 3930 3931 /* 3932 * If the "enable VPID" VM-execution control is 1, the value 3933 * of the VPID VM-execution control field must not be 0000H. 3934 * [Intel SDM] 3935 */ 3936 static void test_vpid(void) 3937 { 3938 u32 saved_primary = vmcs_read(CPU_EXEC_CTRL0); 3939 u32 saved_secondary = vmcs_read(CPU_EXEC_CTRL1); 3940 u16 vpid = 0x0000; 3941 int i; 3942 3943 if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) && 3944 (ctrl_cpu_rev[1].clr & CPU_VPID))) { 3945 test_skip("Secondary controls and/or VPID not supported"); 3946 return; 3947 } 3948 3949 vmcs_write(CPU_EXEC_CTRL0, saved_primary | CPU_SECONDARY); 3950 vmcs_write(CPU_EXEC_CTRL1, saved_secondary & ~CPU_VPID); 3951 vmcs_write(VPID, vpid); 3952 report_prefix_pushf("VPID disabled; VPID value %x", vpid); 3953 test_vmx_controls(true, false); 3954 report_prefix_pop(); 3955 3956 vmcs_write(CPU_EXEC_CTRL1, saved_secondary | CPU_VPID); 3957 report_prefix_pushf("VPID enabled; VPID value %x", vpid); 3958 test_vmx_controls(false, false); 3959 report_prefix_pop(); 3960 3961 for (i = 0; i < 16; i++) { 3962 vpid = 1 << i; 3963 vmcs_write(VPID, vpid); 3964 report_prefix_pushf("VPID enabled; VPID value %x", vpid); 3965 test_vmx_controls(true, false); 3966 report_prefix_pop(); 3967 } 3968 3969 vmcs_write(CPU_EXEC_CTRL0, saved_primary); 3970 vmcs_write(CPU_EXEC_CTRL1, saved_secondary); 3971 } 3972 3973 static void set_vtpr(unsigned vtpr) 3974 { 3975 *(u32 *)phys_to_virt(vmcs_read(APIC_VIRT_ADDR) + APIC_TASKPRI) = vtpr; 3976 } 3977 3978 static void try_tpr_threshold_and_vtpr(unsigned threshold, unsigned vtpr) 3979 { 3980 bool valid = true; 3981 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 3982 u32 secondary = vmcs_read(CPU_EXEC_CTRL1); 3983 3984 if ((primary & CPU_TPR_SHADOW) && 3985 (!(primary & CPU_SECONDARY) || 3986 !(secondary & (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)))) 3987 valid = (threshold & 0xf) <= ((vtpr >> 4) & 0xf); 3988 3989 set_vtpr(vtpr); 3990 report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0x%x", 3991 threshold, (vtpr >> 4) & 0xf); 3992 test_vmx_controls(valid, false); 3993 report_prefix_pop(); 3994 } 3995 3996 static void test_invalid_event_injection(void) 3997 { 3998 u32 ent_intr_info_save = vmcs_read(ENT_INTR_INFO); 3999 u32 ent_intr_error_save = vmcs_read(ENT_INTR_ERROR); 4000 u32 ent_inst_len_save = vmcs_read(ENT_INST_LEN); 4001 u32 primary_save = vmcs_read(CPU_EXEC_CTRL0); 4002 u32 secondary_save = vmcs_read(CPU_EXEC_CTRL1); 4003 u64 guest_cr0_save = vmcs_read(GUEST_CR0); 4004 u32 ent_intr_info_base = INTR_INFO_VALID_MASK; 4005 u32 ent_intr_info, ent_intr_err, ent_intr_len; 4006 u32 cnt; 4007 4008 /* Setup */ 4009 report_prefix_push("invalid event injection"); 4010 vmcs_write(ENT_INTR_ERROR, 0x00000000); 4011 vmcs_write(ENT_INST_LEN, 0x00000001); 4012 4013 /* The field's interruption type is not set to a reserved value.
*/ 4014 ent_intr_info = ent_intr_info_base | INTR_TYPE_RESERVED | DE_VECTOR; 4015 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4016 "RESERVED interruption type invalid [-]", 4017 ent_intr_info); 4018 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4019 test_vmx_controls(false, false); 4020 report_prefix_pop(); 4021 4022 ent_intr_info = ent_intr_info_base | INTR_TYPE_EXT_INTR | 4023 DE_VECTOR; 4024 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4025 "RESERVED interruption type invalid [+]", 4026 ent_intr_info); 4027 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4028 test_vmx_controls(true, false); 4029 report_prefix_pop(); 4030 4031 /* If the interruption type is other event, the vector is 0. */ 4032 ent_intr_info = ent_intr_info_base | INTR_TYPE_OTHER_EVENT | DB_VECTOR; 4033 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4034 "(OTHER EVENT && vector != 0) invalid [-]", 4035 ent_intr_info); 4036 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4037 test_vmx_controls(false, false); 4038 report_prefix_pop(); 4039 4040 /* If the interruption type is NMI, the vector is 2 (negative case). */ 4041 ent_intr_info = ent_intr_info_base | INTR_TYPE_NMI_INTR | DE_VECTOR; 4042 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4043 "(NMI && vector != 2) invalid [-]", ent_intr_info); 4044 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4045 test_vmx_controls(false, false); 4046 report_prefix_pop(); 4047 4048 /* If the interruption type is NMI, the vector is 2 (positive case). */ 4049 ent_intr_info = ent_intr_info_base | INTR_TYPE_NMI_INTR | NMI_VECTOR; 4050 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4051 "(NMI && vector == 2) valid [+]", ent_intr_info); 4052 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4053 test_vmx_controls(true, false); 4054 report_prefix_pop(); 4055 4056 /* 4057 * If the interruption type 4058 * is HW exception, the vector is at most 31. 4059 */ 4060 ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION | 0x20; 4061 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4062 "(HW exception && vector > 31) invalid [-]", 4063 ent_intr_info); 4064 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4065 test_vmx_controls(false, false); 4066 report_prefix_pop(); 4067 4068 /* 4069 * deliver-error-code is 1 iff either 4070 * (a) the "unrestricted guest" VM-execution control is 0, or 4071 * (b) CR0.PE is set.
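* For instance, injecting #GP with deliver-error-code clear while CR0.PE is set must fail, because a protected-mode #GP normally delivers an error code; the cases below probe both directions of this check.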
4072 */ 4073 ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION | 4074 GP_VECTOR; 4075 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4076 "error code <-> (!URG || prot_mode) [-]", 4077 ent_intr_info); 4078 disable_unrestricted_guest(); 4079 vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG); 4080 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4081 test_vmx_controls(false, false); 4082 report_prefix_pop(); 4083 4084 ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK | 4085 INTR_TYPE_HARD_EXCEPTION | GP_VECTOR; 4086 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4087 "error code <-> (!URG || prot_mode) [+]", 4088 ent_intr_info); 4089 disable_unrestricted_guest(); 4090 vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG); 4091 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4092 test_vmx_controls(true, false); 4093 report_prefix_pop(); 4094 4095 ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK | 4096 INTR_TYPE_HARD_EXCEPTION | GP_VECTOR; 4097 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4098 "error code <-> (!URG || prot_mode) [-]", 4099 ent_intr_info); 4100 enable_unrestricted_guest(); 4101 vmcs_write(GUEST_CR0, guest_cr0_save & ~X86_CR0_PE & ~X86_CR0_PG); 4102 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4103 test_vmx_controls(false, false); 4104 report_prefix_pop(); 4105 4106 ent_intr_info = ent_intr_info_base | INTR_TYPE_HARD_EXCEPTION | 4107 GP_VECTOR; 4108 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4109 "error code <-> (!URG || prot_mode) [-]", 4110 ent_intr_info); 4111 vmcs_write(GUEST_CR0, guest_cr0_save | X86_CR0_PE); 4112 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4113 test_vmx_controls(false, false); 4114 report_prefix_pop(); 4115 4116 /* deliver-error-code is 1 iff the interruption type is HW exception */ 4117 report_prefix_push("error code <-> HW exception"); 4118 for (cnt = 0; cnt < 8; cnt++) { 4119 u32 exception_type_mask = cnt << 8; 4120 u32 deliver_error_code_mask = 4121 exception_type_mask != INTR_TYPE_HARD_EXCEPTION ? 4122 INTR_INFO_DELIVER_CODE_MASK : 0; 4123 4124 ent_intr_info = ent_intr_info_base | deliver_error_code_mask | 4125 exception_type_mask | GP_VECTOR; 4126 report_prefix_pushf("VM-entry intr info=0x%x [-]", 4127 ent_intr_info); 4128 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4129 test_vmx_controls(false, false); 4130 report_prefix_pop(); 4131 } 4132 report_prefix_pop(); 4133 4134 /* 4135 * deliver-error-code is 1 iff the vector 4136 * indicates an exception that would normally deliver an error code 4137 */ 4138 report_prefix_push("error code <-> vector delivers error code"); 4139 for (cnt = 0; cnt < 32; cnt++) { 4140 bool has_error_code = false; 4141 u32 deliver_error_code_mask; 4142 4143 switch (cnt) { 4144 case DF_VECTOR: 4145 case TS_VECTOR: 4146 case NP_VECTOR: 4147 case SS_VECTOR: 4148 case GP_VECTOR: 4149 case PF_VECTOR: 4150 case AC_VECTOR: 4151 has_error_code = true; 4152 } 4153 4154 /* Negative case */ 4155 deliver_error_code_mask = has_error_code ? 4156 0 : 4157 INTR_INFO_DELIVER_CODE_MASK; 4158 ent_intr_info = ent_intr_info_base | deliver_error_code_mask | 4159 INTR_TYPE_HARD_EXCEPTION | cnt; 4160 report_prefix_pushf("VM-entry intr info=0x%x [-]", 4161 ent_intr_info); 4162 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4163 test_vmx_controls(false, false); 4164 report_prefix_pop(); 4165 4166 /* Positive case */ 4167 deliver_error_code_mask = has_error_code ?
4168 INTR_INFO_DELIVER_CODE_MASK : 4169 0; 4170 ent_intr_info = ent_intr_info_base | deliver_error_code_mask | 4171 INTR_TYPE_HARD_EXCEPTION | cnt; 4172 report_prefix_pushf("VM-entry intr info=0x%x [+]", 4173 ent_intr_info); 4174 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4175 test_vmx_controls(true, false); 4176 report_prefix_pop(); 4177 } 4178 report_prefix_pop(); 4179 4180 /* Reserved bits in the field (30:12) are 0. */ 4181 report_prefix_push("reserved bits clear"); 4182 for (cnt = 12; cnt <= 30; cnt++) { 4183 ent_intr_info = ent_intr_info_base | 4184 INTR_INFO_DELIVER_CODE_MASK | 4185 INTR_TYPE_HARD_EXCEPTION | GP_VECTOR | 4186 (1U << cnt); 4187 report_prefix_pushf("VM-entry intr info=0x%x [-]", 4188 ent_intr_info); 4189 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4190 test_vmx_controls(false, false); 4191 report_prefix_pop(); 4192 } 4193 report_prefix_pop(); 4194 4195 /* 4196 * If deliver-error-code is 1, 4197 * bits 31:15 of the VM-entry exception error-code field are 0. 4198 */ 4199 ent_intr_info = ent_intr_info_base | INTR_INFO_DELIVER_CODE_MASK | 4200 INTR_TYPE_HARD_EXCEPTION | GP_VECTOR; 4201 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4202 "VM-entry exception error code[31:15] clear", 4203 ent_intr_info); 4204 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4205 for (cnt = 15; cnt <= 31; cnt++) { 4206 ent_intr_err = 1U << cnt; 4207 report_prefix_pushf("VM-entry intr error=0x%x [-]", 4208 ent_intr_err); 4209 vmcs_write(ENT_INTR_ERROR, ent_intr_err); 4210 test_vmx_controls(false, false); 4211 report_prefix_pop(); 4212 } 4213 vmcs_write(ENT_INTR_ERROR, 0x00000000); 4214 report_prefix_pop(); 4215 4216 /* 4217 * If the interruption type is software interrupt, software exception, 4218 * or privileged software exception, the VM-entry instruction-length 4219 * field is in the range 0–15. 4220 */ 4221 4222 for (cnt = 0; cnt < 3; cnt++) { 4223 switch (cnt) { 4224 case 0: 4225 ent_intr_info = ent_intr_info_base | 4226 INTR_TYPE_SOFT_INTR; 4227 break; 4228 case 1: 4229 ent_intr_info = ent_intr_info_base | 4230 INTR_TYPE_SOFT_EXCEPTION; 4231 break; 4232 case 2: 4233 ent_intr_info = ent_intr_info_base | 4234 INTR_TYPE_PRIV_SW_EXCEPTION; 4235 break; 4236 } 4237 report_prefix_pushf("%s, VM-entry intr info=0x%x", 4238 "VM-entry instruction-length check", 4239 ent_intr_info); 4240 vmcs_write(ENT_INTR_INFO, ent_intr_info); 4241 4242 /* Instruction length set to -1 (0xFFFFFFFF) should fail */ 4243 ent_intr_len = -1; 4244 report_prefix_pushf("VM-entry intr length = 0x%x [-]", 4245 ent_intr_len); 4246 vmcs_write(ENT_INST_LEN, ent_intr_len); 4247 test_vmx_controls(false, false); 4248 report_prefix_pop(); 4249 4250 /* Instruction length set to 16 should fail */ 4251 ent_intr_len = 0x00000010; 4252 report_prefix_pushf("VM-entry intr length = 0x%x [-]", 4253 ent_intr_len); 4254 vmcs_write(ENT_INST_LEN, ent_intr_len); 4255 test_vmx_controls(false, false); 4256 report_prefix_pop(); 4257 4258 report_prefix_pop(); 4259 } 4260 4261 /* Cleanup */ 4262 vmcs_write(ENT_INTR_INFO, ent_intr_info_save); 4263 vmcs_write(ENT_INTR_ERROR, ent_intr_error_save); 4264 vmcs_write(ENT_INST_LEN, ent_inst_len_save); 4265 vmcs_write(CPU_EXEC_CTRL0, primary_save); 4266 vmcs_write(CPU_EXEC_CTRL1, secondary_save); 4267 vmcs_write(GUEST_CR0, guest_cr0_save); 4268 report_prefix_pop(); 4269 } 4270 4271 /* 4272 * Test interesting vTPR values for a given TPR threshold.
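* The 4-bit threshold is compared against VTPR[7:4] (the vTPR "class"), so probing threshold - 1, threshold and threshold + 1 as the class brackets the boundary of the check.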
4273 */ 4274 static void test_vtpr_values(unsigned threshold) 4275 { 4276 try_tpr_threshold_and_vtpr(threshold, (threshold - 1) << 4); 4277 try_tpr_threshold_and_vtpr(threshold, threshold << 4); 4278 try_tpr_threshold_and_vtpr(threshold, (threshold + 1) << 4); 4279 } 4280 4281 static void try_tpr_threshold(unsigned threshold) 4282 { 4283 bool valid = true; 4284 4285 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 4286 u32 secondary = vmcs_read(CPU_EXEC_CTRL1); 4287 4288 if ((primary & CPU_TPR_SHADOW) && !((primary & CPU_SECONDARY) && 4289 (secondary & CPU_VINTD))) 4290 valid = !(threshold >> 4); 4291 4292 set_vtpr(-1); 4293 vmcs_write(TPR_THRESHOLD, threshold); 4294 report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0xf", threshold); 4295 test_vmx_controls(valid, false); 4296 report_prefix_pop(); 4297 4298 if (valid) 4299 test_vtpr_values(threshold); 4300 } 4301 4302 /* 4303 * Test interesting TPR threshold values. 4304 */ 4305 static void test_tpr_threshold_values(void) 4306 { 4307 unsigned i; 4308 4309 for (i = 0; i < 0x10; i++) 4310 try_tpr_threshold(i); 4311 for (i = 4; i < 32; i++) 4312 try_tpr_threshold(1u << i); 4313 try_tpr_threshold(-1u); 4314 try_tpr_threshold(0x7fffffff); 4315 } 4316 4317 /* 4318 * This test covers the following two VM-entry checks: 4319 * 4320 * i) If the "use TPR shadow" VM-execution control is 1 and the 4321 * "virtual-interrupt delivery" VM-execution control is 0, bits 4322 * 31:4 of the TPR threshold VM-execution control field must 4323 * be 0. 4324 * [Intel SDM] 4325 * 4326 * ii) If the "use TPR shadow" VM-execution control is 1, the 4327 * "virtual-interrupt delivery" VM-execution control is 0 4328 * and the "virtualize APIC accesses" VM-execution control 4329 * is 0, the value of bits 3:0 of the TPR threshold VM-execution 4330 * control field must not be greater than the value of bits 4331 * 7:4 of VTPR.
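* For example, with TPR threshold 0x5, a VTPR of 0x40 (class 4) must fail check ii), while a VTPR of 0x50 (class 5) satisfies it.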
4332 * [Intel SDM] 4333 */ 4334 static void test_tpr_threshold(void) 4335 { 4336 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 4337 void *virtual_apic_page; 4338 4339 if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) 4340 return; 4341 4342 virtual_apic_page = alloc_page(); 4343 memset(virtual_apic_page, 0xff, PAGE_SIZE); 4344 vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page)); 4345 4346 vmcs_write(CPU_EXEC_CTRL0, primary & ~(CPU_TPR_SHADOW | CPU_SECONDARY)); 4347 report_prefix_pushf("Use TPR shadow disabled, secondary controls disabled"); 4348 test_tpr_threshold_values(); 4349 report_prefix_pop(); 4350 vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_TPR_SHADOW); 4351 report_prefix_pushf("Use TPR shadow enabled, secondary controls disabled"); 4352 test_tpr_threshold_values(); 4353 report_prefix_pop(); 4354 4355 if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) && 4356 (ctrl_cpu_rev[1].clr & (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)))) { 4357 vmcs_write(CPU_EXEC_CTRL0, primary); 4358 return; 4359 } 4360 4361 u32 secondary = vmcs_read(CPU_EXEC_CTRL1); 4362 4363 if (ctrl_cpu_rev[1].clr & CPU_VINTD) { 4364 vmcs_write(CPU_EXEC_CTRL1, CPU_VINTD); 4365 report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled"); 4366 test_tpr_threshold_values(); 4367 report_prefix_pop(); 4368 4369 vmcs_write(CPU_EXEC_CTRL0, 4370 vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY); 4371 report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled"); 4372 test_tpr_threshold_values(); 4373 report_prefix_pop(); 4374 } 4375 4376 if (ctrl_cpu_rev[1].clr & CPU_VIRT_APIC_ACCESSES) { 4377 vmcs_write(CPU_EXEC_CTRL0, 4378 vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY); 4379 vmcs_write(CPU_EXEC_CTRL1, CPU_VIRT_APIC_ACCESSES); 4380 report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery disabled; virtualize APIC accesses enabled"); 4381 test_tpr_threshold_values(); 4382 report_prefix_pop(); 4383 4384 vmcs_write(CPU_EXEC_CTRL0, 4385 vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY); 4386 report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery disabled; virtualize APIC accesses enabled"); 4387 test_tpr_threshold_values(); 4388 report_prefix_pop(); 4389 } 4390 4391 if ((ctrl_cpu_rev[1].clr & 4392 (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) == 4393 (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) { 4394 vmcs_write(CPU_EXEC_CTRL0, 4395 vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY); 4396 vmcs_write(CPU_EXEC_CTRL1, 4397 CPU_VINTD | CPU_VIRT_APIC_ACCESSES); 4398 report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled"); 4399 test_tpr_threshold_values(); 4400 report_prefix_pop(); 4401 4402 vmcs_write(CPU_EXEC_CTRL0, 4403 vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY); 4404 report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled"); 4405 test_tpr_threshold_values(); 4406 report_prefix_pop(); 4407 } 4408 4409 vmcs_write(CPU_EXEC_CTRL1, secondary); 4410 vmcs_write(CPU_EXEC_CTRL0, primary); 4411 } 4412 4413 /* 4414 * This test verifies the following two VM-entry checks: 4415 * 4416 * If the "NMI exiting" VM-execution control is 0, the "Virtual NMIs" 4417 * VM-execution control must be 0.
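* That is, setting PIN_VIRT_NMI while PIN_NMI is clear must fail VM-entry, as the first block of tests below verifies.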
4418 * [Intel SDM] 4419 * 4420 * If the "virtual NMIs" VM-execution control is 0, the "NMI-window 4421 * exiting" VM-execution control must be 0. 4422 * [Intel SDM] 4423 */ 4424 static void test_nmi_ctrls(void) 4425 { 4426 u32 pin_ctrls, cpu_ctrls0, test_pin_ctrls, test_cpu_ctrls0; 4427 4428 if ((ctrl_pin_rev.clr & (PIN_NMI | PIN_VIRT_NMI)) != 4429 (PIN_NMI | PIN_VIRT_NMI)) { 4430 test_skip("NMI exiting and Virtual NMIs are not supported!"); 4431 return; 4432 } 4433 4434 /* Save the controls so that we can restore them after our tests */ 4435 pin_ctrls = vmcs_read(PIN_CONTROLS); 4436 cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0); 4437 4438 test_pin_ctrls = pin_ctrls & ~(PIN_NMI | PIN_VIRT_NMI); 4439 test_cpu_ctrls0 = cpu_ctrls0 & ~CPU_NMI_WINDOW; 4440 4441 vmcs_write(PIN_CONTROLS, test_pin_ctrls); 4442 report_prefix_pushf("NMI-exiting disabled, virtual-NMIs disabled"); 4443 test_vmx_controls(true, false); 4444 report_prefix_pop(); 4445 4446 vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_VIRT_NMI); 4447 report_prefix_pushf("NMI-exiting disabled, virtual-NMIs enabled"); 4448 test_vmx_controls(false, false); 4449 report_prefix_pop(); 4450 4451 vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI)); 4452 report_prefix_pushf("NMI-exiting enabled, virtual-NMIs enabled"); 4453 test_vmx_controls(true, false); 4454 report_prefix_pop(); 4455 4456 vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_NMI); 4457 report_prefix_pushf("NMI-exiting enabled, virtual-NMIs disabled"); 4458 test_vmx_controls(true, false); 4459 report_prefix_pop(); 4460 4461 if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) { 4462 report_info("NMI-window exiting is not supported, skipping..."); 4463 goto done; 4464 } 4465 4466 vmcs_write(PIN_CONTROLS, test_pin_ctrls); 4467 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW); 4468 report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting enabled"); 4469 test_vmx_controls(false, false); 4470 report_prefix_pop(); 4471 4472 vmcs_write(PIN_CONTROLS, test_pin_ctrls); 4473 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0); 4474 report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting disabled"); 4475 test_vmx_controls(true, false); 4476 report_prefix_pop(); 4477 4478 vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI)); 4479 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW); 4480 report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting enabled"); 4481 test_vmx_controls(true, false); 4482 report_prefix_pop(); 4483 4484 vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI)); 4485 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0); 4486 report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting disabled"); 4487 test_vmx_controls(true, false); 4488 report_prefix_pop(); 4489 4490 /* Restore the controls to their original values */ 4491 vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0); 4492 done: 4493 vmcs_write(PIN_CONTROLS, pin_ctrls); 4494 } 4495 4496 static void test_eptp_ad_bit(u64 eptp, bool ctrl) 4497 { 4498 vmcs_write(EPTP, eptp); 4499 report_prefix_pushf("Enable-EPT enabled; EPT accessed and dirty flag %s", 4500 (eptp & EPTP_AD_FLAG) ? "1" : "0"); 4501 test_vmx_controls(ctrl, false); 4502 report_prefix_pop(); 4503 4504 } 4505 4506 /* 4507 * 1. If the "enable EPT" VM-execution control is 1, the EPTP VM-execution 4508 * control field must satisfy the following checks: 4509 * 4510 * - The EPT memory type (bits 2:0) must be a value supported by the 4511 * processor as indicated in the IA32_VMX_EPT_VPID_CAP MSR.
4512 * - Bits 5:3 (1 less than the EPT page-walk length) must be 3, 4513 * indicating an EPT page-walk length of 4. 4514 * - Bit 6 (enable bit for accessed and dirty flags for EPT) must be 4515 * 0 if bit 21 of the IA32_VMX_EPT_VPID_CAP MSR is read as 0, 4516 * indicating that the processor does not support accessed and 4517 * dirty flags for EPT. 4518 * - Reserved bits 11:7 and 63:N (where N is the processor's 4519 * physical-address width) must all be 0. 4520 * 4521 * 2. If the "unrestricted guest" VM-execution control is 1, the 4522 * "enable EPT" VM-execution control must also be 1. 4523 */ 4524 static void test_ept_eptp(void) 4525 { 4526 u32 primary_saved = vmcs_read(CPU_EXEC_CTRL0); 4527 u32 secondary_saved = vmcs_read(CPU_EXEC_CTRL1); 4528 u64 eptp_saved = vmcs_read(EPTP); 4529 u32 primary = primary_saved; 4530 u32 secondary = secondary_saved; 4531 u64 msr, eptp = eptp_saved; 4532 bool un_cache = false; 4533 bool wr_bk = false; 4534 bool ctrl; 4535 u32 i, maxphysaddr; 4536 u64 j, resv_bits_mask = 0; 4537 4538 if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) && 4539 (ctrl_cpu_rev[1].clr & CPU_EPT))) { 4540 test_skip("\"CPU secondary\" and/or \"enable EPT\" execution controls are not supported!"); 4541 return; 4542 } 4543 4544 /* 4545 * Memory type (bits 2:0) 4546 */ 4547 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 4548 if (msr & EPT_CAP_UC) 4549 un_cache = true; 4550 if (msr & EPT_CAP_WB) 4551 wr_bk = true; 4552 4553 primary |= CPU_SECONDARY; 4554 vmcs_write(CPU_EXEC_CTRL0, primary); 4555 secondary |= CPU_EPT; 4556 vmcs_write(CPU_EXEC_CTRL1, secondary); 4557 eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) | 4558 (3ul << EPTP_PG_WALK_LEN_SHIFT); 4559 vmcs_write(EPTP, eptp); 4560 4561 for (i = 0; i < 8; i++) { 4562 if (i == 0) { 4563 if (un_cache) { 4564 report_info("EPT paging structure memory-type is Un-cacheable"); 4565 ctrl = true; 4566 } else { 4567 ctrl = false; 4568 } 4569 } else if (i == 6) { 4570 if (wr_bk) { 4571 report_info("EPT paging structure memory-type is Write-back"); 4572 ctrl = true; 4573 } else { 4574 ctrl = false; 4575 } 4576 } else { 4577 ctrl = false; 4578 } 4579 4580 eptp = (eptp & ~EPT_MEM_TYPE_MASK) | i; 4581 vmcs_write(EPTP, eptp); 4582 report_prefix_pushf("Enable-EPT enabled; EPT memory type %lu", 4583 eptp & EPT_MEM_TYPE_MASK); 4584 test_vmx_controls(ctrl, false); 4585 report_prefix_pop(); 4586 } 4587 4588 eptp = (eptp & ~EPT_MEM_TYPE_MASK) | 6ul; 4589 4590 /* 4591 * Page walk length (bits 5:3) 4592 */ 4593 for (i = 0; i < 8; i++) { 4594 eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) | 4595 (i << EPTP_PG_WALK_LEN_SHIFT); 4596 if (i == 3) 4597 ctrl = true; 4598 else 4599 ctrl = false; 4600 4601 vmcs_write(EPTP, eptp); 4602 report_prefix_pushf("Enable-EPT enabled; EPT page walk length %lu", 4603 eptp & EPTP_PG_WALK_LEN_MASK); 4604 test_vmx_controls(ctrl, false); 4605 report_prefix_pop(); 4606 } 4607 4608 eptp = (eptp & ~EPTP_PG_WALK_LEN_MASK) | 4609 3ul << EPTP_PG_WALK_LEN_SHIFT; 4610 4611 /* 4612 * Accessed and dirty flag (bit 6) 4613 */ 4614 if (msr & EPT_CAP_AD_FLAG) { 4615 report_info("Processor supports accessed and dirty flag"); 4616 eptp &= ~EPTP_AD_FLAG; 4617 test_eptp_ad_bit(eptp, true); 4618 4619 eptp |= EPTP_AD_FLAG; 4620 test_eptp_ad_bit(eptp, true); 4621 } else { 4622 report_info("Processor does not support accessed and dirty flag"); 4623 eptp &= ~EPTP_AD_FLAG; 4624 test_eptp_ad_bit(eptp, true); 4625 4626 eptp |= EPTP_AD_FLAG; 4627 test_eptp_ad_bit(eptp, false); 4628 } 4629 4630 /* 4631 * Reserved bits [11:7] and [63:N] 4632 */ 4633 for (i = 0; i < 32; i++) {
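/* Only i == 0 leaves EPTP[11:7] all-zero; every other bit pattern must fail. */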
4634 if (i == 0) 4635 ctrl = true; 4636 else 4637 ctrl = false; 4638 4639 eptp = (eptp & 4640 ~(EPTP_RESERV_BITS_MASK << EPTP_RESERV_BITS_SHIFT)) | 4641 (i << EPTP_RESERV_BITS_SHIFT); 4642 vmcs_write(EPTP, eptp); 4643 report_prefix_pushf("Enable-EPT enabled; reserved bits [11:7] %lu", 4644 (eptp >> EPTP_RESERV_BITS_SHIFT) & 4645 EPTP_RESERV_BITS_MASK); 4646 test_vmx_controls(ctrl, false); 4647 report_prefix_pop(); 4648 } 4649 4650 eptp = (eptp & ~(EPTP_RESERV_BITS_MASK << EPTP_RESERV_BITS_SHIFT)); 4651 4652 maxphysaddr = cpuid_maxphyaddr(); 4653 for (i = 0; i < (63 - maxphysaddr + 1); i++) { 4654 resv_bits_mask |= 1ul << i; 4655 } 4656 4657 for (j = 0; j < (63 - maxphysaddr + 1); j++) { 4658 if (j == 0) 4659 ctrl = true; 4660 else 4661 ctrl = false; 4662 4663 eptp = (eptp & ~(resv_bits_mask << maxphysaddr)) | 4664 (j << maxphysaddr); 4665 vmcs_write(EPTP, eptp); 4666 report_prefix_pushf("Enable-EPT enabled; reserved bits [63:N] %lu", 4667 (eptp >> maxphysaddr) & resv_bits_mask); 4668 test_vmx_controls(ctrl, false); 4669 report_prefix_pop(); 4670 } 4671 4672 secondary &= ~(CPU_EPT | CPU_URG); 4673 vmcs_write(CPU_EXEC_CTRL1, secondary); 4674 report_prefix_pushf("Enable-EPT disabled, unrestricted-guest disabled"); 4675 test_vmx_controls(true, false); 4676 report_prefix_pop(); 4677 4678 secondary |= CPU_URG; 4679 vmcs_write(CPU_EXEC_CTRL1, secondary); 4680 report_prefix_pushf("Enable-EPT disabled, unrestricted-guest enabled"); 4681 test_vmx_controls(false, false); 4682 report_prefix_pop(); 4683 4684 secondary |= CPU_EPT; 4685 vmcs_write(CPU_EXEC_CTRL1, secondary); 4686 report_prefix_pushf("Enable-EPT enabled, unrestricted-guest enabled"); 4687 test_vmx_controls(true, false); 4688 report_prefix_pop(); 4689 4690 secondary &= ~CPU_URG; 4691 vmcs_write(CPU_EXEC_CTRL1, secondary); 4692 report_prefix_pushf("Enable-EPT enabled, unrestricted-guest disabled"); 4693 test_vmx_controls(true, false); 4694 report_prefix_pop(); 4695 4696 vmcs_write(CPU_EXEC_CTRL0, primary_saved); 4697 vmcs_write(CPU_EXEC_CTRL1, secondary_saved); 4698 vmcs_write(EPTP, eptp_saved); 4699 } 4700 4701 /* 4702 * If the 'enable PML' VM-execution control is 1, the 'enable EPT' 4703 * VM-execution control must also be 1. In addition, the PML address 4704 * must satisfy the following checks: 4705 * 4706 * * Bits 11:0 of the address must be 0. 4707 * * The address should not set any bits beyond the processor's 4708 * physical-address width. 
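* In other words, the PML address must be 4-KByte aligned and within the supported physical-address range; both checks are driven by test_vmcs_addr_reference() at the end of the test below.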
4709 * 4710 * [Intel SDM] 4711 */ 4712 static void test_pml(void) 4713 { 4714 u32 primary_saved = vmcs_read(CPU_EXEC_CTRL0); 4715 u32 secondary_saved = vmcs_read(CPU_EXEC_CTRL1); 4716 u32 primary = primary_saved; 4717 u32 secondary = secondary_saved; 4718 4719 if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) && 4720 (ctrl_cpu_rev[1].clr & CPU_EPT) && (ctrl_cpu_rev[1].clr & CPU_PML))) { 4721 test_skip("\"Secondary execution\" control or \"enable EPT\" control or \"enable PML\" control is not supported!"); 4722 return; 4723 } 4724 4725 primary |= CPU_SECONDARY; 4726 vmcs_write(CPU_EXEC_CTRL0, primary); 4727 secondary &= ~(CPU_PML | CPU_EPT); 4728 vmcs_write(CPU_EXEC_CTRL1, secondary); 4729 report_prefix_pushf("enable-PML disabled, enable-EPT disabled"); 4730 test_vmx_controls(true, false); 4731 report_prefix_pop(); 4732 4733 secondary |= CPU_PML; 4734 vmcs_write(CPU_EXEC_CTRL1, secondary); 4735 report_prefix_pushf("enable-PML enabled, enable-EPT disabled"); 4736 test_vmx_controls(false, false); 4737 report_prefix_pop(); 4738 4739 secondary |= CPU_EPT; 4740 vmcs_write(CPU_EXEC_CTRL1, secondary); 4741 report_prefix_pushf("enable-PML enabled, enable-EPT enabled"); 4742 test_vmx_controls(true, false); 4743 report_prefix_pop(); 4744 4745 secondary &= ~CPU_PML; 4746 vmcs_write(CPU_EXEC_CTRL1, secondary); 4747 report_prefix_pushf("enable-PML disabled, enable-EPT enabled"); 4748 test_vmx_controls(true, false); 4749 report_prefix_pop(); 4750 4751 test_vmcs_addr_reference(CPU_PML, PMLADDR, "PML address", "PML", 4752 PAGE_SIZE, false, false); 4753 4754 vmcs_write(CPU_EXEC_CTRL0, primary_saved); 4755 vmcs_write(CPU_EXEC_CTRL1, secondary_saved); 4756 } 4757 4758 /* 4759 * If the "activate VMX-preemption timer" VM-execution control is 0, 4760 * the "save VMX-preemption timer value" VM-exit control must also be 0.
4761 * 4762 * [Intel SDM] 4763 */ 4764 static void test_vmx_preemption_timer(void) 4765 { 4766 u32 saved_pin = vmcs_read(PIN_CONTROLS); 4767 u32 saved_exit = vmcs_read(EXI_CONTROLS); 4768 u32 pin = saved_pin; 4769 u32 exit = saved_exit; 4770 4771 if (!((ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) || 4772 (ctrl_pin_rev.clr & PIN_PREEMPT))) { 4773 printf("\"Save-VMX-preemption-timer\" control and/or \"Enable-VMX-preemption-timer\" control is not supported\n"); 4774 return; 4775 } 4776 4777 pin |= PIN_PREEMPT; 4778 vmcs_write(PIN_CONTROLS, pin); 4779 exit &= ~EXI_SAVE_PREEMPT; 4780 vmcs_write(EXI_CONTROLS, exit); 4781 report_prefix_pushf("enable-VMX-preemption-timer enabled, save-VMX-preemption-timer disabled"); 4782 test_vmx_controls(true, false); 4783 report_prefix_pop(); 4784 4785 exit |= EXI_SAVE_PREEMPT; 4786 vmcs_write(EXI_CONTROLS, exit); 4787 report_prefix_pushf("enable-VMX-preemption-timer enabled, save-VMX-preemption-timer enabled"); 4788 test_vmx_controls(true, false); 4789 report_prefix_pop(); 4790 4791 pin &= ~PIN_PREEMPT; 4792 vmcs_write(PIN_CONTROLS, pin); 4793 report_prefix_pushf("enable-VMX-preemption-timer disabled, save-VMX-preemption-timer enabled"); 4794 test_vmx_controls(false, false); 4795 report_prefix_pop(); 4796 4797 exit &= ~EXI_SAVE_PREEMPT; 4798 vmcs_write(EXI_CONTROLS, exit); 4799 report_prefix_pushf("enable-VMX-preemption-timer disabled, save-VMX-preemption-timer disabled"); 4800 test_vmx_controls(true, false); 4801 report_prefix_pop(); 4802 4803 vmcs_write(PIN_CONTROLS, saved_pin); 4804 vmcs_write(EXI_CONTROLS, saved_exit); 4805 } 4806 4807 /* 4808 * Tests for VM-execution control fields 4809 */ 4810 static void test_vm_execution_ctls(void) 4811 { 4812 test_pin_based_ctls(); 4813 test_primary_processor_based_ctls(); 4814 test_secondary_processor_based_ctls(); 4815 test_cr3_targets(); 4816 test_io_bitmaps(); 4817 test_msr_bitmap(); 4818 test_apic_ctls(); 4819 test_tpr_threshold(); 4820 test_nmi_ctrls(); 4821 test_pml(); 4822 test_vpid(); 4823 test_ept_eptp(); 4824 test_vmx_preemption_timer(); 4825 } 4826 4827 /* 4828 * Tests for VM-entry control fields 4829 */ 4830 static void test_vm_entry_ctls(void) 4831 { 4832 test_invalid_event_injection(); 4833 } 4834 4835 /* 4836 * Check that the virtual CPU checks all of the VMX controls as 4837 * documented in the Intel SDM. 4838 */ 4839 static void vmx_controls_test(void) 4840 { 4841 /* 4842 * Bit 1 of the guest's RFLAGS must be 1, or VM-entry will 4843 * fail due to invalid guest state, should we make it that 4844 * far. 4845 */ 4846 vmcs_write(GUEST_RFLAGS, 0); 4847 4848 test_vm_execution_ctls(); 4849 test_vm_entry_ctls(); 4850 } 4851 4852 static bool valid_vmcs_for_vmentry(void) 4853 { 4854 struct vmcs *current_vmcs = NULL; 4855 4856 if (vmcs_save(&current_vmcs)) 4857 return false; 4858 4859 return current_vmcs && !current_vmcs->hdr.shadow_vmcs; 4860 } 4861 4862 static void try_vmentry_in_movss_shadow(void) 4863 { 4864 u32 vm_inst_err; 4865 u32 flags; 4866 bool early_failure = false; 4867 u32 expected_flags = X86_EFLAGS_FIXED; 4868 bool valid_vmcs = valid_vmcs_for_vmentry(); 4869 4870 expected_flags |= valid_vmcs ? X86_EFLAGS_ZF : X86_EFLAGS_CF; 4871 4872 /* 4873 * Indirectly set VM_INST_ERR to 12 ("VMREAD/VMWRITE from/to 4874 * unsupported VMCS component").
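* Writing to an unsupported field encoding (~0u) fails and records error number 12 in the current VMCS, so a stale value can be distinguished from the MOV-SS-blocking error that the VMLAUNCH below is expected to set.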
4875 */ 4876 vmcs_write(~0u, 0); 4877 4878 __asm__ __volatile__ ("mov %[host_rsp], %%edx;" 4879 "vmwrite %%rsp, %%rdx;" 4880 "mov 0f, %%rax;" 4881 "mov %[host_rip], %%edx;" 4882 "vmwrite %%rax, %%rdx;" 4883 "mov $-1, %%ah;" 4884 "sahf;" 4885 "mov %%ss, %%ax;" 4886 "mov %%ax, %%ss;" 4887 "vmlaunch;" 4888 "mov $1, %[early_failure];" 4889 "0: lahf;" 4890 "movzbl %%ah, %[flags]" 4891 : [early_failure] "+r" (early_failure), 4892 [flags] "=&a" (flags) 4893 : [host_rsp] "i" (HOST_RSP), 4894 [host_rip] "i" (HOST_RIP) 4895 : "rdx", "cc", "memory"); 4896 vm_inst_err = vmcs_read(VMX_INST_ERROR); 4897 4898 report("Early VM-entry failure", early_failure); 4899 report("RFLAGS[8:0] is %x (actual %x)", flags == expected_flags, 4900 expected_flags, flags); 4901 if (valid_vmcs) 4902 report("VM-instruction error is %d (actual %d)", 4903 vm_inst_err == VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, 4904 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, vm_inst_err); 4905 } 4906 4907 static void vmentry_movss_shadow_test(void) 4908 { 4909 struct vmcs *orig_vmcs; 4910 4911 TEST_ASSERT(!vmcs_save(&orig_vmcs)); 4912 4913 /* 4914 * Set the launched flag on the current VMCS to verify the correct 4915 * error priority, below. 4916 */ 4917 test_set_guest(v2_null_test_guest); 4918 enter_guest(); 4919 4920 /* 4921 * With bit 1 of the guest's RFLAGS clear, VM-entry should 4922 * fail due to invalid guest state (if we make it that far). 4923 */ 4924 vmcs_write(GUEST_RFLAGS, 0); 4925 4926 /* 4927 * "VM entry with events blocked by MOV SS" takes precedence over 4928 * "VMLAUNCH with non-clear VMCS." 4929 */ 4930 report_prefix_push("valid current-VMCS"); 4931 try_vmentry_in_movss_shadow(); 4932 report_prefix_pop(); 4933 4934 /* 4935 * VMfailInvalid takes precedence over "VM entry with events 4936 * blocked by MOV SS." 4937 */ 4938 TEST_ASSERT(!vmcs_clear(orig_vmcs)); 4939 report_prefix_push("no current-VMCS"); 4940 try_vmentry_in_movss_shadow(); 4941 report_prefix_pop(); 4942 4943 TEST_ASSERT(!make_vmcs_current(orig_vmcs)); 4944 vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED); 4945 } 4946 4947 #define X86_FEATURE_PCID (1 << 17) 4948 #define X86_FEATURE_MCE (1 << 7) 4949 4950 static int write_cr4_checking(unsigned long val) 4951 { 4952 asm volatile(ASM_TRY("1f") 4953 "mov %0, %%cr4\n\t" 4954 "1:": : "r" (val)); 4955 return exception_vector(); 4956 } 4957 4958 static void vmx_cr_load_test(void) 4959 { 4960 struct cpuid _cpuid = cpuid(1); 4961 unsigned long cr4 = read_cr4(), cr3 = read_cr3(); 4962 4963 if (!(_cpuid.c & X86_FEATURE_PCID)) { 4964 report_skip("PCID not detected"); 4965 return; 4966 } 4967 if (!(_cpuid.d & X86_FEATURE_MCE)) { 4968 report_skip("MCE not detected"); 4969 return; 4970 } 4971 4972 TEST_ASSERT(!(cr4 & (X86_CR4_PCIDE | X86_CR4_MCE))); 4973 TEST_ASSERT(!(cr3 & X86_CR3_PCID_MASK)); 4974 4975 /* Enable PCID for L1. */ 4976 cr4 |= X86_CR4_PCIDE; 4977 cr3 |= 0x1; 4978 TEST_ASSERT(!write_cr4_checking(cr4)); 4979 write_cr3(cr3); 4980 4981 test_set_guest(v2_null_test_guest); 4982 vmcs_write(HOST_CR4, cr4); 4983 vmcs_write(HOST_CR3, cr3); 4984 enter_guest(); 4985 4986 /* 4987 * No exception is expected. 4988 * 4989 * NB. KVM loads the last guest write to CR4 into CR4 read 4990 * shadow. In order to trigger an exit to KVM, we can set a 4991 * bit that was zero in the above CR4 write and is owned by 4992 * KVM. We choose to set CR4.MCE, which shall have no side 4993 * effect because normally no guest MCE (e.g., as the result 4994 * of bad memory) would happen during this test. 
4995 */ 4996 TEST_ASSERT(!write_cr4_checking(cr4 | X86_CR4_MCE)); 4997 4998 /* Cleanup L1 state: disable PCID. */ 4999 write_cr3(cr3 & ~X86_CR3_PCID_MASK); 5000 TEST_ASSERT(!write_cr4_checking(cr4 & ~X86_CR4_PCIDE)); 5001 } 5002 5003 static void vmx_nm_test_guest(void) 5004 { 5005 write_cr0(read_cr0() | X86_CR0_TS); 5006 asm volatile("fnop"); 5007 } 5008 5009 static void check_nm_exit(const char *test) 5010 { 5011 u32 reason = vmcs_read(EXI_REASON); 5012 u32 intr_info = vmcs_read(EXI_INTR_INFO); 5013 const u32 expected = INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | 5014 NM_VECTOR; 5015 5016 report("%s", reason == VMX_EXC_NMI && intr_info == expected, test); 5017 } 5018 5019 /* 5020 * This test checks that: 5021 * 5022 * (a) If L2 launches with CR0.TS clear, but later sets CR0.TS, then 5023 * a subsequent #NM VM-exit is reflected to L1. 5024 * 5025 * (b) If L2 launches with CR0.TS clear and CR0.EM set, then a 5026 * subsequent #NM VM-exit is reflected to L1. 5027 */ 5028 static void vmx_nm_test(void) 5029 { 5030 unsigned long cr0 = read_cr0(); 5031 5032 test_set_guest(vmx_nm_test_guest); 5033 5034 /* 5035 * L1 wants to intercept #NM exceptions encountered in L2. 5036 */ 5037 vmcs_write(EXC_BITMAP, 1 << NM_VECTOR); 5038 5039 /* 5040 * Launch L2 with CR0.TS clear, but don't claim host ownership of 5041 * any CR0 bits. L2 will set CR0.TS and then try to execute fnop, 5042 * which will raise #NM. L0 should reflect the #NM VM-exit to L1. 5043 */ 5044 vmcs_write(CR0_MASK, 0); 5045 vmcs_write(GUEST_CR0, cr0 & ~X86_CR0_TS); 5046 enter_guest(); 5047 check_nm_exit("fnop with CR0.TS set in L2 triggers #NM VM-exit to L1"); 5048 5049 /* 5050 * Re-enter L2 at the fnop instruction, with CR0.TS clear but 5051 * CR0.EM set. The fnop will still raise #NM, and L0 should 5052 * reflect the #NM VM-exit to L1. 5053 */ 5054 vmcs_write(GUEST_CR0, (cr0 & ~X86_CR0_TS) | X86_CR0_EM); 5055 enter_guest(); 5056 check_nm_exit("fnop with CR0.EM set in L2 triggers #NM VM-exit to L1"); 5057 5058 /* 5059 * Re-enter L2 at the fnop instruction, with both CR0.TS and 5060 * CR0.EM clear. There will be no #NM, and the L2 guest should 5061 * exit normally. 
5062 */ 5063 vmcs_write(GUEST_CR0, cr0 & ~(X86_CR0_TS | X86_CR0_EM)); 5064 enter_guest(); 5065 } 5066 5067 bool vmx_pending_event_ipi_fired; 5068 static void vmx_pending_event_ipi_isr(isr_regs_t *regs) 5069 { 5070 vmx_pending_event_ipi_fired = true; 5071 eoi(); 5072 } 5073 5074 bool vmx_pending_event_guest_run; 5075 static void vmx_pending_event_guest(void) 5076 { 5077 vmcall(); 5078 vmx_pending_event_guest_run = true; 5079 } 5080 5081 static void vmx_pending_event_test_core(bool guest_hlt) 5082 { 5083 int ipi_vector = 0xf1; 5084 5085 vmx_pending_event_ipi_fired = false; 5086 handle_irq(ipi_vector, vmx_pending_event_ipi_isr); 5087 5088 vmx_pending_event_guest_run = false; 5089 test_set_guest(vmx_pending_event_guest); 5090 5091 vmcs_set_bits(PIN_CONTROLS, PIN_EXTINT); 5092 5093 enter_guest(); 5094 skip_exit_vmcall(); 5095 5096 if (guest_hlt) 5097 vmcs_write(GUEST_ACTV_STATE, ACTV_HLT); 5098 5099 irq_disable(); 5100 apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL | 5101 APIC_DM_FIXED | ipi_vector, 5102 0); 5103 5104 enter_guest(); 5105 5106 assert_exit_reason(VMX_EXTINT); 5107 report("Guest did not run before host received IPI", 5108 !vmx_pending_event_guest_run); 5109 5110 irq_enable(); 5111 asm volatile ("nop"); 5112 irq_disable(); 5113 report("Got pending interrupt after IRQ enabled", 5114 vmx_pending_event_ipi_fired); 5115 5116 if (guest_hlt) 5117 vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE); 5118 5119 enter_guest(); 5120 report("Guest finished running when no interrupt", 5121 vmx_pending_event_guest_run); 5122 } 5123 5124 static void vmx_pending_event_test(void) 5125 { 5126 vmx_pending_event_test_core(false); 5127 } 5128 5129 static void vmx_pending_event_hlt_test(void) 5130 { 5131 vmx_pending_event_test_core(true); 5132 } 5133 5134 static int vmx_window_test_ud_count; 5135 5136 static void vmx_window_test_ud_handler(struct ex_regs *regs) 5137 { 5138 vmx_window_test_ud_count++; 5139 } 5140 5141 static void vmx_nmi_window_test_guest(void) 5142 { 5143 handle_exception(UD_VECTOR, vmx_window_test_ud_handler); 5144 5145 asm volatile("vmcall\n\t" 5146 "nop\n\t"); 5147 5148 handle_exception(UD_VECTOR, NULL); 5149 } 5150 5151 static void verify_nmi_window_exit(u64 rip) 5152 { 5153 u32 exit_reason = vmcs_read(EXI_REASON); 5154 5155 report("Exit reason (%d) is 'NMI window'", 5156 exit_reason == VMX_NMI_WINDOW, exit_reason); 5157 report("RIP (%#lx) is %#lx", vmcs_read(GUEST_RIP) == rip, 5158 vmcs_read(GUEST_RIP), rip); 5159 report("Activity state (%ld) is 'ACTIVE'", 5160 vmcs_read(GUEST_ACTV_STATE) == ACTV_ACTIVE, 5161 vmcs_read(GUEST_ACTV_STATE)); 5162 } 5163 5164 static void vmx_nmi_window_test(void) 5165 { 5166 u64 nop_addr; 5167 void *ud_fault_addr = get_idt_addr(&boot_idt[UD_VECTOR]); 5168 5169 if (!(ctrl_pin_rev.clr & PIN_VIRT_NMI)) { 5170 report_skip("CPU does not support the \"Virtual NMIs\" VM-execution control."); 5171 return; 5172 } 5173 5174 if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) { 5175 report_skip("CPU does not support the \"NMI-window exiting\" VM-execution control."); 5176 return; 5177 } 5178 5179 vmx_window_test_ud_count = 0; 5180 5181 report_prefix_push("NMI-window"); 5182 test_set_guest(vmx_nmi_window_test_guest); 5183 vmcs_set_bits(PIN_CONTROLS, PIN_VIRT_NMI); 5184 enter_guest(); 5185 skip_exit_vmcall(); 5186 nop_addr = vmcs_read(GUEST_RIP); 5187 5188 /* 5189 * Ask for "NMI-window exiting," and expect an immediate VM-exit. 5190 * RIP will not advance. 
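* The NMI window is already open (no blocking, activity state ACTIVE), so the VM-exit occurs before the guest executes another instruction.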
5191 */ 5192 report_prefix_push("active, no blocking"); 5193 vmcs_set_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW); 5194 enter_guest(); 5195 verify_nmi_window_exit(nop_addr); 5196 report_prefix_pop(); 5197 5198 /* 5199 * Ask for "NMI-window exiting" in a MOV-SS shadow, and expect 5200 * a VM-exit on the next instruction after the nop. (The nop 5201 * is one byte.) 5202 */ 5203 report_prefix_push("active, blocking by MOV-SS"); 5204 vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS); 5205 enter_guest(); 5206 verify_nmi_window_exit(nop_addr + 1); 5207 report_prefix_pop(); 5208 5209 /* 5210 * Ask for "NMI-window exiting" (with event injection), and 5211 * expect a VM-exit after the event is injected. (RIP should 5212 * be at the address specified in the IDT entry for #UD.) 5213 */ 5214 report_prefix_push("active, no blocking, injecting #UD"); 5215 vmcs_write(ENT_INTR_INFO, 5216 INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | UD_VECTOR); 5217 enter_guest(); 5218 verify_nmi_window_exit((u64)ud_fault_addr); 5219 report_prefix_pop(); 5220 5221 /* 5222 * Ask for "NMI-window exiting" with NMI blocking, and expect 5223 * a VM-exit after the next IRET (i.e. after the #UD handler 5224 * returns). So, RIP should be back at one byte past the nop. 5225 */ 5226 report_prefix_push("active, blocking by NMI"); 5227 vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_NMI); 5228 enter_guest(); 5229 verify_nmi_window_exit(nop_addr + 1); 5230 report("#UD handler executed once (actual %d times)", 5231 vmx_window_test_ud_count == 1, 5232 vmx_window_test_ud_count); 5233 report_prefix_pop(); 5234 5235 if (!(rdmsr(MSR_IA32_VMX_MISC) & (1 << 6))) { 5236 report_skip("CPU does not support activity state HLT."); 5237 } else { 5238 /* 5239 * Ask for "NMI-window exiting" when entering activity 5240 * state HLT, and expect an immediate VM-exit. RIP is 5241 * still one byte past the nop. 5242 */ 5243 report_prefix_push("halted, no blocking"); 5244 vmcs_write(GUEST_ACTV_STATE, ACTV_HLT); 5245 enter_guest(); 5246 verify_nmi_window_exit(nop_addr + 1); 5247 report_prefix_pop(); 5248 5249 /* 5250 * Ask for "NMI-window exiting" when entering activity 5251 * state HLT (with event injection), and expect a 5252 * VM-exit after the event is injected. (RIP should be 5253 * at the address specified in the IDT entry for #UD.) 5254 */ 5255 report_prefix_push("halted, no blocking, injecting #UD"); 5256 vmcs_write(GUEST_ACTV_STATE, ACTV_HLT); 5257 vmcs_write(ENT_INTR_INFO, 5258 INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | 5259 UD_VECTOR); 5260 enter_guest(); 5261 verify_nmi_window_exit((u64)ud_fault_addr); 5262 report_prefix_pop(); 5263 } 5264 5265 vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_NMI_WINDOW); 5266 enter_guest(); 5267 report_prefix_pop(); 5268 } 5269 5270 static void vmx_intr_window_test_guest(void) 5271 { 5272 handle_exception(UD_VECTOR, vmx_window_test_ud_handler); 5273 5274 /* 5275 * The two consecutive STIs are to ensure that only the first 5276 * one has a shadow. Note that NOP and STI are one byte 5277 * instructions. 
5278 */ 5279 asm volatile("vmcall\n\t" 5280 "nop\n\t" 5281 "sti\n\t" 5282 "sti\n\t"); 5283 5284 handle_exception(UD_VECTOR, NULL); 5285 } 5286 5287 static void verify_intr_window_exit(u64 rip) 5288 { 5289 u32 exit_reason = vmcs_read(EXI_REASON); 5290 5291 report("Exit reason (%d) is 'interrupt window'", 5292 exit_reason == VMX_INTR_WINDOW, exit_reason); 5293 report("RIP (%#lx) is %#lx", vmcs_read(GUEST_RIP) == rip, 5294 vmcs_read(GUEST_RIP), rip); 5295 report("Activity state (%ld) is 'ACTIVE'", 5296 vmcs_read(GUEST_ACTV_STATE) == ACTV_ACTIVE, 5297 vmcs_read(GUEST_ACTV_STATE)); 5298 } 5299 5300 static void vmx_intr_window_test(void) 5301 { 5302 u64 vmcall_addr; 5303 u64 nop_addr; 5304 unsigned int orig_ud_gate_type; 5305 void *ud_fault_addr = get_idt_addr(&boot_idt[UD_VECTOR]); 5306 5307 if (!(ctrl_cpu_rev[0].clr & CPU_INTR_WINDOW)) { 5308 report_skip("CPU does not support the \"interrupt-window exiting\" VM-execution control."); 5309 return; 5310 } 5311 5312 /* 5313 * Change the IDT entry for #UD from interrupt gate to trap gate, 5314 * so that it won't clear RFLAGS.IF. We don't want interrupts to 5315 * be disabled after vectoring a #UD. 5316 */ 5317 orig_ud_gate_type = boot_idt[UD_VECTOR].type; 5318 boot_idt[UD_VECTOR].type = 15; 5319 5320 report_prefix_push("interrupt-window"); 5321 test_set_guest(vmx_intr_window_test_guest); 5322 enter_guest(); 5323 assert_exit_reason(VMX_VMCALL); 5324 vmcall_addr = vmcs_read(GUEST_RIP); 5325 5326 /* 5327 * Ask for "interrupt-window exiting" with RFLAGS.IF set and 5328 * no blocking; expect an immediate VM-exit. Note that we have 5329 * not advanced past the vmcall instruction yet, so RIP should 5330 * point to the vmcall instruction. 5331 */ 5332 report_prefix_push("active, no blocking, RFLAGS.IF=1"); 5333 vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW); 5334 vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_IF); 5335 enter_guest(); 5336 verify_intr_window_exit(vmcall_addr); 5337 report_prefix_pop(); 5338 5339 /* 5340 * Ask for "interrupt-window exiting" (with event injection) 5341 * with RFLAGS.IF set and no blocking; expect a VM-exit after 5342 * the event is injected. That is, RIP should be at the 5343 * address specified in the IDT entry for #UD. 5344 */ 5345 report_prefix_push("active, no blocking, RFLAGS.IF=1, injecting #UD"); 5346 vmcs_write(ENT_INTR_INFO, 5347 INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | UD_VECTOR); 5348 vmcall_addr = vmcs_read(GUEST_RIP); 5349 enter_guest(); 5350 verify_intr_window_exit((u64)ud_fault_addr); 5351 report_prefix_pop(); 5352 5353 /* 5354 * Let the L2 guest run through the IRET, back to the VMCALL. 5355 * We have to clear the "interrupt-window exiting" 5356 * VM-execution control, or it would just keep causing 5357 * VM-exits. Then, advance past the VMCALL and set the 5358 * "interrupt-window exiting" VM-execution control again. 5359 */ 5360 vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW); 5361 enter_guest(); 5362 skip_exit_vmcall(); 5363 nop_addr = vmcs_read(GUEST_RIP); 5364 vmcs_set_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW); 5365 5366 /* 5367 * Ask for "interrupt-window exiting" in a MOV-SS shadow with 5368 * RFLAGS.IF set, and expect a VM-exit on the next 5369 * instruction. (NOP is one byte.)
5370 */ 5371 report_prefix_push("active, blocking by MOV-SS, RFLAGS.IF=1"); 5372 vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS); 5373 enter_guest(); 5374 verify_intr_window_exit(nop_addr + 1); 5375 report_prefix_pop(); 5376 5377 /* 5378 * Back up to the NOP and ask for "interrupt-window exiting" 5379 * in an STI shadow with RFLAGS.IF set, and expect a VM-exit 5380 * on the next instruction. (NOP is one byte.) 5381 */ 5382 report_prefix_push("active, blocking by STI, RFLAGS.IF=1"); 5383 vmcs_write(GUEST_RIP, nop_addr); 5384 vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_STI); 5385 enter_guest(); 5386 verify_intr_window_exit(nop_addr + 1); 5387 report_prefix_pop(); 5388 5389 /* 5390 * Ask for "interrupt-window exiting" with RFLAGS.IF clear, 5391 * and expect a VM-exit on the instruction following the STI 5392 * shadow. Only the first STI (which is one byte past the NOP) 5393 * should have a shadow. The second STI (which is two bytes 5394 * past the NOP) has no shadow. Therefore, the interrupt 5395 * window opens at three bytes past the NOP. 5396 */ 5397 report_prefix_push("active, RFLAGS.IF=0"); 5398 vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED); 5399 enter_guest(); 5400 verify_intr_window_exit(nop_addr + 3); 5401 report_prefix_pop(); 5402 5403 if (!(rdmsr(MSR_IA32_VMX_MISC) & (1 << 6))) { 5404 report_skip("CPU does not support activity state HLT."); 5405 } else { 5406 /* 5407 * Ask for "interrupt-window exiting" when entering 5408 * activity state HLT, and expect an immediate 5409 * VM-exit. RIP is still three bytes past the nop. 5410 */ 5411 report_prefix_push("halted, no blocking"); 5412 vmcs_write(GUEST_ACTV_STATE, ACTV_HLT); 5413 enter_guest(); 5414 verify_intr_window_exit(nop_addr + 3); 5415 report_prefix_pop(); 5416 5417 /* 5418 * Ask for "interrupt-window exiting" when entering 5419 * activity state HLT (with event injection), and 5420 * expect a VM-exit after the event is injected. That 5421 * is, RIP should be at the address specified 5422 * in the IDT entry for #UD. 5423 */ 5424 report_prefix_push("halted, no blocking, injecting #UD"); 5425 vmcs_write(GUEST_ACTV_STATE, ACTV_HLT); 5426 vmcs_write(ENT_INTR_INFO, 5427 INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION | 5428 UD_VECTOR); 5429 enter_guest(); 5430 verify_intr_window_exit((u64)ud_fault_addr); 5431 report_prefix_pop(); 5432 } 5433 5434 boot_idt[UD_VECTOR].type = orig_ud_gate_type; 5435 vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_INTR_WINDOW); 5436 enter_guest(); 5437 report_prefix_pop(); 5438 } 5439 5440 #define GUEST_TSC_OFFSET (1u << 30) 5441 5442 static u64 guest_tsc; 5443 5444 static void vmx_store_tsc_test_guest(void) 5445 { 5446 guest_tsc = rdtsc(); 5447 } 5448 5449 /* 5450 * This test ensures that when IA32_TSC is in the VM-exit MSR-store 5451 * list, the value saved is not subject to the TSC offset that is 5452 * applied to RDTSC/RDTSCP/RDMSR(IA32_TSC) in guest execution.
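* The guest's RDTSC reads are shifted by GUEST_TSC_OFFSET, while the MSR-store list captures the unshifted hardware TSC; the test brackets both values between host RDTSC readings taken around enter_guest().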
5453 */ 5454 static void vmx_store_tsc_test(void) 5455 { 5456 struct vmx_msr_entry msr_entry = { .index = MSR_IA32_TSC }; 5457 u64 low, high; 5458 5459 if (!(ctrl_cpu_rev[0].clr & CPU_USE_TSC_OFFSET)) { 5460 report_skip("'Use TSC offsetting' not supported"); 5461 return; 5462 } 5463 5464 test_set_guest(vmx_store_tsc_test_guest); 5465 5466 vmcs_set_bits(CPU_EXEC_CTRL0, CPU_USE_TSC_OFFSET); 5467 vmcs_write(EXI_MSR_ST_CNT, 1); 5468 vmcs_write(EXIT_MSR_ST_ADDR, virt_to_phys(&msr_entry)); 5469 vmcs_write(TSC_OFFSET, GUEST_TSC_OFFSET); 5470 5471 low = rdtsc(); 5472 enter_guest(); 5473 high = rdtsc(); 5474 5475 report("RDTSC value in the guest (%lu) is in range [%lu, %lu]", 5476 low + GUEST_TSC_OFFSET <= guest_tsc && 5477 guest_tsc <= high + GUEST_TSC_OFFSET, 5478 guest_tsc, low + GUEST_TSC_OFFSET, high + GUEST_TSC_OFFSET); 5479 report("IA32_TSC value saved in the VM-exit MSR-store list (%lu) is in range [%lu, %lu]", 5480 low <= msr_entry.value && msr_entry.value <= high, 5481 msr_entry.value, low, high); 5482 } 5483 5484 static void vmx_db_test_guest(void) 5485 { 5486 /* 5487 * For a hardware generated single-step #DB. 5488 */ 5489 asm volatile("vmcall;" 5490 "nop;" 5491 ".Lpost_nop:"); 5492 /* 5493 * ...in a MOVSS shadow, with pending debug exceptions. 5494 */ 5495 asm volatile("vmcall;" 5496 "nop;" 5497 ".Lpost_movss_nop:"); 5498 /* 5499 * For an L0 synthesized single-step #DB. (L0 intercepts WBINVD and 5500 * emulates it in software.) 5501 */ 5502 asm volatile("vmcall;" 5503 "wbinvd;" 5504 ".Lpost_wbinvd:"); 5505 /* 5506 * ...in a MOVSS shadow, with pending debug exceptions. 5507 */ 5508 asm volatile("vmcall;" 5509 "wbinvd;" 5510 ".Lpost_movss_wbinvd:"); 5511 /* 5512 * For a hardware generated single-step #DB in a transactional region. 5513 */ 5514 asm volatile("vmcall;" 5515 ".Lxbegin: xbegin .Lskip_rtm;" 5516 "xend;" 5517 ".Lskip_rtm:"); 5518 } 5519 5520 /* 5521 * Clear the pending debug exceptions and RFLAGS.TF and re-enter 5522 * L2. No #DB is delivered and L2 continues to the next point of 5523 * interest. 5524 */ 5525 static void dismiss_db(void) 5526 { 5527 vmcs_write(GUEST_PENDING_DEBUG, 0); 5528 vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED); 5529 enter_guest(); 5530 } 5531 5532 /* 5533 * Check a variety of VMCS fields relevant to an intercepted #DB exception. 5534 * Then throw away the #DB exception and resume L2. 
5535 */ 5536 static void check_db_exit(bool xfail_qual, bool xfail_dr6, bool xfail_pdbg, 5537 void *expected_rip, u64 expected_exit_qual, 5538 u64 expected_dr6) 5539 { 5540 u32 reason = vmcs_read(EXI_REASON); 5541 u32 intr_info = vmcs_read(EXI_INTR_INFO); 5542 u64 exit_qual = vmcs_read(EXI_QUALIFICATION); 5543 u64 guest_rip = vmcs_read(GUEST_RIP); 5544 u64 guest_pending_dbg = vmcs_read(GUEST_PENDING_DEBUG); 5545 u64 dr6 = read_dr6(); 5546 const u32 expected_intr_info = INTR_INFO_VALID_MASK | 5547 INTR_TYPE_HARD_EXCEPTION | DB_VECTOR; 5548 5549 report("Expected #DB VM-exit", 5550 reason == VMX_EXC_NMI && intr_info == expected_intr_info); 5551 report("Expected RIP %p (actual %lx)", (u64)expected_rip == guest_rip, 5552 expected_rip, guest_rip); 5553 report_xfail("Expected pending debug exceptions 0 (actual %lx)", 5554 xfail_pdbg, 0 == guest_pending_dbg, guest_pending_dbg); 5555 report_xfail("Expected exit qualification %lx (actual %lx)", xfail_qual, 5556 expected_exit_qual == exit_qual, 5557 expected_exit_qual, exit_qual); 5558 report_xfail("Expected DR6 %lx (actual %lx)", xfail_dr6, 5559 expected_dr6 == dr6, expected_dr6, dr6); 5560 dismiss_db(); 5561 } 5562 5563 /* 5564 * Assuming the guest has just exited on a VMCALL instruction, skip 5565 * over the vmcall, and set the guest's RFLAGS.TF in the VMCS. If 5566 * pending debug exceptions are non-zero, set the VMCS up as if the 5567 * previous instruction was a MOVSS that generated the indicated 5568 * pending debug exceptions. Then enter L2. 5569 */ 5570 static void single_step_guest(const char *test_name, u64 starting_dr6, 5571 u64 pending_debug_exceptions) 5572 { 5573 printf("\n%s\n", test_name); 5574 skip_exit_vmcall(); 5575 write_dr6(starting_dr6); 5576 vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_TF); 5577 if (pending_debug_exceptions) { 5578 vmcs_write(GUEST_PENDING_DEBUG, pending_debug_exceptions); 5579 vmcs_write(GUEST_INTR_STATE, GUEST_INTR_STATE_MOVSS); 5580 } 5581 enter_guest(); 5582 } 5583 5584 /* 5585 * When L1 intercepts #DB, verify that a single-step trap clears 5586 * pending debug exceptions, populates the exit qualification field 5587 * properly, and that DR6 is not prematurely clobbered. In a 5588 * (simulated) MOVSS shadow, make sure that the pending debug 5589 * exception bits are properly accumulated into the exit qualification 5590 * field. 5591 */ 5592 static void vmx_db_test(void) 5593 { 5594 /* 5595 * We are going to set a few arbitrary bits in DR6 to verify that 5596 * (a) DR6 is not modified by an intercepted #DB, and 5597 * (b) stale bits in DR6 (DR6.BD, in particular) don't leak into 5598 * the exit qualification field for a subsequent #DB exception. 5599 */ 5600 const u64 starting_dr6 = DR6_RESERVED | BIT(13) | DR_TRAP3 | DR_TRAP1; 5601 extern char post_nop asm(".Lpost_nop"); 5602 extern char post_movss_nop asm(".Lpost_movss_nop"); 5603 extern char post_wbinvd asm(".Lpost_wbinvd"); 5604 extern char post_movss_wbinvd asm(".Lpost_movss_wbinvd"); 5605 extern char xbegin asm(".Lxbegin"); 5606 extern char skip_rtm asm(".Lskip_rtm"); 5607 5608 /* 5609 * L1 wants to intercept #DB exceptions encountered in L2. 5610 */ 5611 vmcs_write(EXC_BITMAP, BIT(DB_VECTOR)); 5612 5613 /* 5614 * Start L2 and run it up to the first point of interest. 5615 */ 5616 test_set_guest(vmx_db_test_guest); 5617 enter_guest(); 5618 5619 /* 5620 * Hardware-delivered #DB trap for single-step sets the 5621 * standard that L0 has to follow for emulated instructions. 
/*
 * When L1 intercepts #DB, verify that a single-step trap clears
 * pending debug exceptions, populates the exit qualification field
 * properly, and that DR6 is not prematurely clobbered. In a
 * (simulated) MOVSS shadow, make sure that the pending debug
 * exception bits are properly accumulated into the exit qualification
 * field.
 */
static void vmx_db_test(void)
{
	/*
	 * We are going to set a few arbitrary bits in DR6 to verify that
	 * (a) DR6 is not modified by an intercepted #DB, and
	 * (b) stale bits in DR6 (DR6.BD, in particular) don't leak into
	 *     the exit qualification field for a subsequent #DB exception.
	 */
	const u64 starting_dr6 = DR6_RESERVED | BIT(13) | DR_TRAP3 | DR_TRAP1;
	extern char post_nop asm(".Lpost_nop");
	extern char post_movss_nop asm(".Lpost_movss_nop");
	extern char post_wbinvd asm(".Lpost_wbinvd");
	extern char post_movss_wbinvd asm(".Lpost_movss_wbinvd");
	extern char xbegin asm(".Lxbegin");
	extern char skip_rtm asm(".Lskip_rtm");

	/*
	 * L1 wants to intercept #DB exceptions encountered in L2.
	 */
	vmcs_write(EXC_BITMAP, BIT(DB_VECTOR));

	/*
	 * Start L2 and run it up to the first point of interest.
	 */
	test_set_guest(vmx_db_test_guest);
	enter_guest();

	/*
	 * Hardware-delivered #DB trap for single-step sets the
	 * standard that L0 has to follow for emulated instructions.
	 */
	single_step_guest("Hardware delivered single-step", starting_dr6, 0);
	check_db_exit(false, false, false, &post_nop, DR_STEP, starting_dr6);

	/*
	 * Hardware-delivered #DB trap for single-step in MOVSS shadow
	 * also sets the standard that L0 has to follow for emulated
	 * instructions. Here, we establish the VMCS pending debug
	 * exceptions to indicate that the simulated MOVSS triggered a
	 * data breakpoint as well as the single-step trap.
	 */
	single_step_guest("Hardware delivered single-step in MOVSS shadow",
			  starting_dr6, BIT(12) | DR_STEP | DR_TRAP0);
	check_db_exit(false, false, false, &post_movss_nop, DR_STEP | DR_TRAP0,
		      starting_dr6);

	/*
	 * L0 synthesized #DB trap for single-step is buggy, because
	 * kvm (a) clobbers DR6 too early, and (b) tries its best to
	 * reconstitute the exit qualification from the prematurely
	 * modified DR6, but fails miserably.
	 */
	single_step_guest("Software synthesized single-step", starting_dr6, 0);
	check_db_exit(true, true, false, &post_wbinvd, DR_STEP, starting_dr6);

	/*
	 * L0 synthesized #DB trap for single-step in MOVSS shadow is
	 * even worse, because L0 also leaves the pending debug
	 * exceptions in the VMCS instead of accumulating them into
	 * the exit qualification field for the #DB exception.
	 */
	single_step_guest("Software synthesized single-step in MOVSS shadow",
			  starting_dr6, BIT(12) | DR_STEP | DR_TRAP0);
	check_db_exit(true, true, true, &post_movss_wbinvd, DR_STEP | DR_TRAP0,
		      starting_dr6);
	/*
	 * Optional RTM test for hardware that supports RTM, to
	 * demonstrate that the current volume 3 of the SDM
	 * (325384-067US), table 27-1 is incorrect. Bit 16 of the exit
	 * qualification for debug exceptions is not reserved. It is
	 * set to 1 if a debug exception (#DB) or a breakpoint
	 * exception (#BP) occurs inside an RTM region while advanced
	 * debugging of RTM transactional regions is enabled.
	 */
	if (cpuid(7).b & BIT(11)) {
		vmcs_write(ENT_CONTROLS,
			   vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
		/*
		 * Set DR7.RTM[bit 11] and IA32_DEBUGCTL.RTM[bit 15]
		 * in the guest to enable advanced debugging of RTM
		 * transactional regions.
		 */
		vmcs_write(GUEST_DR7, BIT(11));
		vmcs_write(GUEST_DEBUGCTL, BIT(15));
		single_step_guest("Hardware delivered single-step in "
				  "transactional region", starting_dr6, 0);
		check_db_exit(false, false, false, &xbegin, BIT(16),
			      starting_dr6);
	} else {
		vmcs_write(GUEST_RIP, (u64)&skip_rtm);
		enter_guest();
	}
}

static bool cpu_has_apicv(void)
{
	return ((ctrl_cpu_rev[1].clr & CPU_APIC_REG_VIRT) &&
		(ctrl_cpu_rev[1].clr & CPU_VINTD) &&
		(ctrl_pin_rev.clr & PIN_POST_INTR));
}

static void enable_vid(void)
{
	void *virtual_apic_page;

	assert(cpu_has_apicv());

	disable_intercept_for_x2apic_msrs();

	virtual_apic_page = alloc_page();
	vmcs_write(APIC_VIRT_ADDR, (u64)virtual_apic_page);

	vmcs_set_bits(PIN_CONTROLS, PIN_EXTINT);

	vmcs_write(EOI_EXIT_BITMAP0, 0x0);
	vmcs_write(EOI_EXIT_BITMAP1, 0x0);
	vmcs_write(EOI_EXIT_BITMAP2, 0x0);
	vmcs_write(EOI_EXIT_BITMAP3, 0x0);

	vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY | CPU_TPR_SHADOW);
	vmcs_set_bits(CPU_EXEC_CTRL1, CPU_VINTD | CPU_VIRT_X2APIC);
}

static void trigger_ioapic_scan_thread(void *data)
{
	/* Wait until the other CPU has entered L2 */
	while (vmx_get_test_stage() != 1)
		;

	/* Trigger the ioapic scan */
	ioapic_set_redir(0xf, 0x79, TRIGGER_LEVEL);
	vmx_set_test_stage(2);
}

static void irq_79_handler_guest(isr_regs_t *regs)
{
	eoi();

	/* L1 expects a vmexit on VMX_VMCALL and not VMX_EOI_INDUCED */
	vmcall();
}

/*
 * Number of busy-loop iterations after which a timer interrupt
 * should have fired in the host.
 */
#define TIMER_INTERRUPT_DELAY 100000000

static void vmx_eoi_bitmap_ioapic_scan_test_guest(void)
{
	handle_irq(0x79, irq_79_handler_guest);
	irq_enable();

	/* Signal to the L1 CPU to trigger the ioapic scan */
	vmx_set_test_stage(1);
	/* Wait for the L1 CPU to trigger the ioapic scan */
	while (vmx_get_test_stage() != 2)
		;

	/*
	 * Wait for an L0 timer interrupt to be raised while we run in L2
	 * so that L0 will process the IOAPIC scan request before
	 * resuming L2.
	 */
	delay(TIMER_INTERRUPT_DELAY);

	asm volatile ("int $0x79");
}
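/*
 * A minimal sketch, for reference: the four EOI_EXIT_BITMAPn fields
 * form a single 256-bit bitmap indexed by interrupt vector. These
 * helpers (ours, not part of the test API) show how a vector selects
 * its word and bit within that bitmap.
 */
static inline unsigned eoi_exit_bitmap_word(u8 vector)
{
	return vector / 64;	/* selects EOI_EXIT_BITMAP0..3 */
}

static inline u64 eoi_exit_bitmap_mask(u8 vector)
{
	return 1ull << (vector % 64);	/* bit within that 64-bit word */
}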
static void vmx_eoi_bitmap_ioapic_scan_test(void)
{
	if (!cpu_has_apicv() || (cpu_count() < 2)) {
		report_skip(__func__);
		return;
	}

	enable_vid();

	on_cpu_async(1, trigger_ioapic_scan_thread, NULL);
	test_set_guest(vmx_eoi_bitmap_ioapic_scan_test_guest);

	/*
	 * Launch L2.
	 * We expect the exit reason to be VMX_VMCALL (and not EOI INDUCED).
	 * If the reason isn't VMX_VMCALL, the assertion inside
	 * skip_exit_vmcall() will fail.
	 */
	enter_guest();
	skip_exit_vmcall();

	/* Let L2 finish */
	enter_guest();
	report(__func__, 1);
}

#define HLT_WITH_RVI_VECTOR (0xf1)

bool vmx_hlt_with_rvi_guest_isr_fired;
static void vmx_hlt_with_rvi_guest_isr(isr_regs_t *regs)
{
	vmx_hlt_with_rvi_guest_isr_fired = true;
	eoi();
}

static void vmx_hlt_with_rvi_guest(void)
{
	handle_irq(HLT_WITH_RVI_VECTOR, vmx_hlt_with_rvi_guest_isr);

	irq_enable();
	asm volatile ("nop");

	vmcall();
}

static void vmx_hlt_with_rvi_test(void)
{
	if (!cpu_has_apicv()) {
		report_skip(__func__);
		return;
	}

	enable_vid();

	vmx_hlt_with_rvi_guest_isr_fired = false;
	test_set_guest(vmx_hlt_with_rvi_guest);

	enter_guest();
	skip_exit_vmcall();

	vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
	vmcs_write(GUEST_INT_STATUS, HLT_WITH_RVI_VECTOR);
	enter_guest();

	report("Interrupt raised in guest", vmx_hlt_with_rvi_guest_isr_fired);
}

static void set_irq_line_thread(void *data)
{
	/* Wait until the other CPU has entered L2 */
	while (vmx_get_test_stage() != 1)
		;

	/* Set irq-line 0xf to raise vector 0x78 for vCPU 0 */
	ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL);
	vmx_set_test_stage(2);
}

static bool irq_78_handler_vmcall_before_eoi;
static void irq_78_handler_guest(isr_regs_t *regs)
{
	set_irq_line(0xf, 0);
	if (irq_78_handler_vmcall_before_eoi)
		vmcall();
	eoi();
	vmcall();
}

static void vmx_apic_passthrough_guest(void)
{
	handle_irq(0x78, irq_78_handler_guest);
	irq_enable();

	/* If requested, wait for the other CPU to program the irq-line */
	if (vmx_get_test_stage() < 1) {
		vmx_set_test_stage(1);
		while (vmx_get_test_stage() != 2)
			;
	}

	set_irq_line(0xf, 1);
}

static void vmx_apic_passthrough(bool set_irq_line_from_thread)
{
	if (set_irq_line_from_thread && (cpu_count() < 2)) {
		report_skip(__func__);
		return;
	}

	u64 cpu_ctrl_0 = CPU_SECONDARY;
	u64 cpu_ctrl_1 = 0;

	disable_intercept_for_x2apic_msrs();

	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);

	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | cpu_ctrl_0);
	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | cpu_ctrl_1);

	if (set_irq_line_from_thread) {
		irq_78_handler_vmcall_before_eoi = false;
		on_cpu_async(1, set_irq_line_thread, NULL);
	} else {
		irq_78_handler_vmcall_before_eoi = true;
		ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL);
		vmx_set_test_stage(2);
	}
	test_set_guest(vmx_apic_passthrough_guest);

	if (irq_78_handler_vmcall_before_eoi) {
		/* Before EOI, remote_irr should still be set */
		enter_guest();
		skip_exit_vmcall();
		TEST_ASSERT_EQ_MSG(1, (int)ioapic_read_redir(0xf).remote_irr,
			"IOAPIC pass-through: remote_irr=1 before EOI");
	}

	/* After EOI, remote_irr should be cleared */
	enter_guest();
	skip_exit_vmcall();
	TEST_ASSERT_EQ_MSG(0, (int)ioapic_read_redir(0xf).remote_irr,
		"IOAPIC pass-through: remote_irr=0 after EOI");

	/* Let L2 finish */
	enter_guest();
	report(__func__, 1);
}

static void vmx_apic_passthrough_test(void)
{
	vmx_apic_passthrough(false);
}

static void vmx_apic_passthrough_thread_test(void)
{
	vmx_apic_passthrough(true);
}
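/*
 * Background for the VMCS shadowing tests below: per the SDM's VMfail
 * conventions, a VMX instruction signals VMfailInvalid by setting
 * RFLAGS.CF and VMfailValid by setting RFLAGS.ZF (with an error code
 * in VMX_INST_ERROR). A minimal sketch (our helper, not part of the
 * test API) of how either failure mode can be detected:
 */
static inline bool vmx_insn_failed(u64 rflags)
{
	return (rflags & (X86_EFLAGS_CF | X86_EFLAGS_ZF)) != 0;
}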
enum vmcs_access {
	ACCESS_VMREAD,
	ACCESS_VMWRITE,
	ACCESS_NONE,
};

struct vmcs_shadow_test_common {
	enum vmcs_access op;
	enum Reason reason;
	u64 field;
	u64 value;
	u64 flags;
	u64 time;
} l1_l2_common;

static inline u64 vmread_flags(u64 field, u64 *val)
{
	u64 flags;

	asm volatile ("vmread %2, %1; pushf; pop %0"
		      : "=r" (flags), "=rm" (*val) : "r" (field) : "cc");
	return flags & X86_EFLAGS_ALU;
}

static inline u64 vmwrite_flags(u64 field, u64 val)
{
	u64 flags;

	asm volatile ("vmwrite %1, %2; pushf; pop %0"
		      : "=r" (flags) : "rm" (val), "r" (field) : "cc");
	return flags & X86_EFLAGS_ALU;
}

static void vmx_vmcs_shadow_test_guest(void)
{
	struct vmcs_shadow_test_common *c = &l1_l2_common;
	u64 start;

	while (c->op != ACCESS_NONE) {
		start = rdtsc();
		switch (c->op) {
		default:
			c->flags = -1ull;
			break;
		case ACCESS_VMREAD:
			c->flags = vmread_flags(c->field, &c->value);
			break;
		case ACCESS_VMWRITE:
			c->flags = vmwrite_flags(c->field, 0);
			break;
		}
		c->time = rdtsc() - start;
		vmcall();
	}
}

static u64 vmread_from_shadow(u64 field)
{
	struct vmcs *primary;
	struct vmcs *shadow;
	u64 value;

	TEST_ASSERT(!vmcs_save(&primary));
	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
	TEST_ASSERT(!make_vmcs_current(shadow));
	value = vmcs_read(field);
	TEST_ASSERT(!make_vmcs_current(primary));
	return value;
}

static u64 vmwrite_to_shadow(u64 field, u64 value)
{
	struct vmcs *primary;
	struct vmcs *shadow;

	TEST_ASSERT(!vmcs_save(&primary));
	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
	TEST_ASSERT(!make_vmcs_current(shadow));
	vmcs_write(field, value);
	value = vmcs_read(field);
	TEST_ASSERT(!make_vmcs_current(primary));
	return value;
}

static void vmcs_shadow_test_access(u8 *bitmap[2], enum vmcs_access access)
{
	struct vmcs_shadow_test_common *c = &l1_l2_common;

	c->op = access;
	vmcs_write(VMX_INST_ERROR, 0);
	enter_guest();
	c->reason = vmcs_read(EXI_REASON) & 0xffff;
	if (c->reason != VMX_VMCALL) {
		skip_exit_insn();
		enter_guest();
	}
	skip_exit_vmcall();
}

static void vmcs_shadow_test_field(u8 *bitmap[2], u64 field)
{
	struct vmcs_shadow_test_common *c = &l1_l2_common;
	struct vmcs *shadow;
	u64 value;
	uintptr_t flags[2];
	bool good_shadow;
	u32 vmx_inst_error;

	report_prefix_pushf("field %lx", field);
	c->field = field;

	shadow = (struct vmcs *)vmcs_read(VMCS_LINK_PTR);
	if (shadow != (struct vmcs *)-1ull) {
		flags[ACCESS_VMREAD] = vmread_flags(field, &value);
		flags[ACCESS_VMWRITE] = vmwrite_flags(field, value);
		good_shadow = !flags[ACCESS_VMREAD] && !flags[ACCESS_VMWRITE];
	} else {
		/*
		 * When the VMCS link pointer is -1ull, VMREAD/VMWRITE on
		 * shadowed fields should fail, setting RFLAGS.CF.
		 */
		flags[ACCESS_VMREAD] = X86_EFLAGS_CF;
		flags[ACCESS_VMWRITE] = X86_EFLAGS_CF;
		good_shadow = false;
	}
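	/*
	 * Reminder of the bitmap semantics exercised below (per the SDM):
	 * a set bit in the VMREAD/VMWRITE bitmap means the corresponding
	 * instruction is intercepted (causes a VM exit); a clear bit means
	 * the access is satisfied from the shadow VMCS instead.
	 */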
	/* Intercept both VMREAD and VMWRITE. */
	report_prefix_push("no VMREAD/VMWRITE permission");
	/*
	 * VMREAD/VMWRITE of a field with a reserved bit set should
	 * always be intercepted, so only program the bitmaps for
	 * fields without reserved bits.
	 */
	if (!(field >> VMCS_FIELD_RESERVED_SHIFT)) {
		set_bit(field, bitmap[ACCESS_VMREAD]);
		set_bit(field, bitmap[ACCESS_VMWRITE]);
	}
	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
	report("not shadowed for VMWRITE", c->reason == VMX_VMWRITE);
	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
	report("not shadowed for VMREAD", c->reason == VMX_VMREAD);
	report_prefix_pop();

	if (field >> VMCS_FIELD_RESERVED_SHIFT)
		goto out;

	/* Permit shadowed VMREAD. */
	report_prefix_push("VMREAD permission only");
	clear_bit(field, bitmap[ACCESS_VMREAD]);
	set_bit(field, bitmap[ACCESS_VMWRITE]);
	if (good_shadow)
		value = vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
	report("not shadowed for VMWRITE", c->reason == VMX_VMWRITE);
	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
	report("shadowed for VMREAD (in %ld cycles)", c->reason == VMX_VMCALL,
	       c->time);
	report("ALU flags after VMREAD (%lx) are as expected (%lx)",
	       c->flags == flags[ACCESS_VMREAD],
	       c->flags, flags[ACCESS_VMREAD]);
	if (good_shadow)
		report("value read from shadow (%lx) is as expected (%lx)",
		       c->value == value, c->value, value);
	else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMREAD])
		report("VMX_INST_ERROR (%d) is as expected (%d)",
		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
	report_prefix_pop();

	/* Permit shadowed VMWRITE. */
	report_prefix_push("VMWRITE permission only");
	set_bit(field, bitmap[ACCESS_VMREAD]);
	clear_bit(field, bitmap[ACCESS_VMWRITE]);
	if (good_shadow)
		vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
	report("shadowed for VMWRITE (in %ld cycles)", c->reason == VMX_VMCALL,
	       c->time);
	report("ALU flags after VMWRITE (%lx) are as expected (%lx)",
	       c->flags == flags[ACCESS_VMWRITE],
	       c->flags, flags[ACCESS_VMWRITE]);
	if (good_shadow) {
		value = vmread_from_shadow(field);
		report("shadow VMCS value (%lx) is as expected (%lx)",
		       value == 0, value, 0ul);
	} else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMWRITE]) {
		report("VMX_INST_ERROR (%d) is as expected (%d)",
		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
	}
	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
	report("not shadowed for VMREAD", c->reason == VMX_VMREAD);
	report_prefix_pop();
	/* Permit shadowed VMREAD and VMWRITE. */
	report_prefix_push("VMREAD and VMWRITE permission");
	clear_bit(field, bitmap[ACCESS_VMREAD]);
	clear_bit(field, bitmap[ACCESS_VMWRITE]);
	if (good_shadow)
		vmwrite_to_shadow(field, MAGIC_VAL_1 + field);
	vmcs_shadow_test_access(bitmap, ACCESS_VMWRITE);
	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
	report("shadowed for VMWRITE (in %ld cycles)", c->reason == VMX_VMCALL,
	       c->time);
	report("ALU flags after VMWRITE (%lx) are as expected (%lx)",
	       c->flags == flags[ACCESS_VMWRITE],
	       c->flags, flags[ACCESS_VMWRITE]);
	if (good_shadow) {
		value = vmread_from_shadow(field);
		report("shadow VMCS value (%lx) is as expected (%lx)",
		       value == 0, value, 0ul);
	} else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMWRITE]) {
		report("VMX_INST_ERROR (%d) is as expected (%d)",
		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
	}
	vmcs_shadow_test_access(bitmap, ACCESS_VMREAD);
	vmx_inst_error = vmcs_read(VMX_INST_ERROR);
	report("shadowed for VMREAD (in %ld cycles)", c->reason == VMX_VMCALL,
	       c->time);
	report("ALU flags after VMREAD (%lx) are as expected (%lx)",
	       c->flags == flags[ACCESS_VMREAD],
	       c->flags, flags[ACCESS_VMREAD]);
	if (good_shadow)
		report("value read from shadow (%lx) is as expected (%lx)",
		       c->value == 0, c->value, 0ul);
	else if (shadow != (struct vmcs *)-1ull && flags[ACCESS_VMREAD])
		report("VMX_INST_ERROR (%d) is as expected (%d)",
		       vmx_inst_error == VMXERR_UNSUPPORTED_VMCS_COMPONENT,
		       vmx_inst_error, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
	report_prefix_pop();

out:
	report_prefix_pop();
}

static void vmx_vmcs_shadow_test_body(u8 *bitmap[2])
{
	unsigned base;
	unsigned index;
	unsigned bit;
	unsigned highest_index = rdmsr(MSR_IA32_VMX_VMCS_ENUM);

	/* Run the test on all possible valid VMCS fields. */
	for (base = 0;
	     base < (1 << VMCS_FIELD_RESERVED_SHIFT);
	     base += (1 << VMCS_FIELD_TYPE_SHIFT))
		for (index = 0; index <= highest_index; index++)
			vmcs_shadow_test_field(bitmap, base + index);
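	/*
	 * Field encodings with any bit at or above
	 * VMCS_FIELD_RESERVED_SHIFT set are architecturally reserved;
	 * as verified above, accesses to them must be intercepted
	 * regardless of the shadowing bitmaps.
	 */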
6172 */ 6173 for (bit = VMCS_FIELD_RESERVED_SHIFT; bit < VMCS_FIELD_BIT_SIZE; bit++) 6174 vmcs_shadow_test_field(bitmap, (1ull << bit)); 6175 } 6176 6177 static void vmx_vmcs_shadow_test(void) 6178 { 6179 u8 *bitmap[2]; 6180 struct vmcs *shadow; 6181 6182 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) { 6183 printf("\t'Activate secondary controls' not supported.\n"); 6184 return; 6185 } 6186 6187 if (!(ctrl_cpu_rev[1].clr & CPU_SHADOW_VMCS)) { 6188 printf("\t'VMCS shadowing' not supported.\n"); 6189 return; 6190 } 6191 6192 if (!(rdmsr(MSR_IA32_VMX_MISC) & 6193 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) { 6194 printf("\tVMWRITE can't modify VM-exit information fields.\n"); 6195 return; 6196 } 6197 6198 test_set_guest(vmx_vmcs_shadow_test_guest); 6199 6200 bitmap[ACCESS_VMREAD] = alloc_page(); 6201 bitmap[ACCESS_VMWRITE] = alloc_page(); 6202 6203 vmcs_write(VMREAD_BITMAP, virt_to_phys(bitmap[ACCESS_VMREAD])); 6204 vmcs_write(VMWRITE_BITMAP, virt_to_phys(bitmap[ACCESS_VMWRITE])); 6205 6206 shadow = alloc_page(); 6207 shadow->hdr.revision_id = basic.revision; 6208 shadow->hdr.shadow_vmcs = 1; 6209 TEST_ASSERT(!vmcs_clear(shadow)); 6210 6211 vmcs_clear_bits(CPU_EXEC_CTRL0, CPU_RDTSC); 6212 vmcs_set_bits(CPU_EXEC_CTRL0, CPU_SECONDARY); 6213 vmcs_set_bits(CPU_EXEC_CTRL1, CPU_SHADOW_VMCS); 6214 6215 vmcs_write(VMCS_LINK_PTR, virt_to_phys(shadow)); 6216 report_prefix_push("valid link pointer"); 6217 vmx_vmcs_shadow_test_body(bitmap); 6218 report_prefix_pop(); 6219 6220 vmcs_write(VMCS_LINK_PTR, -1ull); 6221 report_prefix_push("invalid link pointer"); 6222 vmx_vmcs_shadow_test_body(bitmap); 6223 report_prefix_pop(); 6224 6225 l1_l2_common.op = ACCESS_NONE; 6226 enter_guest(); 6227 } 6228 6229 #define TEST(name) { #name, .v2 = name } 6230 6231 /* name/init/guest_main/exit_handler/syscall_handler/guest_regs */ 6232 struct vmx_test vmx_tests[] = { 6233 { "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} }, 6234 { "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} }, 6235 { "preemption timer", preemption_timer_init, preemption_timer_main, 6236 preemption_timer_exit_handler, NULL, {0} }, 6237 { "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main, 6238 test_ctrl_pat_exit_handler, NULL, {0} }, 6239 { "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main, 6240 test_ctrl_efer_exit_handler, NULL, {0} }, 6241 { "CR shadowing", NULL, cr_shadowing_main, 6242 cr_shadowing_exit_handler, NULL, {0} }, 6243 { "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler, 6244 NULL, {0} }, 6245 { "instruction intercept", insn_intercept_init, insn_intercept_main, 6246 insn_intercept_exit_handler, NULL, {0} }, 6247 { "EPT A/D disabled", ept_init, ept_main, ept_exit_handler, NULL, {0} }, 6248 { "EPT A/D enabled", eptad_init, eptad_main, eptad_exit_handler, NULL, {0} }, 6249 { "PML", pml_init, pml_main, pml_exit_handler, NULL, {0} }, 6250 { "VPID", vpid_init, vpid_main, vpid_exit_handler, NULL, {0} }, 6251 { "interrupt", interrupt_init, interrupt_main, 6252 interrupt_exit_handler, NULL, {0} }, 6253 { "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler, 6254 NULL, {0} }, 6255 { "MSR switch", msr_switch_init, msr_switch_main, 6256 msr_switch_exit_handler, NULL, {0}, msr_switch_entry_failure }, 6257 { "vmmcall", vmmcall_init, vmmcall_main, vmmcall_exit_handler, NULL, {0} }, 6258 { "disable RDTSCP", disable_rdtscp_init, disable_rdtscp_main, 6259 disable_rdtscp_exit_handler, NULL, {0} }, 6260 { "int3", int3_init, int3_guest_main, int3_exit_handler, NULL, {0} }, 6261 { "into", 
	{ "into", into_init, into_guest_main, into_exit_handler, NULL, {0} },
	{ "exit_monitor_from_l2_test", NULL, exit_monitor_from_l2_main,
		exit_monitor_from_l2_handler, NULL, {0} },
	/* Basic V2 tests. */
	TEST(v2_null_test),
	TEST(v2_multiple_entries_test),
	TEST(fixture_test_case1),
	TEST(fixture_test_case2),
	/* Opcode tests. */
	TEST(invvpid_test_v2),
	/* VM-entry tests */
	TEST(vmx_controls_test),
	TEST(vmentry_movss_shadow_test),
	/* APICv tests */
	TEST(vmx_eoi_bitmap_ioapic_scan_test),
	TEST(vmx_hlt_with_rvi_test),
	/* APIC pass-through tests */
	TEST(vmx_apic_passthrough_test),
	TEST(vmx_apic_passthrough_thread_test),
	/* VMCS Shadowing tests */
	TEST(vmx_vmcs_shadow_test),
	/* Regression tests */
	TEST(vmx_cr_load_test),
	TEST(vmx_nm_test),
	TEST(vmx_db_test),
	TEST(vmx_nmi_window_test),
	TEST(vmx_intr_window_test),
	TEST(vmx_pending_event_test),
	TEST(vmx_pending_event_hlt_test),
	TEST(vmx_store_tsc_test),
	/* EPT access tests. */
	TEST(ept_access_test_not_present),
	TEST(ept_access_test_read_only),
	TEST(ept_access_test_write_only),
	TEST(ept_access_test_read_write),
	TEST(ept_access_test_execute_only),
	TEST(ept_access_test_read_execute),
	TEST(ept_access_test_write_execute),
	TEST(ept_access_test_read_write_execute),
	TEST(ept_access_test_reserved_bits),
	TEST(ept_access_test_ignored_bits),
	TEST(ept_access_test_paddr_not_present_ad_disabled),
	TEST(ept_access_test_paddr_not_present_ad_enabled),
	TEST(ept_access_test_paddr_read_only_ad_disabled),
	TEST(ept_access_test_paddr_read_only_ad_enabled),
	TEST(ept_access_test_paddr_read_write),
	TEST(ept_access_test_paddr_read_write_execute),
	TEST(ept_access_test_paddr_read_execute_ad_disabled),
	TEST(ept_access_test_paddr_read_execute_ad_enabled),
	TEST(ept_access_test_paddr_not_present_page_fault),
	TEST(ept_access_test_force_2m_page),
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};