1 /* 2 * All test cases of nested virtualization should be in this file 3 * 4 * Author : Arthur Chunqi Li <yzt356@gmail.com> 5 */ 6 #include "vmx.h" 7 #include "msr.h" 8 #include "processor.h" 9 #include "vm.h" 10 #include "fwcfg.h" 11 #include "isr.h" 12 #include "desc.h" 13 #include "apic.h" 14 #include "types.h" 15 #include "vmalloc.h" 16 #include "alloc_page.h" 17 #include "smp.h" 18 #include "delay.h" 19 20 #define NONCANONICAL 0xaaaaaaaaaaaaaaaaull 21 22 #define VPID_CAP_INVVPID_TYPES_SHIFT 40 23 24 u64 ia32_pat; 25 u64 ia32_efer; 26 void *io_bitmap_a, *io_bitmap_b; 27 u16 ioport; 28 29 unsigned long *pml4; 30 u64 eptp; 31 void *data_page1, *data_page2; 32 33 void *pml_log; 34 #define PML_INDEX 512 35 36 static inline unsigned ffs(unsigned x) 37 { 38 int pos = -1; 39 40 __asm__ __volatile__("bsf %1, %%eax; cmovnz %%eax, %0" 41 : "+r"(pos) : "rm"(x) : "eax"); 42 return pos + 1; 43 } 44 45 static inline void vmcall() 46 { 47 asm volatile("vmcall"); 48 } 49 50 void basic_guest_main() 51 { 52 report("Basic VMX test", 1); 53 } 54 55 int basic_exit_handler() 56 { 57 report("Basic VMX test", 0); 58 print_vmexit_info(); 59 return VMX_TEST_EXIT; 60 } 61 62 void vmenter_main() 63 { 64 u64 rax; 65 u64 rsp, resume_rsp; 66 67 report("test vmlaunch", 1); 68 69 asm volatile( 70 "mov %%rsp, %0\n\t" 71 "mov %3, %%rax\n\t" 72 "vmcall\n\t" 73 "mov %%rax, %1\n\t" 74 "mov %%rsp, %2\n\t" 75 : "=r"(rsp), "=r"(rax), "=r"(resume_rsp) 76 : "g"(0xABCD)); 77 report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp)); 78 } 79 80 int vmenter_exit_handler() 81 { 82 u64 guest_rip; 83 ulong reason; 84 85 guest_rip = vmcs_read(GUEST_RIP); 86 reason = vmcs_read(EXI_REASON) & 0xff; 87 switch (reason) { 88 case VMX_VMCALL: 89 if (regs.rax != 0xABCD) { 90 report("test vmresume", 0); 91 return VMX_TEST_VMEXIT; 92 } 93 regs.rax = 0xFFFF; 94 vmcs_write(GUEST_RIP, guest_rip + 3); 95 return VMX_TEST_RESUME; 96 default: 97 report("test vmresume", 0); 98 print_vmexit_info(); 99 } 100 return VMX_TEST_VMEXIT; 101 } 102 103 u32 preempt_scale; 104 volatile unsigned long long tsc_val; 105 volatile u32 preempt_val; 106 u64 saved_rip; 107 108 int preemption_timer_init() 109 { 110 if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) { 111 printf("\tPreemption timer is not supported\n"); 112 return VMX_TEST_EXIT; 113 } 114 vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT); 115 preempt_val = 10000000; 116 vmcs_write(PREEMPT_TIMER_VALUE, preempt_val); 117 preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F; 118 119 if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT)) 120 printf("\tSave preemption value is not supported\n"); 121 122 return VMX_TEST_START; 123 } 124 125 void preemption_timer_main() 126 { 127 tsc_val = rdtsc(); 128 if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) { 129 vmx_set_test_stage(0); 130 vmcall(); 131 if (vmx_get_test_stage() == 1) 132 vmcall(); 133 } 134 vmx_set_test_stage(1); 135 while (vmx_get_test_stage() == 1) { 136 if (((rdtsc() - tsc_val) >> preempt_scale) 137 > 10 * preempt_val) { 138 vmx_set_test_stage(2); 139 vmcall(); 140 } 141 } 142 tsc_val = rdtsc(); 143 asm volatile ("hlt"); 144 vmcall(); 145 vmx_set_test_stage(5); 146 vmcall(); 147 } 148 149 int preemption_timer_exit_handler() 150 { 151 bool guest_halted; 152 u64 guest_rip; 153 ulong reason; 154 u32 insn_len; 155 u32 ctrl_exit; 156 157 guest_rip = vmcs_read(GUEST_RIP); 158 reason = vmcs_read(EXI_REASON) & 0xff; 159 insn_len = vmcs_read(EXI_INST_LEN); 160 switch (reason) { 161 case VMX_PREEMPT: 162 switch (vmx_get_test_stage()) { 163 case 1: 164 case 2: 165 
report("busy-wait for preemption timer", 166 ((rdtsc() - tsc_val) >> preempt_scale) >= 167 preempt_val); 168 vmx_set_test_stage(3); 169 vmcs_write(PREEMPT_TIMER_VALUE, preempt_val); 170 return VMX_TEST_RESUME; 171 case 3: 172 guest_halted = 173 (vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT); 174 report("preemption timer during hlt", 175 ((rdtsc() - tsc_val) >> preempt_scale) >= 176 preempt_val && guest_halted); 177 vmx_set_test_stage(4); 178 vmcs_write(PIN_CONTROLS, 179 vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT); 180 vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE); 181 return VMX_TEST_RESUME; 182 case 4: 183 report("preemption timer with 0 value", 184 saved_rip == guest_rip); 185 break; 186 default: 187 report("Invalid stage.", false); 188 print_vmexit_info(); 189 break; 190 } 191 break; 192 case VMX_VMCALL: 193 vmcs_write(GUEST_RIP, guest_rip + insn_len); 194 switch (vmx_get_test_stage()) { 195 case 0: 196 report("Keep preemption value", 197 vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val); 198 vmx_set_test_stage(1); 199 vmcs_write(PREEMPT_TIMER_VALUE, preempt_val); 200 ctrl_exit = (vmcs_read(EXI_CONTROLS) | 201 EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr; 202 vmcs_write(EXI_CONTROLS, ctrl_exit); 203 return VMX_TEST_RESUME; 204 case 1: 205 report("Save preemption value", 206 vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val); 207 return VMX_TEST_RESUME; 208 case 2: 209 report("busy-wait for preemption timer", 0); 210 vmx_set_test_stage(3); 211 vmcs_write(PREEMPT_TIMER_VALUE, preempt_val); 212 return VMX_TEST_RESUME; 213 case 3: 214 report("preemption timer during hlt", 0); 215 vmx_set_test_stage(4); 216 /* fall through */ 217 case 4: 218 vmcs_write(PIN_CONTROLS, 219 vmcs_read(PIN_CONTROLS) | PIN_PREEMPT); 220 vmcs_write(PREEMPT_TIMER_VALUE, 0); 221 saved_rip = guest_rip + insn_len; 222 return VMX_TEST_RESUME; 223 case 5: 224 report("preemption timer with 0 value (vmcall stage 5)", 0); 225 break; 226 default: 227 // Should not reach here 228 report("unexpected stage, %d", false, 229 vmx_get_test_stage()); 230 print_vmexit_info(); 231 return VMX_TEST_VMEXIT; 232 } 233 break; 234 default: 235 report("Unknown exit reason, %ld", false, reason); 236 print_vmexit_info(); 237 } 238 vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT); 239 return VMX_TEST_VMEXIT; 240 } 241 242 void msr_bmp_init() 243 { 244 void *msr_bitmap; 245 u32 ctrl_cpu0; 246 247 msr_bitmap = alloc_page(); 248 memset(msr_bitmap, 0x0, PAGE_SIZE); 249 ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0); 250 ctrl_cpu0 |= CPU_MSR_BITMAP; 251 vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0); 252 vmcs_write(MSR_BITMAP, (u64)msr_bitmap); 253 } 254 255 static int test_ctrl_pat_init() 256 { 257 u64 ctrl_ent; 258 u64 ctrl_exi; 259 260 msr_bmp_init(); 261 if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) && 262 !(ctrl_exit_rev.clr & EXI_LOAD_PAT) && 263 !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) { 264 printf("\tSave/load PAT is not supported\n"); 265 return 1; 266 } 267 268 ctrl_ent = vmcs_read(ENT_CONTROLS); 269 ctrl_exi = vmcs_read(EXI_CONTROLS); 270 ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT; 271 ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT); 272 vmcs_write(ENT_CONTROLS, ctrl_ent); 273 vmcs_write(EXI_CONTROLS, ctrl_exi); 274 ia32_pat = rdmsr(MSR_IA32_CR_PAT); 275 vmcs_write(GUEST_PAT, 0x0); 276 vmcs_write(HOST_PAT, ia32_pat); 277 return VMX_TEST_START; 278 } 279 280 static void test_ctrl_pat_main() 281 { 282 u64 guest_ia32_pat; 283 284 guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT); 285 if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT)) 286 printf("\tENT_LOAD_PAT is not supported.\n"); 
	else {
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
		report("Entry load PAT", guest_ia32_pat == ia32_pat);
}

static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			report("Exit save PAT", guest_pat == 0x6);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else
			report("Exit load PAT", rdmsr(MSR_IA32_CR_PAT) == ia32_pat);
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

static int test_ctrl_efer_init()
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
	ia32_efer = rdmsr(MSR_EFER);
	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
	return VMX_TEST_START;
}

static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
		report("Entry load EFER", guest_ia32_efer == ia32_efer);
}

static int test_ctrl_efer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_efer;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_efer = vmcs_read(GUEST_EFER);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
			printf("\tEXI_SAVE_EFER is not supported\n");
			vmcs_write(GUEST_EFER, ia32_efer);
		} else {
			report("Exit save EFER", guest_efer == ia32_efer);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
			printf("\tEXI_LOAD_EFER is not supported\n");
			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
		} else {
			report("Exit load EFER", rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX));
		}
		vmcs_write(GUEST_EFER, ia32_efer);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

u32 guest_cr0, guest_cr4;
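/*
 * Illustrative sketch only (not called by any test): how an L1 hypervisor
 * typically arms CR0 shadowing, using the same CR0_MASK/CR0_READ_SHADOW
 * fields that cr_shadowing_exit_handler() programs below.  Bits set in
 * CR0_MASK are owned by L1: guest reads of those bits return the value in
 * CR0_READ_SHADOW, and a guest write that differs from the shadow in a
 * masked bit causes a VM exit with reason VMX_CR.
 */
static inline void example_shadow_cr0_ts(void)
{
	/* Own CR0.TS and expose the current value as the read shadow. */
	vmcs_write(CR0_MASK, X86_CR0_TS);
	vmcs_write(CR0_READ_SHADOW, read_cr0() & X86_CR0_TS);
}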
static void cr_shadowing_main()
{
	u32 cr0, cr4, tmp;

	// Test read through
	vmx_set_test_stage(0);
	guest_cr0 = read_cr0();
	if (vmx_get_test_stage() == 1)
		report("Read through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(1);
	guest_cr4 = read_cr4();
	if (vmx_get_test_stage() == 2)
		report("Read through CR4", 0);
	else
		vmcall();
	// Test write through
	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
	vmx_set_test_stage(2);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 3)
		report("Write through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(3);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 4)
		report("Write through CR4", 0);
	else
		vmcall();
	// Test read shadow
	vmx_set_test_stage(4);
	vmcall();
	cr0 = read_cr0();
	if (vmx_get_test_stage() != 5)
		report("Read shadowing CR0", cr0 == guest_cr0);
	vmx_set_test_stage(5);
	cr4 = read_cr4();
	if (vmx_get_test_stage() != 6)
		report("Read shadowing CR4", cr4 == guest_cr4);
	// Test write shadow (same value with shadow)
	vmx_set_test_stage(6);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 7)
		report("Write shadowing CR0 (same value with shadow)", 0);
	else
		vmcall();
	vmx_set_test_stage(7);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 8)
		report("Write shadowing CR4 (same value with shadow)", 0);
	else
		vmcall();
	// Test write shadow (different value)
	vmx_set_test_stage(8);
	tmp = guest_cr0 ^ X86_CR0_TS;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr0\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_TS", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	tmp = guest_cr0 ^ X86_CR0_MP;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr0\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_MP", vmx_get_test_stage() == 10);
	vmx_set_test_stage(10);
	tmp = guest_cr4 ^ X86_CR4_TSD;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr4\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_TSD", vmx_get_test_stage() == 11);
	vmx_set_test_stage(11);
	tmp = guest_cr4 ^ X86_CR4_DE;
	asm volatile("mov %0, %%rsi\n\t"
		"mov %%rsi, %%cr4\n\t"
		::"m"(tmp)
		:"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_DE", vmx_get_test_stage() == 12);
}

static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("Read through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 1:
			report("Read through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 2:
			report("Write through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 3:
			report("Write through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 4:
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			report("Write shadowing CR0 (same value)",
			       guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)));
			break;
		case 7:
report("Write shadowing CR4 (same value)", 528 guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE))); 529 break; 530 default: 531 // Should not reach here 532 report("unexpected stage, %d", false, 533 vmx_get_test_stage()); 534 print_vmexit_info(); 535 return VMX_TEST_VMEXIT; 536 } 537 vmcs_write(GUEST_RIP, guest_rip + insn_len); 538 return VMX_TEST_RESUME; 539 case VMX_CR: 540 switch (vmx_get_test_stage()) { 541 case 4: 542 report("Read shadowing CR0", 0); 543 vmx_inc_test_stage(); 544 break; 545 case 5: 546 report("Read shadowing CR4", 0); 547 vmx_inc_test_stage(); 548 break; 549 case 6: 550 report("Write shadowing CR0 (same value)", 0); 551 vmx_inc_test_stage(); 552 break; 553 case 7: 554 report("Write shadowing CR4 (same value)", 0); 555 vmx_inc_test_stage(); 556 break; 557 case 8: 558 case 9: 559 // 0x600 encodes "mov %esi, %cr0" 560 if (exit_qual == 0x600) 561 vmx_inc_test_stage(); 562 break; 563 case 10: 564 case 11: 565 // 0x604 encodes "mov %esi, %cr4" 566 if (exit_qual == 0x604) 567 vmx_inc_test_stage(); 568 break; 569 default: 570 // Should not reach here 571 report("unexpected stage, %d", false, 572 vmx_get_test_stage()); 573 print_vmexit_info(); 574 return VMX_TEST_VMEXIT; 575 } 576 vmcs_write(GUEST_RIP, guest_rip + insn_len); 577 return VMX_TEST_RESUME; 578 default: 579 report("Unknown exit reason, %ld", false, reason); 580 print_vmexit_info(); 581 } 582 return VMX_TEST_VMEXIT; 583 } 584 585 static int iobmp_init() 586 { 587 u32 ctrl_cpu0; 588 589 io_bitmap_a = alloc_page(); 590 io_bitmap_b = alloc_page(); 591 memset(io_bitmap_a, 0x0, PAGE_SIZE); 592 memset(io_bitmap_b, 0x0, PAGE_SIZE); 593 ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0); 594 ctrl_cpu0 |= CPU_IO_BITMAP; 595 ctrl_cpu0 &= (~CPU_IO); 596 vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0); 597 vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a); 598 vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b); 599 return VMX_TEST_START; 600 } 601 602 static void iobmp_main() 603 { 604 // stage 0, test IO pass 605 vmx_set_test_stage(0); 606 inb(0x5000); 607 outb(0x0, 0x5000); 608 report("I/O bitmap - I/O pass", vmx_get_test_stage() == 0); 609 // test IO width, in/out 610 ((u8 *)io_bitmap_a)[0] = 0xFF; 611 vmx_set_test_stage(2); 612 inb(0x0); 613 report("I/O bitmap - trap in", vmx_get_test_stage() == 3); 614 vmx_set_test_stage(3); 615 outw(0x0, 0x0); 616 report("I/O bitmap - trap out", vmx_get_test_stage() == 4); 617 vmx_set_test_stage(4); 618 inl(0x0); 619 report("I/O bitmap - I/O width, long", vmx_get_test_stage() == 5); 620 // test low/high IO port 621 vmx_set_test_stage(5); 622 ((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8)); 623 inb(0x5000); 624 report("I/O bitmap - I/O port, low part", vmx_get_test_stage() == 6); 625 vmx_set_test_stage(6); 626 ((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8)); 627 inb(0x9000); 628 report("I/O bitmap - I/O port, high part", vmx_get_test_stage() == 7); 629 // test partial pass 630 vmx_set_test_stage(7); 631 inl(0x4FFF); 632 report("I/O bitmap - partial pass", vmx_get_test_stage() == 8); 633 // test overrun 634 vmx_set_test_stage(8); 635 memset(io_bitmap_a, 0x0, PAGE_SIZE); 636 memset(io_bitmap_b, 0x0, PAGE_SIZE); 637 inl(0xFFFF); 638 report("I/O bitmap - overrun", vmx_get_test_stage() == 9); 639 vmx_set_test_stage(9); 640 vmcall(); 641 outb(0x0, 0x0); 642 report("I/O bitmap - ignore unconditional exiting", 643 vmx_get_test_stage() == 9); 644 vmx_set_test_stage(10); 645 vmcall(); 646 outb(0x0, 0x0); 647 report("I/O bitmap - unconditional exiting", 648 vmx_get_test_stage() == 11); 649 } 650 651 static int 
iobmp_exit_handler() 652 { 653 u64 guest_rip; 654 ulong reason, exit_qual; 655 u32 insn_len, ctrl_cpu0; 656 657 guest_rip = vmcs_read(GUEST_RIP); 658 reason = vmcs_read(EXI_REASON) & 0xff; 659 exit_qual = vmcs_read(EXI_QUALIFICATION); 660 insn_len = vmcs_read(EXI_INST_LEN); 661 switch (reason) { 662 case VMX_IO: 663 switch (vmx_get_test_stage()) { 664 case 0: 665 case 1: 666 vmx_inc_test_stage(); 667 break; 668 case 2: 669 report("I/O bitmap - I/O width, byte", 670 (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_BYTE); 671 report("I/O bitmap - I/O direction, in", exit_qual & VMX_IO_IN); 672 vmx_inc_test_stage(); 673 break; 674 case 3: 675 report("I/O bitmap - I/O width, word", 676 (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_WORD); 677 report("I/O bitmap - I/O direction, out", 678 !(exit_qual & VMX_IO_IN)); 679 vmx_inc_test_stage(); 680 break; 681 case 4: 682 report("I/O bitmap - I/O width, long", 683 (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_LONG); 684 vmx_inc_test_stage(); 685 break; 686 case 5: 687 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000) 688 vmx_inc_test_stage(); 689 break; 690 case 6: 691 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000) 692 vmx_inc_test_stage(); 693 break; 694 case 7: 695 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF) 696 vmx_inc_test_stage(); 697 break; 698 case 8: 699 if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF) 700 vmx_inc_test_stage(); 701 break; 702 case 9: 703 case 10: 704 ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0); 705 vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO); 706 vmx_inc_test_stage(); 707 break; 708 default: 709 // Should not reach here 710 report("unexpected stage, %d", false, 711 vmx_get_test_stage()); 712 print_vmexit_info(); 713 return VMX_TEST_VMEXIT; 714 } 715 vmcs_write(GUEST_RIP, guest_rip + insn_len); 716 return VMX_TEST_RESUME; 717 case VMX_VMCALL: 718 switch (vmx_get_test_stage()) { 719 case 9: 720 ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0); 721 ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP; 722 vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0); 723 break; 724 case 10: 725 ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0); 726 ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO; 727 vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0); 728 break; 729 default: 730 // Should not reach here 731 report("unexpected stage, %d", false, 732 vmx_get_test_stage()); 733 print_vmexit_info(); 734 return VMX_TEST_VMEXIT; 735 } 736 vmcs_write(GUEST_RIP, guest_rip + insn_len); 737 return VMX_TEST_RESUME; 738 default: 739 printf("guest_rip = %#lx\n", guest_rip); 740 printf("\tERROR : Undefined exit reason, reason = %ld.\n", reason); 741 break; 742 } 743 return VMX_TEST_VMEXIT; 744 } 745 746 #define INSN_CPU0 0 747 #define INSN_CPU1 1 748 #define INSN_ALWAYS_TRAP 2 749 750 #define FIELD_EXIT_QUAL (1 << 0) 751 #define FIELD_INSN_INFO (1 << 1) 752 753 asm( 754 "insn_hlt: hlt;ret\n\t" 755 "insn_invlpg: invlpg 0x12345678;ret\n\t" 756 "insn_mwait: xor %eax, %eax; xor %ecx, %ecx; mwait;ret\n\t" 757 "insn_rdpmc: xor %ecx, %ecx; rdpmc;ret\n\t" 758 "insn_rdtsc: rdtsc;ret\n\t" 759 "insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t" 760 "insn_cr3_store: mov %cr3,%rax;ret\n\t" 761 #ifdef __x86_64__ 762 "insn_cr8_load: mov %rax,%cr8;ret\n\t" 763 "insn_cr8_store: mov %cr8,%rax;ret\n\t" 764 #endif 765 "insn_monitor: xor %eax, %eax; xor %ecx, %ecx; xor %edx, %edx; monitor;ret\n\t" 766 "insn_pause: pause;ret\n\t" 767 "insn_wbinvd: wbinvd;ret\n\t" 768 "insn_cpuid: mov $10, %eax; cpuid;ret\n\t" 769 "insn_invd: invd;ret\n\t" 770 "insn_sgdt: sgdt 
gdt64_desc;ret\n\t"
	"insn_lgdt: lgdt gdt64_desc;ret\n\t"
	"insn_sidt: sidt idt_descr;ret\n\t"
	"insn_lidt: lidt idt_descr;ret\n\t"
	"insn_sldt: sldt %ax;ret\n\t"
	"insn_lldt: xor %eax, %eax; lldt %ax;ret\n\t"
	"insn_str: str %ax;ret\n\t"
	"insn_rdrand: rdrand %rax;ret\n\t"
	"insn_rdseed: rdseed %rax;ret\n\t"
);
extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_cr3_load();
extern void insn_cr3_store();
#ifdef __x86_64__
extern void insn_cr8_load();
extern void insn_cr8_store();
#endif
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_sgdt();
extern void insn_lgdt();
extern void insn_sidt();
extern void insn_lidt();
extern void insn_sldt();
extern void insn_lldt();
extern void insn_str();
extern void insn_cpuid();
extern void insn_invd();
extern void insn_rdrand();
extern void insn_rdseed();

u32 cur_insn;
u64 cr3;

struct insn_table {
	const char *name;
	u32 flag;
	void (*insn_func)();
	u32 type;
	u32 reason;
	ulong exit_qual;
	u32 insn_info;
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define which fields
	// need to be tested; the exit reason is always tested.
	u32 test_field;
};

/*
 * Add more instruction intercept test cases here.  Each element in this
 * table is:
 * name / control flag / insn function / type / exit reason /
 * exit qualification / instruction info / field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in the exit handler.  If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT", CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
		FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
		FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
		FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
		FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	{"DESC_TABLE (SGDT)", CPU_DESC_TABLE, insn_sgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LGDT)", CPU_DESC_TABLE, insn_lgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SIDT)", CPU_DESC_TABLE, insn_sidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LIDT)", CPU_DESC_TABLE, insn_lidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SLDT)", CPU_DESC_TABLE, insn_sldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (LLDT)", CPU_DESC_TABLE, insn_lldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (STR)", CPU_DESC_TABLE, insn_str, INSN_CPU1, 47, 0, 0, 0},
	/* LTR causes a #GP if done with a busy selector, so it is not tested.
*/ 860 {"RDRAND", CPU_RDRAND, insn_rdrand, INSN_CPU1, VMX_RDRAND, 0, 0, 0}, 861 {"RDSEED", CPU_RDSEED, insn_rdseed, INSN_CPU1, VMX_RDSEED, 0, 0, 0}, 862 // Instructions always trap 863 {"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0}, 864 {"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0}, 865 // Instructions never trap 866 {NULL}, 867 }; 868 869 static int insn_intercept_init() 870 { 871 u32 ctrl_cpu; 872 873 ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY; 874 ctrl_cpu &= ctrl_cpu_rev[0].clr; 875 vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu); 876 vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set); 877 cr3 = read_cr3(); 878 return VMX_TEST_START; 879 } 880 881 static void insn_intercept_main() 882 { 883 for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) { 884 vmx_set_test_stage(cur_insn * 2); 885 if ((insn_table[cur_insn].type == INSN_CPU0 && 886 !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) || 887 (insn_table[cur_insn].type == INSN_CPU1 && 888 !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) { 889 printf("\tCPU_CTRL%d.CPU_%s is not supported.\n", 890 insn_table[cur_insn].type - INSN_CPU0, 891 insn_table[cur_insn].name); 892 continue; 893 } 894 895 if ((insn_table[cur_insn].type == INSN_CPU0 && 896 !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) || 897 (insn_table[cur_insn].type == INSN_CPU1 && 898 !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) { 899 /* skip hlt, it stalls the guest and is tested below */ 900 if (insn_table[cur_insn].insn_func != insn_hlt) 901 insn_table[cur_insn].insn_func(); 902 report("execute %s", vmx_get_test_stage() == cur_insn * 2, 903 insn_table[cur_insn].name); 904 } else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP) 905 printf("\tCPU_CTRL%d.CPU_%s always traps.\n", 906 insn_table[cur_insn].type - INSN_CPU0, 907 insn_table[cur_insn].name); 908 909 vmcall(); 910 911 insn_table[cur_insn].insn_func(); 912 report("intercept %s", vmx_get_test_stage() == cur_insn * 2 + 1, 913 insn_table[cur_insn].name); 914 915 vmx_set_test_stage(cur_insn * 2 + 1); 916 vmcall(); 917 } 918 } 919 920 static int insn_intercept_exit_handler() 921 { 922 u64 guest_rip; 923 u32 reason; 924 ulong exit_qual; 925 u32 insn_len; 926 u32 insn_info; 927 bool pass; 928 929 guest_rip = vmcs_read(GUEST_RIP); 930 reason = vmcs_read(EXI_REASON) & 0xff; 931 exit_qual = vmcs_read(EXI_QUALIFICATION); 932 insn_len = vmcs_read(EXI_INST_LEN); 933 insn_info = vmcs_read(EXI_INST_INFO); 934 935 if (reason == VMX_VMCALL) { 936 u32 val = 0; 937 938 if (insn_table[cur_insn].type == INSN_CPU0) 939 val = vmcs_read(CPU_EXEC_CTRL0); 940 else if (insn_table[cur_insn].type == INSN_CPU1) 941 val = vmcs_read(CPU_EXEC_CTRL1); 942 943 if (vmx_get_test_stage() & 1) 944 val &= ~insn_table[cur_insn].flag; 945 else 946 val |= insn_table[cur_insn].flag; 947 948 if (insn_table[cur_insn].type == INSN_CPU0) 949 vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set); 950 else if (insn_table[cur_insn].type == INSN_CPU1) 951 vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set); 952 } else { 953 pass = (cur_insn * 2 == vmx_get_test_stage()) && 954 insn_table[cur_insn].reason == reason; 955 if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL && 956 insn_table[cur_insn].exit_qual != exit_qual) 957 pass = false; 958 if (insn_table[cur_insn].test_field & FIELD_INSN_INFO && 959 insn_table[cur_insn].insn_info != insn_info) 960 pass = false; 961 if (pass) 962 vmx_inc_test_stage(); 963 } 964 vmcs_write(GUEST_RIP, guest_rip + insn_len); 965 return VMX_TEST_RESUME; 966 } 967 968 969 /* Enables EPT and sets up 
the identity map. */ 970 static int setup_ept(bool enable_ad) 971 { 972 unsigned long end_of_memory; 973 u32 ctrl_cpu[2]; 974 975 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || 976 !(ctrl_cpu_rev[1].clr & CPU_EPT)) { 977 printf("\tEPT is not supported"); 978 return 1; 979 } 980 981 982 if (!(ept_vpid.val & EPT_CAP_UC) && 983 !(ept_vpid.val & EPT_CAP_WB)) { 984 printf("\tEPT paging-structure memory type " 985 "UC&WB are not supported\n"); 986 return 1; 987 } 988 if (ept_vpid.val & EPT_CAP_UC) 989 eptp = EPT_MEM_TYPE_UC; 990 else 991 eptp = EPT_MEM_TYPE_WB; 992 if (!(ept_vpid.val & EPT_CAP_PWL4)) { 993 printf("\tPWL4 is not supported\n"); 994 return 1; 995 } 996 ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0); 997 ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1); 998 ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY) 999 & ctrl_cpu_rev[0].clr; 1000 ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT) 1001 & ctrl_cpu_rev[1].clr; 1002 vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]); 1003 vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]); 1004 eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT); 1005 pml4 = alloc_page(); 1006 memset(pml4, 0, PAGE_SIZE); 1007 eptp |= virt_to_phys(pml4); 1008 if (enable_ad) 1009 eptp |= EPTP_AD_FLAG; 1010 vmcs_write(EPTP, eptp); 1011 end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE); 1012 if (end_of_memory < (1ul << 32)) 1013 end_of_memory = (1ul << 32); 1014 /* Cannot use large EPT pages if we need to track EPT 1015 * accessed/dirty bits at 4K granularity. 1016 */ 1017 setup_ept_range(pml4, 0, end_of_memory, 0, 1018 !enable_ad && ept_2m_supported(), 1019 EPT_WA | EPT_RA | EPT_EA); 1020 return 0; 1021 } 1022 1023 static void ept_enable_ad_bits(void) 1024 { 1025 eptp |= EPTP_AD_FLAG; 1026 vmcs_write(EPTP, eptp); 1027 } 1028 1029 static void ept_disable_ad_bits(void) 1030 { 1031 eptp &= ~EPTP_AD_FLAG; 1032 vmcs_write(EPTP, eptp); 1033 } 1034 1035 static void ept_enable_ad_bits_or_skip_test(void) 1036 { 1037 if (!ept_ad_bits_supported()) 1038 test_skip("EPT AD bits not supported."); 1039 ept_enable_ad_bits(); 1040 } 1041 1042 static int apic_version; 1043 1044 static int ept_init_common(bool have_ad) 1045 { 1046 if (setup_ept(have_ad)) 1047 return VMX_TEST_EXIT; 1048 data_page1 = alloc_page(); 1049 data_page2 = alloc_page(); 1050 memset(data_page1, 0x0, PAGE_SIZE); 1051 memset(data_page2, 0x0, PAGE_SIZE); 1052 *((u32 *)data_page1) = MAGIC_VAL_1; 1053 *((u32 *)data_page2) = MAGIC_VAL_2; 1054 install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2, 1055 EPT_RA | EPT_WA | EPT_EA); 1056 1057 apic_version = apic_read(APIC_LVR); 1058 return VMX_TEST_START; 1059 } 1060 1061 static int ept_init() 1062 { 1063 return ept_init_common(false); 1064 } 1065 1066 static void ept_common() 1067 { 1068 vmx_set_test_stage(0); 1069 if (*((u32 *)data_page2) != MAGIC_VAL_1 || 1070 *((u32 *)data_page1) != MAGIC_VAL_1) 1071 report("EPT basic framework - read", 0); 1072 else { 1073 *((u32 *)data_page2) = MAGIC_VAL_3; 1074 vmcall(); 1075 if (vmx_get_test_stage() == 1) { 1076 if (*((u32 *)data_page1) == MAGIC_VAL_3 && 1077 *((u32 *)data_page2) == MAGIC_VAL_2) 1078 report("EPT basic framework", 1); 1079 else 1080 report("EPT basic framework - remap", 1); 1081 } 1082 } 1083 // Test EPT Misconfigurations 1084 vmx_set_test_stage(1); 1085 vmcall(); 1086 *((u32 *)data_page1) = MAGIC_VAL_1; 1087 if (vmx_get_test_stage() != 2) { 1088 report("EPT misconfigurations", 0); 1089 goto t1; 1090 } 1091 vmx_set_test_stage(2); 1092 vmcall(); 1093 *((u32 *)data_page1) = MAGIC_VAL_1; 1094 report("EPT misconfigurations", vmx_get_test_stage() == 3); 1095 t1: 1096 // Test 
EPT violation
	vmx_set_test_stage(3);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT violation - page permission", vmx_get_test_stage() == 4);
	// Violation caused by EPT paging structure
	vmx_set_test_stage(4);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_2;
	report("EPT violation - paging structure", vmx_get_test_stage() == 5);
}

static void ept_main()
{
	ept_common();

	// Test EPT access to L1 MMIO
	vmx_set_test_stage(6);
	report("EPT - MMIO access", *((u32 *)0xfee00030UL) == apic_version);

	// Test invalid operand for INVEPT
	vmcall();
	report("EPT - unsupported INVEPT", vmx_get_test_stage() == 7);
}

bool invept_test(int type, u64 eptp)
{
	bool ret, supported;

	supported = ept_vpid.val & (EPT_CAP_INVEPT_SINGLE >> INVEPT_SINGLE << type);
	ret = invept(type, eptp);

	if (ret == !supported)
		return false;

	if (!supported)
		printf("WARNING: unsupported invept passed!\n");
	else
		printf("WARNING: invept failed!\n");

	return true;
}
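/*
 * Background for the PML test below (a summary, not derived from this file):
 * the page-modification log is a single 4K page holding up to PML_INDEX (512)
 * guest-physical-address entries.  GUEST_PML_INDEX starts at PML_INDEX - 1 and
 * the CPU decrements it each time it logs a newly dirtied GPA; once the whole
 * page has been consumed, the next logging attempt causes a VMX_PML_FULL exit
 * instead, and the handler below rearms the index.
 */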
static int pml_exit_handler(void)
{
	u16 index, count;
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u64 *pmlbuf = pml_log;
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u64 guest_cr3 = vmcs_read(GUEST_CR3);
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			index = vmcs_read(GUEST_PML_INDEX);
			for (count = index + 1; count < PML_INDEX; count++) {
				if (pmlbuf[count] == (u64)data_page2) {
					vmx_inc_test_stage();
					clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
					break;
				}
			}
			break;
		case 1:
			index = vmcs_read(GUEST_PML_INDEX);
			/* Keep clearing the dirty bit until the log overflows */
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			break;
		default:
			report("unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_PML_FULL:
		vmx_inc_test_stage();
		vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);
		return VMX_TEST_RESUME;
	default:
		report("Unknown exit reason, %ld", false, reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

static int ept_exit_handler_common(bool have_ad)
{
	u64 guest_rip;
	u64 guest_cr3;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	guest_cr3 = vmcs_read(GUEST_CR3);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page1,
				     have_ad ? EPT_ACCESS_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page2,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			if (have_ad)
				ept_sync(INVEPT_SINGLE, eptp);
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
			    *((u32 *)data_page2) == MAGIC_VAL_2) {
				vmx_inc_test_stage();
				install_ept(pml4, (unsigned long)data_page2,
					    (unsigned long)data_page2,
					    EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write", 0);
			break;
		case 1:
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1, EPT_WA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 2:
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1,
				    EPT_RA | EPT_WA | EPT_EA |
				    (2 << EPT_MEM_TYPE_SHIFT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 3:
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
						1, &data_page1_pte));
			set_ept_pte(pml4, (unsigned long)data_page1,
				    1, data_page1_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			TEST_ASSERT(get_ept_pte(pml4, (unsigned long)data_page1,
						2, &data_page1_pte));
			data_page1_pte &= PAGE_MASK;
			TEST_ASSERT(get_ept_pte(pml4, data_page1_pte,
						2, &data_page1_pte_pte));
			set_ept_pte(pml4, data_page1_pte, 2,
				    data_page1_pte_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 6:
			if (!invept_test(0, eptp))
				vmx_inc_test_stage();
			break;
		// Should not reach here
		default:
			report("ERROR - unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			vmx_inc_test_stage();
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1,
				    EPT_RA | EPT_WA | EPT_EA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		// Should not reach here
		default:
			report("ERROR - unexpected stage, %d.", false,
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch (vmx_get_test_stage()) {
		case 3:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					  EPT_VLT_PADDR))
				vmx_inc_test_stage();
			set_ept_pte(pml4, (unsigned long)data_page1,
				    1, data_page1_pte | (EPT_PRESENT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_RD |
					  (have_ad ?
EPT_VLT_WR : 0) | 1304 EPT_VLT_LADDR_VLD)) 1305 vmx_inc_test_stage(); 1306 set_ept_pte(pml4, data_page1_pte, 2, 1307 data_page1_pte_pte | (EPT_PRESENT)); 1308 ept_sync(INVEPT_SINGLE, eptp); 1309 break; 1310 default: 1311 // Should not reach here 1312 report("ERROR : unexpected stage, %d", false, 1313 vmx_get_test_stage()); 1314 print_vmexit_info(); 1315 return VMX_TEST_VMEXIT; 1316 } 1317 return VMX_TEST_RESUME; 1318 default: 1319 report("Unknown exit reason, %ld", false, reason); 1320 print_vmexit_info(); 1321 } 1322 return VMX_TEST_VMEXIT; 1323 } 1324 1325 static int ept_exit_handler() 1326 { 1327 return ept_exit_handler_common(false); 1328 } 1329 1330 static int eptad_init() 1331 { 1332 int r = ept_init_common(true); 1333 1334 if (r == VMX_TEST_EXIT) 1335 return r; 1336 1337 if ((rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & EPT_CAP_AD_FLAG) == 0) { 1338 printf("\tEPT A/D bits are not supported"); 1339 return VMX_TEST_EXIT; 1340 } 1341 1342 return r; 1343 } 1344 1345 static int pml_init() 1346 { 1347 u32 ctrl_cpu; 1348 int r = eptad_init(); 1349 1350 if (r == VMX_TEST_EXIT) 1351 return r; 1352 1353 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || 1354 !(ctrl_cpu_rev[1].clr & CPU_PML)) { 1355 printf("\tPML is not supported"); 1356 return VMX_TEST_EXIT; 1357 } 1358 1359 pml_log = alloc_page(); 1360 memset(pml_log, 0x0, PAGE_SIZE); 1361 vmcs_write(PMLADDR, (u64)pml_log); 1362 vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1); 1363 1364 ctrl_cpu = vmcs_read(CPU_EXEC_CTRL1) | CPU_PML; 1365 vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu); 1366 1367 return VMX_TEST_START; 1368 } 1369 1370 static void pml_main() 1371 { 1372 int count = 0; 1373 1374 vmx_set_test_stage(0); 1375 *((u32 *)data_page2) = 0x1; 1376 vmcall(); 1377 report("PML - Dirty GPA Logging", vmx_get_test_stage() == 1); 1378 1379 while (vmx_get_test_stage() == 1) { 1380 vmcall(); 1381 *((u32 *)data_page2) = 0x1; 1382 if (count++ > PML_INDEX) 1383 break; 1384 } 1385 report("PML Full Event", vmx_get_test_stage() == 2); 1386 } 1387 1388 static void eptad_main() 1389 { 1390 ept_common(); 1391 } 1392 1393 static int eptad_exit_handler() 1394 { 1395 return ept_exit_handler_common(true); 1396 } 1397 1398 bool invvpid_test(int type, u16 vpid) 1399 { 1400 bool ret, supported; 1401 1402 supported = ept_vpid.val & 1403 (VPID_CAP_INVVPID_ADDR >> INVVPID_ADDR << type); 1404 ret = invvpid(type, vpid, 0); 1405 1406 if (ret == !supported) 1407 return false; 1408 1409 if (!supported) 1410 printf("WARNING: unsupported invvpid passed!\n"); 1411 else 1412 printf("WARNING: invvpid failed!\n"); 1413 1414 return true; 1415 } 1416 1417 static int vpid_init() 1418 { 1419 u32 ctrl_cpu1; 1420 1421 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || 1422 !(ctrl_cpu_rev[1].clr & CPU_VPID)) { 1423 printf("\tVPID is not supported"); 1424 return VMX_TEST_EXIT; 1425 } 1426 1427 ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1); 1428 ctrl_cpu1 |= CPU_VPID; 1429 vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1); 1430 return VMX_TEST_START; 1431 } 1432 1433 static void vpid_main() 1434 { 1435 vmx_set_test_stage(0); 1436 vmcall(); 1437 report("INVVPID SINGLE ADDRESS", vmx_get_test_stage() == 1); 1438 vmx_set_test_stage(2); 1439 vmcall(); 1440 report("INVVPID SINGLE", vmx_get_test_stage() == 3); 1441 vmx_set_test_stage(4); 1442 vmcall(); 1443 report("INVVPID ALL", vmx_get_test_stage() == 5); 1444 } 1445 1446 static int vpid_exit_handler() 1447 { 1448 u64 guest_rip; 1449 ulong reason; 1450 u32 insn_len; 1451 1452 guest_rip = vmcs_read(GUEST_RIP); 1453 reason = vmcs_read(EXI_REASON) & 0xff; 1454 insn_len = vmcs_read(EXI_INST_LEN); 
1455 1456 switch (reason) { 1457 case VMX_VMCALL: 1458 switch(vmx_get_test_stage()) { 1459 case 0: 1460 if (!invvpid_test(INVVPID_ADDR, 1)) 1461 vmx_inc_test_stage(); 1462 break; 1463 case 2: 1464 if (!invvpid_test(INVVPID_CONTEXT_GLOBAL, 1)) 1465 vmx_inc_test_stage(); 1466 break; 1467 case 4: 1468 if (!invvpid_test(INVVPID_ALL, 1)) 1469 vmx_inc_test_stage(); 1470 break; 1471 default: 1472 report("ERROR: unexpected stage, %d", false, 1473 vmx_get_test_stage()); 1474 print_vmexit_info(); 1475 return VMX_TEST_VMEXIT; 1476 } 1477 vmcs_write(GUEST_RIP, guest_rip + insn_len); 1478 return VMX_TEST_RESUME; 1479 default: 1480 report("Unknown exit reason, %ld", false, reason); 1481 print_vmexit_info(); 1482 } 1483 return VMX_TEST_VMEXIT; 1484 } 1485 1486 #define TIMER_VECTOR 222 1487 1488 static volatile bool timer_fired; 1489 1490 static void timer_isr(isr_regs_t *regs) 1491 { 1492 timer_fired = true; 1493 apic_write(APIC_EOI, 0); 1494 } 1495 1496 static int interrupt_init(struct vmcs *vmcs) 1497 { 1498 msr_bmp_init(); 1499 vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT); 1500 handle_irq(TIMER_VECTOR, timer_isr); 1501 return VMX_TEST_START; 1502 } 1503 1504 static void interrupt_main(void) 1505 { 1506 long long start, loops; 1507 1508 vmx_set_test_stage(0); 1509 1510 apic_write(APIC_LVTT, TIMER_VECTOR); 1511 irq_enable(); 1512 1513 apic_write(APIC_TMICT, 1); 1514 for (loops = 0; loops < 10000000 && !timer_fired; loops++) 1515 asm volatile ("nop"); 1516 report("direct interrupt while running guest", timer_fired); 1517 1518 apic_write(APIC_TMICT, 0); 1519 irq_disable(); 1520 vmcall(); 1521 timer_fired = false; 1522 apic_write(APIC_TMICT, 1); 1523 for (loops = 0; loops < 10000000 && !timer_fired; loops++) 1524 asm volatile ("nop"); 1525 report("intercepted interrupt while running guest", timer_fired); 1526 1527 irq_enable(); 1528 apic_write(APIC_TMICT, 0); 1529 irq_disable(); 1530 vmcall(); 1531 timer_fired = false; 1532 start = rdtsc(); 1533 apic_write(APIC_TMICT, 1000000); 1534 1535 asm volatile ("sti; hlt"); 1536 1537 report("direct interrupt + hlt", 1538 rdtsc() - start > 1000000 && timer_fired); 1539 1540 apic_write(APIC_TMICT, 0); 1541 irq_disable(); 1542 vmcall(); 1543 timer_fired = false; 1544 start = rdtsc(); 1545 apic_write(APIC_TMICT, 1000000); 1546 1547 asm volatile ("sti; hlt"); 1548 1549 report("intercepted interrupt + hlt", 1550 rdtsc() - start > 10000 && timer_fired); 1551 1552 apic_write(APIC_TMICT, 0); 1553 irq_disable(); 1554 vmcall(); 1555 timer_fired = false; 1556 start = rdtsc(); 1557 apic_write(APIC_TMICT, 1000000); 1558 1559 irq_enable(); 1560 asm volatile ("nop"); 1561 vmcall(); 1562 1563 report("direct interrupt + activity state hlt", 1564 rdtsc() - start > 10000 && timer_fired); 1565 1566 apic_write(APIC_TMICT, 0); 1567 irq_disable(); 1568 vmcall(); 1569 timer_fired = false; 1570 start = rdtsc(); 1571 apic_write(APIC_TMICT, 1000000); 1572 1573 irq_enable(); 1574 asm volatile ("nop"); 1575 vmcall(); 1576 1577 report("intercepted interrupt + activity state hlt", 1578 rdtsc() - start > 10000 && timer_fired); 1579 1580 apic_write(APIC_TMICT, 0); 1581 irq_disable(); 1582 vmx_set_test_stage(7); 1583 vmcall(); 1584 timer_fired = false; 1585 apic_write(APIC_TMICT, 1); 1586 for (loops = 0; loops < 10000000 && !timer_fired; loops++) 1587 asm volatile ("nop"); 1588 report("running a guest with interrupt acknowledgement set", timer_fired); 1589 1590 apic_write(APIC_TMICT, 0); 1591 irq_enable(); 1592 timer_fired = false; 1593 vmcall(); 1594 report("Inject an event to a 
halted guest", timer_fired); 1595 } 1596 1597 static int interrupt_exit_handler(void) 1598 { 1599 u64 guest_rip = vmcs_read(GUEST_RIP); 1600 ulong reason = vmcs_read(EXI_REASON) & 0xff; 1601 u32 insn_len = vmcs_read(EXI_INST_LEN); 1602 1603 switch (reason) { 1604 case VMX_VMCALL: 1605 switch (vmx_get_test_stage()) { 1606 case 0: 1607 case 2: 1608 case 5: 1609 vmcs_write(PIN_CONTROLS, 1610 vmcs_read(PIN_CONTROLS) | PIN_EXTINT); 1611 break; 1612 case 7: 1613 vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA); 1614 vmcs_write(PIN_CONTROLS, 1615 vmcs_read(PIN_CONTROLS) | PIN_EXTINT); 1616 break; 1617 case 1: 1618 case 3: 1619 vmcs_write(PIN_CONTROLS, 1620 vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT); 1621 break; 1622 case 4: 1623 case 6: 1624 vmcs_write(GUEST_ACTV_STATE, ACTV_HLT); 1625 break; 1626 1627 case 8: 1628 vmcs_write(GUEST_ACTV_STATE, ACTV_HLT); 1629 vmcs_write(ENT_INTR_INFO, 1630 TIMER_VECTOR | 1631 (VMX_INTR_TYPE_EXT_INTR << INTR_INFO_INTR_TYPE_SHIFT) | 1632 INTR_INFO_VALID_MASK); 1633 break; 1634 } 1635 vmx_inc_test_stage(); 1636 vmcs_write(GUEST_RIP, guest_rip + insn_len); 1637 return VMX_TEST_RESUME; 1638 case VMX_EXTINT: 1639 if (vmcs_read(EXI_CONTROLS) & EXI_INTA) { 1640 int vector = vmcs_read(EXI_INTR_INFO) & 0xff; 1641 handle_external_interrupt(vector); 1642 } else { 1643 irq_enable(); 1644 asm volatile ("nop"); 1645 irq_disable(); 1646 } 1647 if (vmx_get_test_stage() >= 2) 1648 vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE); 1649 return VMX_TEST_RESUME; 1650 default: 1651 report("Unknown exit reason, %ld", false, reason); 1652 print_vmexit_info(); 1653 } 1654 1655 return VMX_TEST_VMEXIT; 1656 } 1657 1658 static int dbgctls_init(struct vmcs *vmcs) 1659 { 1660 u64 dr7 = 0x402; 1661 u64 zero = 0; 1662 1663 msr_bmp_init(); 1664 asm volatile( 1665 "mov %0,%%dr0\n\t" 1666 "mov %0,%%dr1\n\t" 1667 "mov %0,%%dr2\n\t" 1668 "mov %1,%%dr7\n\t" 1669 : : "r" (zero), "r" (dr7)); 1670 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1); 1671 vmcs_write(GUEST_DR7, 0x404); 1672 vmcs_write(GUEST_DEBUGCTL, 0x2); 1673 1674 vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS); 1675 vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_SAVE_DBGCTLS); 1676 1677 return VMX_TEST_START; 1678 } 1679 1680 static void dbgctls_main(void) 1681 { 1682 u64 dr7, debugctl; 1683 1684 asm volatile("mov %%dr7,%0" : "=r" (dr7)); 1685 debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 1686 /* Commented out: KVM does not support DEBUGCTL so far */ 1687 (void)debugctl; 1688 report("Load debug controls", dr7 == 0x404 /* && debugctl == 0x2 */); 1689 1690 dr7 = 0x408; 1691 asm volatile("mov %0,%%dr7" : : "r" (dr7)); 1692 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3); 1693 1694 vmx_set_test_stage(0); 1695 vmcall(); 1696 report("Save debug controls", vmx_get_test_stage() == 1); 1697 1698 if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS || 1699 ctrl_exit_rev.set & EXI_SAVE_DBGCTLS) { 1700 printf("\tDebug controls are always loaded/saved\n"); 1701 return; 1702 } 1703 vmx_set_test_stage(2); 1704 vmcall(); 1705 1706 asm volatile("mov %%dr7,%0" : "=r" (dr7)); 1707 debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 1708 /* Commented out: KVM does not support DEBUGCTL so far */ 1709 (void)debugctl; 1710 report("Guest=host debug controls", dr7 == 0x402 /* && debugctl == 0x1 */); 1711 1712 dr7 = 0x408; 1713 asm volatile("mov %0,%%dr7" : : "r" (dr7)); 1714 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3); 1715 1716 vmx_set_test_stage(3); 1717 vmcall(); 1718 report("Don't save debug controls", vmx_get_test_stage() == 4); 1719 } 1720 1721 static int dbgctls_exit_handler(void) 1722 { 1723 
unsigned int reason = vmcs_read(EXI_REASON) & 0xff; 1724 u32 insn_len = vmcs_read(EXI_INST_LEN); 1725 u64 guest_rip = vmcs_read(GUEST_RIP); 1726 u64 dr7, debugctl; 1727 1728 asm volatile("mov %%dr7,%0" : "=r" (dr7)); 1729 debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR); 1730 1731 switch (reason) { 1732 case VMX_VMCALL: 1733 switch (vmx_get_test_stage()) { 1734 case 0: 1735 if (dr7 == 0x400 && debugctl == 0 && 1736 vmcs_read(GUEST_DR7) == 0x408 /* && 1737 Commented out: KVM does not support DEBUGCTL so far 1738 vmcs_read(GUEST_DEBUGCTL) == 0x3 */) 1739 vmx_inc_test_stage(); 1740 break; 1741 case 2: 1742 dr7 = 0x402; 1743 asm volatile("mov %0,%%dr7" : : "r" (dr7)); 1744 wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1); 1745 vmcs_write(GUEST_DR7, 0x404); 1746 vmcs_write(GUEST_DEBUGCTL, 0x2); 1747 1748 vmcs_write(ENT_CONTROLS, 1749 vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS); 1750 vmcs_write(EXI_CONTROLS, 1751 vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS); 1752 break; 1753 case 3: 1754 if (dr7 == 0x400 && debugctl == 0 && 1755 vmcs_read(GUEST_DR7) == 0x404 /* && 1756 Commented out: KVM does not support DEBUGCTL so far 1757 vmcs_read(GUEST_DEBUGCTL) == 0x2 */) 1758 vmx_inc_test_stage(); 1759 break; 1760 } 1761 vmcs_write(GUEST_RIP, guest_rip + insn_len); 1762 return VMX_TEST_RESUME; 1763 default: 1764 report("Unknown exit reason, %d", false, reason); 1765 print_vmexit_info(); 1766 } 1767 return VMX_TEST_VMEXIT; 1768 } 1769 1770 struct vmx_msr_entry { 1771 u32 index; 1772 u32 reserved; 1773 u64 value; 1774 } __attribute__((packed)); 1775 1776 #define MSR_MAGIC 0x31415926 1777 struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load; 1778 1779 static int msr_switch_init(struct vmcs *vmcs) 1780 { 1781 msr_bmp_init(); 1782 exit_msr_store = alloc_page(); 1783 exit_msr_load = alloc_page(); 1784 entry_msr_load = alloc_page(); 1785 memset(exit_msr_store, 0, PAGE_SIZE); 1786 memset(exit_msr_load, 0, PAGE_SIZE); 1787 memset(entry_msr_load, 0, PAGE_SIZE); 1788 entry_msr_load[0].index = MSR_KERNEL_GS_BASE; 1789 entry_msr_load[0].value = MSR_MAGIC; 1790 1791 vmx_set_test_stage(1); 1792 vmcs_write(ENT_MSR_LD_CNT, 1); 1793 vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load); 1794 vmcs_write(EXI_MSR_ST_CNT, 1); 1795 vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store); 1796 vmcs_write(EXI_MSR_LD_CNT, 1); 1797 vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load); 1798 return VMX_TEST_START; 1799 } 1800 1801 static void msr_switch_main() 1802 { 1803 if (vmx_get_test_stage() == 1) { 1804 report("VM entry MSR load", 1805 rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC); 1806 vmx_set_test_stage(2); 1807 wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1); 1808 exit_msr_store[0].index = MSR_KERNEL_GS_BASE; 1809 exit_msr_load[0].index = MSR_KERNEL_GS_BASE; 1810 exit_msr_load[0].value = MSR_MAGIC + 2; 1811 } 1812 vmcall(); 1813 } 1814 1815 static int msr_switch_exit_handler() 1816 { 1817 ulong reason; 1818 1819 reason = vmcs_read(EXI_REASON); 1820 if (reason == VMX_VMCALL && vmx_get_test_stage() == 2) { 1821 report("VM exit MSR store", 1822 exit_msr_store[0].value == MSR_MAGIC + 1); 1823 report("VM exit MSR load", 1824 rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC + 2); 1825 vmx_set_test_stage(3); 1826 entry_msr_load[0].index = MSR_FS_BASE; 1827 return VMX_TEST_RESUME; 1828 } 1829 printf("ERROR %s: unexpected stage=%u or reason=%lu\n", 1830 __func__, vmx_get_test_stage(), reason); 1831 return VMX_TEST_EXIT; 1832 } 1833 1834 static int msr_switch_entry_failure(struct vmentry_failure *failure) 1835 { 1836 ulong reason; 1837 1838 if (failure->early) { 1839 
printf("ERROR %s: early exit\n", __func__); 1840 return VMX_TEST_EXIT; 1841 } 1842 1843 reason = vmcs_read(EXI_REASON); 1844 if (reason == (VMX_ENTRY_FAILURE | VMX_FAIL_MSR) && 1845 vmx_get_test_stage() == 3) { 1846 report("VM entry MSR load: try to load FS_BASE", 1847 vmcs_read(EXI_QUALIFICATION) == 1); 1848 return VMX_TEST_VMEXIT; 1849 } 1850 printf("ERROR %s: unexpected stage=%u or reason=%lu\n", 1851 __func__, vmx_get_test_stage(), reason); 1852 return VMX_TEST_EXIT; 1853 } 1854 1855 static int vmmcall_init(struct vmcs *vmcs ) 1856 { 1857 vmcs_write(EXC_BITMAP, 1 << UD_VECTOR); 1858 return VMX_TEST_START; 1859 } 1860 1861 static void vmmcall_main(void) 1862 { 1863 asm volatile( 1864 "mov $0xABCD, %%rax\n\t" 1865 "vmmcall\n\t" 1866 ::: "rax"); 1867 1868 report("VMMCALL", 0); 1869 } 1870 1871 static int vmmcall_exit_handler() 1872 { 1873 ulong reason; 1874 1875 reason = vmcs_read(EXI_REASON); 1876 switch (reason) { 1877 case VMX_VMCALL: 1878 printf("here\n"); 1879 report("VMMCALL triggers #UD", 0); 1880 break; 1881 case VMX_EXC_NMI: 1882 report("VMMCALL triggers #UD", 1883 (vmcs_read(EXI_INTR_INFO) & 0xff) == UD_VECTOR); 1884 break; 1885 default: 1886 report("Unknown exit reason, %ld", false, reason); 1887 print_vmexit_info(); 1888 } 1889 1890 return VMX_TEST_VMEXIT; 1891 } 1892 1893 static int disable_rdtscp_init(struct vmcs *vmcs) 1894 { 1895 u32 ctrl_cpu1; 1896 1897 if (ctrl_cpu_rev[0].clr & CPU_SECONDARY) { 1898 ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1); 1899 ctrl_cpu1 &= ~CPU_RDTSCP; 1900 vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1); 1901 } 1902 1903 return VMX_TEST_START; 1904 } 1905 1906 static void disable_rdtscp_ud_handler(struct ex_regs *regs) 1907 { 1908 switch (vmx_get_test_stage()) { 1909 case 0: 1910 report("RDTSCP triggers #UD", true); 1911 vmx_inc_test_stage(); 1912 regs->rip += 3; 1913 break; 1914 case 2: 1915 report("RDPID triggers #UD", true); 1916 vmx_inc_test_stage(); 1917 regs->rip += 4; 1918 break; 1919 } 1920 return; 1921 1922 } 1923 1924 static void disable_rdtscp_main(void) 1925 { 1926 /* Test that #UD is properly injected in L2. 
*/ 1927 handle_exception(UD_VECTOR, disable_rdtscp_ud_handler); 1928 1929 vmx_set_test_stage(0); 1930 asm volatile("rdtscp" : : : "eax", "ecx", "edx"); 1931 vmcall(); 1932 asm volatile(".byte 0xf3, 0x0f, 0xc7, 0xf8" : : : "eax"); 1933 vmcall(); 1934 } 1935 1936 static int disable_rdtscp_exit_handler(void) 1937 { 1938 unsigned int reason = vmcs_read(EXI_REASON) & 0xff; 1939 1940 switch (reason) { 1941 case VMX_VMCALL: 1942 switch (vmx_get_test_stage()) { 1943 case 0: 1944 report("RDTSCP triggers #UD", false); 1945 vmx_inc_test_stage(); 1946 /* fallthrough */ 1947 case 1: 1948 vmx_inc_test_stage(); 1949 vmcs_write(GUEST_RIP, vmcs_read(GUEST_RIP) + 3); 1950 return VMX_TEST_RESUME; 1951 case 2: 1952 report("RDPID triggers #UD", false); 1953 break; 1954 } 1955 break; 1956 1957 default: 1958 report("Unknown exit reason, %d", false, reason); 1959 print_vmexit_info(); 1960 } 1961 return VMX_TEST_VMEXIT; 1962 } 1963 1964 int int3_init() 1965 { 1966 vmcs_write(EXC_BITMAP, ~0u); 1967 return VMX_TEST_START; 1968 } 1969 1970 void int3_guest_main() 1971 { 1972 asm volatile ("int3"); 1973 } 1974 1975 int int3_exit_handler() 1976 { 1977 u32 reason = vmcs_read(EXI_REASON); 1978 u32 intr_info = vmcs_read(EXI_INTR_INFO); 1979 1980 report("L1 intercepts #BP", reason == VMX_EXC_NMI && 1981 (intr_info & INTR_INFO_VALID_MASK) && 1982 (intr_info & INTR_INFO_VECTOR_MASK) == BP_VECTOR && 1983 ((intr_info & INTR_INFO_INTR_TYPE_MASK) >> 1984 INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION); 1985 1986 return VMX_TEST_VMEXIT; 1987 } 1988 1989 int into_init() 1990 { 1991 vmcs_write(EXC_BITMAP, ~0u); 1992 return VMX_TEST_START; 1993 } 1994 1995 void into_guest_main() 1996 { 1997 struct far_pointer32 fp = { 1998 .offset = (uintptr_t)&&into, 1999 .selector = KERNEL_CS32, 2000 }; 2001 register uintptr_t rsp asm("rsp"); 2002 2003 if (fp.offset != (uintptr_t)&&into) { 2004 printf("Code address too high.\n"); 2005 return; 2006 } 2007 if ((u32)rsp != rsp) { 2008 printf("Stack address too high.\n"); 2009 return; 2010 } 2011 2012 asm goto ("lcall *%0" : : "m" (fp) : "rax" : into); 2013 return; 2014 into: 2015 asm volatile (".code32;" 2016 "movl $0x7fffffff, %eax;" 2017 "addl %eax, %eax;" 2018 "into;" 2019 "lret;" 2020 ".code64"); 2021 __builtin_unreachable(); 2022 } 2023 2024 int into_exit_handler() 2025 { 2026 u32 reason = vmcs_read(EXI_REASON); 2027 u32 intr_info = vmcs_read(EXI_INTR_INFO); 2028 2029 report("L1 intercepts #OF", reason == VMX_EXC_NMI && 2030 (intr_info & INTR_INFO_VALID_MASK) && 2031 (intr_info & INTR_INFO_VECTOR_MASK) == OF_VECTOR && 2032 ((intr_info & INTR_INFO_INTR_TYPE_MASK) >> 2033 INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION); 2034 2035 return VMX_TEST_VMEXIT; 2036 } 2037 2038 static void exit_monitor_from_l2_main(void) 2039 { 2040 printf("Calling exit(0) from l2...\n"); 2041 exit(0); 2042 } 2043 2044 static int exit_monitor_from_l2_handler(void) 2045 { 2046 report("The guest should have killed the VMM", false); 2047 return VMX_TEST_EXIT; 2048 } 2049 2050 static void assert_exit_reason(u64 expected) 2051 { 2052 u64 actual = vmcs_read(EXI_REASON); 2053 2054 TEST_ASSERT_EQ_MSG(expected, actual, "Expected %s, got %s.", 2055 exit_reason_description(expected), 2056 exit_reason_description(actual)); 2057 } 2058 2059 static void skip_exit_vmcall() 2060 { 2061 u64 guest_rip = vmcs_read(GUEST_RIP); 2062 u32 insn_len = vmcs_read(EXI_INST_LEN); 2063 2064 assert_exit_reason(VMX_VMCALL); 2065 vmcs_write(GUEST_RIP, guest_rip + insn_len); 2066 } 2067 2068 static void v2_null_test_guest(void) 2069 
{ 2070 } 2071 2072 static void v2_null_test(void) 2073 { 2074 test_set_guest(v2_null_test_guest); 2075 enter_guest(); 2076 report(__func__, 1); 2077 } 2078 2079 static void v2_multiple_entries_test_guest(void) 2080 { 2081 vmx_set_test_stage(1); 2082 vmcall(); 2083 vmx_set_test_stage(2); 2084 } 2085 2086 static void v2_multiple_entries_test(void) 2087 { 2088 test_set_guest(v2_multiple_entries_test_guest); 2089 enter_guest(); 2090 TEST_ASSERT_EQ(vmx_get_test_stage(), 1); 2091 skip_exit_vmcall(); 2092 enter_guest(); 2093 TEST_ASSERT_EQ(vmx_get_test_stage(), 2); 2094 report(__func__, 1); 2095 } 2096 2097 static int fixture_test_data = 1; 2098 2099 static void fixture_test_teardown(void *data) 2100 { 2101 *((int *) data) = 1; 2102 } 2103 2104 static void fixture_test_guest(void) 2105 { 2106 fixture_test_data++; 2107 } 2108 2109 2110 static void fixture_test_setup(void) 2111 { 2112 TEST_ASSERT_EQ_MSG(1, fixture_test_data, 2113 "fixture_test_teardown didn't run?!"); 2114 fixture_test_data = 2; 2115 test_add_teardown(fixture_test_teardown, &fixture_test_data); 2116 test_set_guest(fixture_test_guest); 2117 } 2118 2119 static void fixture_test_case1(void) 2120 { 2121 fixture_test_setup(); 2122 TEST_ASSERT_EQ(2, fixture_test_data); 2123 enter_guest(); 2124 TEST_ASSERT_EQ(3, fixture_test_data); 2125 report(__func__, 1); 2126 } 2127 2128 static void fixture_test_case2(void) 2129 { 2130 fixture_test_setup(); 2131 TEST_ASSERT_EQ(2, fixture_test_data); 2132 enter_guest(); 2133 TEST_ASSERT_EQ(3, fixture_test_data); 2134 report(__func__, 1); 2135 } 2136 2137 enum ept_access_op { 2138 OP_READ, 2139 OP_WRITE, 2140 OP_EXEC, 2141 OP_FLUSH_TLB, 2142 OP_EXIT, 2143 }; 2144 2145 static struct ept_access_test_data { 2146 unsigned long gpa; 2147 unsigned long *gva; 2148 unsigned long hpa; 2149 unsigned long *hva; 2150 enum ept_access_op op; 2151 } ept_access_test_data; 2152 2153 extern unsigned char ret42_start; 2154 extern unsigned char ret42_end; 2155 2156 /* Returns 42. */ 2157 asm( 2158 ".align 64\n" 2159 "ret42_start:\n" 2160 "mov $42, %eax\n" 2161 "ret\n" 2162 "ret42_end:\n" 2163 ); 2164 2165 static void 2166 diagnose_ept_violation_qual(u64 expected, u64 actual) 2167 { 2168 2169 #define DIAGNOSE(flag) \ 2170 do { \ 2171 if ((expected & flag) != (actual & flag)) \ 2172 printf(#flag " %sexpected\n", \ 2173 (expected & flag) ? "" : "un"); \ 2174 } while (0) 2175 2176 DIAGNOSE(EPT_VLT_RD); 2177 DIAGNOSE(EPT_VLT_WR); 2178 DIAGNOSE(EPT_VLT_FETCH); 2179 DIAGNOSE(EPT_VLT_PERM_RD); 2180 DIAGNOSE(EPT_VLT_PERM_WR); 2181 DIAGNOSE(EPT_VLT_PERM_EX); 2182 DIAGNOSE(EPT_VLT_LADDR_VLD); 2183 DIAGNOSE(EPT_VLT_PADDR); 2184 2185 #undef DIAGNOSE 2186 } 2187 2188 static void do_ept_access_op(enum ept_access_op op) 2189 { 2190 ept_access_test_data.op = op; 2191 enter_guest(); 2192 } 2193 2194 /* 2195 * Force the guest to flush its TLB (i.e., flush gva -> gpa mappings). Only 2196 * needed by tests that modify guest PTEs. 2197 */ 2198 static void ept_access_test_guest_flush_tlb(void) 2199 { 2200 do_ept_access_op(OP_FLUSH_TLB); 2201 skip_exit_vmcall(); 2202 } 2203 2204 /* 2205 * Modifies the EPT entry at @level in the mapping of @gpa. First clears the 2206 * bits in @clear then sets the bits in @set. @mkhuge transforms the entry into 2207 * a huge page. 
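 *
 * Illustrative usage (editor's sketch, not part of the original tests):
 * temporarily revoke write permission at level 2, run the access that should
 * now fault, then restore the entry:
 *
 *	orig_pte = ept_twiddle(gpa, false, 2, EPT_WA, 0);
 *	... perform the access that should now fault ...
 *	ept_untwiddle(gpa, 2, orig_pte);
 *
 * The original entry is returned so ept_untwiddle() can put it back.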
2208 */ 2209 static unsigned long ept_twiddle(unsigned long gpa, bool mkhuge, int level, 2210 unsigned long clear, unsigned long set) 2211 { 2212 struct ept_access_test_data *data = &ept_access_test_data; 2213 unsigned long orig_pte; 2214 unsigned long pte; 2215 2216 /* Screw with the mapping at the requested level. */ 2217 TEST_ASSERT(get_ept_pte(pml4, gpa, level, &orig_pte)); 2218 pte = orig_pte; 2219 if (mkhuge) 2220 pte = (orig_pte & ~EPT_ADDR_MASK) | data->hpa | EPT_LARGE_PAGE; 2221 else 2222 pte = orig_pte; 2223 pte = (pte & ~clear) | set; 2224 set_ept_pte(pml4, gpa, level, pte); 2225 ept_sync(INVEPT_SINGLE, eptp); 2226 2227 return orig_pte; 2228 } 2229 2230 static void ept_untwiddle(unsigned long gpa, int level, unsigned long orig_pte) 2231 { 2232 set_ept_pte(pml4, gpa, level, orig_pte); 2233 } 2234 2235 static void do_ept_violation(bool leaf, enum ept_access_op op, 2236 u64 expected_qual, u64 expected_paddr) 2237 { 2238 u64 qual; 2239 2240 /* Try the access and observe the violation. */ 2241 do_ept_access_op(op); 2242 2243 assert_exit_reason(VMX_EPT_VIOLATION); 2244 2245 qual = vmcs_read(EXI_QUALIFICATION); 2246 2247 diagnose_ept_violation_qual(expected_qual, qual); 2248 TEST_EXPECT_EQ(expected_qual, qual); 2249 2250 #if 0 2251 /* Disable for now otherwise every test will fail */ 2252 TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS), 2253 (unsigned long) ( 2254 op == OP_EXEC ? data->gva + 1 : data->gva)); 2255 #endif 2256 /* 2257 * TODO: tests that probe expected_paddr in pages other than the one at 2258 * the beginning of the 1g region. 2259 */ 2260 TEST_EXPECT_EQ(vmcs_read(INFO_PHYS_ADDR), expected_paddr); 2261 } 2262 2263 static void 2264 ept_violation_at_level_mkhuge(bool mkhuge, int level, unsigned long clear, 2265 unsigned long set, enum ept_access_op op, 2266 u64 expected_qual) 2267 { 2268 struct ept_access_test_data *data = &ept_access_test_data; 2269 unsigned long orig_pte; 2270 2271 orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set); 2272 2273 do_ept_violation(level == 1 || mkhuge, op, expected_qual, 2274 op == OP_EXEC ? data->gpa + sizeof(unsigned long) : 2275 data->gpa); 2276 2277 /* Fix the violation and resume the op loop. */ 2278 ept_untwiddle(data->gpa, level, orig_pte); 2279 enter_guest(); 2280 skip_exit_vmcall(); 2281 } 2282 2283 static void 2284 ept_violation_at_level(int level, unsigned long clear, unsigned long set, 2285 enum ept_access_op op, u64 expected_qual) 2286 { 2287 ept_violation_at_level_mkhuge(false, level, clear, set, op, 2288 expected_qual); 2289 if (ept_huge_pages_supported(level)) 2290 ept_violation_at_level_mkhuge(true, level, clear, set, op, 2291 expected_qual); 2292 } 2293 2294 static void ept_violation(unsigned long clear, unsigned long set, 2295 enum ept_access_op op, u64 expected_qual) 2296 { 2297 ept_violation_at_level(1, clear, set, op, expected_qual); 2298 ept_violation_at_level(2, clear, set, op, expected_qual); 2299 ept_violation_at_level(3, clear, set, op, expected_qual); 2300 ept_violation_at_level(4, clear, set, op, expected_qual); 2301 } 2302 2303 static void ept_access_violation(unsigned long access, enum ept_access_op op, 2304 u64 expected_qual) 2305 { 2306 ept_violation(EPT_PRESENT, access, op, 2307 expected_qual | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR); 2308 } 2309 2310 /* 2311 * For translations that don't involve a GVA, that is physical address (paddr) 2312 * accesses, EPT violations don't set the flag EPT_VLT_PADDR. For a typical 2313 * guest memory access, the hardware does GVA -> GPA -> HPA. 
However, certain 2314 * translations don't involve GVAs, such as when the hardware does the guest 2315 * page table walk. For example, in translating GVA_1 -> GPA_1, the guest MMU 2316 * might try to set an A bit on a guest PTE. If the GPA_2 that the PTE resides 2317 * on isn't present in the EPT, then the EPT violation will be for GPA_2 and 2318 * the EPT_VLT_PADDR bit will be clear in the exit qualification. 2319 * 2320 * Note that paddr violations can also be triggered by loading PAE page tables 2321 * with wonky addresses. We don't test that yet. 2322 * 2323 * This function modifies the EPT entry that maps the GPA that the guest page 2324 * table entry mapping ept_access_data.gva resides on. 2325 * 2326 * @ept_access EPT permissions to set. Other permissions are cleared. 2327 * 2328 * @pte_ad Set the A/D bits on the guest PTE accordingly. 2329 * 2330 * @op Guest operation to perform with ept_access_data.gva. 2331 * 2332 * @expect_violation 2333 * Is a violation expected during the paddr access? 2334 * 2335 * @expected_qual Expected qualification for the EPT violation. 2336 * EPT_VLT_PADDR should be clear. 2337 */ 2338 static void ept_access_paddr(unsigned long ept_access, unsigned long pte_ad, 2339 enum ept_access_op op, bool expect_violation, 2340 u64 expected_qual) 2341 { 2342 struct ept_access_test_data *data = &ept_access_test_data; 2343 unsigned long *ptep; 2344 unsigned long gpa; 2345 unsigned long orig_epte; 2346 2347 /* Modify the guest PTE mapping data->gva according to @pte_ad. */ 2348 ptep = get_pte_level(current_page_table(), data->gva, /*level=*/1); 2349 TEST_ASSERT(ptep); 2350 TEST_ASSERT_EQ(*ptep & PT_ADDR_MASK, data->gpa); 2351 *ptep = (*ptep & ~PT_AD_MASK) | pte_ad; 2352 ept_access_test_guest_flush_tlb(); 2353 2354 /* 2355 * Now modify the access bits on the EPT entry for the GPA that the 2356 * guest PTE resides on. Note that by modifying a single EPT entry, 2357 * we're potentially affecting 512 guest PTEs. However, we've carefully 2358 * constructed our test such that those other 511 PTEs aren't used by 2359 * the guest: data->gva is at the beginning of a 1G huge page, thus the 2360 * PTE we're modifying is at the beginning of a 4K page and the 2361 * following 511 entries are also under our control (and not touched by 2362 * the guest). 2363 */ 2364 gpa = virt_to_phys(ptep); 2365 TEST_ASSERT_EQ(gpa & ~PAGE_MASK, 0); 2366 /* 2367 * Make sure the guest page table page is mapped with a 4K EPT entry, 2368 * otherwise our level=1 twiddling below will fail. We use the 2369 * identity map (gpa = gpa) since page tables are shared with the host.
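 * (Editor's note: ept_twiddle() below asks for a level-1 entry, so the
 * page-table page needs to be covered by its own 4K EPT leaf; if it were
 * covered by a huge EPT mapping, there would be no level-1 entry to modify.)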
2370 */ 2371 install_ept(pml4, gpa, gpa, EPT_PRESENT); 2372 orig_epte = ept_twiddle(gpa, /*mkhuge=*/0, /*level=*/1, 2373 /*clear=*/EPT_PRESENT, /*set=*/ept_access); 2374 2375 if (expect_violation) { 2376 do_ept_violation(/*leaf=*/true, op, 2377 expected_qual | EPT_VLT_LADDR_VLD, gpa); 2378 ept_untwiddle(gpa, /*level=*/1, orig_epte); 2379 do_ept_access_op(op); 2380 } else { 2381 do_ept_access_op(op); 2382 ept_untwiddle(gpa, /*level=*/1, orig_epte); 2383 } 2384 2385 TEST_ASSERT(*ptep & PT_ACCESSED_MASK); 2386 if ((pte_ad & PT_DIRTY_MASK) || op == OP_WRITE) 2387 TEST_ASSERT(*ptep & PT_DIRTY_MASK); 2388 2389 skip_exit_vmcall(); 2390 } 2391 2392 static void ept_access_allowed_paddr(unsigned long ept_access, 2393 unsigned long pte_ad, 2394 enum ept_access_op op) 2395 { 2396 ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/false, 2397 /*expected_qual=*/-1); 2398 } 2399 2400 static void ept_access_violation_paddr(unsigned long ept_access, 2401 unsigned long pte_ad, 2402 enum ept_access_op op, 2403 u64 expected_qual) 2404 { 2405 ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/true, 2406 expected_qual); 2407 } 2408 2409 2410 static void ept_allowed_at_level_mkhuge(bool mkhuge, int level, 2411 unsigned long clear, 2412 unsigned long set, 2413 enum ept_access_op op) 2414 { 2415 struct ept_access_test_data *data = &ept_access_test_data; 2416 unsigned long orig_pte; 2417 2418 orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set); 2419 2420 /* No violation. Should proceed to vmcall. */ 2421 do_ept_access_op(op); 2422 skip_exit_vmcall(); 2423 2424 ept_untwiddle(data->gpa, level, orig_pte); 2425 } 2426 2427 static void ept_allowed_at_level(int level, unsigned long clear, 2428 unsigned long set, enum ept_access_op op) 2429 { 2430 ept_allowed_at_level_mkhuge(false, level, clear, set, op); 2431 if (ept_huge_pages_supported(level)) 2432 ept_allowed_at_level_mkhuge(true, level, clear, set, op); 2433 } 2434 2435 static void ept_allowed(unsigned long clear, unsigned long set, 2436 enum ept_access_op op) 2437 { 2438 ept_allowed_at_level(1, clear, set, op); 2439 ept_allowed_at_level(2, clear, set, op); 2440 ept_allowed_at_level(3, clear, set, op); 2441 ept_allowed_at_level(4, clear, set, op); 2442 } 2443 2444 static void ept_ignored_bit(int bit) 2445 { 2446 /* Set the bit. */ 2447 ept_allowed(0, 1ul << bit, OP_READ); 2448 ept_allowed(0, 1ul << bit, OP_WRITE); 2449 ept_allowed(0, 1ul << bit, OP_EXEC); 2450 2451 /* Clear the bit. */ 2452 ept_allowed(1ul << bit, 0, OP_READ); 2453 ept_allowed(1ul << bit, 0, OP_WRITE); 2454 ept_allowed(1ul << bit, 0, OP_EXEC); 2455 } 2456 2457 static void ept_access_allowed(unsigned long access, enum ept_access_op op) 2458 { 2459 ept_allowed(EPT_PRESENT, access, op); 2460 } 2461 2462 2463 static void ept_misconfig_at_level_mkhuge_op(bool mkhuge, int level, 2464 unsigned long clear, 2465 unsigned long set, 2466 enum ept_access_op op) 2467 { 2468 struct ept_access_test_data *data = &ept_access_test_data; 2469 unsigned long orig_pte; 2470 2471 orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set); 2472 2473 do_ept_access_op(op); 2474 assert_exit_reason(VMX_EPT_MISCONFIG); 2475 2476 /* Intel 27.2.1, "For all other VM exits, this field is cleared." */ 2477 #if 0 2478 /* broken: */ 2479 TEST_EXPECT_EQ_MSG(vmcs_read(EXI_QUALIFICATION), 0); 2480 #endif 2481 #if 0 2482 /* 2483 * broken: 2484 * According to description of exit qual for EPT violation, 2485 * EPT_VLT_LADDR_VLD indicates if GUEST_LINEAR_ADDRESS is valid. 
* However, I can't find anything that says GUEST_LINEAR_ADDRESS ought 2487 * to be set for misconfig. 2488 */ 2489 TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS), 2490 (unsigned long) ( 2491 op == OP_EXEC ? data->gva + 1 : data->gva)); 2492 #endif 2493 2494 /* Fix the violation and resume the op loop. */ 2495 ept_untwiddle(data->gpa, level, orig_pte); 2496 enter_guest(); 2497 skip_exit_vmcall(); 2498 } 2499 2500 static void ept_misconfig_at_level_mkhuge(bool mkhuge, int level, 2501 unsigned long clear, 2502 unsigned long set) 2503 { 2504 /* The op shouldn't matter (read, write, exec), so try them all! */ 2505 ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_READ); 2506 ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_WRITE); 2507 ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_EXEC); 2508 } 2509 2510 static void ept_misconfig_at_level(int level, unsigned long clear, 2511 unsigned long set) 2512 { 2513 ept_misconfig_at_level_mkhuge(false, level, clear, set); 2514 if (ept_huge_pages_supported(level)) 2515 ept_misconfig_at_level_mkhuge(true, level, clear, set); 2516 } 2517 2518 static void ept_misconfig(unsigned long clear, unsigned long set) 2519 { 2520 ept_misconfig_at_level(1, clear, set); 2521 ept_misconfig_at_level(2, clear, set); 2522 ept_misconfig_at_level(3, clear, set); 2523 ept_misconfig_at_level(4, clear, set); 2524 } 2525 2526 static void ept_access_misconfig(unsigned long access) 2527 { 2528 ept_misconfig(EPT_PRESENT, access); 2529 } 2530 2531 static void ept_reserved_bit_at_level_nohuge(int level, int bit) 2532 { 2533 /* Setting the bit causes a misconfig. */ 2534 ept_misconfig_at_level_mkhuge(false, level, 0, 1ul << bit); 2535 2536 /* Making the entry non-present turns reserved bits into ignored. */ 2537 ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ, 2538 EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR); 2539 } 2540 2541 static void ept_reserved_bit_at_level_huge(int level, int bit) 2542 { 2543 /* Setting the bit causes a misconfig. */ 2544 ept_misconfig_at_level_mkhuge(true, level, 0, 1ul << bit); 2545 2546 /* Making the entry non-present turns reserved bits into ignored. */ 2547 ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ, 2548 EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR); 2549 } 2550 2551 static void ept_reserved_bit_at_level(int level, int bit) 2552 { 2553 /* Setting the bit causes a misconfig. */ 2554 ept_misconfig_at_level(level, 0, 1ul << bit); 2555 2556 /* Making the entry non-present turns reserved bits into ignored. */ 2557 ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ, 2558 EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR); 2559 } 2560 2561 static void ept_reserved_bit(int bit) 2562 { 2563 ept_reserved_bit_at_level(1, bit); 2564 ept_reserved_bit_at_level(2, bit); 2565 ept_reserved_bit_at_level(3, bit); 2566 ept_reserved_bit_at_level(4, bit); 2567 } 2568 2569 #define PAGE_2M_ORDER 9 2570 #define PAGE_1G_ORDER 18 2571 2572 static void *get_1g_page(void) 2573 { 2574 static void *alloc; 2575 2576 if (!alloc) 2577 alloc = alloc_pages(PAGE_1G_ORDER); 2578 return alloc; 2579 } 2580 2581 static void ept_access_test_teardown(void *unused) 2582 { 2583 /* Exit the guest cleanly.
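 OP_EXIT makes the op loop in ept_access_test_guest() return instead of
 looping back to vmcall(), so this final guest entry winds down normally.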
*/ 2584 do_ept_access_op(OP_EXIT); 2585 } 2586 2587 static void ept_access_test_guest(void) 2588 { 2589 struct ept_access_test_data *data = &ept_access_test_data; 2590 int (*code)(void) = (int (*)(void)) &data->gva[1]; 2591 2592 while (true) { 2593 switch (data->op) { 2594 case OP_READ: 2595 TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_1); 2596 break; 2597 case OP_WRITE: 2598 *data->gva = MAGIC_VAL_2; 2599 TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_2); 2600 *data->gva = MAGIC_VAL_1; 2601 break; 2602 case OP_EXEC: 2603 TEST_ASSERT_EQ(42, code()); 2604 break; 2605 case OP_FLUSH_TLB: 2606 write_cr3(read_cr3()); 2607 break; 2608 case OP_EXIT: 2609 return; 2610 default: 2611 TEST_ASSERT_MSG(false, "Unknown op %d", data->op); 2612 } 2613 vmcall(); 2614 } 2615 } 2616 2617 static void ept_access_test_setup(void) 2618 { 2619 struct ept_access_test_data *data = &ept_access_test_data; 2620 unsigned long npages = 1ul << PAGE_1G_ORDER; 2621 unsigned long size = npages * PAGE_SIZE; 2622 unsigned long *page_table = current_page_table(); 2623 unsigned long pte; 2624 2625 if (setup_ept(false)) 2626 test_skip("EPT not supported"); 2627 2628 /* We use data->gpa = 1 << 39 so that test data has a separate pml4 entry */ 2629 if (cpuid_maxphyaddr() < 40) 2630 test_skip("Test needs MAXPHYADDR >= 40"); 2631 2632 test_set_guest(ept_access_test_guest); 2633 test_add_teardown(ept_access_test_teardown, NULL); 2634 2635 data->hva = get_1g_page(); 2636 TEST_ASSERT(data->hva); 2637 data->hpa = virt_to_phys(data->hva); 2638 2639 data->gpa = 1ul << 39; 2640 data->gva = (void *) ALIGN((unsigned long) alloc_vpages(npages * 2), 2641 size); 2642 TEST_ASSERT(!any_present_pages(page_table, data->gva, size)); 2643 install_pages(page_table, data->gpa, size, data->gva); 2644 2645 /* 2646 * Make sure nothing's mapped here so the tests that screw with the 2647 * pml4 entry don't inadvertently break something. 
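 * Hence the two asserts below: the level-4 entries covering the start and
 * end of the region must still be zero before install_ept() adds the
 * mapping for data->gpa.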
2648 */ 2649 TEST_ASSERT(get_ept_pte(pml4, data->gpa, 4, &pte) && pte == 0); 2650 TEST_ASSERT(get_ept_pte(pml4, data->gpa + size - 1, 4, &pte) && pte == 0); 2651 install_ept(pml4, data->hpa, data->gpa, EPT_PRESENT); 2652 2653 data->hva[0] = MAGIC_VAL_1; 2654 memcpy(&data->hva[1], &ret42_start, &ret42_end - &ret42_start); 2655 } 2656 2657 static void ept_access_test_not_present(void) 2658 { 2659 ept_access_test_setup(); 2660 /* --- */ 2661 ept_access_violation(0, OP_READ, EPT_VLT_RD); 2662 ept_access_violation(0, OP_WRITE, EPT_VLT_WR); 2663 ept_access_violation(0, OP_EXEC, EPT_VLT_FETCH); 2664 } 2665 2666 static void ept_access_test_read_only(void) 2667 { 2668 ept_access_test_setup(); 2669 2670 /* r-- */ 2671 ept_access_allowed(EPT_RA, OP_READ); 2672 ept_access_violation(EPT_RA, OP_WRITE, EPT_VLT_WR | EPT_VLT_PERM_RD); 2673 ept_access_violation(EPT_RA, OP_EXEC, EPT_VLT_FETCH | EPT_VLT_PERM_RD); 2674 } 2675 2676 static void ept_access_test_write_only(void) 2677 { 2678 ept_access_test_setup(); 2679 /* -w- */ 2680 ept_access_misconfig(EPT_WA); 2681 } 2682 2683 static void ept_access_test_read_write(void) 2684 { 2685 ept_access_test_setup(); 2686 /* rw- */ 2687 ept_access_allowed(EPT_RA | EPT_WA, OP_READ); 2688 ept_access_allowed(EPT_RA | EPT_WA, OP_WRITE); 2689 ept_access_violation(EPT_RA | EPT_WA, OP_EXEC, 2690 EPT_VLT_FETCH | EPT_VLT_PERM_RD | EPT_VLT_PERM_WR); 2691 } 2692 2693 2694 static void ept_access_test_execute_only(void) 2695 { 2696 ept_access_test_setup(); 2697 /* --x */ 2698 if (ept_execute_only_supported()) { 2699 ept_access_violation(EPT_EA, OP_READ, 2700 EPT_VLT_RD | EPT_VLT_PERM_EX); 2701 ept_access_violation(EPT_EA, OP_WRITE, 2702 EPT_VLT_WR | EPT_VLT_PERM_EX); 2703 ept_access_allowed(EPT_EA, OP_EXEC); 2704 } else { 2705 ept_access_misconfig(EPT_EA); 2706 } 2707 } 2708 2709 static void ept_access_test_read_execute(void) 2710 { 2711 ept_access_test_setup(); 2712 /* r-x */ 2713 ept_access_allowed(EPT_RA | EPT_EA, OP_READ); 2714 ept_access_violation(EPT_RA | EPT_EA, OP_WRITE, 2715 EPT_VLT_WR | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX); 2716 ept_access_allowed(EPT_RA | EPT_EA, OP_EXEC); 2717 } 2718 2719 static void ept_access_test_write_execute(void) 2720 { 2721 ept_access_test_setup(); 2722 /* -wx */ 2723 ept_access_misconfig(EPT_WA | EPT_EA); 2724 } 2725 2726 static void ept_access_test_read_write_execute(void) 2727 { 2728 ept_access_test_setup(); 2729 /* rwx */ 2730 ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_READ); 2731 ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_WRITE); 2732 ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_EXEC); 2733 } 2734 2735 static void ept_access_test_reserved_bits(void) 2736 { 2737 int i; 2738 int maxphyaddr; 2739 2740 ept_access_test_setup(); 2741 2742 /* Reserved bits above maxphyaddr. */ 2743 maxphyaddr = cpuid_maxphyaddr(); 2744 for (i = maxphyaddr; i <= 51; i++) { 2745 report_prefix_pushf("reserved_bit=%d", i); 2746 ept_reserved_bit(i); 2747 report_prefix_pop(); 2748 } 2749 2750 /* Level-specific reserved bits. */ 2751 ept_reserved_bit_at_level_nohuge(2, 3); 2752 ept_reserved_bit_at_level_nohuge(2, 4); 2753 ept_reserved_bit_at_level_nohuge(2, 5); 2754 ept_reserved_bit_at_level_nohuge(2, 6); 2755 /* 2M alignment. 
*/ 2756 for (i = 12; i < 20; i++) { 2757 report_prefix_pushf("reserved_bit=%d", i); 2758 ept_reserved_bit_at_level_huge(2, i); 2759 report_prefix_pop(); 2760 } 2761 ept_reserved_bit_at_level_nohuge(3, 3); 2762 ept_reserved_bit_at_level_nohuge(3, 4); 2763 ept_reserved_bit_at_level_nohuge(3, 5); 2764 ept_reserved_bit_at_level_nohuge(3, 6); 2765 /* 1G alignment. */ 2766 for (i = 12; i < 29; i++) { 2767 report_prefix_pushf("reserved_bit=%d", i); 2768 ept_reserved_bit_at_level_huge(3, i); 2769 report_prefix_pop(); 2770 } 2771 ept_reserved_bit_at_level(4, 3); 2772 ept_reserved_bit_at_level(4, 4); 2773 ept_reserved_bit_at_level(4, 5); 2774 ept_reserved_bit_at_level(4, 6); 2775 ept_reserved_bit_at_level(4, 7); 2776 } 2777 2778 static void ept_access_test_ignored_bits(void) 2779 { 2780 ept_access_test_setup(); 2781 /* 2782 * Bits ignored at every level. Bits 8 and 9 (A and D) are ignored as 2783 * far as translation is concerned even if AD bits are enabled in the 2784 * EPTP. Bit 63 is ignored because "EPT-violation #VE" VM-execution 2785 * control is 0. 2786 */ 2787 ept_ignored_bit(8); 2788 ept_ignored_bit(9); 2789 ept_ignored_bit(10); 2790 ept_ignored_bit(11); 2791 ept_ignored_bit(52); 2792 ept_ignored_bit(53); 2793 ept_ignored_bit(54); 2794 ept_ignored_bit(55); 2795 ept_ignored_bit(56); 2796 ept_ignored_bit(57); 2797 ept_ignored_bit(58); 2798 ept_ignored_bit(59); 2799 ept_ignored_bit(60); 2800 ept_ignored_bit(61); 2801 ept_ignored_bit(62); 2802 ept_ignored_bit(63); 2803 } 2804 2805 static void ept_access_test_paddr_not_present_ad_disabled(void) 2806 { 2807 ept_access_test_setup(); 2808 ept_disable_ad_bits(); 2809 2810 ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, EPT_VLT_RD); 2811 ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, EPT_VLT_RD); 2812 ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, EPT_VLT_RD); 2813 } 2814 2815 static void ept_access_test_paddr_not_present_ad_enabled(void) 2816 { 2817 u64 qual = EPT_VLT_RD | EPT_VLT_WR; 2818 2819 ept_access_test_setup(); 2820 ept_enable_ad_bits_or_skip_test(); 2821 2822 ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, qual); 2823 ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, qual); 2824 ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, qual); 2825 } 2826 2827 static void ept_access_test_paddr_read_only_ad_disabled(void) 2828 { 2829 /* 2830 * When EPT AD bits are disabled, all accesses to guest paging 2831 * structures are reported separately as a read and (after 2832 * translation of the GPA to host physical address) a read+write 2833 * if the A/D bits have to be set. 2834 */ 2835 u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD; 2836 2837 ept_access_test_setup(); 2838 ept_disable_ad_bits(); 2839 2840 /* Can't update A bit, so all accesses fail. */ 2841 ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual); 2842 ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual); 2843 ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual); 2844 /* AD bits disabled, so only writes try to update the D bit. */ 2845 ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ); 2846 ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual); 2847 ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC); 2848 /* Both A and D already set, so read-only is OK. 
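 With nothing left for the CPU to update in the guest PTE, a read-only EPT
 mapping of the paging-structure page suffices for reads, writes and fetches
 alike.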
*/ 2849 ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_READ); 2850 ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_WRITE); 2851 ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_EXEC); 2852 } 2853 2854 static void ept_access_test_paddr_read_only_ad_enabled(void) 2855 { 2856 /* 2857 * When EPT AD bits are enabled, all accesses to guest paging 2858 * structures are considered writes as far as EPT translation 2859 * is concerned. 2860 */ 2861 u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD; 2862 2863 ept_access_test_setup(); 2864 ept_enable_ad_bits_or_skip_test(); 2865 2866 ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual); 2867 ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual); 2868 ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual); 2869 ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ, qual); 2870 ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual); 2871 ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC, qual); 2872 ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_READ, qual); 2873 ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_WRITE, qual); 2874 ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_EXEC, qual); 2875 } 2876 2877 static void ept_access_test_paddr_read_write(void) 2878 { 2879 ept_access_test_setup(); 2880 /* Read-write access to paging structure. */ 2881 ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_READ); 2882 ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_WRITE); 2883 ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_EXEC); 2884 } 2885 2886 static void ept_access_test_paddr_read_write_execute(void) 2887 { 2888 ept_access_test_setup(); 2889 /* RWX access to paging structure. */ 2890 ept_access_allowed_paddr(EPT_PRESENT, 0, OP_READ); 2891 ept_access_allowed_paddr(EPT_PRESENT, 0, OP_WRITE); 2892 ept_access_allowed_paddr(EPT_PRESENT, 0, OP_EXEC); 2893 } 2894 2895 static void ept_access_test_paddr_read_execute_ad_disabled(void) 2896 { 2897 /* 2898 * When EPT AD bits are disabled, all accesses to guest paging 2899 * structures are reported separately as a read and (after 2900 * translation of the GPA to host physical address) a read+write 2901 * if the A/D bits have to be set. 2902 */ 2903 u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX; 2904 2905 ept_access_test_setup(); 2906 ept_disable_ad_bits(); 2907 2908 /* Can't update A bit, so all accesses fail. */ 2909 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual); 2910 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual); 2911 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual); 2912 /* AD bits disabled, so only writes try to update the D bit. */ 2913 ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ); 2914 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual); 2915 ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC); 2916 /* Both A and D already set, so read-only is OK. */ 2917 ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ); 2918 ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE); 2919 ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC); 2920 } 2921 2922 static void ept_access_test_paddr_read_execute_ad_enabled(void) 2923 { 2924 /* 2925 * When EPT AD bits are enabled, all accesses to guest paging 2926 * structures are considered writes as far as EPT translation 2927 * is concerned. 
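 * For example, even OP_READ makes the CPU set the A bit in the guest PTE,
 * and with EPT A/D enabled that paging-structure update is performed as an
 * EPT write, which is why 'qual' below includes EPT_VLT_WR for every op.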
2928 */ 2929 u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX; 2930 2931 ept_access_test_setup(); 2932 ept_enable_ad_bits_or_skip_test(); 2933 2934 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual); 2935 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual); 2936 ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual); 2937 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ, qual); 2938 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE, qual); 2939 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC, qual); 2940 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ, qual); 2941 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE, qual); 2942 ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC, qual); 2943 } 2944 2945 static void ept_access_test_paddr_not_present_page_fault(void) 2946 { 2947 ept_access_test_setup(); 2948 /* 2949 * TODO: test no EPT violation as long as guest PF occurs. e.g., GPA is 2950 * page is read-only in EPT but GVA is also mapped read only in PT. 2951 * Thus guest page fault before host takes EPT violation for trying to 2952 * update A bit. 2953 */ 2954 } 2955 2956 static void ept_access_test_force_2m_page(void) 2957 { 2958 ept_access_test_setup(); 2959 2960 TEST_ASSERT_EQ(ept_2m_supported(), true); 2961 ept_allowed_at_level_mkhuge(true, 2, 0, 0, OP_READ); 2962 ept_violation_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_RA, OP_WRITE, 2963 EPT_VLT_WR | EPT_VLT_PERM_RD | 2964 EPT_VLT_LADDR_VLD | EPT_VLT_PADDR); 2965 ept_misconfig_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_WA); 2966 } 2967 2968 static bool invvpid_valid(u64 type, u64 vpid, u64 gla) 2969 { 2970 u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 2971 2972 TEST_ASSERT(msr & VPID_CAP_INVVPID); 2973 2974 if (type < INVVPID_ADDR || type > INVVPID_CONTEXT_LOCAL) 2975 return false; 2976 2977 if (!(msr & (1ull << (type + VPID_CAP_INVVPID_TYPES_SHIFT)))) 2978 return false; 2979 2980 if (vpid >> 16) 2981 return false; 2982 2983 if (type != INVVPID_ALL && !vpid) 2984 return false; 2985 2986 if (type == INVVPID_ADDR && !is_canonical(gla)) 2987 return false; 2988 2989 return true; 2990 } 2991 2992 static void try_invvpid(u64 type, u64 vpid, u64 gla) 2993 { 2994 int rc; 2995 bool valid = invvpid_valid(type, vpid, gla); 2996 u64 expected = valid ? VMXERR_UNSUPPORTED_VMCS_COMPONENT 2997 : VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID; 2998 /* 2999 * Set VMX_INST_ERROR to VMXERR_UNVALID_VMCS_COMPONENT, so 3000 * that we can tell if it is updated by INVVPID. 3001 */ 3002 vmcs_read(~0); 3003 rc = invvpid(type, vpid, gla); 3004 report("INVVPID type %ld VPID %lx GLA %lx %s", 3005 !rc == valid, type, vpid, gla, 3006 valid ? "passes" : "fails"); 3007 report("After %s INVVPID, VMX_INST_ERR is %ld (actual %ld)", 3008 vmcs_read(VMX_INST_ERROR) == expected, 3009 rc ? "failed" : "successful", 3010 expected, vmcs_read(VMX_INST_ERROR)); 3011 } 3012 3013 static void ds_invvpid(void *data) 3014 { 3015 u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3016 u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1; 3017 3018 TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL); 3019 asm volatile("invvpid %0, %1" 3020 : 3021 : "m"(*(struct invvpid_operand *)data), 3022 "r"(type)); 3023 } 3024 3025 /* 3026 * The SS override is ignored in 64-bit mode, so we use an addressing 3027 * mode with %rsp as the base register to generate an implicit SS 3028 * reference. 
3029 */ 3030 static void ss_invvpid(void *data) 3031 { 3032 u64 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3033 u64 type = ffs(msr >> VPID_CAP_INVVPID_TYPES_SHIFT) - 1; 3034 3035 TEST_ASSERT(type >= INVVPID_ADDR && type <= INVVPID_CONTEXT_LOCAL); 3036 asm volatile("sub %%rsp,%0; invvpid (%%rsp,%0,1), %1" 3037 : "+r"(data) 3038 : "r"(type)); 3039 } 3040 3041 static void invvpid_test_gp(void) 3042 { 3043 bool fault; 3044 3045 fault = test_for_exception(GP_VECTOR, &ds_invvpid, 3046 (void *)NONCANONICAL); 3047 report("INVVPID with non-canonical DS operand raises #GP", fault); 3048 } 3049 3050 static void invvpid_test_ss(void) 3051 { 3052 bool fault; 3053 3054 fault = test_for_exception(SS_VECTOR, &ss_invvpid, 3055 (void *)NONCANONICAL); 3056 report("INVVPID with non-canonical SS operand raises #SS", fault); 3057 } 3058 3059 static void invvpid_test_pf(void) 3060 { 3061 void *vpage = alloc_vpage(); 3062 bool fault; 3063 3064 fault = test_for_exception(PF_VECTOR, &ds_invvpid, vpage); 3065 report("INVVPID with unmapped operand raises #PF", fault); 3066 } 3067 3068 static void try_compat_invvpid(void *unused) 3069 { 3070 struct far_pointer32 fp = { 3071 .offset = (uintptr_t)&&invvpid, 3072 .selector = KERNEL_CS32, 3073 }; 3074 register uintptr_t rsp asm("rsp"); 3075 3076 TEST_ASSERT_MSG(fp.offset == (uintptr_t)&&invvpid, 3077 "Code address too high."); 3078 TEST_ASSERT_MSG(rsp == (u32)rsp, "Stack address too high."); 3079 3080 asm goto ("lcall *%0" : : "m" (fp) : "rax" : invvpid); 3081 return; 3082 invvpid: 3083 asm volatile (".code32;" 3084 "invvpid (%eax), %eax;" 3085 "lret;" 3086 ".code64"); 3087 __builtin_unreachable(); 3088 } 3089 3090 static void invvpid_test_compatibility_mode(void) 3091 { 3092 bool fault; 3093 3094 fault = test_for_exception(UD_VECTOR, &try_compat_invvpid, NULL); 3095 report("Compatibility mode INVVPID raises #UD", fault); 3096 } 3097 3098 static void invvpid_test_not_in_vmx_operation(void) 3099 { 3100 bool fault; 3101 3102 TEST_ASSERT(!vmx_off()); 3103 fault = test_for_exception(UD_VECTOR, &ds_invvpid, NULL); 3104 report("INVVPID outside of VMX operation raises #UD", fault); 3105 TEST_ASSERT(!vmx_on()); 3106 } 3107 3108 /* 3109 * This does not test real-address mode, virtual-8086 mode, protected mode, 3110 * or CPL > 0. 3111 */ 3112 static void invvpid_test_v2(void) 3113 { 3114 u64 msr; 3115 int i; 3116 unsigned types = 0; 3117 unsigned type; 3118 3119 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) || 3120 !(ctrl_cpu_rev[1].clr & CPU_VPID)) 3121 test_skip("VPID not supported"); 3122 3123 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); 3124 3125 if (!(msr & VPID_CAP_INVVPID)) 3126 test_skip("INVVPID not supported.\n"); 3127 3128 if (msr & VPID_CAP_INVVPID_ADDR) 3129 types |= 1u << INVVPID_ADDR; 3130 if (msr & VPID_CAP_INVVPID_CXTGLB) 3131 types |= 1u << INVVPID_CONTEXT_GLOBAL; 3132 if (msr & VPID_CAP_INVVPID_ALL) 3133 types |= 1u << INVVPID_ALL; 3134 if (msr & VPID_CAP_INVVPID_CXTLOC) 3135 types |= 1u << INVVPID_CONTEXT_LOCAL; 3136 3137 if (!types) 3138 test_skip("No INVVPID types supported.\n"); 3139 3140 for (i = -127; i < 128; i++) 3141 try_invvpid(i, 0xffff, 0); 3142 3143 /* 3144 * VPID must not be more than 16 bits. 3145 */ 3146 for (i = 0; i < 64; i++) 3147 for (type = 0; type < 4; type++) 3148 if (types & (1u << type)) 3149 try_invvpid(type, 1ul << i, 0); 3150 3151 /* 3152 * VPID must not be zero, except for "all contexts." 
3153 */ 3154 for (type = 0; type < 4; type++) 3155 if (types & (1u << type)) 3156 try_invvpid(type, 0, 0); 3157 3158 /* 3159 * The gla operand is only validated for single-address INVVPID. 3160 */ 3161 if (types & (1u << INVVPID_ADDR)) 3162 try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL); 3163 3164 invvpid_test_gp(); 3165 invvpid_test_ss(); 3166 invvpid_test_pf(); 3167 invvpid_test_compatibility_mode(); 3168 invvpid_test_not_in_vmx_operation(); 3169 } 3170 3171 /* 3172 * Test for early VMLAUNCH failure. Returns true if VMLAUNCH makes it 3173 * at least as far as the guest-state checks. Returns false if the 3174 * VMLAUNCH fails early and execution falls through to the next 3175 * instruction. 3176 */ 3177 static bool vmlaunch_succeeds(void) 3178 { 3179 /* 3180 * Indirectly set VMX_INST_ERR to 12 ("VMREAD/VMWRITE from/to 3181 * unsupported VMCS component"). The caller can then check 3182 * to see if a failed VM-entry sets VMX_INST_ERR as expected. 3183 */ 3184 vmcs_write(~0u, 0); 3185 3186 vmcs_write(HOST_RIP, (uintptr_t)&&success); 3187 __asm__ __volatile__ goto ("vmwrite %%rsp, %0; vmlaunch" 3188 : 3189 : "r" ((u64)HOST_RSP) 3190 : "cc", "memory" 3191 : success); 3192 return false; 3193 success: 3194 TEST_ASSERT(vmcs_read(EXI_REASON) == 3195 (VMX_FAIL_STATE | VMX_ENTRY_FAILURE)); 3196 return true; 3197 } 3198 3199 /* 3200 * Try to launch the current VMCS. 3201 */ 3202 static void test_vmx_controls(bool controls_valid, bool xfail) 3203 { 3204 bool success = vmlaunch_succeeds(); 3205 u32 vmx_inst_err; 3206 3207 report_xfail("vmlaunch %s", xfail, success == controls_valid, 3208 controls_valid ? "succeeds" : "fails"); 3209 if (!success) { 3210 vmx_inst_err = vmcs_read(VMX_INST_ERROR); 3211 report("VMX inst error is %d (actual %d)", 3212 vmx_inst_err == VMXERR_ENTRY_INVALID_CONTROL_FIELD, 3213 VMXERR_ENTRY_INVALID_CONTROL_FIELD, vmx_inst_err); 3214 } 3215 } 3216 3217 /* 3218 * Test a particular value of a VM-execution control bit, if the value 3219 * is required or if the value is zero. 3220 */ 3221 static void test_rsvd_ctl_bit_value(const char *name, union vmx_ctrl_msr msr, 3222 enum Encoding encoding, unsigned bit, 3223 unsigned val) 3224 { 3225 u32 mask = 1u << bit; 3226 bool expected; 3227 u32 controls; 3228 3229 if (msr.set & mask) 3230 TEST_ASSERT(msr.clr & mask); 3231 3232 /* 3233 * We can't arbitrarily turn on a control bit, because it may 3234 * introduce dependencies on other VMCS fields. So, we only 3235 * test turning on bits that have a required setting. 3236 */ 3237 if (val && (msr.clr & mask) && !(msr.set & mask)) 3238 return; 3239 3240 report_prefix_pushf("%s %s bit %d", 3241 val ? "Set" : "Clear", name, bit); 3242 3243 controls = vmcs_read(encoding); 3244 if (val) { 3245 vmcs_write(encoding, msr.set | mask); 3246 expected = (msr.clr & mask); 3247 } else { 3248 vmcs_write(encoding, msr.set & ~mask); 3249 expected = !(msr.set & mask); 3250 } 3251 test_vmx_controls(expected, false); 3252 vmcs_write(encoding, controls); 3253 report_prefix_pop(); 3254 } 3255 3256 /* 3257 * Test reserved values of a VM-execution control bit, based on the 3258 * allowed bit settings from the corresponding VMX capability MSR. 3259 */ 3260 static void test_rsvd_ctl_bit(const char *name, union vmx_ctrl_msr msr, 3261 enum Encoding encoding, unsigned bit) 3262 { 3263 test_rsvd_ctl_bit_value(name, msr, encoding, bit, 0); 3264 test_rsvd_ctl_bit_value(name, msr, encoding, bit, 1); 3265 } 3266 3267 /* 3268 * Reserved bits in the pin-based VM-execution controls must be set 3269 * properly. 
Software may consult the VMX capability MSRs to determine 3270 * the proper settings. 3271 * [Intel SDM] 3272 */ 3273 static void test_pin_based_ctls(void) 3274 { 3275 unsigned bit; 3276 3277 printf("%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PIN" : 3278 "MSR_IA32_VMX_PINBASED_CTLS", ctrl_pin_rev.val); 3279 for (bit = 0; bit < 32; bit++) 3280 test_rsvd_ctl_bit("pin-based controls", 3281 ctrl_pin_rev, PIN_CONTROLS, bit); 3282 } 3283 3284 /* 3285 * Reserved bits in the primary processor-based VM-execution controls 3286 * must be set properly. Software may consult the VMX capability MSRs 3287 * to determine the proper settings. 3288 * [Intel SDM] 3289 */ 3290 static void test_primary_processor_based_ctls(void) 3291 { 3292 unsigned bit; 3293 3294 printf("\n%s: %lx\n", basic.ctrl ? "MSR_IA32_VMX_TRUE_PROC" : 3295 "MSR_IA32_VMX_PROCBASED_CTLS", ctrl_cpu_rev[0].val); 3296 for (bit = 0; bit < 32; bit++) 3297 test_rsvd_ctl_bit("primary processor-based controls", 3298 ctrl_cpu_rev[0], CPU_EXEC_CTRL0, bit); 3299 } 3300 3301 /* 3302 * If the "activate secondary controls" primary processor-based 3303 * VM-execution control is 1, reserved bits in the secondary 3304 * processor-based VM-execution controls must be cleared. Software may 3305 * consult the VMX capability MSRs to determine which bits are 3306 * reserved. 3307 * If the "activate secondary controls" primary processor-based 3308 * VM-execution control is 0 (or if the processor does not support the 3309 * 1-setting of that control), no checks are performed on the 3310 * secondary processor-based VM-execution controls. 3311 * [Intel SDM] 3312 */ 3313 static void test_secondary_processor_based_ctls(void) 3314 { 3315 u32 primary; 3316 u32 secondary; 3317 unsigned bit; 3318 3319 if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY)) 3320 return; 3321 3322 primary = vmcs_read(CPU_EXEC_CTRL0); 3323 secondary = vmcs_read(CPU_EXEC_CTRL1); 3324 3325 vmcs_write(CPU_EXEC_CTRL0, primary | CPU_SECONDARY); 3326 printf("\nMSR_IA32_VMX_PROCBASED_CTLS2: %lx\n", ctrl_cpu_rev[1].val); 3327 for (bit = 0; bit < 32; bit++) 3328 test_rsvd_ctl_bit("secondary processor-based controls", 3329 ctrl_cpu_rev[1], CPU_EXEC_CTRL1, bit); 3330 3331 /* 3332 * When the "activate secondary controls" VM-execution control 3333 * is clear, there are no checks on the secondary controls. 3334 */ 3335 vmcs_write(CPU_EXEC_CTRL0, primary & ~CPU_SECONDARY); 3336 vmcs_write(CPU_EXEC_CTRL1, ~0); 3337 report("Secondary processor-based controls ignored", 3338 vmlaunch_succeeds()); 3339 vmcs_write(CPU_EXEC_CTRL1, secondary); 3340 vmcs_write(CPU_EXEC_CTRL0, primary); 3341 } 3342 3343 static void try_cr3_target_count(unsigned i, unsigned max) 3344 { 3345 report_prefix_pushf("CR3 target count 0x%x", i); 3346 vmcs_write(CR3_TARGET_COUNT, i); 3347 test_vmx_controls(i <= max, false); 3348 report_prefix_pop(); 3349 } 3350 3351 /* 3352 * The CR3-target count must not be greater than 4. Future processors 3353 * may support a different number of CR3-target values. Software 3354 * should read the VMX capability MSR IA32_VMX_MISC to determine the 3355 * number of values supported. 
3356 * [Intel SDM] 3357 */ 3358 static void test_cr3_targets(void) 3359 { 3360 unsigned supported_targets = (rdmsr(MSR_IA32_VMX_MISC) >> 16) & 0x1ff; 3361 u32 cr3_targets = vmcs_read(CR3_TARGET_COUNT); 3362 unsigned i; 3363 3364 printf("\nSupported CR3 targets: %d\n", supported_targets); 3365 TEST_ASSERT(supported_targets <= 256); 3366 3367 try_cr3_target_count(-1u, supported_targets); 3368 try_cr3_target_count(0x80000000, supported_targets); 3369 try_cr3_target_count(0x7fffffff, supported_targets); 3370 for (i = 0; i <= supported_targets + 1; i++) 3371 try_cr3_target_count(i, supported_targets); 3372 vmcs_write(CR3_TARGET_COUNT, cr3_targets); 3373 } 3374 3375 /* 3376 * Test a particular address setting for a physical page reference in 3377 * the VMCS. 3378 */ 3379 static void test_vmcs_page_addr(const char *name, 3380 enum Encoding encoding, 3381 bool ignored, 3382 bool xfail_beyond_mapped_ram, 3383 u64 addr) 3384 { 3385 bool xfail = 3386 (xfail_beyond_mapped_ram && 3387 addr > fwcfg_get_u64(FW_CFG_RAM_SIZE) - PAGE_SIZE && 3388 addr < (1ul << cpuid_maxphyaddr())); 3389 3390 report_prefix_pushf("%s = %lx", name, addr); 3391 vmcs_write(encoding, addr); 3392 test_vmx_controls(ignored || (IS_ALIGNED(addr, PAGE_SIZE) && 3393 addr < (1ul << cpuid_maxphyaddr())), 3394 xfail); 3395 report_prefix_pop(); 3396 xfail = false; 3397 } 3398 3399 /* 3400 * Test interesting values for a physical page reference in the VMCS. 3401 */ 3402 static void test_vmcs_page_values(const char *name, 3403 enum Encoding encoding, 3404 bool ignored, 3405 bool xfail_beyond_mapped_ram) 3406 { 3407 unsigned i; 3408 u64 orig_val = vmcs_read(encoding); 3409 3410 for (i = 0; i < 64; i++) 3411 test_vmcs_page_addr(name, encoding, ignored, 3412 xfail_beyond_mapped_ram, 1ul << i); 3413 3414 test_vmcs_page_addr(name, encoding, ignored, 3415 xfail_beyond_mapped_ram, PAGE_SIZE - 1); 3416 test_vmcs_page_addr(name, encoding, ignored, 3417 xfail_beyond_mapped_ram, PAGE_SIZE); 3418 test_vmcs_page_addr(name, encoding, ignored, 3419 xfail_beyond_mapped_ram, 3420 (1ul << cpuid_maxphyaddr()) - PAGE_SIZE); 3421 test_vmcs_page_addr(name, encoding, ignored, 3422 xfail_beyond_mapped_ram, 3423 -1ul); 3424 3425 vmcs_write(encoding, orig_val); 3426 } 3427 3428 /* 3429 * Test a physical page reference in the VMCS, when the corresponding 3430 * feature is enabled and when the corresponding feature is disabled. 3431 */ 3432 static void test_vmcs_page_reference(u32 control_bit, enum Encoding field, 3433 const char *field_name, 3434 const char *control_name, 3435 bool xfail_beyond_mapped_ram) 3436 { 3437 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 3438 u64 page_addr; 3439 3440 if (!(ctrl_cpu_rev[0].clr & control_bit)) 3441 return; 3442 3443 page_addr = vmcs_read(field); 3444 3445 report_prefix_pushf("%s enabled", control_name); 3446 vmcs_write(CPU_EXEC_CTRL0, primary | control_bit); 3447 test_vmcs_page_values(field_name, field, false, xfail_beyond_mapped_ram); 3448 report_prefix_pop(); 3449 3450 report_prefix_pushf("%s disabled", control_name); 3451 vmcs_write(CPU_EXEC_CTRL0, primary & ~control_bit); 3452 test_vmcs_page_values(field_name, field, true, false); 3453 report_prefix_pop(); 3454 3455 vmcs_write(field, page_addr); 3456 vmcs_write(CPU_EXEC_CTRL0, primary); 3457 } 3458 3459 /* 3460 * If the "use I/O bitmaps" VM-execution control is 1, bits 11:0 of 3461 * each I/O-bitmap address must be 0. Neither address should set any 3462 * bits beyond the processor's physical-address width. 
3463 * [Intel SDM] 3464 */ 3465 static void test_io_bitmaps(void) 3466 { 3467 test_vmcs_page_reference(CPU_IO_BITMAP, IO_BITMAP_A, 3468 "I/O bitmap A", "Use I/O bitmaps", false); 3469 test_vmcs_page_reference(CPU_IO_BITMAP, IO_BITMAP_B, 3470 "I/O bitmap B", "Use I/O bitmaps", false); 3471 } 3472 3473 /* 3474 * If the "use MSR bitmaps" VM-execution control is 1, bits 11:0 of 3475 * the MSR-bitmap address must be 0. The address should not set any 3476 * bits beyond the processor's physical-address width. 3477 * [Intel SDM] 3478 */ 3479 static void test_msr_bitmap(void) 3480 { 3481 test_vmcs_page_reference(CPU_MSR_BITMAP, MSR_BITMAP, 3482 "MSR bitmap", "Use MSR bitmaps", false); 3483 } 3484 3485 /* 3486 * If the "use TPR shadow" VM-execution control is 1, the virtual-APIC 3487 * address must satisfy the following checks: 3488 * - Bits 11:0 of the address must be 0. 3489 * - The address should not set any bits beyond the processor's 3490 * physical-address width. 3491 * [Intel SDM] 3492 */ 3493 static void test_apic_virt_addr(void) 3494 { 3495 test_vmcs_page_reference(CPU_TPR_SHADOW, APIC_VIRT_ADDR, 3496 "virtual-APIC address", "Use TPR shadow", true); 3497 } 3498 3499 static void set_vtpr(unsigned vtpr) 3500 { 3501 *(u32 *)phys_to_virt(vmcs_read(APIC_VIRT_ADDR) + APIC_TASKPRI) = vtpr; 3502 } 3503 3504 static void try_tpr_threshold_and_vtpr(unsigned threshold, unsigned vtpr) 3505 { 3506 bool valid = true; 3507 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 3508 u32 secondary = vmcs_read(CPU_EXEC_CTRL1); 3509 3510 if ((primary & CPU_TPR_SHADOW) && 3511 (!(primary & CPU_SECONDARY) || 3512 !(secondary & (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)))) 3513 valid = (threshold & 0xf) <= ((vtpr >> 4) & 0xf); 3514 3515 set_vtpr(vtpr); 3516 report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0x%x", 3517 threshold, (vtpr >> 4) & 0xf); 3518 test_vmx_controls(valid, false); 3519 report_prefix_pop(); 3520 } 3521 3522 /* 3523 * Test interesting vTPR values for a given TPR threshold. 3524 */ 3525 static void test_vtpr_values(unsigned threshold) 3526 { 3527 try_tpr_threshold_and_vtpr(threshold, threshold - 1); 3528 try_tpr_threshold_and_vtpr(threshold, threshold); 3529 try_tpr_threshold_and_vtpr(threshold, threshold + 1); 3530 } 3531 3532 static void try_tpr_threshold(unsigned threshold) 3533 { 3534 bool valid = true; 3535 3536 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 3537 u32 secondary = vmcs_read(CPU_EXEC_CTRL1); 3538 3539 if ((primary & CPU_TPR_SHADOW) && !((primary & CPU_SECONDARY) && 3540 (secondary & CPU_VINTD))) 3541 valid = !(threshold >> 4); 3542 3543 set_vtpr(-1); 3544 vmcs_write(TPR_THRESHOLD, threshold); 3545 report_prefix_pushf("TPR threshold 0x%x, VTPR.class 0xf", threshold); 3546 test_vmx_controls(valid, false); 3547 report_prefix_pop(); 3548 3549 if (valid) 3550 test_vtpr_values(threshold); 3551 } 3552 3553 /* 3554 * Test interesting TPR threshold values. 3555 */ 3556 static void test_tpr_threshold_values(void) 3557 { 3558 unsigned i; 3559 3560 for (i = 0; i < 0x10; i++) 3561 try_tpr_threshold(i); 3562 for (i = 4; i < 32; i++) 3563 try_tpr_threshold(1u << i); 3564 try_tpr_threshold(-1u); 3565 try_tpr_threshold(0x7fffffff); 3566 } 3567 3568 /* 3569 * This test covers the following two VM entry checks: 3570 * 3571 * i) If the "use TPR shadow" VM-execution control is 1 and the 3572 * "virtual-interrupt delivery" VM-execution control is 0, bits 3573 * 31:4 of the TPR threshold VM-execution control field must 3574 be 0. 
* [Intel SDM] 3576 * 3577 * ii) If the "use TPR shadow" VM-execution control is 1, the 3578 * "virtual-interrupt delivery" VM-execution control is 0 3579 * and the "virtualize APIC accesses" VM-execution control 3580 * is 0, the value of bits 3:0 of the TPR threshold VM-execution 3581 * control field must not be greater than the value of bits 3582 * 7:4 of VTPR. 3583 * [Intel SDM] 3584 */ 3585 static void test_tpr_threshold(void) 3586 { 3587 u32 primary = vmcs_read(CPU_EXEC_CTRL0); 3588 void *virtual_apic_page; 3589 3590 if (!(ctrl_cpu_rev[0].clr & CPU_TPR_SHADOW)) 3591 return; 3592 3593 virtual_apic_page = alloc_page(); 3594 memset(virtual_apic_page, 0xff, PAGE_SIZE); 3595 vmcs_write(APIC_VIRT_ADDR, virt_to_phys(virtual_apic_page)); 3596 3597 vmcs_write(CPU_EXEC_CTRL0, primary & ~(CPU_TPR_SHADOW | CPU_SECONDARY)); 3598 report_prefix_pushf("Use TPR shadow disabled, secondary controls disabled"); 3599 test_tpr_threshold_values(); 3600 report_prefix_pop(); 3601 vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | CPU_TPR_SHADOW); 3602 report_prefix_pushf("Use TPR shadow enabled"); 3603 test_tpr_threshold_values(); 3604 report_prefix_pop(); 3605 3606 if (!((ctrl_cpu_rev[0].clr & CPU_SECONDARY) && 3607 (ctrl_cpu_rev[1].clr & (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)))) 3608 return; 3609 3610 u32 secondary = vmcs_read(CPU_EXEC_CTRL1); 3611 3612 if (ctrl_cpu_rev[1].clr & CPU_VINTD) { 3613 vmcs_write(CPU_EXEC_CTRL1, CPU_VINTD); 3614 report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled"); 3615 test_tpr_threshold_values(); 3616 report_prefix_pop(); 3617 3618 vmcs_write(CPU_EXEC_CTRL0, 3619 vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY); 3620 report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses disabled"); 3621 test_tpr_threshold_values(); 3622 report_prefix_pop(); 3623 } 3624 3625 if (ctrl_cpu_rev[1].clr & CPU_VIRT_APIC_ACCESSES) { 3626 vmcs_write(CPU_EXEC_CTRL0, 3627 vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY); 3628 vmcs_write(CPU_EXEC_CTRL1, CPU_VIRT_APIC_ACCESSES); 3629 report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery disabled; virtualize APIC accesses enabled"); 3630 test_tpr_threshold_values(); 3631 report_prefix_pop(); 3632 3633 vmcs_write(CPU_EXEC_CTRL0, 3634 vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY); 3635 report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery disabled; virtualize APIC accesses enabled"); 3636 test_tpr_threshold_values(); 3637 report_prefix_pop(); 3638 } 3639 3640 if ((ctrl_cpu_rev[1].clr & 3641 (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) == 3642 (CPU_VINTD | CPU_VIRT_APIC_ACCESSES)) { 3643 vmcs_write(CPU_EXEC_CTRL0, 3644 vmcs_read(CPU_EXEC_CTRL0) & ~CPU_SECONDARY); 3645 vmcs_write(CPU_EXEC_CTRL1, 3646 CPU_VINTD | CPU_VIRT_APIC_ACCESSES); 3647 report_prefix_pushf("Use TPR shadow enabled; secondary controls disabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled"); 3648 test_tpr_threshold_values(); 3649 report_prefix_pop(); 3650 3651 vmcs_write(CPU_EXEC_CTRL0, 3652 vmcs_read(CPU_EXEC_CTRL0) | CPU_SECONDARY); 3653 report_prefix_pushf("Use TPR shadow enabled; secondary controls enabled; virtual-interrupt delivery enabled; virtualize APIC accesses enabled"); 3654 test_tpr_threshold_values(); 3655 report_prefix_pop(); 3656 } 3657 3658 vmcs_write(CPU_EXEC_CTRL1, secondary); 3659
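/* Put the saved primary controls back as well. */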
vmcs_write(CPU_EXEC_CTRL0, primary); 3660 } 3661 3662 /* 3663 * This test verifies the following two vmentry checks: 3664 * 3665 * If the "NMI exiting" VM-execution control is 0, "Virtual NMIs" 3666 * VM-execution control must be 0. 3667 * [Intel SDM] 3668 * 3669 * If the “virtual NMIs” VM-execution control is 0, the “NMI-window 3670 * exiting” VM-execution control must be 0. 3671 * [Intel SDM] 3672 */ 3673 static void test_nmi_ctrls(void) 3674 { 3675 u32 pin_ctrls, cpu_ctrls0, test_pin_ctrls, test_cpu_ctrls0; 3676 3677 if ((ctrl_pin_rev.clr & (PIN_NMI | PIN_VIRT_NMI)) != 3678 (PIN_NMI | PIN_VIRT_NMI)) { 3679 test_skip("NMI exiting and Virtual NMIs are not supported !"); 3680 return; 3681 } 3682 3683 /* Save the controls so that we can restore them after our tests */ 3684 pin_ctrls = vmcs_read(PIN_CONTROLS); 3685 cpu_ctrls0 = vmcs_read(CPU_EXEC_CTRL0); 3686 3687 test_pin_ctrls = pin_ctrls & ~(PIN_NMI | PIN_VIRT_NMI); 3688 test_cpu_ctrls0 = cpu_ctrls0 & ~CPU_NMI_WINDOW; 3689 3690 vmcs_write(PIN_CONTROLS, test_pin_ctrls); 3691 report_prefix_pushf("NMI-exiting disabled, virtual-NMIs disabled"); 3692 test_vmx_controls(true, false); 3693 report_prefix_pop(); 3694 3695 vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_VIRT_NMI); 3696 report_prefix_pushf("NMI-exiting disabled, virtual-NMIs enabled"); 3697 test_vmx_controls(false, false); 3698 report_prefix_pop(); 3699 3700 vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI)); 3701 report_prefix_pushf("NMI-exiting enabled, virtual-NMIs enabled"); 3702 test_vmx_controls(true, false); 3703 report_prefix_pop(); 3704 3705 vmcs_write(PIN_CONTROLS, test_pin_ctrls | PIN_NMI); 3706 report_prefix_pushf("NMI-exiting enabled, virtual-NMIs disabled"); 3707 test_vmx_controls(true, false); 3708 report_prefix_pop(); 3709 3710 if (!(ctrl_cpu_rev[0].clr & CPU_NMI_WINDOW)) { 3711 report_info("NMI-window exiting is not supported, skipping..."); 3712 goto done; 3713 } 3714 3715 vmcs_write(PIN_CONTROLS, test_pin_ctrls); 3716 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW); 3717 report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting enabled"); 3718 test_vmx_controls(false, false); 3719 report_prefix_pop(); 3720 3721 vmcs_write(PIN_CONTROLS, test_pin_ctrls); 3722 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0); 3723 report_prefix_pushf("Virtual-NMIs disabled, NMI-window-exiting disabled"); 3724 test_vmx_controls(true, false); 3725 report_prefix_pop(); 3726 3727 vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI)); 3728 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0 | CPU_NMI_WINDOW); 3729 report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting enabled"); 3730 test_vmx_controls(true, false); 3731 report_prefix_pop(); 3732 3733 vmcs_write(PIN_CONTROLS, test_pin_ctrls | (PIN_NMI | PIN_VIRT_NMI)); 3734 vmcs_write(CPU_EXEC_CTRL0, test_cpu_ctrls0); 3735 report_prefix_pushf("Virtual-NMIs enabled, NMI-window-exiting disabled"); 3736 test_vmx_controls(true, false); 3737 report_prefix_pop(); 3738 3739 /* Restore the controls to their original values */ 3740 vmcs_write(CPU_EXEC_CTRL0, cpu_ctrls0); 3741 done: 3742 vmcs_write(PIN_CONTROLS, pin_ctrls); 3743 } 3744 3745 3746 /* 3747 * Check that the virtual CPU checks all of the VMX controls as 3748 * documented in the Intel SDM. 3749 */ 3750 static void vmx_controls_test(void) 3751 { 3752 /* 3753 * Bit 1 of the guest's RFLAGS must be 1, or VM-entry will 3754 * fail due to invalid guest state, should we make it that 3755 * far. 
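 * (Bit 1 of RFLAGS is the reserved, always-one bit, so writing 0 below
 * guarantees invalid guest state: entries whose control checks pass still
 * fail before any guest code runs.)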
3756 */ 3757 vmcs_write(GUEST_RFLAGS, 0); 3758 3759 test_pin_based_ctls(); 3760 test_primary_processor_based_ctls(); 3761 test_secondary_processor_based_ctls(); 3762 test_cr3_targets(); 3763 test_io_bitmaps(); 3764 test_msr_bitmap(); 3765 test_apic_virt_addr(); 3766 test_tpr_threshold(); 3767 test_nmi_ctrls(); 3768 } 3769 3770 static bool valid_vmcs_for_vmentry(void) 3771 { 3772 struct vmcs *current_vmcs = NULL; 3773 3774 if (vmcs_save(¤t_vmcs)) 3775 return false; 3776 3777 return current_vmcs && !(current_vmcs->revision_id >> 31); 3778 } 3779 3780 static void try_vmentry_in_movss_shadow(void) 3781 { 3782 u32 vm_inst_err; 3783 u32 flags; 3784 bool early_failure = false; 3785 u32 expected_flags = X86_EFLAGS_FIXED; 3786 bool valid_vmcs = valid_vmcs_for_vmentry(); 3787 3788 expected_flags |= valid_vmcs ? X86_EFLAGS_ZF : X86_EFLAGS_CF; 3789 3790 /* 3791 * Indirectly set VM_INST_ERR to 12 ("VMREAD/VMWRITE from/to 3792 * unsupported VMCS component"). 3793 */ 3794 vmcs_write(~0u, 0); 3795 3796 __asm__ __volatile__ ("mov %[host_rsp], %%edx;" 3797 "vmwrite %%rsp, %%rdx;" 3798 "mov 0f, %%rax;" 3799 "mov %[host_rip], %%edx;" 3800 "vmwrite %%rax, %%rdx;" 3801 "mov $-1, %%ah;" 3802 "sahf;" 3803 "mov %%ss, %%ax;" 3804 "mov %%ax, %%ss;" 3805 "vmlaunch;" 3806 "mov $1, %[early_failure];" 3807 "0: lahf;" 3808 "movzbl %%ah, %[flags]" 3809 : [early_failure] "+r" (early_failure), 3810 [flags] "=&a" (flags) 3811 : [host_rsp] "i" (HOST_RSP), 3812 [host_rip] "i" (HOST_RIP) 3813 : "rdx", "cc", "memory"); 3814 vm_inst_err = vmcs_read(VMX_INST_ERROR); 3815 3816 report("Early VM-entry failure", early_failure); 3817 report("RFLAGS[8:0] is %x (actual %x)", flags == expected_flags, 3818 expected_flags, flags); 3819 if (valid_vmcs) 3820 report("VM-instruction error is %d (actual %d)", 3821 vm_inst_err == VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, 3822 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS, vm_inst_err); 3823 } 3824 3825 static void vmentry_movss_shadow_test(void) 3826 { 3827 struct vmcs *orig_vmcs; 3828 3829 TEST_ASSERT(!vmcs_save(&orig_vmcs)); 3830 3831 /* 3832 * Set the launched flag on the current VMCS to verify the correct 3833 * error priority, below. 3834 */ 3835 test_set_guest(v2_null_test_guest); 3836 enter_guest(); 3837 3838 /* 3839 * With bit 1 of the guest's RFLAGS clear, VM-entry should 3840 * fail due to invalid guest state (if we make it that far). 3841 */ 3842 vmcs_write(GUEST_RFLAGS, 0); 3843 3844 /* 3845 * "VM entry with events blocked by MOV SS" takes precedence over 3846 * "VMLAUNCH with non-clear VMCS." 3847 */ 3848 report_prefix_push("valid current-VMCS"); 3849 try_vmentry_in_movss_shadow(); 3850 report_prefix_pop(); 3851 3852 /* 3853 * VMfailInvalid takes precedence over "VM entry with events 3854 * blocked by MOV SS." 
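 * VMCLEAR of the current VMCS below also invalidates the current-VMCS
 * pointer, so the VMLAUNCH attempted in the MOV-SS shadow reports
 * VMfailInvalid (CF set) rather than VMfailValid.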
	 */
	TEST_ASSERT(!vmcs_clear(orig_vmcs));
	report_prefix_push("no current-VMCS");
	try_vmentry_in_movss_shadow();
	report_prefix_pop();

	TEST_ASSERT(!make_vmcs_current(orig_vmcs));
	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED);
}

#define X86_FEATURE_PCID	(1 << 17)
#define X86_FEATURE_MCE		(1 << 7)

static int write_cr4_checking(unsigned long val)
{
	asm volatile(ASM_TRY("1f")
		     "mov %0, %%cr4\n\t"
		     "1:": : "r" (val));
	return exception_vector();
}

static void vmx_cr_load_test(void)
{
	struct cpuid _cpuid = cpuid(1);
	unsigned long cr4 = read_cr4(), cr3 = read_cr3();

	if (!(_cpuid.c & X86_FEATURE_PCID)) {
		report_skip("PCID not detected");
		return;
	}
	if (!(_cpuid.d & X86_FEATURE_MCE)) {
		report_skip("MCE not detected");
		return;
	}

	TEST_ASSERT(!(cr4 & (X86_CR4_PCIDE | X86_CR4_MCE)));
	TEST_ASSERT(!(cr3 & X86_CR3_PCID_MASK));

	/* Enable PCID for L1. */
	cr4 |= X86_CR4_PCIDE;
	cr3 |= 0x1;
	TEST_ASSERT(!write_cr4_checking(cr4));
	write_cr3(cr3);

	test_set_guest(v2_null_test_guest);
	vmcs_write(HOST_CR4, cr4);
	vmcs_write(HOST_CR3, cr3);
	enter_guest();

	/*
	 * No exception is expected.
	 *
	 * NB. KVM loads the last guest write to CR4 into CR4 read
	 * shadow. In order to trigger an exit to KVM, we can set a
	 * bit that was zero in the above CR4 write and is owned by
	 * KVM. We choose to set CR4.MCE, which shall have no side
	 * effect because normally no guest MCE (e.g., as the result
	 * of bad memory) would happen during this test.
	 */
	TEST_ASSERT(!write_cr4_checking(cr4 | X86_CR4_MCE));

	/* Cleanup L1 state: disable PCID. */
	write_cr3(cr3 & ~X86_CR3_PCID_MASK);
	TEST_ASSERT(!write_cr4_checking(cr4 & ~X86_CR4_PCIDE));
}
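
/*
 * Illustrative sketch only (not used by any test in this file): the ASM_TRY()
 * idiom used by write_cr4_checking() above can wrap any privileged write
 * whose fault we want to observe instead of letting it kill the test.
 * write_cr0_checking() is a hypothetical name introduced purely as an
 * example; it returns the exception vector, or 0 if the write succeeded.
 */
static int __attribute__((unused)) write_cr0_checking(unsigned long val)
{
	asm volatile(ASM_TRY("1f")
		     "mov %0, %%cr0\n\t"
		     "1:": : "r" (val));
	return exception_vector();
}
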
static bool cpu_has_apicv(void)
{
	return ((ctrl_cpu_rev[1].clr & CPU_APIC_REG_VIRT) &&
		(ctrl_cpu_rev[1].clr & CPU_VINTD) &&
		(ctrl_pin_rev.clr & PIN_POST_INTR));
}

static void trigger_ioapic_scan_thread(void *data)
{
	/* Wait until the other CPU has entered L2 */
	while (vmx_get_test_stage() != 1)
		;

	/* Trigger ioapic scan */
	ioapic_set_redir(0xf, 0x79, TRIGGER_LEVEL);
	vmx_set_test_stage(2);
}

static void irq_79_handler_guest(isr_regs_t *regs)
{
	eoi();

	/* L1 expects a vmexit on VMX_VMCALL and not on VMX_EOI_INDUCED */
	vmcall();
}

/*
 * Number of busy-loop iterations after which a timer interrupt
 * should have fired in the host.
 */
#define TIMER_INTERRUPT_DELAY 100000000

static void vmx_eoi_bitmap_ioapic_scan_test_guest(void)
{
	handle_irq(0x79, irq_79_handler_guest);
	irq_enable();

	/* Signal the L1 CPU to trigger the ioapic scan */
	vmx_set_test_stage(1);
	/* Wait until the L1 CPU has triggered the ioapic scan */
	while (vmx_get_test_stage() != 2)
		;

	/*
	 * Wait for an L0 timer interrupt to be raised while we run in L2,
	 * so that L0 processes the IOAPIC scan request before resuming L2.
	 */
	delay(TIMER_INTERRUPT_DELAY);

	asm volatile ("int $0x79");
}

static void vmx_eoi_bitmap_ioapic_scan_test(void)
{
	void *msr_bitmap;
	void *virtual_apic_page;

	if (!cpu_has_apicv() || (cpu_count() < 2)) {
		report_skip(__func__);
		return;
	}

	msr_bitmap = alloc_page();
	virtual_apic_page = alloc_page();

	u64 cpu_ctrl_0 = CPU_SECONDARY | CPU_TPR_SHADOW | CPU_MSR_BITMAP;
	u64 cpu_ctrl_1 = CPU_VINTD | CPU_VIRT_X2APIC;

	memset(msr_bitmap, 0x0, PAGE_SIZE);
	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);

	vmcs_write(APIC_VIRT_ADDR, (u64)virtual_apic_page);
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_EXTINT);

	vmcs_write(EOI_EXIT_BITMAP0, 0x0);
	vmcs_write(EOI_EXIT_BITMAP1, 0x0);
	vmcs_write(EOI_EXIT_BITMAP2, 0x0);
	vmcs_write(EOI_EXIT_BITMAP3, 0x0);

	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | cpu_ctrl_0);
	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | cpu_ctrl_1);

	on_cpu_async(1, trigger_ioapic_scan_thread, NULL);
	test_set_guest(vmx_eoi_bitmap_ioapic_scan_test_guest);

	/*
	 * Launch L2.
	 * We expect the exit reason to be VMX_VMCALL (and not EOI INDUCED).
	 * If the reason isn't VMX_VMCALL, the assertion inside
	 * skip_exit_vmcall() will fail.
	 */
	enter_guest();
	skip_exit_vmcall();

	/* Let L2 finish */
	enter_guest();
	report(__func__, 1);
}

static void irq_78_handler_guest(isr_regs_t *regs)
{
	set_irq_line(0xf, 0);
	vmcall();
	eoi();
	vmcall();
}

static void vmx_apic_passthrough_guest(void)
{
	handle_irq(0x78, irq_78_handler_guest);
	irq_enable();

	set_irq_line(0xf, 1);
}

static void vmx_apic_passthrough_test(void)
{
	void *msr_bitmap = alloc_page();

	u64 cpu_ctrl_0 = CPU_SECONDARY | CPU_MSR_BITMAP;
	u64 cpu_ctrl_1 = 0;

	memset(msr_bitmap, 0x0, PAGE_SIZE);
	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);

	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);

	vmcs_write(CPU_EXEC_CTRL0, vmcs_read(CPU_EXEC_CTRL0) | cpu_ctrl_0);
	vmcs_write(CPU_EXEC_CTRL1, vmcs_read(CPU_EXEC_CTRL1) | cpu_ctrl_1);

	ioapic_set_redir(0xf, 0x78, TRIGGER_LEVEL);
	test_set_guest(vmx_apic_passthrough_guest);

	/* Before EOI remote_irr should still be set */
	enter_guest();
	skip_exit_vmcall();
	TEST_ASSERT_EQ_MSG(1, (int)ioapic_read_redir(0xf).remote_irr,
		"IOAPIC pass-through: remote_irr=1 before EOI");

	/* After EOI remote_irr should be cleared */
	enter_guest();
	skip_exit_vmcall();
	TEST_ASSERT_EQ_MSG(0, (int)ioapic_read_redir(0xf).remote_irr,
		"IOAPIC pass-through: remote_irr=0 after EOI");

	/* Let L2 finish */
	enter_guest();
	report(__func__, 1);
}

#define TEST(name) { #name, .v2 = name }
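
/*
 * Illustrative sketch only: the minimal shape of a "v2" test.  A v2 test is
 * a single void(void) function that drives its own VM-entries and is
 * registered with one TEST(<name>) line in vmx_tests[] below.  The names
 * vmx_example_guest/vmx_example_test are hypothetical and intentionally not
 * added to the table; they exist only to document the pattern.
 */
static void __attribute__((unused)) vmx_example_guest(void)
{
	/* Do guest work, then force a VM-exit back to the host routine. */
	vmcall();
}

static void __attribute__((unused)) vmx_example_test(void)
{
	test_set_guest(vmx_example_guest);

	/* Run the guest up to its vmcall and consume the VMX_VMCALL exit. */
	enter_guest();
	skip_exit_vmcall();

	/* Let the guest run to completion, then report success. */
	enter_guest();
	report(__func__, 1);
}
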
/* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT A/D disabled", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ "EPT A/D enabled", eptad_init, eptad_main, eptad_exit_handler, NULL, {0} },
	{ "PML", pml_init, pml_main, pml_exit_handler, NULL, {0} },
	{ "VPID", vpid_init, vpid_main, vpid_exit_handler, NULL, {0} },
	{ "interrupt", interrupt_init, interrupt_main,
		interrupt_exit_handler, NULL, {0} },
	{ "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler,
		NULL, {0} },
	{ "MSR switch", msr_switch_init, msr_switch_main,
		msr_switch_exit_handler, NULL, {0}, msr_switch_entry_failure },
	{ "vmmcall", vmmcall_init, vmmcall_main, vmmcall_exit_handler, NULL, {0} },
	{ "disable RDTSCP", disable_rdtscp_init, disable_rdtscp_main,
		disable_rdtscp_exit_handler, NULL, {0} },
	{ "int3", int3_init, int3_guest_main, int3_exit_handler, NULL, {0} },
	{ "into", into_init, into_guest_main, into_exit_handler, NULL, {0} },
	{ "exit_monitor_from_l2_test", NULL, exit_monitor_from_l2_main,
		exit_monitor_from_l2_handler, NULL, {0} },
	/* Basic V2 tests. */
	TEST(v2_null_test),
	TEST(v2_multiple_entries_test),
	TEST(fixture_test_case1),
	TEST(fixture_test_case2),
	/* EPT access tests. */
	TEST(ept_access_test_not_present),
	TEST(ept_access_test_read_only),
	TEST(ept_access_test_write_only),
	TEST(ept_access_test_read_write),
	TEST(ept_access_test_execute_only),
	TEST(ept_access_test_read_execute),
	TEST(ept_access_test_write_execute),
	TEST(ept_access_test_read_write_execute),
	TEST(ept_access_test_reserved_bits),
	TEST(ept_access_test_ignored_bits),
	TEST(ept_access_test_paddr_not_present_ad_disabled),
	TEST(ept_access_test_paddr_not_present_ad_enabled),
	TEST(ept_access_test_paddr_read_only_ad_disabled),
	TEST(ept_access_test_paddr_read_only_ad_enabled),
	TEST(ept_access_test_paddr_read_write),
	TEST(ept_access_test_paddr_read_write_execute),
	TEST(ept_access_test_paddr_read_execute_ad_disabled),
	TEST(ept_access_test_paddr_read_execute_ad_enabled),
	TEST(ept_access_test_paddr_not_present_page_fault),
	TEST(ept_access_test_force_2m_page),
	/* Opcode tests. */
	TEST(invvpid_test_v2),
	/* VM-entry tests */
	TEST(vmx_controls_test),
	TEST(vmentry_movss_shadow_test),
	/* APICv tests */
	TEST(vmx_eoi_bitmap_ioapic_scan_test),
	/* APIC pass-through tests */
	TEST(vmx_apic_passthrough_test),
	/* Regression tests */
	TEST(vmx_cr_load_test),
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};