/*
 * All test cases of nested virtualization should be in this file
 *
 * Author : Arthur Chunqi Li <yzt356@gmail.com>
 */
#include "vmx.h"
#include "msr.h"
#include "processor.h"
#include "vm.h"
#include "fwcfg.h"
#include "isr.h"
#include "desc.h"
#include "apic.h"
#include "types.h"

u64 ia32_pat;
u64 ia32_efer;
void *io_bitmap_a, *io_bitmap_b;
u16 ioport;

unsigned long *pml4;
u64 eptp;
void *data_page1, *data_page2;

void *pml_log;
#define PML_INDEX 512

static inline void vmcall()
{
	asm volatile("vmcall");
}

void basic_guest_main()
{
	report("Basic VMX test", 1);
}

int basic_exit_handler()
{
	report("Basic VMX test", 0);
	print_vmexit_info();
	return VMX_TEST_EXIT;
}

void vmenter_main()
{
	u64 rax;
	u64 rsp, resume_rsp;

	report("test vmlaunch", 1);

	asm volatile(
		"mov %%rsp, %0\n\t"
		"mov %3, %%rax\n\t"
		"vmcall\n\t"
		"mov %%rax, %1\n\t"
		"mov %%rsp, %2\n\t"
		: "=r"(rsp), "=r"(rax), "=r"(resume_rsp)
		: "g"(0xABCD));
	report("test vmresume", (rax == 0xFFFF) && (rsp == resume_rsp));
}

int vmenter_exit_handler()
{
	u64 guest_rip;
	ulong reason;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		if (regs.rax != 0xABCD) {
			report("test vmresume", 0);
			return VMX_TEST_VMEXIT;
		}
		regs.rax = 0xFFFF;
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		report("test vmresume", 0);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

u32 preempt_scale;
volatile unsigned long long tsc_val;
volatile u32 preempt_val;
u64 saved_rip;

int preemption_timer_init()
{
	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
		printf("\tPreemption timer is not supported\n");
		return VMX_TEST_EXIT;
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
	preempt_val = 10000000;
	vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
	preempt_scale = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	if (!(ctrl_exit_rev.clr & EXI_SAVE_PREEMPT))
		printf("\tSave preemption value is not supported\n");

	return VMX_TEST_START;
}

void preemption_timer_main()
{
	tsc_val = rdtsc();
	if (ctrl_exit_rev.clr & EXI_SAVE_PREEMPT) {
		vmx_set_test_stage(0);
		vmcall();
		if (vmx_get_test_stage() == 1)
			vmcall();
	}
	vmx_set_test_stage(1);
	while (vmx_get_test_stage() == 1) {
		if (((rdtsc() - tsc_val) >> preempt_scale)
				> 10 * preempt_val) {
			vmx_set_test_stage(2);
			vmcall();
		}
	}
	tsc_val = rdtsc();
	asm volatile ("hlt");
	vmcall();
	vmx_set_test_stage(5);
	vmcall();
}

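/*
 * Host-side stage machine for the preemption timer test: stages 1-2
 * verify that a timer exit arrives no earlier than the programmed
 * deadline while the guest busy-waits, stage 3 repeats that while the
 * guest is halted, and stage 4 checks that a zero-valued timer fires
 * before the guest retires another instruction (saved_rip unchanged).
 * The VMX_VMCALL cases check the keep/save-preemption-value behavior
 * and reprogram the timer between stages.
 */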
int preemption_timer_exit_handler()
{
	bool guest_halted;
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 ctrl_exit;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_PREEMPT:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			report("busy-wait for preemption timer",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			guest_halted =
				(vmcs_read(GUEST_ACTV_STATE) == ACTV_HLT);
			report("preemption timer during hlt",
			       ((rdtsc() - tsc_val) >> preempt_scale) >=
			       preempt_val && guest_halted);
			vmx_set_test_stage(4);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
			return VMX_TEST_RESUME;
		case 4:
			report("preemption timer with 0 value",
			       saved_rip == guest_rip);
			break;
		default:
			printf("Invalid stage.\n");
			print_vmexit_info();
			break;
		}
		break;
	case VMX_VMCALL:
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		switch (vmx_get_test_stage()) {
		case 0:
			report("Keep preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) == preempt_val);
			vmx_set_test_stage(1);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			ctrl_exit = (vmcs_read(EXI_CONTROLS) |
				     EXI_SAVE_PREEMPT) & ctrl_exit_rev.clr;
			vmcs_write(EXI_CONTROLS, ctrl_exit);
			return VMX_TEST_RESUME;
		case 1:
			report("Save preemption value",
			       vmcs_read(PREEMPT_TIMER_VALUE) < preempt_val);
			return VMX_TEST_RESUME;
		case 2:
			report("busy-wait for preemption timer", 0);
			vmx_set_test_stage(3);
			vmcs_write(PREEMPT_TIMER_VALUE, preempt_val);
			return VMX_TEST_RESUME;
		case 3:
			report("preemption timer during hlt", 0);
			vmx_set_test_stage(4);
			/* fall through */
		case 4:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_PREEMPT);
			vmcs_write(PREEMPT_TIMER_VALUE, 0);
			saved_rip = guest_rip + insn_len;
			return VMX_TEST_RESUME;
		case 5:
			report("preemption timer with 0 value (vmcall stage 5)", 0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		break;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_PREEMPT);
	return VMX_TEST_VMEXIT;
}

void msr_bmp_init()
{
	void *msr_bitmap;
	u32 ctrl_cpu0;

	msr_bitmap = alloc_page();
	memset(msr_bitmap, 0x0, PAGE_SIZE);
	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu0 |= CPU_MSR_BITMAP;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
	vmcs_write(MSR_BITMAP, (u64)msr_bitmap);
}

static int test_ctrl_pat_init()
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT) &&
	    !(ctrl_exit_rev.clr & EXI_LOAD_PAT) &&
	    !(ctrl_enter_rev.clr & ENT_LOAD_PAT)) {
		printf("\tSave/load PAT is not supported\n");
		return 1;
	}

	ctrl_ent = vmcs_read(ENT_CONTROLS);
	ctrl_exi = vmcs_read(EXI_CONTROLS);
	ctrl_ent |= ctrl_enter_rev.clr & ENT_LOAD_PAT;
	ctrl_exi |= ctrl_exit_rev.clr & (EXI_SAVE_PAT | EXI_LOAD_PAT);
	vmcs_write(ENT_CONTROLS, ctrl_ent);
	vmcs_write(EXI_CONTROLS, ctrl_exi);
	ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	vmcs_write(GUEST_PAT, 0x0);
	vmcs_write(HOST_PAT, ia32_pat);
	return VMX_TEST_START;
}

static void test_ctrl_pat_main()
{
	u64 guest_ia32_pat;

	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_PAT))
		printf("\tENT_LOAD_PAT is not supported.\n");
	else {
		if (guest_ia32_pat != 0) {
			report("Entry load PAT", 0);
			return;
		}
	}
	wrmsr(MSR_IA32_CR_PAT, 0x6);
	vmcall();
	guest_ia32_pat = rdmsr(MSR_IA32_CR_PAT);
	if (ctrl_enter_rev.clr & ENT_LOAD_PAT)
		report("Entry load PAT", guest_ia32_pat == ia32_pat);
}

static int test_ctrl_pat_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_pat;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_pat = vmcs_read(GUEST_PAT);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_PAT)) {
			printf("\tEXI_SAVE_PAT is not supported\n");
			vmcs_write(GUEST_PAT, 0x6);
		} else {
			report("Exit save PAT", guest_pat == 0x6);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_PAT))
			printf("\tEXI_LOAD_PAT is not supported\n");
		else
			report("Exit load PAT", rdmsr(MSR_IA32_CR_PAT) == ia32_pat);
		vmcs_write(GUEST_PAT, ia32_pat);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

static int test_ctrl_efer_init()
{
	u64 ctrl_ent;
	u64 ctrl_exi;

	msr_bmp_init();
	ctrl_ent = vmcs_read(ENT_CONTROLS) | ENT_LOAD_EFER;
	ctrl_exi = vmcs_read(EXI_CONTROLS) | EXI_SAVE_EFER | EXI_LOAD_EFER;
	vmcs_write(ENT_CONTROLS, ctrl_ent & ctrl_enter_rev.clr);
	vmcs_write(EXI_CONTROLS, ctrl_exi & ctrl_exit_rev.clr);
	ia32_efer = rdmsr(MSR_EFER);
	vmcs_write(GUEST_EFER, ia32_efer ^ EFER_NX);
	vmcs_write(HOST_EFER, ia32_efer ^ EFER_NX);
	return VMX_TEST_START;
}

static void test_ctrl_efer_main()
{
	u64 guest_ia32_efer;

	guest_ia32_efer = rdmsr(MSR_EFER);
	if (!(ctrl_enter_rev.clr & ENT_LOAD_EFER))
		printf("\tENT_LOAD_EFER is not supported.\n");
	else {
		if (guest_ia32_efer != (ia32_efer ^ EFER_NX)) {
			report("Entry load EFER", 0);
			return;
		}
	}
	wrmsr(MSR_EFER, ia32_efer);
	vmcall();
	guest_ia32_efer = rdmsr(MSR_EFER);
	if (ctrl_enter_rev.clr & ENT_LOAD_EFER)
		report("Entry load EFER", guest_ia32_efer == ia32_efer);
}

static int test_ctrl_efer_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u64 guest_efer;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	switch (reason) {
	case VMX_VMCALL:
		guest_efer = vmcs_read(GUEST_EFER);
		if (!(ctrl_exit_rev.clr & EXI_SAVE_EFER)) {
			printf("\tEXI_SAVE_EFER is not supported\n");
			vmcs_write(GUEST_EFER, ia32_efer);
		} else {
			report("Exit save EFER", guest_efer == ia32_efer);
		}
		if (!(ctrl_exit_rev.clr & EXI_LOAD_EFER)) {
			printf("\tEXI_LOAD_EFER is not supported\n");
			wrmsr(MSR_EFER, ia32_efer ^ EFER_NX);
		} else {
			report("Exit load EFER", rdmsr(MSR_EFER) == (ia32_efer ^ EFER_NX));
		}
		vmcs_write(GUEST_EFER, ia32_efer);
		vmcs_write(GUEST_RIP, guest_rip + 3);
		return VMX_TEST_RESUME;
	default:
		printf("ERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

u32 guest_cr0, guest_cr4;

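/*
 * CR0/CR4 shadowing test plan: with a zero guest/host mask, guest reads
 * and writes of CR0/CR4 must go straight through to the real registers
 * (stages 0-3). After the exit handler arms CR0_MASK/CR4_MASK with
 * TS/MP and TSD/DE at stage 4, reads must come from the read shadows
 * (stages 4-5), writes that match the shadow must not cause an exit
 * (stages 6-7), and writes that differ from the shadow must trigger a
 * VMX_CR exit (stages 8-11).
 */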
static void cr_shadowing_main()
{
	u32 cr0, cr4, tmp;

	// Test read through
	vmx_set_test_stage(0);
	guest_cr0 = read_cr0();
	if (vmx_get_test_stage() == 1)
		report("Read through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(1);
	guest_cr4 = read_cr4();
	if (vmx_get_test_stage() == 2)
		report("Read through CR4", 0);
	else
		vmcall();
	// Test write through
	guest_cr0 = guest_cr0 ^ (X86_CR0_TS | X86_CR0_MP);
	guest_cr4 = guest_cr4 ^ (X86_CR4_TSD | X86_CR4_DE);
	vmx_set_test_stage(2);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 3)
		report("Write through CR0", 0);
	else
		vmcall();
	vmx_set_test_stage(3);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 4)
		report("Write through CR4", 0);
	else
		vmcall();
	// Test read shadow
	vmx_set_test_stage(4);
	vmcall();
	cr0 = read_cr0();
	if (vmx_get_test_stage() != 5)
		report("Read shadowing CR0", cr0 == guest_cr0);
	vmx_set_test_stage(5);
	cr4 = read_cr4();
	if (vmx_get_test_stage() != 6)
		report("Read shadowing CR4", cr4 == guest_cr4);
	// Test write shadow (same value as shadow)
	vmx_set_test_stage(6);
	write_cr0(guest_cr0);
	if (vmx_get_test_stage() == 7)
		report("Write shadowing CR0 (same value with shadow)", 0);
	else
		vmcall();
	vmx_set_test_stage(7);
	write_cr4(guest_cr4);
	if (vmx_get_test_stage() == 8)
		report("Write shadowing CR4 (same value with shadow)", 0);
	else
		vmcall();
	// Test write shadow (different value)
	vmx_set_test_stage(8);
	tmp = guest_cr0 ^ X86_CR0_TS;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr0\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_TS", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	tmp = guest_cr0 ^ X86_CR0_MP;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr0\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR0_MP", vmx_get_test_stage() == 10);
	vmx_set_test_stage(10);
	tmp = guest_cr4 ^ X86_CR4_TSD;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr4\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_TSD", vmx_get_test_stage() == 11);
	vmx_set_test_stage(11);
	tmp = guest_cr4 ^ X86_CR4_DE;
	asm volatile("mov %0, %%rsi\n\t"
		     "mov %%rsi, %%cr4\n\t"
		     ::"m"(tmp)
		     :"rsi", "memory", "cc");
	report("Write shadowing different X86_CR4_DE", vmx_get_test_stage() == 12);
}

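/*
 * For MOV-to-CR exits the exit qualification encodes the control
 * register number in bits 3:0, the access type in bits 5:4 (0 means
 * MOV to CR) and the source GPR in bits 11:8, so 0x600 corresponds to
 * "mov %rsi, %cr0" and 0x604 to "mov %rsi, %cr4" below.
 */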
static int cr_shadowing_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("Read through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 1:
			report("Read through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 2:
			report("Write through CR0", guest_cr0 == vmcs_read(GUEST_CR0));
			break;
		case 3:
			report("Write through CR4", guest_cr4 == vmcs_read(GUEST_CR4));
			break;
		case 4:
			guest_cr0 = vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP);
			guest_cr4 = vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR0_MASK, X86_CR0_TS | X86_CR0_MP);
			vmcs_write(CR0_READ_SHADOW, guest_cr0 & (X86_CR0_TS | X86_CR0_MP));
			vmcs_write(CR4_MASK, X86_CR4_TSD | X86_CR4_DE);
			vmcs_write(CR4_READ_SHADOW, guest_cr4 & (X86_CR4_TSD | X86_CR4_DE));
			break;
		case 6:
			report("Write shadowing CR0 (same value)",
			       guest_cr0 == (vmcs_read(GUEST_CR0) ^ (X86_CR0_TS | X86_CR0_MP)));
			break;
		case 7:
			report("Write shadowing CR4 (same value)",
			       guest_cr4 == (vmcs_read(GUEST_CR4) ^ (X86_CR4_TSD | X86_CR4_DE)));
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_CR:
		switch (vmx_get_test_stage()) {
		case 4:
			report("Read shadowing CR0", 0);
			vmx_inc_test_stage();
			break;
		case 5:
			report("Read shadowing CR4", 0);
			vmx_inc_test_stage();
			break;
		case 6:
			report("Write shadowing CR0 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 7:
			report("Write shadowing CR4 (same value)", 0);
			vmx_inc_test_stage();
			break;
		case 8:
		case 9:
			// 0x600 encodes "mov %esi, %cr0"
			if (exit_qual == 0x600)
				vmx_inc_test_stage();
			break;
		case 10:
		case 11:
			// 0x604 encodes "mov %esi, %cr4"
			if (exit_qual == 0x604)
				vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

static int iobmp_init()
{
	u32 ctrl_cpu0;

	io_bitmap_a = alloc_page();
	io_bitmap_b = alloc_page();
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu0 |= CPU_IO_BITMAP;
	ctrl_cpu0 &= (~CPU_IO);
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
	vmcs_write(IO_BITMAP_A, (u64)io_bitmap_a);
	vmcs_write(IO_BITMAP_B, (u64)io_bitmap_b);
	return VMX_TEST_START;
}

static void iobmp_main()
{
	// stage 0, test IO pass
	vmx_set_test_stage(0);
	inb(0x5000);
	outb(0x0, 0x5000);
	report("I/O bitmap - I/O pass", vmx_get_test_stage() == 0);
	// test IO width, in/out
	((u8 *)io_bitmap_a)[0] = 0xFF;
	vmx_set_test_stage(2);
	inb(0x0);
	report("I/O bitmap - trap in", vmx_get_test_stage() == 3);
	vmx_set_test_stage(3);
	outw(0x0, 0x0);
	report("I/O bitmap - trap out", vmx_get_test_stage() == 4);
	vmx_set_test_stage(4);
	inl(0x0);
	report("I/O bitmap - I/O width, long", vmx_get_test_stage() == 5);
	// test low/high IO port
	vmx_set_test_stage(5);
	((u8 *)io_bitmap_a)[0x5000 / 8] = (1 << (0x5000 % 8));
	inb(0x5000);
	report("I/O bitmap - I/O port, low part", vmx_get_test_stage() == 6);
	vmx_set_test_stage(6);
	((u8 *)io_bitmap_b)[0x1000 / 8] = (1 << (0x1000 % 8));
	inb(0x9000);
	report("I/O bitmap - I/O port, high part", vmx_get_test_stage() == 7);
	// test partial pass
	vmx_set_test_stage(7);
	inl(0x4FFF);
	report("I/O bitmap - partial pass", vmx_get_test_stage() == 8);
	// test overrun
	vmx_set_test_stage(8);
	memset(io_bitmap_a, 0x0, PAGE_SIZE);
	memset(io_bitmap_b, 0x0, PAGE_SIZE);
	inl(0xFFFF);
	report("I/O bitmap - overrun", vmx_get_test_stage() == 9);
	vmx_set_test_stage(9);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - ignore unconditional exiting",
	       vmx_get_test_stage() == 9);
	vmx_set_test_stage(10);
	vmcall();
	outb(0x0, 0x0);
	report("I/O bitmap - unconditional exiting",
	       vmx_get_test_stage() == 11);
}

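/*
 * For I/O instruction exits the exit qualification encodes the access
 * size in bits 2:0 (0 = byte, 1 = word, 3 = dword), the direction in
 * bit 3 (1 = IN) and the port number in bits 31:16; the handler below
 * checks those fields against what iobmp_main just executed.
 */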
static int iobmp_exit_handler()
{
	u64 guest_rip;
	ulong reason, exit_qual;
	u32 insn_len, ctrl_cpu0;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	switch (reason) {
	case VMX_IO:
		switch (vmx_get_test_stage()) {
		case 0:
		case 1:
			vmx_inc_test_stage();
			break;
		case 2:
			report("I/O bitmap - I/O width, byte",
			       (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_BYTE);
			report("I/O bitmap - I/O direction, in", exit_qual & VMX_IO_IN);
			vmx_inc_test_stage();
			break;
		case 3:
			report("I/O bitmap - I/O width, word",
			       (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_WORD);
			report("I/O bitmap - I/O direction, out",
			       !(exit_qual & VMX_IO_IN));
			vmx_inc_test_stage();
			break;
		case 4:
			report("I/O bitmap - I/O width, long",
			       (exit_qual & VMX_IO_SIZE_MASK) == _VMX_IO_LONG);
			vmx_inc_test_stage();
			break;
		case 5:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x5000)
				vmx_inc_test_stage();
			break;
		case 6:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x9000)
				vmx_inc_test_stage();
			break;
		case 7:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0x4FFF)
				vmx_inc_test_stage();
			break;
		case 8:
			if (((exit_qual & VMX_IO_PORT_MASK) >> VMX_IO_PORT_SHIFT) == 0xFFFF)
				vmx_inc_test_stage();
			break;
		case 9:
		case 10:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0 & ~CPU_IO);
			vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 9:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 |= CPU_IO | CPU_IO_BITMAP;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		case 10:
			ctrl_cpu0 = vmcs_read(CPU_EXEC_CTRL0);
			ctrl_cpu0 = (ctrl_cpu0 & ~CPU_IO_BITMAP) | CPU_IO;
			vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu0);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("guest_rip = 0x%lx\n", guest_rip);
		printf("\tERROR : Undefined exit reason, reason = %ld.\n", reason);
		break;
	}
	return VMX_TEST_VMEXIT;
}

#define INSN_CPU0 0
#define INSN_CPU1 1
#define INSN_ALWAYS_TRAP 2

#define FIELD_EXIT_QUAL (1 << 0)
#define FIELD_INSN_INFO (1 << 1)

asm(
	"insn_hlt: hlt;ret\n\t"
	"insn_invlpg: invlpg 0x12345678;ret\n\t"
	"insn_mwait: xor %eax, %eax; xor %ecx, %ecx; mwait;ret\n\t"
	"insn_rdpmc: xor %ecx, %ecx; rdpmc;ret\n\t"
	"insn_rdtsc: rdtsc;ret\n\t"
	"insn_cr3_load: mov cr3,%rax; mov %rax,%cr3;ret\n\t"
	"insn_cr3_store: mov %cr3,%rax;ret\n\t"
#ifdef __x86_64__
	"insn_cr8_load: mov %rax,%cr8;ret\n\t"
	"insn_cr8_store: mov %cr8,%rax;ret\n\t"
#endif
	"insn_monitor: xor %eax, %eax; xor %ecx, %ecx; xor %edx, %edx; monitor;ret\n\t"
	"insn_pause: pause;ret\n\t"
	"insn_wbinvd: wbinvd;ret\n\t"
	"insn_cpuid: mov $10, %eax; cpuid;ret\n\t"
	"insn_invd: invd;ret\n\t"
	"insn_sgdt: sgdt gdt64_desc;ret\n\t"
	"insn_lgdt: lgdt gdt64_desc;ret\n\t"
	"insn_sidt: sidt idt_descr;ret\n\t"
	"insn_lidt: lidt idt_descr;ret\n\t"
	"insn_sldt: sldt %ax;ret\n\t"
	"insn_lldt: xor %eax, %eax; lldt %ax;ret\n\t"
	"insn_str: str %ax;ret\n\t"
);

extern void insn_hlt();
extern void insn_invlpg();
extern void insn_mwait();
extern void insn_rdpmc();
extern void insn_rdtsc();
extern void insn_cr3_load();
extern void insn_cr3_store();
#ifdef __x86_64__
extern void insn_cr8_load();
extern void insn_cr8_store();
#endif
extern void insn_monitor();
extern void insn_pause();
extern void insn_wbinvd();
extern void insn_sgdt();
extern void insn_lgdt();
extern void insn_sidt();
extern void insn_lidt();
extern void insn_sldt();
extern void insn_lldt();
extern void insn_str();
extern void insn_cpuid();
extern void insn_invd();

u32 cur_insn;
u64 cr3;

struct insn_table {
	const char *name;
	u32 flag;
	void (*insn_func)();
	u32 type;
	u32 reason;
	ulong exit_qual;
	u32 insn_info;
	// Use FIELD_EXIT_QUAL and FIELD_INSN_INFO to define which
	// fields need to be tested; the reason is always tested.
	u32 test_field;
};

/*
 * Add more test cases of instruction intercept here. The elements in
 * this table are:
 *   name/control flag/insn function/type/exit reason/exit qualification/
 *   instruction info/field to test
 * The last field defines which fields (exit_qual and insn_info) need to be
 * tested in the exit handler. If set to 0, only "reason" is checked.
 */
static struct insn_table insn_table[] = {
	// Flags for Primary Processor-Based VM-Execution Controls
	{"HLT", CPU_HLT, insn_hlt, INSN_CPU0, 12, 0, 0, 0},
	{"INVLPG", CPU_INVLPG, insn_invlpg, INSN_CPU0, 14,
		0x12345678, 0, FIELD_EXIT_QUAL},
	{"MWAIT", CPU_MWAIT, insn_mwait, INSN_CPU0, 36, 0, 0, 0},
	{"RDPMC", CPU_RDPMC, insn_rdpmc, INSN_CPU0, 15, 0, 0, 0},
	{"RDTSC", CPU_RDTSC, insn_rdtsc, INSN_CPU0, 16, 0, 0, 0},
	{"CR3 load", CPU_CR3_LOAD, insn_cr3_load, INSN_CPU0, 28, 0x3, 0,
		FIELD_EXIT_QUAL},
	{"CR3 store", CPU_CR3_STORE, insn_cr3_store, INSN_CPU0, 28, 0x13, 0,
		FIELD_EXIT_QUAL},
#ifdef __x86_64__
	{"CR8 load", CPU_CR8_LOAD, insn_cr8_load, INSN_CPU0, 28, 0x8, 0,
		FIELD_EXIT_QUAL},
	{"CR8 store", CPU_CR8_STORE, insn_cr8_store, INSN_CPU0, 28, 0x18, 0,
		FIELD_EXIT_QUAL},
#endif
	{"MONITOR", CPU_MONITOR, insn_monitor, INSN_CPU0, 39, 0, 0, 0},
	{"PAUSE", CPU_PAUSE, insn_pause, INSN_CPU0, 40, 0, 0, 0},
	// Flags for Secondary Processor-Based VM-Execution Controls
	{"WBINVD", CPU_WBINVD, insn_wbinvd, INSN_CPU1, 54, 0, 0, 0},
	{"DESC_TABLE (SGDT)", CPU_DESC_TABLE, insn_sgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LGDT)", CPU_DESC_TABLE, insn_lgdt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SIDT)", CPU_DESC_TABLE, insn_sidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (LIDT)", CPU_DESC_TABLE, insn_lidt, INSN_CPU1, 46, 0, 0, 0},
	{"DESC_TABLE (SLDT)", CPU_DESC_TABLE, insn_sldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (LLDT)", CPU_DESC_TABLE, insn_lldt, INSN_CPU1, 47, 0, 0, 0},
	{"DESC_TABLE (STR)", CPU_DESC_TABLE, insn_str, INSN_CPU1, 47, 0, 0, 0},
	/* LTR causes a #GP if done with a busy selector, so it is not tested. */
	// Instructions always trap
	{"CPUID", 0, insn_cpuid, INSN_ALWAYS_TRAP, 10, 0, 0, 0},
	{"INVD", 0, insn_invd, INSN_ALWAYS_TRAP, 13, 0, 0, 0},
	// Instructions never trap
	{NULL},
};

static int insn_intercept_init()
{
	u32 ctrl_cpu;

	ctrl_cpu = ctrl_cpu_rev[0].set | CPU_SECONDARY;
	ctrl_cpu &= ctrl_cpu_rev[0].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu_rev[1].set);
	cr3 = read_cr3();
	return VMX_TEST_START;
}

static void insn_intercept_main()
{
	for (cur_insn = 0; insn_table[cur_insn].name != NULL; cur_insn++) {
		vmx_set_test_stage(cur_insn * 2);
		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].clr & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].clr & insn_table[cur_insn].flag))) {
			printf("\tCPU_CTRL%d.CPU_%s is not supported.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);
			continue;
		}

		if ((insn_table[cur_insn].type == INSN_CPU0 &&
		     !(ctrl_cpu_rev[0].set & insn_table[cur_insn].flag)) ||
		    (insn_table[cur_insn].type == INSN_CPU1 &&
		     !(ctrl_cpu_rev[1].set & insn_table[cur_insn].flag))) {
			/* skip hlt, it stalls the guest and is tested below */
			if (insn_table[cur_insn].insn_func != insn_hlt)
				insn_table[cur_insn].insn_func();
			report("execute %s", vmx_get_test_stage() == cur_insn * 2,
			       insn_table[cur_insn].name);
		} else if (insn_table[cur_insn].type != INSN_ALWAYS_TRAP)
			printf("\tCPU_CTRL%d.CPU_%s always traps.\n",
			       insn_table[cur_insn].type - INSN_CPU0,
			       insn_table[cur_insn].name);

		vmcall();

		insn_table[cur_insn].insn_func();
		report("intercept %s", vmx_get_test_stage() == cur_insn * 2 + 1,
		       insn_table[cur_insn].name);

		vmx_set_test_stage(cur_insn * 2 + 1);
		vmcall();
	}
}

static int insn_intercept_exit_handler()
{
	u64 guest_rip;
	u32 reason;
	ulong exit_qual;
	u32 insn_len;
	u32 insn_info;
	bool pass;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	insn_len = vmcs_read(EXI_INST_LEN);
	insn_info = vmcs_read(EXI_INST_INFO);

	if (reason == VMX_VMCALL) {
		u32 val = 0;

		if (insn_table[cur_insn].type == INSN_CPU0)
			val = vmcs_read(CPU_EXEC_CTRL0);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			val = vmcs_read(CPU_EXEC_CTRL1);

		if (vmx_get_test_stage() & 1)
			val &= ~insn_table[cur_insn].flag;
		else
			val |= insn_table[cur_insn].flag;

		if (insn_table[cur_insn].type == INSN_CPU0)
			vmcs_write(CPU_EXEC_CTRL0, val | ctrl_cpu_rev[0].set);
		else if (insn_table[cur_insn].type == INSN_CPU1)
			vmcs_write(CPU_EXEC_CTRL1, val | ctrl_cpu_rev[1].set);
	} else {
		pass = (cur_insn * 2 == vmx_get_test_stage()) &&
			insn_table[cur_insn].reason == reason;
		if (insn_table[cur_insn].test_field & FIELD_EXIT_QUAL &&
		    insn_table[cur_insn].exit_qual != exit_qual)
			pass = false;
		if (insn_table[cur_insn].test_field & FIELD_INSN_INFO &&
		    insn_table[cur_insn].insn_info != insn_info)
			pass = false;
		if (pass)
			vmx_inc_test_stage();
	}
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
	return VMX_TEST_RESUME;
}

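/*
 * EPTP layout assumed by setup_ept(): bits 2:0 hold the paging-structure
 * memory type (UC or WB), bits 5:3 hold the page-walk length minus one
 * (hence the "3" for a 4-level walk), bit 6 enables the accessed/dirty
 * flags, and the upper bits hold the physical address of the PML4.
 */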
/* Enables EPT and sets up the identity map. */
static int setup_ept(bool enable_ad)
{
	unsigned long end_of_memory;
	u32 ctrl_cpu[2];

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_EPT)) {
		printf("\tEPT is not supported\n");
		return 1;
	}

	if (!(ept_vpid.val & EPT_CAP_UC) &&
	    !(ept_vpid.val & EPT_CAP_WB)) {
		printf("\tEPT paging-structure memory types "
		       "UC and WB are not supported\n");
		return 1;
	}
	if (ept_vpid.val & EPT_CAP_UC)
		eptp = EPT_MEM_TYPE_UC;
	else
		eptp = EPT_MEM_TYPE_WB;
	if (!(ept_vpid.val & EPT_CAP_PWL4)) {
		printf("\tPWL4 is not supported\n");
		return 1;
	}
	ctrl_cpu[0] = vmcs_read(CPU_EXEC_CTRL0);
	ctrl_cpu[1] = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu[0] = (ctrl_cpu[0] | CPU_SECONDARY)
		& ctrl_cpu_rev[0].clr;
	ctrl_cpu[1] = (ctrl_cpu[1] | CPU_EPT)
		& ctrl_cpu_rev[1].clr;
	vmcs_write(CPU_EXEC_CTRL0, ctrl_cpu[0]);
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu[1]);
	eptp |= (3 << EPTP_PG_WALK_LEN_SHIFT);
	pml4 = alloc_page();
	memset(pml4, 0, PAGE_SIZE);
	eptp |= virt_to_phys(pml4);
	if (enable_ad)
		eptp |= EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
	if (end_of_memory < (1ul << 32))
		end_of_memory = (1ul << 32);
	/*
	 * Cannot use large EPT pages if we need to track EPT
	 * accessed/dirty bits at 4K granularity.
	 */
	setup_ept_range(pml4, 0, end_of_memory, 0,
			!enable_ad && ept_2m_supported(),
			EPT_WA | EPT_RA | EPT_EA);
	return 0;
}

static void ept_enable_ad_bits(void)
{
	eptp |= EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
}

static void ept_disable_ad_bits(void)
{
	eptp &= ~EPTP_AD_FLAG;
	vmcs_write(EPTP, eptp);
}

static void ept_enable_ad_bits_or_skip_test(void)
{
	if (!ept_ad_bits_supported())
		test_skip("EPT AD bits not supported.");
	ept_enable_ad_bits();
}

static int apic_version;

static int ept_init_common(bool have_ad)
{
	if (setup_ept(have_ad))
		return VMX_TEST_EXIT;
	data_page1 = alloc_page();
	data_page2 = alloc_page();
	memset(data_page1, 0x0, PAGE_SIZE);
	memset(data_page2, 0x0, PAGE_SIZE);
	*((u32 *)data_page1) = MAGIC_VAL_1;
	*((u32 *)data_page2) = MAGIC_VAL_2;
	install_ept(pml4, (unsigned long)data_page1, (unsigned long)data_page2,
		    EPT_RA | EPT_WA | EPT_EA);

	apic_version = *((u32 *)0xfee00030UL);
	return VMX_TEST_START;
}

static int ept_init()
{
	return ept_init_common(false);
}

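/*
 * Guest-side EPT test flow shared by the EPT and EPT A/D tests:
 * stage 0 exercises reads and writes through a remapped page, stages
 * 1-2 trigger EPT misconfigurations (a writable-but-not-readable entry
 * and a reserved memory type), and stages 3-4 trigger EPT violations
 * via a non-present leaf PTE and a non-present paging-structure entry.
 */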
static void ept_common()
{
	vmx_set_test_stage(0);
	if (*((u32 *)data_page2) != MAGIC_VAL_1 ||
	    *((u32 *)data_page1) != MAGIC_VAL_1)
		report("EPT basic framework - read", 0);
	else {
		*((u32 *)data_page2) = MAGIC_VAL_3;
		vmcall();
		if (vmx_get_test_stage() == 1) {
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
			    *((u32 *)data_page2) == MAGIC_VAL_2)
				report("EPT basic framework", 1);
			else
				report("EPT basic framework - remap", 0);
		}
	}
	// Test EPT Misconfigurations
	vmx_set_test_stage(1);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	if (vmx_get_test_stage() != 2) {
		report("EPT misconfigurations", 0);
		goto t1;
	}
	vmx_set_test_stage(2);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT misconfigurations", vmx_get_test_stage() == 3);
t1:
	// Test EPT violation
	vmx_set_test_stage(3);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_1;
	report("EPT violation - page permission", vmx_get_test_stage() == 4);
	// Violation caused by the EPT paging structure
	vmx_set_test_stage(4);
	vmcall();
	*((u32 *)data_page1) = MAGIC_VAL_2;
	report("EPT violation - paging structure", vmx_get_test_stage() == 5);
}

static void ept_main()
{
	ept_common();

	// Test EPT access to L1 MMIO
	vmx_set_test_stage(6);
	report("EPT - MMIO access", *((u32 *)0xfee00030UL) == apic_version);

	// Test invalid operand for INVEPT
	vmcall();
	report("EPT - unsupported INVEPT", vmx_get_test_stage() == 7);
}

bool invept_test(int type, u64 eptp)
{
	bool ret, supported;

	supported = ept_vpid.val & (EPT_CAP_INVEPT_SINGLE >> INVEPT_SINGLE << type);
	ret = invept(type, eptp);

	if (ret == !supported)
		return false;

	if (!supported)
		printf("WARNING: unsupported invept passed!\n");
	else
		printf("WARNING: invept failed!\n");

	return true;
}

static int pml_exit_handler(void)
{
	u16 index, count;
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u64 *pmlbuf = pml_log;
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u64 guest_cr3 = vmcs_read(GUEST_CR3);
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			index = vmcs_read(GUEST_PML_INDEX);
			for (count = index + 1; count < PML_INDEX; count++) {
				if (pmlbuf[count] == (u64)data_page2) {
					vmx_inc_test_stage();
					clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
					break;
				}
			}
			break;
		case 1:
			index = vmcs_read(GUEST_PML_INDEX);
			/* Keep clearing the dirty bit until the log overflows */
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			break;
		default:
			printf("ERROR - unexpected stage, %d.\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_PML_FULL:
		vmx_inc_test_stage();
		vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

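/*
 * Host-side companion to ept_common()/ept_main(): each VMX_VMCALL stage
 * rewrites the EPT tables to provoke the next misconfiguration or
 * violation, while the VMX_EPT_MISCONFIG/VMX_EPT_VIOLATION cases verify
 * the exit qualification (and, with A/D enabled, the accessed/dirty
 * flags) before repairing the mapping and resuming the guest.
 */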
static int ept_exit_handler_common(bool have_ad)
{
	u64 guest_rip;
	u64 guest_cr3;
	ulong reason;
	u32 insn_len;
	u32 exit_qual;
	static unsigned long data_page1_pte, data_page1_pte_pte;

	guest_rip = vmcs_read(GUEST_RIP);
	guest_cr3 = vmcs_read(GUEST_CR3);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);
	exit_qual = vmcs_read(EXI_QUALIFICATION);
	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page1,
				     have_ad ? EPT_ACCESS_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			check_ept_ad(pml4, guest_cr3,
				     (unsigned long)data_page2,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page2);
			if (have_ad)
				ept_sync(INVEPT_SINGLE, eptp);
			if (*((u32 *)data_page1) == MAGIC_VAL_3 &&
			    *((u32 *)data_page2) == MAGIC_VAL_2) {
				vmx_inc_test_stage();
				install_ept(pml4, (unsigned long)data_page2,
					    (unsigned long)data_page2,
					    EPT_RA | EPT_WA | EPT_EA);
			} else
				report("EPT basic framework - write", 0);
			break;
		case 1:
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1, EPT_WA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 2:
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1,
				    EPT_RA | EPT_WA | EPT_EA |
				    (2 << EPT_MEM_TYPE_SHIFT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 3:
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 1);
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			data_page1_pte = get_ept_pte(pml4,
				(unsigned long)data_page1, 2);
			data_page1_pte &= PAGE_MASK;
			data_page1_pte_pte = get_ept_pte(pml4, data_page1_pte, 2);
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte & ~EPT_PRESENT);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 6:
			if (!invept_test(0, eptp))
				vmx_inc_test_stage();
			break;
		default:
			// Should not reach here
			printf("ERROR - unexpected stage, %d.\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EPT_MISCONFIG:
		switch (vmx_get_test_stage()) {
		case 1:
		case 2:
			vmx_inc_test_stage();
			install_ept(pml4, (unsigned long)data_page1,
				    (unsigned long)data_page1,
				    EPT_RA | EPT_WA | EPT_EA);
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR - unexpected stage, %d.\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	case VMX_EPT_VIOLATION:
		switch (vmx_get_test_stage()) {
		case 3:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_WR | EPT_VLT_LADDR_VLD |
					  EPT_VLT_PADDR))
				vmx_inc_test_stage();
			set_ept_pte(pml4, (unsigned long)data_page1,
				1, data_page1_pte | (EPT_PRESENT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		case 4:
			check_ept_ad(pml4, guest_cr3, (unsigned long)data_page1, 0,
				     have_ad ? EPT_ACCESS_FLAG | EPT_DIRTY_FLAG : 0);
			clear_ept_ad(pml4, guest_cr3, (unsigned long)data_page1);
			if (exit_qual == (EPT_VLT_RD |
					  (have_ad ? EPT_VLT_WR : 0) |
					  EPT_VLT_LADDR_VLD))
				vmx_inc_test_stage();
			set_ept_pte(pml4, data_page1_pte, 2,
				data_page1_pte_pte | (EPT_PRESENT));
			ept_sync(INVEPT_SINGLE, eptp);
			break;
		default:
			// Should not reach here
			printf("ERROR : unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

static int ept_exit_handler()
{
	return ept_exit_handler_common(false);
}

static int eptad_init()
{
	int r = ept_init_common(true);

	if (r == VMX_TEST_EXIT)
		return r;

	if ((rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & EPT_CAP_AD_FLAG) == 0) {
		printf("\tEPT A/D bits are not supported\n");
		return VMX_TEST_EXIT;
	}

	return r;
}

static int pml_init()
{
	u32 ctrl_cpu;
	int r = eptad_init();

	if (r == VMX_TEST_EXIT)
		return r;

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_PML)) {
		printf("\tPML is not supported\n");
		return VMX_TEST_EXIT;
	}

	pml_log = alloc_page();
	memset(pml_log, 0x0, PAGE_SIZE);
	vmcs_write(PMLADDR, (u64)pml_log);
	vmcs_write(GUEST_PML_INDEX, PML_INDEX - 1);

	ctrl_cpu = vmcs_read(CPU_EXEC_CTRL1) | CPU_PML;
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu);

	return VMX_TEST_START;
}

static void pml_main()
{
	int count = 0;

	vmx_set_test_stage(0);
	*((u32 *)data_page2) = 0x1;
	vmcall();
	report("PML - Dirty GPA Logging", vmx_get_test_stage() == 1);

	while (vmx_get_test_stage() == 1) {
		*((u32 *)data_page2) = 0x1;
		if (count++ > PML_INDEX)
			break;
		vmcall();
	}
	report("PML Full Event", vmx_get_test_stage() == 2);
}

static void eptad_main()
{
	ept_common();
}

static int eptad_exit_handler()
{
	return ept_exit_handler_common(true);
}

bool invvpid_test(int type, u16 vpid)
{
	bool ret, supported;

	supported = ept_vpid.val & (VPID_CAP_INVVPID_SINGLE >> INVVPID_SINGLE << type);
	ret = invvpid(type, vpid, 0);

	if (ret == !supported)
		return false;

	if (!supported)
		printf("WARNING: unsupported invvpid passed!\n");
	else
		printf("WARNING: invvpid failed!\n");

	return true;
}

static int vpid_init()
{
	u32 ctrl_cpu1;

	if (!(ctrl_cpu_rev[0].clr & CPU_SECONDARY) ||
	    !(ctrl_cpu_rev[1].clr & CPU_VPID)) {
		printf("\tVPID is not supported\n");
		return VMX_TEST_EXIT;
	}

	ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
	ctrl_cpu1 |= CPU_VPID;
	vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
	return VMX_TEST_START;
}

static void vpid_main()
{
	vmx_set_test_stage(0);
	vmcall();
	report("INVVPID SINGLE ADDRESS", vmx_get_test_stage() == 1);
	vmx_set_test_stage(2);
	vmcall();
	report("INVVPID SINGLE", vmx_get_test_stage() == 3);
	vmx_set_test_stage(4);
	vmcall();
	report("INVVPID ALL", vmx_get_test_stage() == 5);
}

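/*
 * The VMX_VMCALL stages exercise the individual-address, single-context
 * and all-context INVVPID types in turn; invvpid_test() treats it as an
 * error if an unsupported invalidation type succeeds or a supported one
 * fails.
 */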
static int vpid_exit_handler()
{
	u64 guest_rip;
	ulong reason;
	u32 insn_len;

	guest_rip = vmcs_read(GUEST_RIP);
	reason = vmcs_read(EXI_REASON) & 0xff;
	insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			if (!invvpid_test(INVVPID_SINGLE_ADDRESS, 1))
				vmx_inc_test_stage();
			break;
		case 2:
			if (!invvpid_test(INVVPID_SINGLE, 1))
				vmx_inc_test_stage();
			break;
		case 4:
			if (!invvpid_test(INVVPID_ALL, 1))
				vmx_inc_test_stage();
			break;
		default:
			printf("ERROR: unexpected stage, %d\n",
			       vmx_get_test_stage());
			print_vmexit_info();
			return VMX_TEST_VMEXIT;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

#define TIMER_VECTOR 222

static volatile bool timer_fired;

static void timer_isr(isr_regs_t *regs)
{
	timer_fired = true;
	apic_write(APIC_EOI, 0);
}

static int interrupt_init(struct vmcs *vmcs)
{
	msr_bmp_init();
	vmcs_write(PIN_CONTROLS, vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
	handle_irq(TIMER_VECTOR, timer_isr);
	return VMX_TEST_START;
}

static void interrupt_main(void)
{
	long long start, loops;

	vmx_set_test_stage(0);

	apic_write(APIC_LVTT, TIMER_VECTOR);
	irq_enable();

	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("direct interrupt while running guest", timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("intercepted interrupt while running guest", timer_fired);

	irq_enable();
	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("direct interrupt + hlt",
	       rdtsc() - start > 1000000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	asm volatile ("sti; hlt");

	report("intercepted interrupt + hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("direct interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmcall();
	timer_fired = false;
	start = rdtsc();
	apic_write(APIC_TMICT, 1000000);

	irq_enable();
	asm volatile ("nop");
	vmcall();

	report("intercepted interrupt + activity state hlt",
	       rdtsc() - start > 10000 && timer_fired);

	apic_write(APIC_TMICT, 0);
	irq_disable();
	vmx_set_test_stage(7);
	vmcall();
	timer_fired = false;
	apic_write(APIC_TMICT, 1);
	for (loops = 0; loops < 10000000 && !timer_fired; loops++)
		asm volatile ("nop");
	report("running a guest with interrupt acknowledgement set", timer_fired);
}

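/*
 * The vmcall stages toggle the controls for the next sub-test: stages
 * 0/2/5 enable external-interrupt exiting (PIN_EXTINT), stage 7
 * additionally sets acknowledge-interrupt-on-exit (EXI_INTA), stages
 * 1/3 disable interrupt exiting again, and stages 4/6 re-enter the
 * guest in the HLT activity state.
 */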
static int interrupt_exit_handler(void)
{
	u64 guest_rip = vmcs_read(GUEST_RIP);
	ulong reason = vmcs_read(EXI_REASON) & 0xff;
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
		case 2:
		case 5:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 7:
			vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_INTA);
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) | PIN_EXTINT);
			break;
		case 1:
		case 3:
			vmcs_write(PIN_CONTROLS,
				   vmcs_read(PIN_CONTROLS) & ~PIN_EXTINT);
			break;
		case 4:
		case 6:
			vmcs_write(GUEST_ACTV_STATE, ACTV_HLT);
			break;
		}
		vmx_inc_test_stage();
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	case VMX_EXTINT:
		if (vmcs_read(EXI_CONTROLS) & EXI_INTA) {
			int vector = vmcs_read(EXI_INTR_INFO) & 0xff;
			handle_external_interrupt(vector);
		} else {
			irq_enable();
			asm volatile ("nop");
			irq_disable();
		}
		if (vmx_get_test_stage() >= 2)
			vmcs_write(GUEST_ACTV_STATE, ACTV_ACTIVE);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}

	return VMX_TEST_VMEXIT;
}

static int dbgctls_init(struct vmcs *vmcs)
{
	u64 dr7 = 0x402;
	u64 zero = 0;

	msr_bmp_init();
	asm volatile(
		"mov %0,%%dr0\n\t"
		"mov %0,%%dr1\n\t"
		"mov %0,%%dr2\n\t"
		"mov %1,%%dr7\n\t"
		: : "r" (zero), "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
	vmcs_write(GUEST_DR7, 0x404);
	vmcs_write(GUEST_DEBUGCTL, 0x2);

	vmcs_write(ENT_CONTROLS, vmcs_read(ENT_CONTROLS) | ENT_LOAD_DBGCTLS);
	vmcs_write(EXI_CONTROLS, vmcs_read(EXI_CONTROLS) | EXI_SAVE_DBGCTLS);

	return VMX_TEST_START;
}

static void dbgctls_main(void)
{
	u64 dr7, debugctl;

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	(void)debugctl;
	report("Load debug controls", dr7 == 0x404 /* && debugctl == 0x2 */);

	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(0);
	vmcall();
	report("Save debug controls", vmx_get_test_stage() == 1);

	if (ctrl_enter_rev.set & ENT_LOAD_DBGCTLS ||
	    ctrl_exit_rev.set & EXI_SAVE_DBGCTLS) {
		printf("\tDebug controls are always loaded/saved\n");
		return;
	}
	vmx_set_test_stage(2);
	vmcall();

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Commented out: KVM does not support DEBUGCTL so far */
	(void)debugctl;
	report("Guest=host debug controls", dr7 == 0x402 /* && debugctl == 0x1 */);

	dr7 = 0x408;
	asm volatile("mov %0,%%dr7" : : "r" (dr7));
	wrmsr(MSR_IA32_DEBUGCTLMSR, 0x3);

	vmx_set_test_stage(3);
	vmcall();
	report("Don't save debug controls", vmx_get_test_stage() == 4);
}

static int dbgctls_exit_handler(void)
{
	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;
	u32 insn_len = vmcs_read(EXI_INST_LEN);
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u64 dr7, debugctl;

	asm volatile("mov %%dr7,%0" : "=r" (dr7));
	debugctl = rdmsr(MSR_IA32_DEBUGCTLMSR);

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			if (dr7 == 0x400 && debugctl == 0 &&
			    vmcs_read(GUEST_DR7) == 0x408 /* &&
			    Commented out: KVM does not support DEBUGCTL so far
			    vmcs_read(GUEST_DEBUGCTL) == 0x3 */)
				vmx_inc_test_stage();
			break;
		case 2:
			dr7 = 0x402;
			asm volatile("mov %0,%%dr7" : : "r" (dr7));
			wrmsr(MSR_IA32_DEBUGCTLMSR, 0x1);
			vmcs_write(GUEST_DR7, 0x404);
			vmcs_write(GUEST_DEBUGCTL, 0x2);

			vmcs_write(ENT_CONTROLS,
				   vmcs_read(ENT_CONTROLS) & ~ENT_LOAD_DBGCTLS);
			vmcs_write(EXI_CONTROLS,
				   vmcs_read(EXI_CONTROLS) & ~EXI_SAVE_DBGCTLS);
			break;
		case 3:
			if (dr7 == 0x400 && debugctl == 0 &&
			    vmcs_read(GUEST_DR7) == 0x404 /* &&
			    Commented out: KVM does not support DEBUGCTL so far
			    vmcs_read(GUEST_DEBUGCTL) == 0x2 */)
				vmx_inc_test_stage();
			break;
		}
		vmcs_write(GUEST_RIP, guest_rip + insn_len);
		return VMX_TEST_RESUME;
	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

struct vmx_msr_entry {
	u32 index;
	u32 reserved;
	u64 value;
} __attribute__((packed));

#define MSR_MAGIC 0x31415926
struct vmx_msr_entry *exit_msr_store, *entry_msr_load, *exit_msr_load;

static int msr_switch_init(struct vmcs *vmcs)
{
	msr_bmp_init();
	exit_msr_store = alloc_page();
	exit_msr_load = alloc_page();
	entry_msr_load = alloc_page();
	memset(exit_msr_store, 0, PAGE_SIZE);
	memset(exit_msr_load, 0, PAGE_SIZE);
	memset(entry_msr_load, 0, PAGE_SIZE);
	entry_msr_load[0].index = MSR_KERNEL_GS_BASE;
	entry_msr_load[0].value = MSR_MAGIC;

	vmx_set_test_stage(1);
	vmcs_write(ENT_MSR_LD_CNT, 1);
	vmcs_write(ENTER_MSR_LD_ADDR, (u64)entry_msr_load);
	vmcs_write(EXI_MSR_ST_CNT, 1);
	vmcs_write(EXIT_MSR_ST_ADDR, (u64)exit_msr_store);
	vmcs_write(EXI_MSR_LD_CNT, 1);
	vmcs_write(EXIT_MSR_LD_ADDR, (u64)exit_msr_load);
	return VMX_TEST_START;
}

static void msr_switch_main()
{
	if (vmx_get_test_stage() == 1) {
		report("VM entry MSR load",
		       rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC);
		vmx_set_test_stage(2);
		wrmsr(MSR_KERNEL_GS_BASE, MSR_MAGIC + 1);
		exit_msr_store[0].index = MSR_KERNEL_GS_BASE;
		exit_msr_load[0].index = MSR_KERNEL_GS_BASE;
		exit_msr_load[0].value = MSR_MAGIC + 2;
	}
	vmcall();
}

static int msr_switch_exit_handler()
{
	ulong reason;

	reason = vmcs_read(EXI_REASON);
	if (reason == VMX_VMCALL && vmx_get_test_stage() == 2) {
		report("VM exit MSR store",
		       exit_msr_store[0].value == MSR_MAGIC + 1);
		report("VM exit MSR load",
		       rdmsr(MSR_KERNEL_GS_BASE) == MSR_MAGIC + 2);
		vmx_set_test_stage(3);
		entry_msr_load[0].index = MSR_FS_BASE;
		return VMX_TEST_RESUME;
	}
	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
	       __func__, vmx_get_test_stage(), reason);
	return VMX_TEST_EXIT;
}

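/*
 * Stage 3 asks VM entry to load MSR_FS_BASE through the MSR-load list,
 * which the SDM forbids (FS.base is loaded from the guest-state area
 * instead); entry fails late with exit reason VMX_ENTRY_FAILURE |
 * VMX_FAIL_MSR, and the exit qualification holds the 1-based index of
 * the offending MSR-load entry, here 1.
 */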
static int msr_switch_entry_failure(struct vmentry_failure *failure)
{
	ulong reason;

	if (failure->early) {
		printf("ERROR %s: early exit\n", __func__);
		return VMX_TEST_EXIT;
	}

	reason = vmcs_read(EXI_REASON);
	if (reason == (VMX_ENTRY_FAILURE | VMX_FAIL_MSR) &&
	    vmx_get_test_stage() == 3) {
		report("VM entry MSR load: try to load FS_BASE",
		       vmcs_read(EXI_QUALIFICATION) == 1);
		return VMX_TEST_VMEXIT;
	}
	printf("ERROR %s: unexpected stage=%u or reason=%lu\n",
	       __func__, vmx_get_test_stage(), reason);
	return VMX_TEST_EXIT;
}

static int vmmcall_init(struct vmcs *vmcs)
{
	vmcs_write(EXC_BITMAP, 1 << UD_VECTOR);
	return VMX_TEST_START;
}

static void vmmcall_main(void)
{
	asm volatile(
		"mov $0xABCD, %%rax\n\t"
		"vmmcall\n\t"
		::: "rax");

	report("VMMCALL", 0);
}

static int vmmcall_exit_handler()
{
	ulong reason;

	reason = vmcs_read(EXI_REASON);
	switch (reason) {
	case VMX_VMCALL:
		report("VMMCALL triggers #UD", 0);
		break;
	case VMX_EXC_NMI:
		report("VMMCALL triggers #UD",
		       (vmcs_read(EXI_INTR_INFO) & 0xff) == UD_VECTOR);
		break;
	default:
		printf("Unknown exit reason, %ld\n", reason);
		print_vmexit_info();
	}

	return VMX_TEST_VMEXIT;
}

static int disable_rdtscp_init(struct vmcs *vmcs)
{
	u32 ctrl_cpu1;

	if (ctrl_cpu_rev[0].clr & CPU_SECONDARY) {
		ctrl_cpu1 = vmcs_read(CPU_EXEC_CTRL1);
		ctrl_cpu1 &= ~CPU_RDTSCP;
		vmcs_write(CPU_EXEC_CTRL1, ctrl_cpu1);
	}

	return VMX_TEST_START;
}

static void disable_rdtscp_ud_handler(struct ex_regs *regs)
{
	switch (vmx_get_test_stage()) {
	case 0:
		report("RDTSCP triggers #UD", true);
		vmx_inc_test_stage();
		regs->rip += 3;	/* skip the 3-byte rdtscp */
		break;
	case 2:
		report("RDPID triggers #UD", true);
		vmx_inc_test_stage();
		regs->rip += 4;	/* skip the 4-byte rdpid */
		break;
	}
}

static void disable_rdtscp_main(void)
{
	/* Test that #UD is properly injected in L2. */
	handle_exception(UD_VECTOR, disable_rdtscp_ud_handler);

	vmx_set_test_stage(0);
	asm volatile("rdtscp" : : : "eax", "ecx", "edx");
	vmcall();
	/* rdpid %rax */
	asm volatile(".byte 0xf3, 0x0f, 0xc7, 0xf8" : : : "eax");
	vmcall();
}

static int disable_rdtscp_exit_handler(void)
{
	unsigned int reason = vmcs_read(EXI_REASON) & 0xff;

	switch (reason) {
	case VMX_VMCALL:
		switch (vmx_get_test_stage()) {
		case 0:
			report("RDTSCP triggers #UD", false);
			vmx_inc_test_stage();
			/* fallthrough */
		case 1:
			vmx_inc_test_stage();
			vmcs_write(GUEST_RIP, vmcs_read(GUEST_RIP) + 3);
			return VMX_TEST_RESUME;
		case 2:
			report("RDPID triggers #UD", false);
			break;
		}
		break;

	default:
		printf("Unknown exit reason, %d\n", reason);
		print_vmexit_info();
	}
	return VMX_TEST_VMEXIT;
}

int int3_init()
{
	vmcs_write(EXC_BITMAP, ~0u);
	return VMX_TEST_START;
}

void int3_guest_main()
{
	asm volatile ("int3");
}

int int3_exit_handler()
{
	u32 reason = vmcs_read(EXI_REASON);
	u32 intr_info = vmcs_read(EXI_INTR_INFO);

	report("L1 intercepts #BP", reason == VMX_EXC_NMI &&
	       (intr_info & INTR_INFO_VALID_MASK) &&
	       (intr_info & INTR_INFO_VECTOR_MASK) == BP_VECTOR &&
	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
		INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION);

	return VMX_TEST_VMEXIT;
}

int into_init()
{
	vmcs_write(EXC_BITMAP, ~0u);
	return VMX_TEST_START;
}

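/*
 * INTO is invalid in 64-bit mode, so the guest far-calls into a 32-bit
 * code segment, overflows EAX to raise #OF, and returns with lret; the
 * int3/into tests both verify that L1 intercepts the resulting
 * software exception.
 */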
void into_guest_main()
{
	struct far_pointer32 fp = {
		.offset = (uintptr_t)&&into,
		.selector = KERNEL_CS32,
	};
	register uintptr_t rsp asm("rsp");

	if (fp.offset != (uintptr_t)&&into) {
		printf("Code address too high.\n");
		return;
	}
	if ((u32)rsp != rsp) {
		printf("Stack address too high.\n");
		return;
	}

	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
	return;
into:
	asm volatile (".code32;"
		      "movl $0x7fffffff, %eax;"
		      "addl %eax, %eax;"
		      "into;"
		      "lret;"
		      ".code64");
	__builtin_unreachable();
}

int into_exit_handler()
{
	u32 reason = vmcs_read(EXI_REASON);
	u32 intr_info = vmcs_read(EXI_INTR_INFO);

	report("L1 intercepts #OF", reason == VMX_EXC_NMI &&
	       (intr_info & INTR_INFO_VALID_MASK) &&
	       (intr_info & INTR_INFO_VECTOR_MASK) == OF_VECTOR &&
	       ((intr_info & INTR_INFO_INTR_TYPE_MASK) >>
		INTR_INFO_INTR_TYPE_SHIFT) == VMX_INTR_TYPE_SOFT_EXCEPTION);

	return VMX_TEST_VMEXIT;
}

static void exit_monitor_from_l2_main(void)
{
	printf("Calling exit(0) from l2...\n");
	exit(0);
}

static int exit_monitor_from_l2_handler(void)
{
	report("The guest should have killed the VMM", false);
	return VMX_TEST_EXIT;
}

static void assert_exit_reason(u64 expected)
{
	u64 actual = vmcs_read(EXI_REASON);

	TEST_ASSERT_EQ_MSG(expected, actual, "Expected %s, got %s.",
			   exit_reason_description(expected),
			   exit_reason_description(actual));
}

static void skip_exit_vmcall()
{
	u64 guest_rip = vmcs_read(GUEST_RIP);
	u32 insn_len = vmcs_read(EXI_INST_LEN);

	assert_exit_reason(VMX_VMCALL);
	vmcs_write(GUEST_RIP, guest_rip + insn_len);
}

static void v2_null_test_guest(void)
{
}

static void v2_null_test(void)
{
	test_set_guest(v2_null_test_guest);
	enter_guest();
	report(__func__, 1);
}

static void v2_multiple_entries_test_guest(void)
{
	vmx_set_test_stage(1);
	vmcall();
	vmx_set_test_stage(2);
}

static void v2_multiple_entries_test(void)
{
	test_set_guest(v2_multiple_entries_test_guest);
	enter_guest();
	TEST_ASSERT_EQ(vmx_get_test_stage(), 1);
	skip_exit_vmcall();
	enter_guest();
	TEST_ASSERT_EQ(vmx_get_test_stage(), 2);
	report(__func__, 1);
}

static int fixture_test_data = 1;

static void fixture_test_teardown(void *data)
{
	*((int *) data) = 1;
}

static void fixture_test_guest(void)
{
	fixture_test_data++;
}

static void fixture_test_setup(void)
{
	TEST_ASSERT_EQ_MSG(1, fixture_test_data,
			   "fixture_test_teardown didn't run?!");
	fixture_test_data = 2;
	test_add_teardown(fixture_test_teardown, &fixture_test_data);
	test_set_guest(fixture_test_guest);
}

static void fixture_test_case1(void)
{
	fixture_test_setup();
	TEST_ASSERT_EQ(2, fixture_test_data);
	enter_guest();
	TEST_ASSERT_EQ(3, fixture_test_data);
	report(__func__, 1);
}

static void fixture_test_case2(void)
{
	fixture_test_setup();
	TEST_ASSERT_EQ(2, fixture_test_data);
	enter_guest();
	TEST_ASSERT_EQ(3, fixture_test_data);
	report(__func__, 1);
}

enum ept_access_op {
	OP_READ,
	OP_WRITE,
	OP_EXEC,
	OP_FLUSH_TLB,
	OP_EXIT,
};

static struct ept_access_test_data {
	unsigned long gpa;
	unsigned long *gva;
	unsigned long hpa;
	unsigned long *hva;
	enum ept_access_op op;
} ept_access_test_data;

extern unsigned char ret42_start;
extern unsigned char ret42_end;

/* Returns 42. */
asm(
	".align 64\n"
	"ret42_start:\n"
	"mov $42, %eax\n"
	"ret\n"
	"ret42_end:\n"
);

static void
diagnose_ept_violation_qual(u64 expected, u64 actual)
{
#define DIAGNOSE(flag) \
do { \
	if ((expected & flag) != (actual & flag)) \
		printf(#flag " %sexpected\n", \
		       (expected & flag) ? "" : "un"); \
} while (0)

	DIAGNOSE(EPT_VLT_RD);
	DIAGNOSE(EPT_VLT_WR);
	DIAGNOSE(EPT_VLT_FETCH);
	DIAGNOSE(EPT_VLT_PERM_RD);
	DIAGNOSE(EPT_VLT_PERM_WR);
	DIAGNOSE(EPT_VLT_PERM_EX);
	DIAGNOSE(EPT_VLT_LADDR_VLD);
	DIAGNOSE(EPT_VLT_PADDR);

#undef DIAGNOSE
}

static void do_ept_access_op(enum ept_access_op op)
{
	ept_access_test_data.op = op;
	enter_guest();
}

/*
 * Force the guest to flush its TLB (i.e., flush gva -> gpa mappings). Only
 * needed by tests that modify guest PTEs.
 */
static void ept_access_test_guest_flush_tlb(void)
{
	do_ept_access_op(OP_FLUSH_TLB);
	skip_exit_vmcall();
}

/*
 * Modifies the EPT entry at @level in the mapping of @gpa. First clears the
 * bits in @clear, then sets the bits in @set. @mkhuge transforms the entry
 * into a huge page.
 */
static unsigned long ept_twiddle(unsigned long gpa, bool mkhuge, int level,
				 unsigned long clear, unsigned long set)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long orig_pte;
	unsigned long pte;

	/* Screw with the mapping at the requested level. */
	orig_pte = get_ept_pte(pml4, gpa, level);
	TEST_ASSERT(orig_pte != -1);
	pte = orig_pte;
	if (mkhuge)
		pte = (orig_pte & ~EPT_ADDR_MASK) | data->hpa | EPT_LARGE_PAGE;
	pte = (pte & ~clear) | set;
	set_ept_pte(pml4, gpa, level, pte);
	ept_sync(INVEPT_SINGLE, eptp);

	return orig_pte;
}

static void ept_untwiddle(unsigned long gpa, int level, unsigned long orig_pte)
{
	set_ept_pte(pml4, gpa, level, orig_pte);
}
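/*
 * Typical use of the twiddle/untwiddle pair, sketched for the level-1
 * read-only case (the real callers are the ept_violation_*() and
 * ept_allowed_*() helpers below):
 *
 *	unsigned long orig;
 *
 *	orig = ept_twiddle(data->gpa, false, 1, EPT_PRESENT, EPT_RA);
 *	do_ept_access_op(OP_WRITE);	// violates: entry is now read-only
 *	...				// inspect the exit qualification
 *	ept_untwiddle(data->gpa, 1, orig);
 */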
static void do_ept_violation(bool leaf, enum ept_access_op op,
			     u64 expected_qual, u64 expected_paddr)
{
	u64 qual;

	/* Try the access and observe the violation. */
	do_ept_access_op(op);

	assert_exit_reason(VMX_EPT_VIOLATION);

	qual = vmcs_read(EXI_QUALIFICATION);

	diagnose_ept_violation_qual(expected_qual, qual);
	TEST_EXPECT_EQ(expected_qual, qual);

#if 0
	/* Disabled for now, otherwise every test would fail. */
	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
		       (unsigned long) (
			       op == OP_EXEC ? data->gva + 1 : data->gva));
#endif
	/*
	 * TODO: tests that probe expected_paddr in pages other than the one at
	 * the beginning of the 1G region.
	 */
	TEST_EXPECT_EQ(vmcs_read(INFO_PHYS_ADDR), expected_paddr);
}

static void
ept_violation_at_level_mkhuge(bool mkhuge, int level, unsigned long clear,
			      unsigned long set, enum ept_access_op op,
			      u64 expected_qual)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long orig_pte;

	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);

	do_ept_violation(level == 1 || mkhuge, op, expected_qual,
			 op == OP_EXEC ? data->gpa + sizeof(unsigned long) :
					 data->gpa);

	/* Fix the violation and resume the op loop. */
	ept_untwiddle(data->gpa, level, orig_pte);
	enter_guest();
	skip_exit_vmcall();
}

static void
ept_violation_at_level(int level, unsigned long clear, unsigned long set,
		       enum ept_access_op op, u64 expected_qual)
{
	ept_violation_at_level_mkhuge(false, level, clear, set, op,
				      expected_qual);
	if (ept_huge_pages_supported(level))
		ept_violation_at_level_mkhuge(true, level, clear, set, op,
					      expected_qual);
}

static void ept_violation(unsigned long clear, unsigned long set,
			  enum ept_access_op op, u64 expected_qual)
{
	ept_violation_at_level(1, clear, set, op, expected_qual);
	ept_violation_at_level(2, clear, set, op, expected_qual);
	ept_violation_at_level(3, clear, set, op, expected_qual);
	ept_violation_at_level(4, clear, set, op, expected_qual);
}

static void ept_access_violation(unsigned long access, enum ept_access_op op,
				 u64 expected_qual)
{
	ept_violation(EPT_PRESENT, access, op,
		      expected_qual | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}
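/*
 * Worked example of how an expected qualification is composed: for a guest
 * write to a read-only mapping, ept_access_test_read_only() below passes
 * EPT_VLT_WR | EPT_VLT_PERM_RD, and ept_access_violation() ORs in the common
 * bits, giving
 *
 *	EPT_VLT_WR		the access was a write
 *	| EPT_VLT_PERM_RD	the entry permitted only reads
 *	| EPT_VLT_LADDR_VLD	a guest linear address was involved
 *	| EPT_VLT_PADDR		the fault was on the final translation
 */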
/*
 * For translations that don't involve a GVA, that is, physical address
 * (paddr) accesses, EPT violations don't set the flag EPT_VLT_PADDR. For a
 * typical guest memory access, the hardware does GVA -> GPA -> HPA. However,
 * certain translations don't involve GVAs, such as when the hardware does the
 * guest page table walk. For example, in translating GVA_1 -> GPA_1, the
 * guest MMU might try to set an A bit on a guest PTE. If the GPA_2 that the
 * PTE resides on isn't present in the EPT, then the EPT violation will be for
 * GPA_2 and the EPT_VLT_PADDR bit will be clear in the exit qualification.
 *
 * Note that paddr violations can also be triggered by loading PAE page tables
 * with wonky addresses. We don't test that yet.
 *
 * This function modifies the EPT entry that maps the GPA that the guest page
 * table entry mapping ept_access_test_data.gva resides on.
 *
 *	@ept_access		EPT permissions to set. Other permissions are
 *				cleared.
 *
 *	@pte_ad			Set the A/D bits on the guest PTE accordingly.
 *
 *	@op			Guest operation to perform with
 *				ept_access_test_data.gva.
 *
 *	@expect_violation	Is a violation expected during the paddr
 *				access?
 *
 *	@expected_qual		Expected qualification for the EPT violation.
 *				EPT_VLT_PADDR should be clear.
 */
static void ept_access_paddr(unsigned long ept_access, unsigned long pte_ad,
			     enum ept_access_op op, bool expect_violation,
			     u64 expected_qual)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long *ptep;
	unsigned long gpa;
	unsigned long orig_epte;

	/* Modify the guest PTE mapping data->gva according to @pte_ad. */
	ptep = get_pte_level(current_page_table(), data->gva, /*level=*/1);
	TEST_ASSERT(ptep);
	TEST_ASSERT_EQ(*ptep & PT_ADDR_MASK, data->gpa);
	*ptep = (*ptep & ~PT_AD_MASK) | pte_ad;
	ept_access_test_guest_flush_tlb();

	/*
	 * Now modify the access bits on the EPT entry for the GPA that the
	 * guest PTE resides on. Note that by modifying a single EPT entry,
	 * we're potentially affecting 512 guest PTEs. However, we've carefully
	 * constructed our test such that those other 511 PTEs aren't used by
	 * the guest: data->gva is at the beginning of a 1G huge page, thus the
	 * PTE we're modifying is at the beginning of a 4K page and the
	 * following 511 entries are also under our control (and not touched by
	 * the guest).
	 */
	gpa = virt_to_phys(ptep);
	TEST_ASSERT_EQ(gpa & ~PAGE_MASK, 0);
	/*
	 * Make sure the guest page table page is mapped with a 4K EPT entry,
	 * otherwise our level=1 twiddling below will fail. We use the
	 * identity map (gpa = gpa) since page tables are shared with the host.
	 */
	install_ept(pml4, gpa, gpa, EPT_PRESENT);
	orig_epte = ept_twiddle(gpa, /*mkhuge=*/0, /*level=*/1,
				/*clear=*/EPT_PRESENT, /*set=*/ept_access);

	if (expect_violation) {
		do_ept_violation(/*leaf=*/true, op,
				 expected_qual | EPT_VLT_LADDR_VLD, gpa);
		ept_untwiddle(gpa, /*level=*/1, orig_epte);
		do_ept_access_op(op);
	} else {
		do_ept_access_op(op);
		ept_untwiddle(gpa, /*level=*/1, orig_epte);
	}

	/* Any completed access sets the A bit; writes also set the D bit. */
	TEST_ASSERT(*ptep & PT_ACCESSED_MASK);
	if ((pte_ad & PT_DIRTY_MASK) || op == OP_WRITE)
		TEST_ASSERT(*ptep & PT_DIRTY_MASK);

	skip_exit_vmcall();
}

static void ept_access_allowed_paddr(unsigned long ept_access,
				     unsigned long pte_ad,
				     enum ept_access_op op)
{
	ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/false,
			 /*expected_qual=*/-1);
}

static void ept_access_violation_paddr(unsigned long ept_access,
				       unsigned long pte_ad,
				       enum ept_access_op op,
				       u64 expected_qual)
{
	ept_access_paddr(ept_access, pte_ad, op, /*expect_violation=*/true,
			 expected_qual);
}
static void ept_allowed_at_level_mkhuge(bool mkhuge, int level,
					unsigned long clear,
					unsigned long set,
					enum ept_access_op op)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long orig_pte;

	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);

	/* No violation. Should proceed to vmcall. */
	do_ept_access_op(op);
	skip_exit_vmcall();

	ept_untwiddle(data->gpa, level, orig_pte);
}

static void ept_allowed_at_level(int level, unsigned long clear,
				 unsigned long set, enum ept_access_op op)
{
	ept_allowed_at_level_mkhuge(false, level, clear, set, op);
	if (ept_huge_pages_supported(level))
		ept_allowed_at_level_mkhuge(true, level, clear, set, op);
}

static void ept_allowed(unsigned long clear, unsigned long set,
			enum ept_access_op op)
{
	ept_allowed_at_level(1, clear, set, op);
	ept_allowed_at_level(2, clear, set, op);
	ept_allowed_at_level(3, clear, set, op);
	ept_allowed_at_level(4, clear, set, op);
}

static void ept_ignored_bit(int bit)
{
	/* Set the bit. */
	ept_allowed(0, 1ul << bit, OP_READ);
	ept_allowed(0, 1ul << bit, OP_WRITE);
	ept_allowed(0, 1ul << bit, OP_EXEC);

	/* Clear the bit. */
	ept_allowed(1ul << bit, 0, OP_READ);
	ept_allowed(1ul << bit, 0, OP_WRITE);
	ept_allowed(1ul << bit, 0, OP_EXEC);
}

static void ept_access_allowed(unsigned long access, enum ept_access_op op)
{
	ept_allowed(EPT_PRESENT, access, op);
}

static void ept_misconfig_at_level_mkhuge_op(bool mkhuge, int level,
					     unsigned long clear,
					     unsigned long set,
					     enum ept_access_op op)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long orig_pte;

	orig_pte = ept_twiddle(data->gpa, mkhuge, level, clear, set);

	do_ept_access_op(op);
	assert_exit_reason(VMX_EPT_MISCONFIG);

	/* Intel SDM 27.2.1: "For all other VM exits, this field is cleared." */
#if 0
	/* broken: */
	TEST_EXPECT_EQ_MSG(vmcs_read(EXI_QUALIFICATION), 0);
#endif
#if 0
	/*
	 * broken:
	 * According to the description of the exit qualification for EPT
	 * violations, EPT_VLT_LADDR_VLD indicates whether
	 * GUEST_LINEAR_ADDRESS is valid. However, nothing says
	 * GUEST_LINEAR_ADDRESS ought to be set for a misconfig.
	 */
	TEST_EXPECT_EQ(vmcs_read(GUEST_LINEAR_ADDRESS),
		       (unsigned long) (
			       op == OP_EXEC ? data->gva + 1 : data->gva));
#endif

	/* Fix the misconfig and resume the op loop. */
	ept_untwiddle(data->gpa, level, orig_pte);
	enter_guest();
	skip_exit_vmcall();
}

static void ept_misconfig_at_level_mkhuge(bool mkhuge, int level,
					  unsigned long clear,
					  unsigned long set)
{
	/* The op shouldn't matter (read, write, exec), so try them all! */
	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_READ);
	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_WRITE);
	ept_misconfig_at_level_mkhuge_op(mkhuge, level, clear, set, OP_EXEC);
}

static void ept_misconfig_at_level(int level, unsigned long clear,
				   unsigned long set)
{
	ept_misconfig_at_level_mkhuge(false, level, clear, set);
	if (ept_huge_pages_supported(level))
		ept_misconfig_at_level_mkhuge(true, level, clear, set);
}

static void ept_misconfig(unsigned long clear, unsigned long set)
{
	ept_misconfig_at_level(1, clear, set);
	ept_misconfig_at_level(2, clear, set);
	ept_misconfig_at_level(3, clear, set);
	ept_misconfig_at_level(4, clear, set);
}

static void ept_access_misconfig(unsigned long access)
{
	ept_misconfig(EPT_PRESENT, access);
}
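/*
 * Sketch of the misconfig-vs-violation split the helpers above implement,
 * using the write-only case exercised by ept_access_test_write_only() below:
 * with the present bit set, an illegal permission combination is an EPT
 * misconfig, while a non-present entry simply violates on access.
 *
 *	ept_access_misconfig(EPT_WA);			// -w- is illegal
 *	ept_access_violation(0, OP_READ, EPT_VLT_RD);	// not present
 */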
static void ept_reserved_bit_at_level_nohuge(int level, int bit)
{
	/* Setting the bit causes a misconfig. */
	ept_misconfig_at_level_mkhuge(false, level, 0, 1ul << bit);

	/* Making the entry non-present turns reserved bits into ignored. */
	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}

static void ept_reserved_bit_at_level_huge(int level, int bit)
{
	/* Setting the bit causes a misconfig. */
	ept_misconfig_at_level_mkhuge(true, level, 0, 1ul << bit);

	/* Making the entry non-present turns reserved bits into ignored. */
	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}

static void ept_reserved_bit_at_level(int level, int bit)
{
	/* Setting the bit causes a misconfig. */
	ept_misconfig_at_level(level, 0, 1ul << bit);

	/* Making the entry non-present turns reserved bits into ignored. */
	ept_violation_at_level(level, EPT_PRESENT, 1ul << bit, OP_READ,
			       EPT_VLT_RD | EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
}

static void ept_reserved_bit(int bit)
{
	ept_reserved_bit_at_level(1, bit);
	ept_reserved_bit_at_level(2, bit);
	ept_reserved_bit_at_level(3, bit);
	ept_reserved_bit_at_level(4, bit);
}

#define PAGE_2M_ORDER 9
#define PAGE_1G_ORDER 18

static void *get_1g_page(void)
{
	static void *alloc;

	if (!alloc)
		alloc = alloc_pages(PAGE_1G_ORDER);
	return alloc;
}

static void ept_access_test_teardown(void *unused)
{
	/* Exit the guest cleanly. */
	do_ept_access_op(OP_EXIT);
}

static void ept_access_test_guest(void)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	int (*code)(void) = (int (*)(void)) &data->gva[1];

	while (true) {
		switch (data->op) {
		case OP_READ:
			TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_1);
			break;
		case OP_WRITE:
			*data->gva = MAGIC_VAL_2;
			TEST_ASSERT_EQ(*data->gva, MAGIC_VAL_2);
			*data->gva = MAGIC_VAL_1;
			break;
		case OP_EXEC:
			TEST_ASSERT_EQ(42, code());
			break;
		case OP_FLUSH_TLB:
			write_cr3(read_cr3());
			break;
		case OP_EXIT:
			return;
		default:
			TEST_ASSERT_MSG(false, "Unknown op %d", data->op);
		}
		vmcall();
	}
}
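/*
 * Layout of the test region, as established by ept_access_test_setup()
 * below: gva[0] holds MAGIC_VAL_1 and is the read/write target, while the
 * ret42 stub ("mov $42, %eax; ret") is copied to &gva[1], which is why
 * OP_EXEC above calls through &data->gva[1] and expects 42 back.
 */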
static void ept_access_test_setup(void)
{
	struct ept_access_test_data *data = &ept_access_test_data;
	unsigned long npages = 1ul << PAGE_1G_ORDER;
	unsigned long size = npages * PAGE_SIZE;
	unsigned long *page_table = current_page_table();

	if (setup_ept(false))
		test_skip("EPT not supported");

	test_set_guest(ept_access_test_guest);
	test_add_teardown(ept_access_test_teardown, NULL);

	data->hva = get_1g_page();
	TEST_ASSERT(data->hva);
	data->hpa = virt_to_phys(data->hva);

	data->gpa = 1ul << 40;
	data->gva = (void *) ALIGN((unsigned long) alloc_vpages(npages * 2),
				   size);
	TEST_ASSERT(!any_present_pages(page_table, data->gva, size));
	install_pages(page_table, data->gpa, size, data->gva);

	/*
	 * Make sure nothing's mapped here so the tests that screw with the
	 * pml4 entry don't inadvertently break something.
	 */
	TEST_ASSERT_EQ(get_ept_pte(pml4, data->gpa, 4), -1);
	TEST_ASSERT_EQ(get_ept_pte(pml4, data->gpa + size - 1, 4), -1);
	install_ept(pml4, data->hpa, data->gpa, EPT_PRESENT);

	data->hva[0] = MAGIC_VAL_1;
	memcpy(&data->hva[1], &ret42_start, &ret42_end - &ret42_start);
}

static void ept_access_test_not_present(void)
{
	ept_access_test_setup();
	/* --- */
	ept_access_violation(0, OP_READ, EPT_VLT_RD);
	ept_access_violation(0, OP_WRITE, EPT_VLT_WR);
	ept_access_violation(0, OP_EXEC, EPT_VLT_FETCH);
}

static void ept_access_test_read_only(void)
{
	ept_access_test_setup();

	/* r-- */
	ept_access_allowed(EPT_RA, OP_READ);
	ept_access_violation(EPT_RA, OP_WRITE, EPT_VLT_WR | EPT_VLT_PERM_RD);
	ept_access_violation(EPT_RA, OP_EXEC, EPT_VLT_FETCH | EPT_VLT_PERM_RD);
}

static void ept_access_test_write_only(void)
{
	ept_access_test_setup();
	/* -w- */
	ept_access_misconfig(EPT_WA);
}

static void ept_access_test_read_write(void)
{
	ept_access_test_setup();
	/* rw- */
	ept_access_allowed(EPT_RA | EPT_WA, OP_READ);
	ept_access_allowed(EPT_RA | EPT_WA, OP_WRITE);
	ept_access_violation(EPT_RA | EPT_WA, OP_EXEC,
			     EPT_VLT_FETCH | EPT_VLT_PERM_RD | EPT_VLT_PERM_WR);
}

static void ept_access_test_execute_only(void)
{
	ept_access_test_setup();
	/* --x */
	if (ept_execute_only_supported()) {
		ept_access_violation(EPT_EA, OP_READ,
				     EPT_VLT_RD | EPT_VLT_PERM_EX);
		ept_access_violation(EPT_EA, OP_WRITE,
				     EPT_VLT_WR | EPT_VLT_PERM_EX);
		ept_access_allowed(EPT_EA, OP_EXEC);
	} else {
		ept_access_misconfig(EPT_EA);
	}
}

static void ept_access_test_read_execute(void)
{
	ept_access_test_setup();
	/* r-x */
	ept_access_allowed(EPT_RA | EPT_EA, OP_READ);
	ept_access_violation(EPT_RA | EPT_EA, OP_WRITE,
			     EPT_VLT_WR | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX);
	ept_access_allowed(EPT_RA | EPT_EA, OP_EXEC);
}

static void ept_access_test_write_execute(void)
{
	ept_access_test_setup();
	/* -wx */
	ept_access_misconfig(EPT_WA | EPT_EA);
}

static void ept_access_test_read_write_execute(void)
{
	ept_access_test_setup();
	/* rwx */
	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_READ);
	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_WRITE);
	ept_access_allowed(EPT_RA | EPT_WA | EPT_EA, OP_EXEC);
}
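/*
 * Summary of the permission combinations covered above (EPT_RA/EPT_WA/EPT_EA
 * = read/write/execute allowed):
 *
 *	---	every access violates
 *	r--	write and exec violate
 *	-w-	misconfig (write-only is illegal)
 *	rw-	exec violates
 *	--x	read and write violate if execute-only is supported,
 *		otherwise misconfig
 *	r-x	write violates
 *	-wx	misconfig
 *	rwx	everything allowed
 */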
static void ept_access_test_reserved_bits(void)
{
	int i;
	int maxphyaddr;

	ept_access_test_setup();

	/* Reserved bits above maxphyaddr. */
	maxphyaddr = cpuid_maxphyaddr();
	for (i = maxphyaddr; i <= 51; i++) {
		report_prefix_pushf("reserved_bit=%d", i);
		ept_reserved_bit(i);
		report_prefix_pop();
	}

	/* Level-specific reserved bits. */
	ept_reserved_bit_at_level_nohuge(2, 3);
	ept_reserved_bit_at_level_nohuge(2, 4);
	ept_reserved_bit_at_level_nohuge(2, 5);
	ept_reserved_bit_at_level_nohuge(2, 6);
	/* 2M alignment. */
	for (i = 12; i < 20; i++) {
		report_prefix_pushf("reserved_bit=%d", i);
		ept_reserved_bit_at_level_huge(2, i);
		report_prefix_pop();
	}
	ept_reserved_bit_at_level_nohuge(3, 3);
	ept_reserved_bit_at_level_nohuge(3, 4);
	ept_reserved_bit_at_level_nohuge(3, 5);
	ept_reserved_bit_at_level_nohuge(3, 6);
	/* 1G alignment. */
	for (i = 12; i < 29; i++) {
		report_prefix_pushf("reserved_bit=%d", i);
		ept_reserved_bit_at_level_huge(3, i);
		report_prefix_pop();
	}
	ept_reserved_bit_at_level(4, 3);
	ept_reserved_bit_at_level(4, 4);
	ept_reserved_bit_at_level(4, 5);
	ept_reserved_bit_at_level(4, 6);
	ept_reserved_bit_at_level(4, 7);
}

static void ept_access_test_ignored_bits(void)
{
	ept_access_test_setup();
	/*
	 * Bits ignored at every level. Bits 8 and 9 (A and D) are ignored as
	 * far as translation is concerned even if A/D bits are enabled in the
	 * EPTP. Bit 63 is ignored because the "EPT-violation #VE"
	 * VM-execution control is 0.
	 */
	ept_ignored_bit(8);
	ept_ignored_bit(9);
	ept_ignored_bit(10);
	ept_ignored_bit(11);
	ept_ignored_bit(52);
	ept_ignored_bit(53);
	ept_ignored_bit(54);
	ept_ignored_bit(55);
	ept_ignored_bit(56);
	ept_ignored_bit(57);
	ept_ignored_bit(58);
	ept_ignored_bit(59);
	ept_ignored_bit(60);
	ept_ignored_bit(61);
	ept_ignored_bit(62);
	ept_ignored_bit(63);
}

static void ept_access_test_paddr_not_present_ad_disabled(void)
{
	ept_access_test_setup();
	ept_disable_ad_bits();

	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, EPT_VLT_RD);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, EPT_VLT_RD);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, EPT_VLT_RD);
}

static void ept_access_test_paddr_not_present_ad_enabled(void)
{
	/* With A/D bits enabled, the page-table walk is a read+write. */
	u64 qual = EPT_VLT_RD | EPT_VLT_WR;

	ept_access_test_setup();
	ept_enable_ad_bits_or_skip_test();

	ept_access_violation_paddr(0, PT_AD_MASK, OP_READ, qual);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_WRITE, qual);
	ept_access_violation_paddr(0, PT_AD_MASK, OP_EXEC, qual);
}
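/*
 * The qualifications asserted by the two tests above follow from how the
 * hardware touches guest paging structures: with EPT A/D bits disabled, the
 * walk merely reads the PTE (EPT_VLT_RD); with A/D bits enabled, every walk
 * is treated as a read+write (EPT_VLT_RD | EPT_VLT_WR). In both cases the
 * violating access targets the paging structure itself, so EPT_VLT_PADDR
 * stays clear, as explained in the comment above ept_access_paddr().
 */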
static void ept_access_test_paddr_read_only_ad_disabled(void)
{
	/*
	 * When EPT A/D bits are disabled, all accesses to guest paging
	 * structures are reported separately as a read and (after
	 * translation of the GPA to host physical address) a read+write
	 * if the A/D bits have to be set.
	 */
	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;

	ept_access_test_setup();
	ept_disable_ad_bits();

	/* Can't update A bit, so all accesses fail. */
	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
	/* A/D bits disabled, so only writes try to update the D bit. */
	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
	ept_access_allowed_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC);
	/* Both A and D already set, so read-only is OK. */
	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_READ);
	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_WRITE);
	ept_access_allowed_paddr(EPT_RA, PT_AD_MASK, OP_EXEC);
}

static void ept_access_test_paddr_read_only_ad_enabled(void)
{
	/*
	 * When EPT A/D bits are enabled, all accesses to guest paging
	 * structures are considered writes as far as EPT translation
	 * is concerned.
	 */
	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD;

	ept_access_test_setup();
	ept_enable_ad_bits_or_skip_test();

	ept_access_violation_paddr(EPT_RA, 0, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, 0, OP_EXEC, qual);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, PT_ACCESSED_MASK, OP_EXEC, qual);
	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA, PT_AD_MASK, OP_EXEC, qual);
}

static void ept_access_test_paddr_read_write(void)
{
	ept_access_test_setup();
	/* Read-write access to paging structure. */
	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_READ);
	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_WRITE);
	ept_access_allowed_paddr(EPT_RA | EPT_WA, 0, OP_EXEC);
}

static void ept_access_test_paddr_read_write_execute(void)
{
	ept_access_test_setup();
	/* RWX access to paging structure. */
	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_READ);
	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_WRITE);
	ept_access_allowed_paddr(EPT_PRESENT, 0, OP_EXEC);
}
static void ept_access_test_paddr_read_execute_ad_disabled(void)
{
	/*
	 * When EPT A/D bits are disabled, all accesses to guest paging
	 * structures are reported separately as a read and (after
	 * translation of the GPA to host physical address) a read+write
	 * if the A/D bits have to be set.
	 */
	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX;

	ept_access_test_setup();
	ept_disable_ad_bits();

	/* Can't update A bit, so all accesses fail. */
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual);
	/* A/D bits disabled, so only writes try to update the D bit. */
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE,
				   qual);
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC);
	/* Both A and D already set, so read-only is OK. */
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ);
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE);
	ept_access_allowed_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC);
}

static void ept_access_test_paddr_read_execute_ad_enabled(void)
{
	/*
	 * When EPT A/D bits are enabled, all accesses to guest paging
	 * structures are considered writes as far as EPT translation
	 * is concerned.
	 */
	u64 qual = EPT_VLT_WR | EPT_VLT_RD | EPT_VLT_PERM_RD | EPT_VLT_PERM_EX;

	ept_access_test_setup();
	ept_enable_ad_bits_or_skip_test();

	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, 0, OP_EXEC, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_READ,
				   qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_WRITE,
				   qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_ACCESSED_MASK, OP_EXEC,
				   qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_READ, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_WRITE, qual);
	ept_access_violation_paddr(EPT_RA | EPT_EA, PT_AD_MASK, OP_EXEC, qual);
}

static void ept_access_test_paddr_not_present_page_fault(void)
{
	ept_access_test_setup();
	/*
	 * TODO: test that no EPT violation occurs as long as a guest #PF does.
	 * E.g., the GPA's page is read-only in EPT, but the GVA is also mapped
	 * read-only in the guest page tables, so the guest faults before the
	 * host would take an EPT violation for trying to update the A bit.
	 */
}

static void ept_access_test_force_2m_page(void)
{
	ept_access_test_setup();

	TEST_ASSERT_EQ(ept_2m_supported(), true);
	ept_allowed_at_level_mkhuge(true, 2, 0, 0, OP_READ);
	ept_violation_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_RA, OP_WRITE,
				      EPT_VLT_WR | EPT_VLT_PERM_RD |
				      EPT_VLT_LADDR_VLD | EPT_VLT_PADDR);
	ept_misconfig_at_level_mkhuge(true, 2, EPT_PRESENT, EPT_WA);
}
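/*
 * The TEST() macro below registers a "v2"-style test: only the name and the
 * .v2 member are filled in, and the harness runs that function directly
 * instead of the init/guest_main/exit_handler triple used by the legacy
 * entries. For example, TEST(v2_null_test) expands to
 *
 *	{ "v2_null_test", .v2 = v2_null_test }
 */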
#define TEST(name) { #name, .v2 = name }

/* name/init/guest_main/exit_handler/syscall_handler/guest_regs */
struct vmx_test vmx_tests[] = {
	{ "null", NULL, basic_guest_main, basic_exit_handler, NULL, {0} },
	{ "vmenter", NULL, vmenter_main, vmenter_exit_handler, NULL, {0} },
	{ "preemption timer", preemption_timer_init, preemption_timer_main,
		preemption_timer_exit_handler, NULL, {0} },
	{ "control field PAT", test_ctrl_pat_init, test_ctrl_pat_main,
		test_ctrl_pat_exit_handler, NULL, {0} },
	{ "control field EFER", test_ctrl_efer_init, test_ctrl_efer_main,
		test_ctrl_efer_exit_handler, NULL, {0} },
	{ "CR shadowing", NULL, cr_shadowing_main,
		cr_shadowing_exit_handler, NULL, {0} },
	{ "I/O bitmap", iobmp_init, iobmp_main, iobmp_exit_handler,
		NULL, {0} },
	{ "instruction intercept", insn_intercept_init, insn_intercept_main,
		insn_intercept_exit_handler, NULL, {0} },
	{ "EPT A/D disabled", ept_init, ept_main, ept_exit_handler, NULL, {0} },
	{ "EPT A/D enabled", eptad_init, eptad_main, eptad_exit_handler, NULL, {0} },
	{ "PML", pml_init, pml_main, pml_exit_handler, NULL, {0} },
	{ "VPID", vpid_init, vpid_main, vpid_exit_handler, NULL, {0} },
	{ "interrupt", interrupt_init, interrupt_main,
		interrupt_exit_handler, NULL, {0} },
	{ "debug controls", dbgctls_init, dbgctls_main, dbgctls_exit_handler,
		NULL, {0} },
	{ "MSR switch", msr_switch_init, msr_switch_main,
		msr_switch_exit_handler, NULL, {0}, msr_switch_entry_failure },
	{ "vmmcall", vmmcall_init, vmmcall_main, vmmcall_exit_handler, NULL, {0} },
	{ "disable RDTSCP", disable_rdtscp_init, disable_rdtscp_main,
		disable_rdtscp_exit_handler, NULL, {0} },
	{ "int3", int3_init, int3_guest_main, int3_exit_handler, NULL, {0} },
	{ "into", into_init, into_guest_main, into_exit_handler, NULL, {0} },
	{ "exit_monitor_from_l2_test", NULL, exit_monitor_from_l2_main,
		exit_monitor_from_l2_handler, NULL, {0} },
	/* Basic V2 tests. */
	TEST(v2_null_test),
	TEST(v2_multiple_entries_test),
	TEST(fixture_test_case1),
	TEST(fixture_test_case2),
	/* EPT access tests. */
	TEST(ept_access_test_not_present),
	TEST(ept_access_test_read_only),
	TEST(ept_access_test_write_only),
	TEST(ept_access_test_read_write),
	TEST(ept_access_test_execute_only),
	TEST(ept_access_test_read_execute),
	TEST(ept_access_test_write_execute),
	TEST(ept_access_test_read_write_execute),
	TEST(ept_access_test_reserved_bits),
	TEST(ept_access_test_ignored_bits),
	TEST(ept_access_test_paddr_not_present_ad_disabled),
	TEST(ept_access_test_paddr_not_present_ad_enabled),
	TEST(ept_access_test_paddr_read_only_ad_disabled),
	TEST(ept_access_test_paddr_read_only_ad_enabled),
	TEST(ept_access_test_paddr_read_write),
	TEST(ept_access_test_paddr_read_write_execute),
	TEST(ept_access_test_paddr_read_execute_ad_disabled),
	TEST(ept_access_test_paddr_read_execute_ad_enabled),
	TEST(ept_access_test_paddr_not_present_page_fault),
	TEST(ept_access_test_force_2m_page),
	{ NULL, NULL, NULL, NULL, NULL, {0} },
};