#ifndef X86_VMX_H
#define X86_VMX_H

#include "libcflat.h"
#include "processor.h"
#include "bitops.h"
#include "asm/page.h"
#include "asm/io.h"

/* Terminates the current test; does not return to the caller. */
void __abort_test(void);

/*
 * Test assertion helpers.  On failure they report the file/line and the
 * stringified condition, dump the stack, and abort the test.  The
 * non-underscore variants additionally log a pass when the check holds.
 */
#define __TEST_ASSERT(cond) \
do { \
	if (!(cond)) { \
		report_fail("%s:%d: Assertion failed: %s", \
			    __FILE__, __LINE__, #cond); \
		dump_stack(); \
		__abort_test(); \
	} \
} while (0)

#define TEST_ASSERT(cond) \
do { \
	__TEST_ASSERT(cond); \
	report_passed(); \
} while (0)

#define TEST_ASSERT_MSG(cond, fmt, args...) \
do { \
	if (!(cond)) { \
		report_fail("%s:%d: Assertion failed: %s\n" fmt, \
			    __FILE__, __LINE__, #cond, ##args); \
		dump_stack(); \
		__abort_test(); \
	} \
	report_passed(); \
} while (0)

/*
 * Compare two scalar expressions.  On mismatch, both sides are printed
 * in hex, binary (via binstr()) and decimal.  'assertion' selects
 * abort-on-failure (TEST_ASSERT_EQ*) vs. report-only (TEST_EXPECT_EQ*).
 * Note: 'fmt' must be a string literal ("" when unused) so that
 * fmt[0] == '\0' can test for an empty trailing message.
 */
#define __TEST_EQ(a, b, a_str, b_str, assertion, fmt, args...) \
do { \
	typeof(a) _a = a; \
	typeof(b) _b = b; \
	if (_a != _b) { \
		char _bin_a[BINSTR_SZ]; \
		char _bin_b[BINSTR_SZ]; \
		binstr(_a, _bin_a); \
		binstr(_b, _bin_b); \
		report_fail("%s:%d: %s failed: (%s) == (%s)\n" \
			    "\tLHS: %#018lx - %s - %lu\n" \
			    "\tRHS: %#018lx - %s - %lu%s" fmt, \
			    __FILE__, __LINE__, \
			    assertion ? "Assertion" : "Expectation", a_str, b_str, \
			    (unsigned long) _a, _bin_a, (unsigned long) _a, \
			    (unsigned long) _b, _bin_b, (unsigned long) _b, \
			    fmt[0] == '\0' ? "" : "\n", ## args); \
		dump_stack(); \
		if (assertion) \
			__abort_test(); \
	} \
	report_passed(); \
} while (0)

#define TEST_ASSERT_EQ(a, b) __TEST_EQ(a, b, #a, #b, 1, "")
#define TEST_ASSERT_EQ_MSG(a, b, fmt, args...) \
	__TEST_EQ(a, b, #a, #b, 1, fmt, ## args)
#define TEST_EXPECT_EQ(a, b) __TEST_EQ(a, b, #a, #b, 0, "")
#define TEST_EXPECT_EQ_MSG(a, b, fmt, args...) \
	__TEST_EQ(a, b, #a, #b, 0, fmt, ## args)

/* First dword of a VMCS region: revision identifier + shadow-VMCS bit. */
struct vmcs_hdr {
	u32 revision_id:31;
	u32 shadow_vmcs:1;
};

/* In-memory VMCS region; layout of 'data' is implementation-specific. */
struct vmcs {
	struct vmcs_hdr hdr;
	u32 abort; /* VMX-abort indicator */
	/* VMCS data */
	char data[0];
};

/* Memory operand for the INVVPID instruction. */
struct invvpid_operand {
	u64 vpid;
	u64 gla;
};

/*
 * Guest GPR save area.  Field offsets are hard-coded in the SAVE_GPR /
 * SAVE_GPR_C asm macros below (note cr2 occupies the rsp slot at +0x20,
 * which those macros deliberately skip).
 */
struct regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 cr2;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u64 rflags;
};

/* Decomposed VM-exit reason (VMCS EXI_REASON field). */
union exit_reason {
	struct {
		u32 basic		: 16;
		u32 reserved16		: 1;
		u32 reserved17		: 1;
		u32 reserved18		: 1;
		u32 reserved19		: 1;
		u32 reserved20		: 1;
		u32 reserved21		: 1;
		u32 reserved22		: 1;
		u32 reserved23		: 1;
		u32 reserved24		: 1;
		u32 reserved25		: 1;
		u32 reserved26		: 1;
		u32 enclave_mode	: 1;
		u32 smi_pending_mtf	: 1;
		u32 smi_from_vmx_root	: 1;
		u32 reserved30		: 1;
		u32 failed_vmentry	: 1;
	};
	u32 full;
};

/* Outcome of a single VMLAUNCH/VMRESUME attempt. */
struct vmentry_result {
	/* Instruction mnemonic (for convenience). */
	const char *instr;
	/* Did the test attempt vmlaunch or vmresume? */
	bool vmlaunch;
	/* Did the instruction VM-Fail? */
	bool vm_fail;
	/* Did the VM-Entry fully enter the guest? */
	bool entered;
	/* VM-Exit reason, valid iff !vm_fail */
	union exit_reason exit_reason;
	/* Contents of [re]flags after failed entry. */
	unsigned long flags;
};

/* Descriptor for one VMX test case; either the v1 callbacks or 'v2' is used. */
struct vmx_test {
	const char *name;
	int (*init)(struct vmcs *vmcs);
	void (*guest_main)(void);
	int (*exit_handler)(union exit_reason exit_reason);
	void (*syscall_handler)(u64 syscall_no);
	struct regs guest_regs;
	int (*entry_failure_handler)(struct vmentry_result *result);
	struct vmcs *vmcs;
	int exits;
	/* Alternative test interface. */
	void (*v2)(void);
};

/* Decomposed IA32_VMX_BASIC capability MSR (per field names; see SDM). */
union vmx_basic {
	u64 val;
	struct {
		u32 revision;
		u32 size:13,
		    reserved1:3,
		    width:1,
		    dual:1,
		    type:4,
		    insouts:1,
		    ctrl:1,
		    reserved2:8;
	};
};

/* VMX control capability MSR: low dword = allowed-0, high = allowed-1. */
union vmx_ctrl_msr {
	u64 val;
	struct {
		u32 set, clr;
	};
};

/* Decomposed IA32_VMX_MISC capability MSR. */
union vmx_misc {
	u64 val;
	struct {
		u32 pt_bit:5,
		    stores_lma:1,
		    act_hlt:1,
		    act_shutdown:1,
		    act_wfsipi:1,
		    :5,
		    vmx_pt:1,
		    smm_smbase:1,
		    cr3_targets:9,
		    msr_list_size:3,
		    smm_mon_ctl:1,
		    vmwrite_any:1,
		    inject_len0:1,
		    :1;
		u32 mseg_revision;
	};
};

/* Decomposed IA32_VMX_EPT_VPID_CAP capability MSR (partial). */
union vmx_ept_vpid {
	u64 val;
	struct {
		u32	:16,
			super:2,
			:2,
			invept:1,
			:11;
		u32	invvpid:1;
	};
};

/* VMCS field encodings, used as the operand to vmcs_read()/vmcs_write(). */
enum Encoding {
	/* 16-Bit Control Fields */
	VPID			= 0x0000ul,
	/* Posted-interrupt notification vector */
	PINV			= 0x0002ul,
	/* EPTP index */
	EPTP_IDX		= 0x0004ul,

	/* 16-Bit Guest State Fields */
	GUEST_SEL_ES		= 0x0800ul,
	GUEST_SEL_CS		= 0x0802ul,
	GUEST_SEL_SS		= 0x0804ul,
	GUEST_SEL_DS		= 0x0806ul,
	GUEST_SEL_FS		= 0x0808ul,
	GUEST_SEL_GS		= 0x080aul,
	GUEST_SEL_LDTR		= 0x080cul,
	GUEST_SEL_TR		= 0x080eul,
	GUEST_INT_STATUS	= 0x0810ul,
	GUEST_PML_INDEX		= 0x0812ul,

	/* 16-Bit Host State Fields */
	HOST_SEL_ES		= 0x0c00ul,
	HOST_SEL_CS		= 0x0c02ul,
	HOST_SEL_SS		= 0x0c04ul,
	HOST_SEL_DS		= 0x0c06ul,
	HOST_SEL_FS		= 0x0c08ul,
	HOST_SEL_GS		= 0x0c0aul,
	HOST_SEL_TR		= 0x0c0cul,

	/* 64-Bit Control Fields */
	IO_BITMAP_A		= 0x2000ul,
	IO_BITMAP_B		= 0x2002ul,
	MSR_BITMAP		= 0x2004ul,
	EXIT_MSR_ST_ADDR	= 0x2006ul,
	EXIT_MSR_LD_ADDR	= 0x2008ul,
	ENTER_MSR_LD_ADDR	= 0x200aul,
	VMCS_EXEC_PTR		= 0x200cul,
	TSC_OFFSET		= 0x2010ul,
	TSC_OFFSET_HI		= 0x2011ul,
	APIC_VIRT_ADDR		= 0x2012ul,
	APIC_ACCS_ADDR		= 0x2014ul,
	POSTED_INTR_DESC_ADDR	= 0x2016ul,
	EPTP			= 0x201aul,
	EPTP_HI			= 0x201bul,
	VMREAD_BITMAP		= 0x2026ul,
	VMREAD_BITMAP_HI	= 0x2027ul,
	VMWRITE_BITMAP		= 0x2028ul,
	VMWRITE_BITMAP_HI	= 0x2029ul,
	EOI_EXIT_BITMAP0	= 0x201cul,
	EOI_EXIT_BITMAP1	= 0x201eul,
	EOI_EXIT_BITMAP2	= 0x2020ul,
	EOI_EXIT_BITMAP3	= 0x2022ul,
	PMLADDR			= 0x200eul,
	PMLADDR_HI		= 0x200ful,


	/* 64-Bit Readonly Data Field */
	INFO_PHYS_ADDR		= 0x2400ul,

	/* 64-Bit Guest State */
	VMCS_LINK_PTR		= 0x2800ul,
	VMCS_LINK_PTR_HI	= 0x2801ul,
	GUEST_DEBUGCTL		= 0x2802ul,
	GUEST_DEBUGCTL_HI	= 0x2803ul,
	GUEST_EFER		= 0x2806ul,
	GUEST_PAT		= 0x2804ul,
	GUEST_PERF_GLOBAL_CTRL	= 0x2808ul,
	GUEST_PDPTE		= 0x280aul,
	GUEST_BNDCFGS		= 0x2812ul,

	/* 64-Bit Host State */
	HOST_PAT		= 0x2c00ul,
	HOST_EFER		= 0x2c02ul,
	HOST_PERF_GLOBAL_CTRL	= 0x2c04ul,

	/* 32-Bit Control Fields */
	PIN_CONTROLS		= 0x4000ul,
	CPU_EXEC_CTRL0		= 0x4002ul,
	EXC_BITMAP		= 0x4004ul,
	PF_ERROR_MASK		= 0x4006ul,
	PF_ERROR_MATCH		= 0x4008ul,
	CR3_TARGET_COUNT	= 0x400aul,
	EXI_CONTROLS		= 0x400cul,
	EXI_MSR_ST_CNT		= 0x400eul,
	EXI_MSR_LD_CNT		= 0x4010ul,
	ENT_CONTROLS		= 0x4012ul,
	ENT_MSR_LD_CNT		= 0x4014ul,
	ENT_INTR_INFO		= 0x4016ul,
	ENT_INTR_ERROR		= 0x4018ul,
	ENT_INST_LEN		= 0x401aul,
	TPR_THRESHOLD		= 0x401cul,
	CPU_EXEC_CTRL1		= 0x401eul,

	/* 32-Bit R/O Data Fields */
	VMX_INST_ERROR		= 0x4400ul,
	EXI_REASON		= 0x4402ul,
	EXI_INTR_INFO		= 0x4404ul,
	EXI_INTR_ERROR		= 0x4406ul,
	IDT_VECT_INFO		= 0x4408ul,
	IDT_VECT_ERROR		= 0x440aul,
	EXI_INST_LEN		= 0x440cul,
	EXI_INST_INFO		= 0x440eul,

	/* 32-Bit Guest State Fields */
	GUEST_LIMIT_ES		= 0x4800ul,
	GUEST_LIMIT_CS		= 0x4802ul,
	GUEST_LIMIT_SS		= 0x4804ul,
	GUEST_LIMIT_DS		= 0x4806ul,
	GUEST_LIMIT_FS		= 0x4808ul,
	GUEST_LIMIT_GS		= 0x480aul,
	GUEST_LIMIT_LDTR	= 0x480cul,
	GUEST_LIMIT_TR		= 0x480eul,
	GUEST_LIMIT_GDTR	= 0x4810ul,
	GUEST_LIMIT_IDTR	= 0x4812ul,
	GUEST_AR_ES		= 0x4814ul,
	GUEST_AR_CS		= 0x4816ul,
	GUEST_AR_SS		= 0x4818ul,
	GUEST_AR_DS		= 0x481aul,
	GUEST_AR_FS		= 0x481cul,
	GUEST_AR_GS		= 0x481eul,
	GUEST_AR_LDTR		= 0x4820ul,
	GUEST_AR_TR		= 0x4822ul,
	GUEST_INTR_STATE	= 0x4824ul,
	GUEST_ACTV_STATE	= 0x4826ul,
	GUEST_SMBASE		= 0x4828ul,
	GUEST_SYSENTER_CS	= 0x482aul,
	PREEMPT_TIMER_VALUE	= 0x482eul,

	/* 32-Bit Host State Fields */
	HOST_SYSENTER_CS	= 0x4c00ul,

	/* Natural-Width Control Fields */
	CR0_MASK		= 0x6000ul,
	CR4_MASK		= 0x6002ul,
	CR0_READ_SHADOW		= 0x6004ul,
	CR4_READ_SHADOW		= 0x6006ul,
	CR3_TARGET_0		= 0x6008ul,
	CR3_TARGET_1		= 0x600aul,
	CR3_TARGET_2		= 0x600cul,
	CR3_TARGET_3		= 0x600eul,

	/* Natural-Width R/O Data Fields */
	EXI_QUALIFICATION	= 0x6400ul,
	IO_RCX			= 0x6402ul,
	IO_RSI			= 0x6404ul,
	IO_RDI			= 0x6406ul,
	IO_RIP			= 0x6408ul,
	GUEST_LINEAR_ADDRESS	= 0x640aul,

	/* Natural-Width Guest State Fields */
	GUEST_CR0		= 0x6800ul,
	GUEST_CR3		= 0x6802ul,
	GUEST_CR4		= 0x6804ul,
	GUEST_BASE_ES		= 0x6806ul,
	GUEST_BASE_CS		= 0x6808ul,
	GUEST_BASE_SS		= 0x680aul,
	GUEST_BASE_DS		= 0x680cul,
	GUEST_BASE_FS		= 0x680eul,
	GUEST_BASE_GS		= 0x6810ul,
	GUEST_BASE_LDTR		= 0x6812ul,
	GUEST_BASE_TR		= 0x6814ul,
	GUEST_BASE_GDTR		= 0x6816ul,
	GUEST_BASE_IDTR		= 0x6818ul,
	GUEST_DR7		= 0x681aul,
	GUEST_RSP		= 0x681cul,
	GUEST_RIP		= 0x681eul,
	GUEST_RFLAGS		= 0x6820ul,
	GUEST_PENDING_DEBUG	= 0x6822ul,
	GUEST_SYSENTER_ESP	= 0x6824ul,
	GUEST_SYSENTER_EIP	= 0x6826ul,

	/* Natural-Width Host State Fields */
	HOST_CR0		= 0x6c00ul,
	HOST_CR3		= 0x6c02ul,
	HOST_CR4		= 0x6c04ul,
	HOST_BASE_FS		= 0x6c06ul,
	HOST_BASE_GS		= 0x6c08ul,
	HOST_BASE_TR		= 0x6c0aul,
	HOST_BASE_GDTR		= 0x6c0cul,
	HOST_BASE_IDTR		= 0x6c0eul,
	HOST_SYSENTER_ESP	= 0x6c10ul,
	HOST_SYSENTER_EIP	= 0x6c12ul,
	HOST_RSP		= 0x6c14ul,
	HOST_RIP		= 0x6c16ul
};

/* Bit 31 of the exit reason: VM-entry failure (see union exit_reason). */
#define VMX_ENTRY_FAILURE	(1ul << 31)
/* Arithmetic flags a VMX instruction may modify. */
#define VMX_ENTRY_FLAGS		(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
				 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/* Basic VM-exit reasons (low 16 bits of EXI_REASON). */
enum Reason {
	VMX_EXC_NMI		= 0,
	VMX_EXTINT		= 1,
	VMX_TRIPLE_FAULT	= 2,
	VMX_INIT		= 3,
	VMX_SIPI		= 4,
	VMX_SMI_IO		= 5,
	VMX_SMI_OTHER		= 6,
	VMX_INTR_WINDOW		= 7,
	VMX_NMI_WINDOW		= 8,
	VMX_TASK_SWITCH		= 9,
	VMX_CPUID		= 10,
	VMX_GETSEC		= 11,
	VMX_HLT			= 12,
	VMX_INVD		= 13,
	VMX_INVLPG		= 14,
	VMX_RDPMC		= 15,
	VMX_RDTSC		= 16,
	VMX_RSM			= 17,
	VMX_VMCALL		= 18,
	VMX_VMCLEAR		= 19,
	VMX_VMLAUNCH		= 20,
	VMX_VMPTRLD		= 21,
	VMX_VMPTRST		= 22,
	VMX_VMREAD		= 23,
	VMX_VMRESUME		= 24,
	VMX_VMWRITE		= 25,
	VMX_VMXOFF		= 26,
	VMX_VMXON		= 27,
	VMX_CR			= 28,
	VMX_DR			= 29,
	VMX_IO			= 30,
	VMX_RDMSR		= 31,
	VMX_WRMSR		= 32,
	VMX_FAIL_STATE		= 33,
	VMX_FAIL_MSR		= 34,
	VMX_MWAIT		= 36,
	VMX_MTF			= 37,
	VMX_MONITOR		= 39,
	VMX_PAUSE		= 40,
	VMX_FAIL_MCHECK		= 41,
	VMX_TPR_THRESHOLD	= 43,
	VMX_APIC_ACCESS		= 44,
	VMX_EOI_INDUCED		= 45,
	VMX_GDTR_IDTR		= 46,
	VMX_LDTR_TR		= 47,
	VMX_EPT_VIOLATION	= 48,
	VMX_EPT_MISCONFIG	= 49,
	VMX_INVEPT		= 50,
	VMX_PREEMPT		= 52,
	VMX_INVVPID		= 53,
	VMX_WBINVD		= 54,
	VMX_XSETBV		= 55,
	VMX_APIC_WRITE		= 56,
	VMX_RDRAND		= 57,
	VMX_INVPCID		= 58,
	VMX_VMFUNC		= 59,
	VMX_RDSEED		= 61,
	VMX_PML_FULL		= 62,
	VMX_XSAVES		= 63,
	VMX_XRSTORS		= 64,
};

/* VM-exit control bits (EXI_CONTROLS). */
enum Ctrl_exi {
	EXI_SAVE_DBGCTLS	= 1UL << 2,
	EXI_HOST_64		= 1UL << 9,
	EXI_LOAD_PERF		= 1UL << 12,
	EXI_INTA		= 1UL << 15,
	EXI_SAVE_PAT		= 1UL << 18,
	EXI_LOAD_PAT		= 1UL << 19,
	EXI_SAVE_EFER		= 1UL << 20,
	EXI_LOAD_EFER		= 1UL << 21,
	EXI_SAVE_PREEMPT	= 1UL << 22,
};

/* VM-entry control bits (ENT_CONTROLS). */
enum Ctrl_ent {
	ENT_LOAD_DBGCTLS	= 1UL << 2,
	ENT_GUEST_64		= 1UL << 9,
	ENT_LOAD_PERF		= 1UL << 13,
	ENT_LOAD_PAT		= 1UL << 14,
	ENT_LOAD_EFER		= 1UL << 15,
	ENT_LOAD_BNDCFGS	= 1UL << 16
};
/* Pin-based VM-execution control bits (PIN_CONTROLS). */
enum Ctrl_pin {
	PIN_EXTINT	= 1ul << 0,
	PIN_NMI		= 1ul << 3,
	PIN_VIRT_NMI	= 1ul << 5,
	PIN_PREEMPT	= 1ul << 6,
	PIN_POST_INTR	= 1ul << 7,
};

/* Primary processor-based VM-execution control bits (CPU_EXEC_CTRL0). */
enum Ctrl0 {
	CPU_INTR_WINDOW		= 1ul << 2,
	CPU_USE_TSC_OFFSET	= 1ul << 3,
	CPU_HLT			= 1ul << 7,
	CPU_INVLPG		= 1ul << 9,
	CPU_MWAIT		= 1ul << 10,
	CPU_RDPMC		= 1ul << 11,
	CPU_RDTSC		= 1ul << 12,
	CPU_CR3_LOAD		= 1ul << 15,
	CPU_CR3_STORE		= 1ul << 16,
	CPU_CR8_LOAD		= 1ul << 19,
	CPU_CR8_STORE		= 1ul << 20,
	CPU_TPR_SHADOW		= 1ul << 21,
	CPU_NMI_WINDOW		= 1ul << 22,
	CPU_IO			= 1ul << 24,
	CPU_IO_BITMAP		= 1ul << 25,
	CPU_MTF			= 1ul << 27,
	CPU_MSR_BITMAP		= 1ul << 28,
	CPU_MONITOR		= 1ul << 29,
	CPU_PAUSE		= 1ul << 30,
	CPU_SECONDARY		= 1ul << 31,
};

/* Secondary processor-based VM-execution control bits (CPU_EXEC_CTRL1). */
enum Ctrl1 {
	CPU_VIRT_APIC_ACCESSES	= 1ul << 0,
	CPU_EPT			= 1ul << 1,
	CPU_DESC_TABLE		= 1ul << 2,
	CPU_RDTSCP		= 1ul << 3,
	CPU_VIRT_X2APIC		= 1ul << 4,
	CPU_VPID		= 1ul << 5,
	CPU_WBINVD		= 1ul << 6,
	CPU_URG			= 1ul << 7,
	CPU_APIC_REG_VIRT	= 1ul << 8,
	CPU_VINTD		= 1ul << 9,
	CPU_RDRAND		= 1ul << 11,
	CPU_SHADOW_VMCS		= 1ul << 14,
	CPU_RDSEED		= 1ul << 16,
	CPU_PML			= 1ul << 17,
	CPU_USE_TSC_SCALING	= 1ul << 25,
};

/* Event injection/interruption types (bits 10:8 of the interruption info). */
enum Intr_type {
	VMX_INTR_TYPE_EXT_INTR		= 0,
	VMX_INTR_TYPE_NMI_INTR		= 2,
	VMX_INTR_TYPE_HARD_EXCEPTION	= 3,
	VMX_INTR_TYPE_SOFT_INTR		= 4,
	VMX_INTR_TYPE_SOFT_EXCEPTION	= 6,
};

/*
 * Interruption-information format
 */
#define INTR_INFO_VECTOR_MASK		0xff		/* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK	0x700		/* 10:8 */
#define INTR_INFO_DELIVER_CODE_MASK	0x800		/* 11 */
#define INTR_INFO_UNBLOCK_NMI_MASK	0x1000		/* 12 */
#define INTR_INFO_VALID_MASK		0x80000000	/* 31 */

#define INTR_INFO_INTR_TYPE_SHIFT	8

/* Interruption types pre-shifted into bits 10:8. */
#define INTR_TYPE_EXT_INTR		(0 << 8)	/* external interrupt */
#define INTR_TYPE_RESERVED		(1 << 8)	/* reserved */
#define INTR_TYPE_NMI_INTR		(2 << 8)	/* NMI */
#define INTR_TYPE_HARD_EXCEPTION	(3 << 8)	/* processor exception */
#define INTR_TYPE_SOFT_INTR		(4 << 8)	/* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8)	/* priv. software exception */
#define INTR_TYPE_SOFT_EXCEPTION	(6 << 8)	/* software exception */
#define INTR_TYPE_OTHER_EVENT		(7 << 8)	/* other event */

/*
 * Guest interruptibility state
 */
#define GUEST_INTR_STATE_STI		(1 << 0)
#define GUEST_INTR_STATE_MOVSS		(1 << 1)
#define GUEST_INTR_STATE_SMI		(1 << 2)
#define GUEST_INTR_STATE_NMI		(1 << 3)
#define GUEST_INTR_STATE_ENCLAVE	(1 << 4)

/*
 * VM-instruction error numbers
 */
enum vm_instruction_error_number {
	VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
	VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
	VMXERR_VMCLEAR_VMXON_POINTER = 3,
	VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
	VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
	VMXERR_VMRESUME_AFTER_VMXOFF = 6,
	VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
	VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
	VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
	VMXERR_VMPTRLD_VMXON_POINTER = 10,
	VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
	VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
	VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
	VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
	VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
	VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
	VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
	VMXERR_VMCALL_NONCLEAR_VMCS = 19,
	VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
	VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
	VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
	VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
	VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
	VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};

/* Failure codes reported for a failed VM-entry. */
enum vm_entry_failure_code {
	ENTRY_FAIL_DEFAULT = 0,
	ENTRY_FAIL_PDPTE = 2,
	ENTRY_FAIL_NMI = 3,
	ENTRY_FAIL_VMCS_LINK_PTR = 4,
};

/*
 * Swap the CPU's GPRs with the global 'regs' save area.  Offsets mirror
 * struct regs; the cr2 slot at +0x20 is deliberately skipped.  Because
 * xchg both saves and restores, the same sequence serves as LOAD_GPR.
 */
#define SAVE_GPR				\
	"xchg %rax, regs\n\t"			\
	"xchg %rcx, regs+0x8\n\t"		\
	"xchg %rdx, regs+0x10\n\t"		\
	"xchg %rbx, regs+0x18\n\t"		\
	"xchg %rbp, regs+0x28\n\t"		\
	"xchg %rsi, regs+0x30\n\t"		\
	"xchg %rdi, regs+0x38\n\t"		\
	"xchg %r8, regs+0x40\n\t"		\
	"xchg %r9, regs+0x48\n\t"		\
	"xchg %r10, regs+0x50\n\t"		\
	"xchg %r11, regs+0x58\n\t"		\
	"xchg %r12, regs+0x60\n\t"		\
	"xchg %r13, regs+0x68\n\t"		\
	"xchg %r14, regs+0x70\n\t"		\
	"xchg %r15, regs+0x78\n\t"

#define LOAD_GPR SAVE_GPR

/* Same swap for use inside extended-asm templates (escaped %%). */
#define SAVE_GPR_C				\
	"xchg %%rax, regs\n\t"			\
	"xchg %%rcx, regs+0x8\n\t"		\
	"xchg %%rdx, regs+0x10\n\t"		\
	"xchg %%rbx, regs+0x18\n\t"		\
	"xchg %%rbp, regs+0x28\n\t"		\
	"xchg %%rsi, regs+0x30\n\t"		\
	"xchg %%rdi, regs+0x38\n\t"		\
	"xchg %%r8, regs+0x40\n\t"		\
	"xchg %%r9, regs+0x48\n\t"		\
	"xchg %%r10, regs+0x50\n\t"		\
	"xchg %%r11, regs+0x58\n\t"		\
	"xchg %%r12, regs+0x60\n\t"		\
	"xchg %%r13, regs+0x68\n\t"		\
	"xchg %%r14, regs+0x70\n\t"		\
	"xchg %%r15, regs+0x78\n\t"

#define LOAD_GPR_C SAVE_GPR_C

/* I/O-exit qualification decoding. */
#define VMX_IO_SIZE_MASK	0x7
#define _VMX_IO_BYTE		0
#define _VMX_IO_WORD		1
#define _VMX_IO_LONG		3
#define VMX_IO_DIRECTION_MASK	(1ul << 3)
#define VMX_IO_IN		(1ul << 3)
#define VMX_IO_OUT		0
#define VMX_IO_STRING		(1ul << 4)
#define VMX_IO_REP		(1ul << 5)
#define VMX_IO_OPRAND_IMM	(1ul << 6)
#define VMX_IO_PORT_MASK	0xFFFF0000
#define VMX_IO_PORT_SHIFT	16

/* v1 test-framework stage codes. */
#define VMX_TEST_START		0
#define VMX_TEST_VMEXIT		1
#define VMX_TEST_EXIT		2
#define VMX_TEST_RESUME		3
#define VMX_TEST_VMABORT	4
#define VMX_TEST_VMSKIP		5

/* Guest->host hypercall encoding (passed via hypercall()). */
#define HYPERCALL_BIT		(1ul << 12)
#define HYPERCALL_MASK		0xFFF
#define HYPERCALL_VMEXIT	0x1
#define HYPERCALL_VMABORT	0x2
#define HYPERCALL_VMSKIP	0x3

/* EPTP field layout. */
#define EPTP_PG_WALK_LEN_SHIFT	3ul
#define EPTP_PG_WALK_LEN_MASK	0x38ul
#define EPTP_RESERV_BITS_MASK	0x1ful
#define EPTP_RESERV_BITS_SHIFT	0x7ul
#define EPTP_AD_FLAG		(1ul << 6)

/* EPT memory types. */
#define EPT_MEM_TYPE_UC		0ul
#define EPT_MEM_TYPE_WC		1ul
#define EPT_MEM_TYPE_WT		4ul
#define EPT_MEM_TYPE_WP		5ul
#define EPT_MEM_TYPE_WB		6ul

/* EPT PTE bits. */
#define EPT_RA			1ul
#define EPT_WA			2ul
#define EPT_EA			4ul
#define EPT_PRESENT		(EPT_RA | EPT_WA | EPT_EA)
#define EPT_ACCESS_FLAG		(1ul << 8)
#define EPT_DIRTY_FLAG		(1ul << 9)
#define EPT_LARGE_PAGE		(1ul << 7)
#define EPT_MEM_TYPE_SHIFT	3ul
#define EPT_MEM_TYPE_MASK	0x7ul
#define EPT_IGNORE_PAT		(1ul << 6)
#define EPT_SUPPRESS_VE		(1ull << 63)

/* IA32_VMX_EPT_VPID_CAP capability bits (see union vmx_ept_vpid / ept_vpid). */
#define EPT_CAP_EXEC_ONLY	(1ull << 0)
#define EPT_CAP_PWL4		(1ull << 6)
#define EPT_CAP_PWL5		(1ull << 7)
#define EPT_CAP_UC		(1ull << 8)
#define EPT_CAP_WB		(1ull << 14)
#define EPT_CAP_2M_PAGE		(1ull << 16)
#define EPT_CAP_1G_PAGE		(1ull << 17)
#define EPT_CAP_INVEPT		(1ull << 20)
#define EPT_CAP_AD_FLAG		(1ull << 21)
#define EPT_CAP_ADV_EPT_INFO	(1ull << 22)
#define EPT_CAP_INVEPT_SINGLE	(1ull << 25)
#define EPT_CAP_INVEPT_ALL	(1ull << 26)
#define VPID_CAP_INVVPID	(1ull << 32)
#define VPID_CAP_INVVPID_ADDR	(1ull << 40)
#define VPID_CAP_INVVPID_CXTGLB	(1ull << 41)
#define VPID_CAP_INVVPID_ALL	(1ull << 42)
#define VPID_CAP_INVVPID_CXTLOC	(1ull << 43)

/* EPT paging-structure geometry (4-level, 9 bits per level). */
#define PAGE_SIZE_2M		(512 * PAGE_SIZE)
#define PAGE_SIZE_1G		(512 * PAGE_SIZE_2M)
#define EPT_PAGE_LEVEL		4
#define EPT_PGDIR_WIDTH		9
#define EPT_PGDIR_MASK		511
#define EPT_PGDIR_ENTRIES	(1 << EPT_PGDIR_WIDTH)
#define EPT_LEVEL_SHIFT(level)	(((level)-1) * EPT_PGDIR_WIDTH + 12)
#define EPT_ADDR_MASK		GENMASK_ULL(51, 12)
#define PAGE_MASK_2M		(~(PAGE_SIZE_2M-1))

/* EPT-violation exit-qualification bits. */
#define EPT_VLT_RD		(1ull << 0)
#define EPT_VLT_WR		(1ull << 1)
#define EPT_VLT_FETCH		(1ull << 2)
#define EPT_VLT_PERM_RD		(1ull << 3)
#define EPT_VLT_PERM_WR		(1ull << 4)
#define EPT_VLT_PERM_EX		(1ull << 5)
#define EPT_VLT_PERM_USER_EX	(1ull << 6)
#define EPT_VLT_PERMS		(EPT_VLT_PERM_RD | EPT_VLT_PERM_WR | \
				 EPT_VLT_PERM_EX)
#define EPT_VLT_LADDR_VLD	(1ull << 7)
#define EPT_VLT_PADDR		(1ull << 8)
#define EPT_VLT_GUEST_USER	(1ull << 9)
#define EPT_VLT_GUEST_RW	(1ull << 10)
#define EPT_VLT_GUEST_EX	(1ull << 11)
#define EPT_VLT_GUEST_MASK	(EPT_VLT_GUEST_USER | EPT_VLT_GUEST_RW | \
				 EPT_VLT_GUEST_EX)

/* Arbitrary marker values used by tests. */
#define MAGIC_VAL_1		0x12345678ul
#define MAGIC_VAL_2		0x87654321ul
#define MAGIC_VAL_3		0xfffffffful
#define MAGIC_VAL_4		0xdeadbeeful

/* INVEPT types. */
#define INVEPT_SINGLE		1
#define INVEPT_GLOBAL		2

/* INVVPID types. */
#define INVVPID_ADDR		0
#define INVVPID_CONTEXT_GLOBAL	1
#define INVVPID_ALL		2
#define INVVPID_CONTEXT_LOCAL	3

/* Guest activity states (GUEST_ACTV_STATE). */
#define ACTV_ACTIVE		0
#define ACTV_HLT		1
#define ACTV_SHUTDOWN		2
#define ACTV_WAIT_SIPI		3

/*
 * VMCS field encoding:
 * Bit 0: High-access
 * Bits 1-9: Index
 * Bits 10-12: Type
 * Bits 13-14: Width
 * Bits 15-63: Reserved
 */
#define VMCS_FIELD_HIGH_SHIFT		(0)
#define VMCS_FIELD_INDEX_SHIFT		(1)
#define VMCS_FIELD_INDEX_MASK		GENMASK(9, 1)
#define VMCS_FIELD_TYPE_SHIFT		(10)
#define VMCS_FIELD_WIDTH_SHIFT		(13)
#define VMCS_FIELD_RESERVED_SHIFT	(15)
#define VMCS_FIELD_BIT_SIZE		(BITS_PER_LONG)

/* Global guest GPR save area used by the SAVE_GPR* macros. */
extern struct regs regs;

/* Cached VMX capability MSR values, populated by init_vmx(). */
extern union vmx_basic basic;
extern union vmx_ctrl_msr ctrl_pin_rev;
extern union vmx_ctrl_msr ctrl_cpu_rev[2];
extern union vmx_ctrl_msr ctrl_exit_rev;
extern union vmx_ctrl_msr ctrl_enter_rev;
extern union vmx_ept_vpid ept_vpid;

/* EPT/VPID capability predicates, all keyed off the cached ept_vpid MSR. */
static inline bool ept_2m_supported(void)
{
	return ept_vpid.val & EPT_CAP_2M_PAGE;
}

static inline bool ept_1g_supported(void)
{
	return ept_vpid.val & EPT_CAP_1G_PAGE;
}

/* level 2 = 2M pages, level 3 = 1G pages; other levels have no huge pages. */
static inline bool ept_huge_pages_supported(int level)
{
	if (level == 2)
		return ept_2m_supported();
	else if (level == 3)
		return ept_1g_supported();
	else
		return false;
}

static inline bool ept_execute_only_supported(void)
{
	return ept_vpid.val & EPT_CAP_EXEC_ONLY;
}

static inline bool ept_ad_bits_supported(void)
{
	return ept_vpid.val & EPT_CAP_AD_FLAG;
}

static inline bool is_4_level_ept_supported(void)
{
	return ept_vpid.val & EPT_CAP_PWL4;
}

static inline bool is_5_level_ept_supported(void)
{
	return ept_vpid.val & EPT_CAP_PWL5;
}

/* Only UC and WB have capability bits; all other memtypes report false. */
static inline bool is_ept_memtype_supported(int type)
{
	if (type == EPT_MEM_TYPE_UC)
		return ept_vpid.val & EPT_CAP_UC;

	if (type == EPT_MEM_TYPE_WB)
		return ept_vpid.val & EPT_CAP_WB;

	return false;
}

/* INVEPT type caps occupy consecutive bits starting at EPT_CAP_INVEPT_SINGLE. */
static inline bool is_invept_type_supported(u64 type)
{
	if (type < INVEPT_SINGLE || type > INVEPT_GLOBAL)
		return false;

	return ept_vpid.val & (EPT_CAP_INVEPT_SINGLE << (type - INVEPT_SINGLE));
}

/* VPID requires the secondary exec controls and the VPID control bit. */
static inline bool is_vpid_supported(void)
{
	return (ctrl_cpu_rev[0].clr & CPU_SECONDARY) &&
	       (ctrl_cpu_rev[1].clr & CPU_VPID);
}

static inline bool is_invvpid_supported(void)
{
	return ept_vpid.val & VPID_CAP_INVVPID;
}

/* INVVPID type caps occupy consecutive bits starting at VPID_CAP_INVVPID_ADDR. */
static inline bool is_invvpid_type_supported(unsigned long type)
{
	if (type < INVVPID_ADDR || type > INVVPID_CONTEXT_LOCAL)
		return false;

	return ept_vpid.val & (VPID_CAP_INVVPID_ADDR << (type - INVVPID_ADDR));
}

/* BSP's VMXON region and whether the current VMCS has been launched. */
extern u64 *bsp_vmxon_region;
extern bool launched;

/* Shared test-stage counter (guest/host coordination). */
void vmx_set_test_stage(u32 s);
u32 vmx_get_test_stage(void);
void vmx_inc_test_stage(void);

/* -1 on VM-Fail, 0 on success, >1 on fault */
static int __vmxon_safe(u64 *vmxon_region)
{
	bool vmfail;
	/*
	 * Seed CF|ZF before the instruction: a VMX instruction that
	 * succeeds clears them, so stale flags read as failure.
	 */
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %1\n\t"
		      "popf\n\t"
		      ASM_TRY("1f") "vmxon %2\n\t"
		      "setbe %0\n\t"
		      "jmp 2f\n\t"
		      "1: movb $0, %0\n\t"
		      "2:\n\t"
		      : "=q" (vmfail) : "q" (rflags), "m" (vmxon_region) : "cc");

	if (vmfail)
		return -1;

	return exception_vector();
}

static int vmxon_safe(void)
{
	return __vmxon_safe(bsp_vmxon_region);
}

static int vmx_on(void)
{
	return vmxon_safe();
}

/* Leave VMX operation; returns non-zero if VMXOFF set CF or ZF. */
static int vmx_off(void)
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile("push %1; popf; vmxoff; setbe %0\n\t"
		     : "=q"(ret) : "q" (rflags) : "cc");
	return ret;
}

/*
 * Load 'vmcs' as the current VMCS.  The "m"(vmcs) operand is the memory
 * holding the pointer value, i.e. VMPTRLD reads the VMCS address from it
 * (NOTE(review): assumes identity-mapped memory, virt == phys).
 */
static inline int make_vmcs_current(struct vmcs *vmcs)
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %1; popf; vmptrld %2; setbe %0"
		      : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
	return ret;
}

/* VMCLEAR 'vmcs'; same operand convention as make_vmcs_current(). */
static inline int vmcs_clear(struct vmcs *vmcs)
{
	bool ret;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %1; popf; vmclear %2; setbe %0"
		      : "=q" (ret) : "q" (rflags), "m" (vmcs) : "cc");
	return ret;
}

/* Read a field from the current VMCS; does not detect VM-Fail. */
static inline u64 vmcs_read(enum Encoding enc)
{
	u64 val;
	asm volatile ("vmread %1, %0" : "=rm" (val) : "r" ((u64)enc) : "cc");
	return val;
}

/*
 * VMREAD with a guaranteed memory operand, used to test KVM's MMU by forcing
 * KVM to translate GVA->GPA.
 */
static inline u64 vmcs_readm(enum Encoding enc)
{
	u64 val;

	asm volatile ("vmread %1, %0" : "=m" (val) : "r" ((u64)enc) : "cc");
	return val;
}

/*
 * VMREAD that reports failure: flags are shuttled through AH via
 * SAHF/LAHF so CF/ZF from the VMREAD itself can be returned.
 * Returns 0 on success, non-zero (CF or ZF) on VM-Fail.
 */
static inline int vmcs_read_safe(enum Encoding enc, u64 *value)
{
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;
	u64 encoding = enc;
	u64 val;

	asm volatile ("shl $8, %%rax;"
		      "sahf;"
		      "vmread %[encoding], %[val];"
		      "lahf;"
		      "shr $8, %%rax"
		      : /* output */ [val]"=rm"(val), "+a"(rflags)
		      : /* input */ [encoding]"r"(encoding)
		      : /* clobber */ "cc");

	*value = val;
	return rflags & (X86_EFLAGS_CF | X86_EFLAGS_ZF);
}

/* Write a field of the current VMCS; non-zero return on VM-Fail. */
static inline int vmcs_write(enum Encoding enc, u64 val)
{
	bool ret;
	asm volatile ("vmwrite %1, %2; setbe %0"
		      : "=q"(ret) : "rm" (val), "r" ((u64)enc) : "cc");
	return ret;
}

static inline int vmcs_set_bits(enum Encoding enc, u64 val)
{
	return vmcs_write(enc, vmcs_read(enc) | val);
}

static inline int vmcs_clear_bits(enum Encoding enc, u64 val)
{
	return vmcs_write(enc, vmcs_read(enc) & ~val);
}

/* VMPTRST the current-VMCS pointer; NULL if none is loaded (-1 pointer). */
static inline int vmcs_save(struct vmcs **vmcs)
{
	bool ret;
	unsigned long pa;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	asm volatile ("push %2; popf; vmptrst %1; setbe %0"
		      : "=q" (ret), "=m" (pa) : "r" (rflags) : "cc");
	*vmcs = (pa == -1ull) ? NULL : phys_to_virt(pa);
	return ret;
}

/* INVEPT; returns 0 on success, -1 on failure. */
static inline int __invept(unsigned long type, u64 eptp)
{
	bool failed = false;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	struct {
		u64 eptp, gpa;
	} operand = {eptp, 0};
	asm volatile("push %1; popf; invept %2, %3; setbe %0"
		     : "=q" (failed) : "r" (rflags), "m"(operand),"r"(type) : "cc");
	return failed ? -1: 0;
}

/* INVEPT that aborts the test on failure. */
static inline void invept(unsigned long type, u64 eptp)
{
	__TEST_ASSERT(!__invept(type, eptp));
}

/* INVVPID; returns 0 on success, -1 on failure. */
static inline int __invvpid(unsigned long type, u64 vpid, u64 gla)
{
	bool failed = false;
	u64 rflags = read_rflags() | X86_EFLAGS_CF | X86_EFLAGS_ZF;

	struct invvpid_operand operand = {vpid, gla};
	asm volatile("push %1; popf; invvpid %2, %3; setbe %0"
		     : "=q" (failed) : "r" (rflags), "m"(operand),"r"(type) : "cc");
	return failed ? -1: 0;
}

/* INVVPID that aborts the test on failure. */
static inline void invvpid(unsigned long type, u64 vpid, u64 gla)
{
	__TEST_ASSERT(!__invvpid(type, vpid, gla));
}

void enable_vmx(void);
void init_vmx(u64 *vmxon_region);
int init_vmcs(struct vmcs **vmcs);

const char *exit_reason_description(u64 reason);
void print_vmexit_info(union exit_reason exit_reason);
void print_vmentry_failure_info(struct vmentry_result *result);

/* EPT paging-structure construction/inspection helpers. */
void install_ept_entry(unsigned long *pml4, int pte_level,
		       unsigned long guest_addr, unsigned long pte,
		       unsigned long *pt_page);
void install_1g_ept(unsigned long *pml4, unsigned long phys,
		    unsigned long guest_addr, u64 perm);
void install_2m_ept(unsigned long *pml4, unsigned long phys,
		    unsigned long guest_addr, u64 perm);
void install_ept(unsigned long *pml4, unsigned long phys,
		 unsigned long guest_addr, u64 perm);
void setup_ept_range(unsigned long *pml4, unsigned long start,
		     unsigned long len, int map_1g, int map_2m, u64 perm);
bool get_ept_pte(unsigned long *pml4, unsigned long guest_addr, int level,
		 unsigned long *pte);
void set_ept_pte(unsigned long *pml4, unsigned long guest_addr,
		 int level, u64 pte_val);
void check_ept_ad(unsigned long *pml4, u64 guest_cr3,
		  unsigned long guest_addr, int expected_gpa_ad,
		  int expected_pt_ad);
void clear_ept_ad(unsigned long *pml4, u64 guest_cr3,
		  unsigned long guest_addr);

/* __enter_guest() abort_flag bits: abort instead of invoking handlers. */
#define ABORT_ON_EARLY_VMENTRY_FAIL	0x1
#define ABORT_ON_INVALID_GUEST_STATE	0x2

/* Run the guest once and report the outcome in *result. */
void __enter_guest(u8 abort_flag, struct vmentry_result *result);
void enter_guest(void);
void enter_guest_with_bad_controls(void);
/* Issue a guest->host hypercall with the given HYPERCALL_* number. */
void hypercall(u32 hypercall_no);

/* v2 test interface: register guest code and teardown callbacks. */
typedef void (*test_guest_func)(void);
typedef void (*test_teardown_func)(void *data);
void test_set_guest(test_guest_func func);
void test_override_guest(test_guest_func func);
void test_add_teardown(test_teardown_func func, void *data);
void test_skip(const char *msg);
void test_set_guest_finished(void);

#endif