/* Copyright 2008 IBM Corporation
 *           2008 Red Hat, Inc.
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
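
/*
 * x86 Hypervisor.framework (HVF) accelerator: per-vCPU VMCS setup,
 * register synchronisation with the hypervisor, and the VM-exit
 * dispatch loop.
 */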

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "migration/blocker.h"

#include "system/hvf.h"
#include "system/hvf_int.h"
#include "system/runstate.h"
#include "system/cpus.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_flags.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <sys/sysctl.h>

#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"

static Error *invtsc_mig_blocker;

void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: need to integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->accel->fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}

static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->accel->fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

#define VECTORING_INFO_VECTOR_MASK 0xff

void hvf_handle_io(CPUState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
    uint8_t *ptr = buffer;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size,
                         direction);
        ptr += size;
    }
}
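
/*
 * Decide whether an EPT violation has to be handled by the instruction
 * emulator (MMIO, or a write to a ROM-like region).  Faults that only
 * need the mapping refreshed, e.g. a write to a dirty-logged RAM page,
 * return false so the access is simply retried.
 */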
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    int read, write;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    if ((read | write) == 0) {
        return false;
    }

    if (write && slot) {
        if (slot->flags & HVF_SLOT_LOG) {
            uint64_t dirty_page_start = gpa & ~(TARGET_PAGE_SIZE - 1u);
            memory_region_set_dirty(slot->region, gpa - slot->start, 1);
            hv_vm_protect(dirty_page_start, TARGET_PAGE_SIZE,
                          HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
        }
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    if (!slot) {
        return true;
    }
    if (!memory_region_is_ram(slot->region) &&
        !(read && memory_region_is_romd(slot->region))) {
        return true;
    }
    return false;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    g_free(env->hvf_mmio_buf);
}

static void init_tsc_freq(CPUX86State *env)
{
    size_t length;
    uint64_t tsc_freq;

    if (env->tsc_khz != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("machdep.tsc.frequency", &tsc_freq, &length, NULL, 0)) {
        return;
    }
    env->tsc_khz = tsc_freq / 1000;  /* Hz to KHz */
}

static void init_apic_bus_freq(CPUX86State *env)
{
    size_t length;
    uint64_t bus_freq;

    if (env->apic_bus_freq != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("hw.busfrequency", &bus_freq, &length, NULL, 0)) {
        return;
    }
    env->apic_bus_freq = bus_freq;
}

static inline bool tsc_is_known(CPUX86State *env)
{
    return env->tsc_khz != 0;
}

static inline bool apic_bus_freq_is_known(CPUX86State *env)
{
    return env->apic_bus_freq != 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpu_interrupt(&cpu->accel->fd, 1);
}

int hvf_arch_init(void)
{
    return 0;
}

hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    return hv_vm_create(HV_VM_DEFAULT);
}
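
/*
 * Per-vCPU initialisation: allocate the MMIO scratch buffer, read the VMX
 * capability MSRs, program the pin-based, processor-based and entry controls
 * of the VMCS, and hand the SYSCALL, SYSENTER, FS/GS base and TSC MSRs to
 * the guest as native MSRs.
 */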
int hvf_arch_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    Error *local_err = NULL;
    int r;
    uint64_t reqCap;

    init_emu();
    init_decoder();

    if (hvf_state->hvf_caps == NULL) {
        hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    }
    env->hvf_mmio_buf = g_new(char, 4096);

    if (x86cpu->vmware_cpuid_freq) {
        init_tsc_freq(env);
        init_apic_bus_freq(env);

        if (!tsc_is_known(env) || !apic_bus_freq_is_known(env)) {
            error_report("vmware-cpuid-freq: feature couldn't be enabled");
        }
    }

    if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
        invtsc_mig_blocker == NULL) {
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device (invtsc flag)");
        r = migrate_add_blocker(&invtsc_mig_blocker, &local_err);
        if (r < 0) {
            error_report_err(local_err);
            return r;
        }
    }

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
                               &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
                               &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
                               &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
                               &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->accel->fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
                   VMCS_PIN_BASED_CTLS_EXTINT |
                   VMCS_PIN_BASED_CTLS_NMI |
                   VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
                   VMCS_PRI_PROC_BASED_CTLS_HLT |
                   VMCS_PRI_PROC_BASED_CTLS_MWAIT |
                   VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
                   VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);

    reqCap = VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES;

    /* Is RDTSCP support in CPUID?  If so, enable it in the VMCS. */
    if (hvf_get_supported_cpuid(0x80000001, 0, R_EDX) & CPUID_EXT2_RDTSCP) {
        reqCap |= VMCS_PRI_PROC_BASED2_CTLS_RDTSCP;
    }

    wvmcs(cpu->accel->fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, reqCap));

    wvmcs(cpu->accel->fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->accel->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.xsave_buf_len = 4096;
    x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);

    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);

    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_TSC_AUX, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_TSC, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}
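
/*
 * Record pending event state left over from a VM exit: the event described
 * by the IDT-vectoring info field (interrupt, NMI or exception that was
 * being delivered when the exit occurred) plus the guest's NMI and
 * interrupt-shadow blocking, so it can be re-injected on the next entry.
 */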
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    env->ins_len = 0;
    env->has_error_code = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
            env->exception_injected = 1;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }
        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) &
         VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) &
        (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}

static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                              uint32_t *eax, uint32_t *ebx,
                              uint32_t *ecx, uint32_t *edx)
{
    /*
     * A wrapper that extends cpu_x86_cpuid() with the 0x40000000 and
     * 0x40000010 leaves; leaves 0x40000001-0x4000000F are filled with zeros.
     * This provides vmware-cpuid-freq support to hvf.
     *
     * Note: leaf 0x40000000 does not expose HVF,
     * leaving the hypervisor signature empty.
     */

    if (index < 0x40000000 || index > 0x40000010 ||
        !tsc_is_known(env) || !apic_bus_freq_is_known(env)) {

        cpu_x86_cpuid(env, index, count, eax, ebx, ecx, edx);
        return;
    }

    switch (index) {
    case 0x40000000:
        *eax = 0x40000010;    /* Max available cpuid leaf */
        *ebx = 0;             /* Leave signature empty */
        *ecx = 0;
        *edx = 0;
        break;
    case 0x40000010:
        *eax = env->tsc_khz;
        *ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
        *ecx = 0;
        *edx = 0;
        break;
    default:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

void hvf_load_regs(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int i = 0;

    RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
    RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
    RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
    RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
    RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
    RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
    RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
    RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
    }

    env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
    rflags_to_lflags(env);
    env->eip = rreg(cs->accel->fd, HV_X86_RIP);
}

void hvf_store_regs(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int i = 0;

    wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
    wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
    wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
    wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
    wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
    wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
    wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
    wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
    for (i = 8; i < 16; i++) {
        wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
    }

    lflags_to_rflags(env);
    wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
    macvm_set_rip(cs, env->eip);
}
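
/*
 * Main vCPU run loop: flush dirty register state to the hypervisor, inject
 * pending events, enter the guest with hv_vcpu_run_until() and dispatch on
 * the VM-exit reason, looping until an exit that must be handled by the
 * main loop (e.g. EXCP_HLT or EXCP_INTERRUPT) occurs.
 */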
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->accel->dirty) {
            hvf_put_registers(cpu);
            cpu->accel->dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        bql_unlock();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            bql_lock();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run_until(cpu->accel->fd, HV_DEADLINE_FOREVER);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->accel->fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->accel->fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);

        uint64_t idtvec_info = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->accel->fd, HV_X86_RIP);
        env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS);

        bql_lock();

        update_apic_tpr(cpu);
        current_cpu = cpu;

        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                  (env->eflags & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->accel->fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, 1);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                hvf_load_regs(cpu);
                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                hvf_store_regs(cpu);
                break;
            }
            break;
        }
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /* uint32_t rep = (exit_qual & 0x20) != 0; */

            if (!string && in) {
                uint64_t val = 0;
                hvf_load_regs(cpu);
                hvf_handle_io(env_cpu(env), port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    RAX(env) = (uint64_t)val;
                }
                env->eip += ins_len;
                hvf_store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX);
                hvf_handle_io(env_cpu(env), port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            struct x86_decode decode;

            hvf_load_regs(cpu);
            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            hvf_store_regs(cpu);

            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX);

            if (rax == 1) {
                /* CPUID1.ecx.OSXSAVE needs to know CR4 */
                env->cr[4] = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4);
            }
            hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->accel->fd, HV_X86_RAX, rax);
            wreg(cpu->accel->fd, HV_X86_RBX, rbx);
            wreg(cpu->accel->fd, HV_X86_RCX, rcx);
            wreg(cpu->accel->fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_XSETBV: {
            uint32_t eax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->accel->fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
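        /*
         * Interrupt/NMI-window exits: the window we requested has opened,
         * so drop the window-exiting control and return to the main loop
         * to let the pending event be injected on the next entry.
         */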
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            hvf_load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(env);
            } else {
                simulate_wrmsr(env);
            }
            env->eip += ins_len;
            hvf_store_regs(cpu);
            break;
        }
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            hvf_load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->accel->fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->accel->fd, RRX(env, reg));
                break;
            }
            case 8: {
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            env->eip += ins_len;
            hvf_store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            hvf_load_regs(cpu);
            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            hvf_store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
            x86_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->accel->fd, HV_X86_RAX, 0);
            wreg(cpu->accel->fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            env->exception_nr = EXCP0D_GPF;
            env->exception_injected = 1;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}

int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    return -ENOSYS;
}

int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    return -ENOSYS;
}

int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

void hvf_arch_remove_all_hw_breakpoints(void)
{
}

void hvf_arch_update_guest_debug(CPUState *cpu)
{
}

bool hvf_arch_supports_guest_debug(void)
{
    return false;
}