Lines Matching +full:attr +full:- +full:cnt +full:- +full:name
4 * Copyright (C) 2006-2008 Qumranet Technologies
11 * See the COPYING file in the top-level directory.
16 #include "qapi/qapi-events-run-state.h"
28 #include "standard-headers/asm-x86/kvm_para.h"
29 #include "hw/xen/interface/arch-x86/cpuid.h"
32 #include "host-cpu.h"
39 #include "../confidential-guest.h"
42 #include "xen-emu.h"
44 #include "hyperv-proto.h"
47 #include "qemu/host-utils.h"
48 #include "qemu/main-loop.h"
50 #include "qemu/config-file.h"
51 #include "qemu/error-report.h"
58 #include "hw/i386/apic-msidef.h"
61 #include "hw/i386/x86-iommu.h"
87 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
100 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
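The sizing in that comment can be checked with a few lines of standalone C against the uapi kvm_msrs layout; a minimal sketch (the 4096-byte figure is MSR_BUF_SIZE in this file):

    #include <linux/kvm.h>
    #include <stdio.h>

    int main(void)
    {
        /* 8-byte header (nmsrs + pad), then 16-byte kvm_msr_entry structs */
        size_t cap = (4096 - sizeof(struct kvm_msrs)) /
                     sizeof(struct kvm_msr_entry);
        printf("a 4096-byte buffer holds %zu MSR entries\n", cap); /* 255 */
        return 0;
    }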
194 [KVM_X86_SEV_ES_VM] = "SEV-ES",
195 [KVM_X86_SNP_VM] = "SEV-SNP",
211 machine_types = kvm_check_extension(KVM_STATE(current_machine->accelerator), in kvm_is_vm_type_supported()
220 if (ms->cgs) { in kvm_get_vm_type()
221 if (!object_dynamic_cast(OBJECT(ms->cgs), TYPE_X86_CONFIDENTIAL_GUEST)) { in kvm_get_vm_type()
223 object_get_typename(OBJECT(ms->cgs))); in kvm_get_vm_type()
227 X86_CONFIDENTIAL_GUEST(ms->cgs)); in kvm_get_vm_type()
231 error_report("vm-type %s not supported by KVM", vm_type_name[kvm_type]); in kvm_get_vm_type()
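kvm_is_vm_type_supported() above boils down to a capability bitmask probe; a minimal standalone sketch of that check against a raw /dev/kvm fd (illustrative form, assuming a kernel recent enough to define KVM_CAP_VM_TYPES):

    #include <linux/kvm.h>
    #include <stdbool.h>
    #include <sys/ioctl.h>

    static bool vm_type_supported(int kvm_fd, int type)
    {
        /* KVM_CHECK_EXTENSION(KVM_CAP_VM_TYPES) returns a bitmask of the
         * supported KVM_X86_*_VM types, or <= 0 when unsupported */
        int mask = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VM_TYPES);
        return mask > 0 && (mask & (1 << type));
    }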
303 CPUX86State *env = &cpu->env; in kvm_get_tsc()
307 if (env->tsc_valid) { in kvm_get_tsc()
311 env->tsc_valid = !runstate_is_running(); in kvm_get_tsc()
318 env->tsc = value; in kvm_get_tsc()
343 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); in try_get_cpuid()
345 cpuid->nent = max; in try_get_cpuid()
347 if (r == 0 && cpuid->nent >= max) { in try_get_cpuid()
348 r = -E2BIG; in try_get_cpuid()
351 if (r == -E2BIG) { in try_get_cpuid()
356 strerror(-r)); in try_get_cpuid()
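try_get_cpuid() treats both an explicit -E2BIG and a completely full table (nent >= max) as "buffer too small", since old kernels did not always report the error. A standalone sketch of the same grow-and-retry loop (fetch_supported_cpuid and kvm_fd are illustrative names; error handling trimmed):

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static struct kvm_cpuid2 *fetch_supported_cpuid(int kvm_fd)
    {
        int max = 64;
        for (;;) {
            struct kvm_cpuid2 *cpuid =
                calloc(1, sizeof(*cpuid) + max * sizeof(cpuid->entries[0]));
            cpuid->nent = max;
            if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0 &&
                cpuid->nent < max) {
                return cpuid;       /* fits with room to spare */
            }
            free(cpuid);            /* E2BIG or exactly full: double and retry */
            max *= 2;
        }
    }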
402 ret = entry->eax; in cpuid_entry_get_reg()
405 ret = entry->ebx; in cpuid_entry_get_reg()
408 ret = entry->ecx; in cpuid_entry_get_reg()
411 ret = entry->edx; in cpuid_entry_get_reg()
424 for (i = 0; i < cpuid->nent; ++i) { in cpuid_find_entry()
425 if (cpuid->entries[i].function == function && in cpuid_find_entry()
426 cpuid->entries[i].index == index) { in cpuid_find_entry()
427 return &cpuid->entries[i]; in cpuid_find_entry()
461 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it in kvm_arch_get_supported_cpuid()
471 * without the in-kernel irqchip in kvm_arch_get_supported_cpuid()
503 * Linux v4.17-v4.20 incorrectly returned ARCH_CAPABILITIES on SVM hosts. in kvm_arch_get_supported_cpuid()
527 struct kvm_device_attr attr = { in kvm_arch_get_supported_cpuid() local
529 .attr = KVM_X86_XCOMP_GUEST_SUPP, in kvm_arch_get_supported_cpuid()
538 int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr); in kvm_arch_get_supported_cpuid()
540 if (rc != -ENXIO) { in kvm_arch_get_supported_cpuid()
564 * be enabled without the in-kernel irqchip in kvm_arch_get_supported_cpuid()
576 if (current_machine->cgs) { in kvm_arch_get_supported_cpuid()
578 X86_CONFIDENTIAL_GUEST(current_machine->cgs), in kvm_arch_get_supported_cpuid()
599 for (i = 0; i < kvm_feature_msrs->nmsrs; i++) in kvm_arch_get_supported_msr_feature()
600 if (kvm_feature_msrs->indices[i] == index) { in kvm_arch_get_supported_msr_feature()
603 if (i == kvm_feature_msrs->nmsrs) { in kvm_arch_get_supported_msr_feature()
613 index, strerror(-ret)); in kvm_arch_get_supported_msr_feature()
672 CPUX86State *env = &cpu->env; in kvm_mce_inject()
711 * guest kernel back into env->mcg_ext_ctl. in kvm_mce_inject()
714 if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) { in kvm_mce_inject()
741 CPUX86State *env = &cpu->env; in kvm_arch_on_sigbus_vcpu()
752 if ((env->mcg_cap & MCG_SER_P) && addr) { in kvm_arch_on_sigbus_vcpu()
755 kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { in kvm_arch_on_sigbus_vcpu()
797 assert(env->exception_nr == -1); in kvm_queue_exception()
798 assert(!env->exception_pending); in kvm_queue_exception()
799 assert(!env->exception_injected); in kvm_queue_exception()
800 assert(!env->exception_has_payload); in kvm_queue_exception()
802 env->exception_nr = exception_nr; in kvm_queue_exception()
805 env->exception_pending = 1; in kvm_queue_exception()
807 env->exception_has_payload = exception_has_payload; in kvm_queue_exception()
808 env->exception_payload = exception_payload; in kvm_queue_exception()
810 env->exception_injected = 1; in kvm_queue_exception()
814 env->dr[6] = exception_payload; in kvm_queue_exception()
817 env->cr[2] = exception_payload; in kvm_queue_exception()
829 env->tsc_valid = false; in cpu_update_state()
836 return cpu->apic_id; in kvm_arch_vcpu_id()
846 ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) || in hyperv_enabled()
847 cpu->hyperv_features || cpu->hyperv_passthrough); in hyperv_enabled()
857 int min_freq = freq - (freq * 250 / 1000000); in freq_within_bounds()
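Worked through for the line above: with freq = 2,000,000 kHz (a 2 GHz TSC), the slack is 2,000,000 * 250 / 1,000,000 = 500 kHz, so values within [1,999,500; 2,000,500] kHz pass the bounds check, i.e. a ±250 ppm tolerance.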
869 CPUX86State *env = &cpu->env; in kvm_arch_set_tsc_khz()
882 if (!env->tsc_khz) { in kvm_arch_set_tsc_khz()
886 cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? in kvm_arch_set_tsc_khz()
887 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP; in kvm_arch_set_tsc_khz()
892 if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) { in kvm_arch_set_tsc_khz()
900 if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) { in kvm_arch_set_tsc_khz()
905 kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) : in kvm_arch_set_tsc_khz()
906 -ENOTSUP; in kvm_arch_set_tsc_khz()
912 cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? in kvm_arch_set_tsc_khz()
914 -ENOTSUP; in kvm_arch_set_tsc_khz()
915 if (cur_freq <= 0 || cur_freq != env->tsc_khz) { in kvm_arch_set_tsc_khz()
919 env->tsc_khz, cur_freq); in kvm_arch_set_tsc_khz()
929 if (!env->tsc_khz) { in tsc_is_stable_and_known()
932 return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) in tsc_is_stable_and_known()
933 || env->user_tsc_khz; in tsc_is_stable_and_known()
949 .desc = "relaxed timing (hv-relaxed)",
956 .desc = "virtual APIC (hv-vapic)",
963 .desc = "clocksources (hv-time)",
970 .desc = "crash MSRs (hv-crash)",
977 .desc = "reset MSR (hv-reset)",
984 .desc = "VP_INDEX MSR (hv-vpindex)",
991 .desc = "VP_RUNTIME MSR (hv-runtime)",
998 .desc = "synthetic interrupt controller (hv-synic)",
1005 .desc = "synthetic timers (hv-stimer)",
1013 .desc = "frequency MSRs (hv-frequencies)",
1022 .desc = "reenlightenment MSRs (hv-reenlightenment)",
1029 .desc = "paravirtualized TLB flush (hv-tlbflush)",
1038 .desc = "enlightened VMCS (hv-evmcs)",
1046 .desc = "paravirtualized IPI (hv-ipi)",
1055 .desc = "direct mode synthetic timers (hv-stimer-direct)",
1063 .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
1070 .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
1079 .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
1086 .desc = "XMM fast hypercall input (hv-xmm-input)",
1093 .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
1101 .desc = "direct TLB flush (hv-tlbflush-direct)",
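The hv-* names in the descriptions above are the -cpu flags users pass; a hypothetical invocation enabling a commonly recommended enlightenment set (note the dependency checks later in this file: hv-stimer requires hv-synic and hv-time, and hv-synic requires hv-vpindex):

    qemu-system-x86_64 -accel kvm \
        -cpu host,hv-relaxed,hv-vapic,hv-time,hv-vpindex,hv-synic,hv-stimer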
1116 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); in try_get_hv_cpuid()
1118 cpuid->nent = max; in try_get_hv_cpuid()
1125 if (r == 0 && cpuid->nent >= max) { in try_get_hv_cpuid()
1126 r = -E2BIG; in try_get_hv_cpuid()
1129 if (r == -E2BIG) { in try_get_hv_cpuid()
1134 strerror(-r)); in try_get_hv_cpuid()
1157 * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is in get_supported_hv_cpuid()
1160 assert(do_sys_ioctl || cs->kvm_state); in get_supported_hv_cpuid()
1164 * -E2BIG, however, it doesn't report back the right size. Keep increasing in get_supported_hv_cpuid()
1165 * it and re-trying until we succeed. in get_supported_hv_cpuid()
1177 if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state, in get_supported_hv_cpuid()
1179 for (i = 0; i < cpuid->nent; i++) { in get_supported_hv_cpuid()
1180 if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) { in get_supported_hv_cpuid()
1181 cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED; in get_supported_hv_cpuid()
1200 cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries)); in get_supported_hv_cpuid_legacy()
1201 cpuid->nent = 2; in get_supported_hv_cpuid_legacy()
1204 entry_feat = &cpuid->entries[0]; in get_supported_hv_cpuid_legacy()
1205 entry_feat->function = HV_CPUID_FEATURES; in get_supported_hv_cpuid_legacy()
1207 entry_recomm = &cpuid->entries[1]; in get_supported_hv_cpuid_legacy()
1208 entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO; in get_supported_hv_cpuid_legacy()
1209 entry_recomm->ebx = cpu->hyperv_spinlock_attempts; in get_supported_hv_cpuid_legacy()
1211 if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) { in get_supported_hv_cpuid_legacy()
1212 entry_feat->eax |= HV_HYPERCALL_AVAILABLE; in get_supported_hv_cpuid_legacy()
1213 entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE; in get_supported_hv_cpuid_legacy()
1214 entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE; in get_supported_hv_cpuid_legacy()
1215 entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED; in get_supported_hv_cpuid_legacy()
1216 entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED; in get_supported_hv_cpuid_legacy()
1219 if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) { in get_supported_hv_cpuid_legacy()
1220 entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE; in get_supported_hv_cpuid_legacy()
1221 entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE; in get_supported_hv_cpuid_legacy()
1225 entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS; in get_supported_hv_cpuid_legacy()
1226 entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE; in get_supported_hv_cpuid_legacy()
1230 entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE; in get_supported_hv_cpuid_legacy()
1234 entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL; in get_supported_hv_cpuid_legacy()
1238 entry_feat->eax |= HV_RESET_AVAILABLE; in get_supported_hv_cpuid_legacy()
1242 entry_feat->eax |= HV_VP_INDEX_AVAILABLE; in get_supported_hv_cpuid_legacy()
1246 entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE; in get_supported_hv_cpuid_legacy()
1250 unsigned int cap = cpu->hyperv_synic_kvm_only ? in get_supported_hv_cpuid_legacy()
1253 if (kvm_check_extension(cs->kvm_state, cap) > 0) { in get_supported_hv_cpuid_legacy()
1254 entry_feat->eax |= HV_SYNIC_AVAILABLE; in get_supported_hv_cpuid_legacy()
1259 entry_feat->eax |= HV_SYNTIMERS_AVAILABLE; in get_supported_hv_cpuid_legacy()
1263 entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE; in get_supported_hv_cpuid_legacy()
1264 entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; in get_supported_hv_cpuid_legacy()
1265 entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED; in get_supported_hv_cpuid_legacy()
1268 if (kvm_check_extension(cs->kvm_state, in get_supported_hv_cpuid_legacy()
1270 entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED; in get_supported_hv_cpuid_legacy()
1271 entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED; in get_supported_hv_cpuid_legacy()
1274 if (kvm_check_extension(cs->kvm_state, in get_supported_hv_cpuid_legacy()
1276 entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED; in get_supported_hv_cpuid_legacy()
1279 if (kvm_check_extension(cs->kvm_state, in get_supported_hv_cpuid_legacy()
1281 entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED; in get_supported_hv_cpuid_legacy()
1282 entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED; in get_supported_hv_cpuid_legacy()
1300 * 'cs->kvm_state' may be NULL when Hyper-V features are expanded in hv_cpuid_get_host()
1305 assert(cs->kvm_state); in hv_cpuid_get_host()
1364 error_setg(errp, "Hyper-V %s requires Hyper-V %s", in hv_feature_check_deps()
1409 * Expand Hyper-V CPU features. In particular, check that all the requested
1413 * Hyper-V features.
1426 * time per-CPU kvm_state is not available yet so we can only proceed in kvm_hyperv_expand_features()
1429 if (!cs->kvm_state && in kvm_hyperv_expand_features()
1433 if (cpu->hyperv_passthrough) { in kvm_hyperv_expand_features()
1434 cpu->hyperv_vendor_id[0] = in kvm_hyperv_expand_features()
1436 cpu->hyperv_vendor_id[1] = in kvm_hyperv_expand_features()
1438 cpu->hyperv_vendor_id[2] = in kvm_hyperv_expand_features()
1440 cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor, in kvm_hyperv_expand_features()
1441 sizeof(cpu->hyperv_vendor_id) + 1); in kvm_hyperv_expand_features()
1442 memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id, in kvm_hyperv_expand_features()
1443 sizeof(cpu->hyperv_vendor_id)); in kvm_hyperv_expand_features()
1444 cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0; in kvm_hyperv_expand_features()
1446 cpu->hyperv_interface_id[0] = in kvm_hyperv_expand_features()
1448 cpu->hyperv_interface_id[1] = in kvm_hyperv_expand_features()
1450 cpu->hyperv_interface_id[2] = in kvm_hyperv_expand_features()
1452 cpu->hyperv_interface_id[3] = in kvm_hyperv_expand_features()
1455 cpu->hyperv_ver_id_build = in kvm_hyperv_expand_features()
1457 cpu->hyperv_ver_id_major = in kvm_hyperv_expand_features()
1459 cpu->hyperv_ver_id_minor = in kvm_hyperv_expand_features()
1461 cpu->hyperv_ver_id_sp = in kvm_hyperv_expand_features()
1463 cpu->hyperv_ver_id_sb = in kvm_hyperv_expand_features()
1465 cpu->hyperv_ver_id_sn = in kvm_hyperv_expand_features()
1468 cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, in kvm_hyperv_expand_features()
1470 cpu->hyperv_limits[0] = in kvm_hyperv_expand_features()
1472 cpu->hyperv_limits[1] = in kvm_hyperv_expand_features()
1474 cpu->hyperv_limits[2] = in kvm_hyperv_expand_features()
1477 cpu->hyperv_spinlock_attempts = in kvm_hyperv_expand_features()
1481 * Mark feature as enabled in 'cpu->hyperv_features' as in kvm_hyperv_expand_features()
1487 cpu->hyperv_features |= BIT(feat); in kvm_hyperv_expand_features()
1500 error_setg(errp, "Hyper-V %s is not supported by kernel", in kvm_hyperv_expand_features()
1515 !cpu->hyperv_synic_kvm_only && in kvm_hyperv_expand_features()
1517 error_setg(errp, "Hyper-V %s requires Hyper-V %s", in kvm_hyperv_expand_features()
1527 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
1548 c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS; in hyperv_fill_cpuids()
1549 c->eax = max_cpuid_leaf; in hyperv_fill_cpuids()
1550 c->ebx = cpu->hyperv_vendor_id[0]; in hyperv_fill_cpuids()
1551 c->ecx = cpu->hyperv_vendor_id[1]; in hyperv_fill_cpuids()
1552 c->edx = cpu->hyperv_vendor_id[2]; in hyperv_fill_cpuids()
1555 c->function = HV_CPUID_INTERFACE; in hyperv_fill_cpuids()
1556 c->eax = cpu->hyperv_interface_id[0]; in hyperv_fill_cpuids()
1557 c->ebx = cpu->hyperv_interface_id[1]; in hyperv_fill_cpuids()
1558 c->ecx = cpu->hyperv_interface_id[2]; in hyperv_fill_cpuids()
1559 c->edx = cpu->hyperv_interface_id[3]; in hyperv_fill_cpuids()
1562 c->function = HV_CPUID_VERSION; in hyperv_fill_cpuids()
1563 c->eax = cpu->hyperv_ver_id_build; in hyperv_fill_cpuids()
1564 c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 | in hyperv_fill_cpuids()
1565 cpu->hyperv_ver_id_minor; in hyperv_fill_cpuids()
1566 c->ecx = cpu->hyperv_ver_id_sp; in hyperv_fill_cpuids()
1567 c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 | in hyperv_fill_cpuids()
1568 (cpu->hyperv_ver_id_sn & 0xffffff); in hyperv_fill_cpuids()
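A worked example of the HV_CPUID_VERSION packing above, for a hypothetical version 10.0, build 14393, service pack 0, service branch 0, service number 0:

    eax = 14393                              (build)
    ebx = (10 << 16) | 0 = 0x000A0000        (major << 16 | minor)
    ecx = 0                                  (service pack)
    edx = (0 << 24) | (0 & 0xffffff) = 0     (service branch << 24 | service number)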
1571 c->function = HV_CPUID_FEATURES; in hyperv_fill_cpuids()
1572 c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX); in hyperv_fill_cpuids()
1573 c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX); in hyperv_fill_cpuids()
1574 c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX); in hyperv_fill_cpuids()
1576 /* Unconditionally required with any Hyper-V enlightenment */ in hyperv_fill_cpuids()
1577 c->eax |= HV_HYPERCALL_AVAILABLE; in hyperv_fill_cpuids()
1581 !cpu->hyperv_synic_kvm_only) { in hyperv_fill_cpuids()
1582 c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS; in hyperv_fill_cpuids()
1587 c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE; in hyperv_fill_cpuids()
1590 c->function = HV_CPUID_ENLIGHTMENT_INFO; in hyperv_fill_cpuids()
1591 c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX); in hyperv_fill_cpuids()
1592 c->ebx = cpu->hyperv_spinlock_attempts; in hyperv_fill_cpuids()
1596 c->eax |= HV_APIC_ACCESS_RECOMMENDED; in hyperv_fill_cpuids()
1599 if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) { in hyperv_fill_cpuids()
1600 c->eax |= HV_NO_NONARCH_CORESHARING; in hyperv_fill_cpuids()
1601 } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) { in hyperv_fill_cpuids()
1602 c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) & in hyperv_fill_cpuids()
1607 c->function = HV_CPUID_IMPLEMENT_LIMITS; in hyperv_fill_cpuids()
1608 c->eax = cpu->hv_max_vps; in hyperv_fill_cpuids()
1609 c->ebx = cpu->hyperv_limits[0]; in hyperv_fill_cpuids()
1610 c->ecx = cpu->hyperv_limits[1]; in hyperv_fill_cpuids()
1611 c->edx = cpu->hyperv_limits[2]; in hyperv_fill_cpuids()
1620 c->function = function; in hyperv_fill_cpuids()
1624 c->function = HV_CPUID_NESTED_FEATURES; in hyperv_fill_cpuids()
1625 c->eax = nested_eax; in hyperv_fill_cpuids()
1630 c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS; in hyperv_fill_cpuids()
1631 c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ? in hyperv_fill_cpuids()
1634 c->eax = 0; in hyperv_fill_cpuids()
1635 c->ebx = signature[0]; in hyperv_fill_cpuids()
1636 c->ecx = signature[1]; in hyperv_fill_cpuids()
1637 c->edx = signature[2]; in hyperv_fill_cpuids()
1640 c->function = HV_CPUID_SYNDBG_INTERFACE; in hyperv_fill_cpuids()
1642 c->eax = signature[0]; in hyperv_fill_cpuids()
1643 c->ebx = 0; in hyperv_fill_cpuids()
1644 c->ecx = 0; in hyperv_fill_cpuids()
1645 c->edx = 0; in hyperv_fill_cpuids()
1648 c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; in hyperv_fill_cpuids()
1649 c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; in hyperv_fill_cpuids()
1650 c->ebx = 0; in hyperv_fill_cpuids()
1651 c->ecx = 0; in hyperv_fill_cpuids()
1652 c->edx = 0; in hyperv_fill_cpuids()
1680 if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) { in hyperv_init_vcpu()
1682 "'hv-passthrough' CPU flag prevents migration, use explicit" in hyperv_init_vcpu()
1683 " set of hv-* flags instead"); in hyperv_init_vcpu()
1691 if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO && in hyperv_init_vcpu()
1694 "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration" in hyperv_init_vcpu()
1695 " use explicit 'hv-no-nonarch-coresharing=on' instead (but" in hyperv_init_vcpu()
1719 return -ENXIO; in hyperv_init_vcpu()
1724 uint32_t synic_cap = cpu->hyperv_synic_kvm_only ? in hyperv_init_vcpu()
1729 strerror(-ret)); in hyperv_init_vcpu()
1733 if (!cpu->hyperv_synic_kvm_only) { in hyperv_init_vcpu()
1737 strerror(-ret)); in hyperv_init_vcpu()
1751 * KVM is required to support EVMCS ver.1, as that's what 'hv-evmcs' in hyperv_init_vcpu()
1753 * to '1' as well so 'hv-evmcs' feature is migratable even when (and if) in hyperv_init_vcpu()
1754 * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have in hyperv_init_vcpu()
1758 error_report("Hyper-V %s is not supported by kernel", in hyperv_init_vcpu()
1768 return -ENOTSUP; in hyperv_init_vcpu()
1772 if (cpu->hyperv_enforce_cpuid) { in hyperv_init_vcpu()
1776 strerror(-ret)); in hyperv_init_vcpu()
1796 env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096); in kvm_init_xsave()
1798 env->xsave_buf_len = sizeof(struct kvm_xsave); in kvm_init_xsave()
1801 env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); in kvm_init_xsave()
1802 memset(env->xsave_buf, 0, env->xsave_buf_len); in kvm_init_xsave()
1808 env->xsave_buf_len); in kvm_init_xsave()
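QEMU_ALIGN_UP here rounds the buffer size KVM reported for KVM_CAP_XSAVE2 up to a 4 KiB multiple; a minimal equivalent for a power-of-two alignment, with an illustrative input value:

    /* align_up(n, a) = (n + a - 1) & ~(a - 1), a being a power of two;
     * e.g. a reported XSAVE2 size of 4288 yields an 8192-byte buffer */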
1816 if (!env->nested_state) { in kvm_init_nested_state()
1820 size = env->nested_state->size; in kvm_init_nested_state()
1822 memset(env->nested_state, 0, size); in kvm_init_nested_state()
1823 env->nested_state->size = size; in kvm_init_nested_state()
1826 env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX; in kvm_init_nested_state()
1827 vmx_hdr = &env->nested_state->hdr.vmx; in kvm_init_nested_state()
1828 vmx_hdr->vmxon_pa = -1ull; in kvm_init_nested_state()
1829 vmx_hdr->vmcs12_pa = -1ull; in kvm_init_nested_state()
1831 env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM; in kvm_init_nested_state()
1855 c->function = i; in kvm_x86_build_cpuid()
1856 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
1857 times = c->eax & 0xff; in kvm_x86_build_cpuid()
1859 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | in kvm_x86_build_cpuid()
1868 c->function = i; in kvm_x86_build_cpuid()
1869 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC; in kvm_x86_build_cpuid()
1870 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
1876 cpuid_i--; in kvm_x86_build_cpuid()
1884 c->function = i; in kvm_x86_build_cpuid()
1885 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; in kvm_x86_build_cpuid()
1886 c->index = j; in kvm_x86_build_cpuid()
1887 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
1889 if (i == 4 && c->eax == 0) { in kvm_x86_build_cpuid()
1892 if (i == 0xb && !(c->ecx & 0xff00)) { in kvm_x86_build_cpuid()
1895 if (i == 0x1f && !(c->ecx & 0xff00)) { in kvm_x86_build_cpuid()
1898 if (i == 0xd && c->eax == 0) { in kvm_x86_build_cpuid()
1902 cpuid_i--; in kvm_x86_build_cpuid()
1914 c->function = i; in kvm_x86_build_cpuid()
1915 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; in kvm_x86_build_cpuid()
1916 c->index = j; in kvm_x86_build_cpuid()
1917 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
1919 if (j > 1 && (c->eax & 0xf) != 1) { in kvm_x86_build_cpuid()
1936 c->function = i; in kvm_x86_build_cpuid()
1937 c->index = 0; in kvm_x86_build_cpuid()
1938 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; in kvm_x86_build_cpuid()
1939 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
1940 times = c->eax; in kvm_x86_build_cpuid()
1947 c->function = i; in kvm_x86_build_cpuid()
1948 c->index = j; in kvm_x86_build_cpuid()
1949 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; in kvm_x86_build_cpuid()
1950 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
1955 c->function = i; in kvm_x86_build_cpuid()
1956 c->flags = 0; in kvm_x86_build_cpuid()
1957 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
1958 if (!c->eax && !c->ebx && !c->ecx && !c->edx) { in kvm_x86_build_cpuid()
1961 * so we can omit it and avoid hitting KVM's 80-entry limit. in kvm_x86_build_cpuid()
1963 cpuid_i--; in kvm_x86_build_cpuid()
2009 c->function = i; in kvm_x86_build_cpuid()
2010 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; in kvm_x86_build_cpuid()
2011 c->index = j; in kvm_x86_build_cpuid()
2012 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
2014 if (c->eax == 0) { in kvm_x86_build_cpuid()
2024 c->function = i; in kvm_x86_build_cpuid()
2025 c->flags = 0; in kvm_x86_build_cpuid()
2026 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
2027 if (!c->eax && !c->ebx && !c->ecx && !c->edx) { in kvm_x86_build_cpuid()
2030 * so we can omit it and avoid hitting KVM's 80-entry limit. in kvm_x86_build_cpuid()
2032 cpuid_i--; in kvm_x86_build_cpuid()
2039 if (env->cpuid_xlevel2 > 0) { in kvm_x86_build_cpuid()
2049 c->function = i; in kvm_x86_build_cpuid()
2050 c->flags = 0; in kvm_x86_build_cpuid()
2051 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); in kvm_x86_build_cpuid()
2087 CPUX86State *env = &cpu->env; in kvm_arch_init_vcpu()
2096 if (current_machine->cgs) { in kvm_arch_init_vcpu()
2098 X86_CONFIDENTIAL_GUEST(current_machine->cgs), cs); in kvm_arch_init_vcpu()
2108 has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2); in kvm_arch_init_vcpu()
2117 * latter case, we query it from KVM and record in env->tsc_khz, in kvm_arch_init_vcpu()
2120 if (!env->tsc_khz) { in kvm_arch_init_vcpu()
2121 r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? in kvm_arch_init_vcpu()
2123 -ENOTSUP; in kvm_arch_init_vcpu()
2125 env->tsc_khz = r; in kvm_arch_init_vcpu()
2129 env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY; in kvm_arch_init_vcpu()
2134 * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to in kvm_arch_init_vcpu()
2135 * check which Hyper-V enlightenments are supported and which are not, we in kvm_arch_init_vcpu()
2136 * can still proceed and check/expand Hyper-V enlightenments here so legacy in kvm_arch_init_vcpu()
2141 return -ENOSYS; in kvm_arch_init_vcpu()
2155 if (cs->kvm_state->xen_version) { in kvm_arch_init_vcpu()
2162 c->function = kvm_base + XEN_CPUID_SIGNATURE; in kvm_arch_init_vcpu()
2163 c->eax = kvm_base + XEN_CPUID_TIME; in kvm_arch_init_vcpu()
2164 c->ebx = signature[0]; in kvm_arch_init_vcpu()
2165 c->ecx = signature[1]; in kvm_arch_init_vcpu()
2166 c->edx = signature[2]; in kvm_arch_init_vcpu()
2169 c->function = kvm_base + XEN_CPUID_VENDOR; in kvm_arch_init_vcpu()
2170 c->eax = cs->kvm_state->xen_version; in kvm_arch_init_vcpu()
2171 c->ebx = 0; in kvm_arch_init_vcpu()
2172 c->ecx = 0; in kvm_arch_init_vcpu()
2173 c->edx = 0; in kvm_arch_init_vcpu()
2176 c->function = kvm_base + XEN_CPUID_HVM_MSR; in kvm_arch_init_vcpu()
2177 /* Number of hypercall-transfer pages */ in kvm_arch_init_vcpu()
2178 c->eax = 1; in kvm_arch_init_vcpu()
2181 c->ebx = XEN_HYPERCALL_MSR_HYPERV; in kvm_arch_init_vcpu()
2182 kvm_xen_init(cs->kvm_state, c->ebx); in kvm_arch_init_vcpu()
2184 c->ebx = XEN_HYPERCALL_MSR; in kvm_arch_init_vcpu()
2186 c->ecx = 0; in kvm_arch_init_vcpu()
2187 c->edx = 0; in kvm_arch_init_vcpu()
2190 c->function = kvm_base + XEN_CPUID_TIME; in kvm_arch_init_vcpu()
2191 c->eax = ((!!tsc_is_stable_and_known(env) << 1) | in kvm_arch_init_vcpu()
2192 (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2)); in kvm_arch_init_vcpu()
2194 c->ebx = 0; in kvm_arch_init_vcpu()
2196 c->ecx = env->user_tsc_khz; in kvm_arch_init_vcpu()
2198 c->edx = 0; in kvm_arch_init_vcpu()
2201 c->function = kvm_base + XEN_CPUID_HVM; in kvm_arch_init_vcpu()
2202 xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM; in kvm_arch_init_vcpu()
2203 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) { in kvm_arch_init_vcpu()
2204 c->function = kvm_base + XEN_CPUID_HVM; in kvm_arch_init_vcpu()
2206 if (cpu->xen_vapic) { in kvm_arch_init_vcpu()
2207 c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; in kvm_arch_init_vcpu()
2208 c->eax |= XEN_HVM_CPUID_X2APIC_VIRT; in kvm_arch_init_vcpu()
2211 c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS; in kvm_arch_init_vcpu()
2213 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) { in kvm_arch_init_vcpu()
2214 c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; in kvm_arch_init_vcpu()
2215 c->ebx = cs->cpu_index; in kvm_arch_init_vcpu()
2218 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) { in kvm_arch_init_vcpu()
2219 c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR; in kvm_arch_init_vcpu()
2234 } else if (cpu->expose_kvm) { in kvm_arch_init_vcpu()
2237 c->function = KVM_CPUID_SIGNATURE | kvm_base; in kvm_arch_init_vcpu()
2238 c->eax = KVM_CPUID_FEATURES | kvm_base; in kvm_arch_init_vcpu()
2239 c->ebx = signature[0]; in kvm_arch_init_vcpu()
2240 c->ecx = signature[1]; in kvm_arch_init_vcpu()
2241 c->edx = signature[2]; in kvm_arch_init_vcpu()
2244 c->function = KVM_CPUID_FEATURES | kvm_base; in kvm_arch_init_vcpu()
2245 c->eax = env->features[FEAT_KVM]; in kvm_arch_init_vcpu()
2246 c->edx = env->features[FEAT_KVM_HINTS]; in kvm_arch_init_vcpu()
2249 if (cpu->kvm_pv_enforce_cpuid) { in kvm_arch_init_vcpu()
2254 strerror(-r)); in kvm_arch_init_vcpu()
2262 if (((env->cpuid_version >> 8)&0xF) >= 6 in kvm_arch_init_vcpu()
2263 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == in kvm_arch_init_vcpu()
2269 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks); in kvm_arch_init_vcpu()
2271 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret)); in kvm_arch_init_vcpu()
2275 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) { in kvm_arch_init_vcpu()
2277 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks); in kvm_arch_init_vcpu()
2278 return -ENOTSUP; in kvm_arch_init_vcpu()
2281 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK); in kvm_arch_init_vcpu()
2285 return -ENOTSUP; in kvm_arch_init_vcpu()
2291 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK; in kvm_arch_init_vcpu()
2292 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap); in kvm_arch_init_vcpu()
2294 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret)); in kvm_arch_init_vcpu()
2299 cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env); in kvm_arch_init_vcpu()
2303 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) || in kvm_arch_init_vcpu()
2304 !!(c->ecx & CPUID_EXT_SMX); in kvm_arch_init_vcpu()
2308 if (c && (c->ebx & CPUID_7_0_EBX_SGX)) { in kvm_arch_init_vcpu()
2312 if (env->mcg_cap & MCG_LMCE_P) { in kvm_arch_init_vcpu()
2316 if (!env->user_tsc_khz) { in kvm_arch_init_vcpu()
2317 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) && in kvm_arch_init_vcpu()
2320 "State blocked by non-migratable CPU device" in kvm_arch_init_vcpu()
2330 if (cpu->vmware_cpuid_freq in kvm_arch_init_vcpu()
2332 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */ in kvm_arch_init_vcpu()
2333 && cpu->expose_kvm in kvm_arch_init_vcpu()
2339 c->function = KVM_CPUID_SIGNATURE | 0x10; in kvm_arch_init_vcpu()
2340 c->eax = env->tsc_khz; in kvm_arch_init_vcpu()
2341 c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */ in kvm_arch_init_vcpu()
2342 c->ecx = c->edx = 0; in kvm_arch_init_vcpu()
2345 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10); in kvm_arch_init_vcpu()
2362 env->nested_state = g_malloc0(max_nested_state_len); in kvm_arch_init_vcpu()
2363 env->nested_state->size = max_nested_state_len; in kvm_arch_init_vcpu()
2369 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE); in kvm_arch_init_vcpu()
2371 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) { in kvm_arch_init_vcpu()
2388 CPUX86State *env = &cpu->env; in kvm_arch_destroy_vcpu()
2390 g_free(env->xsave_buf); in kvm_arch_destroy_vcpu()
2392 g_free(cpu->kvm_msr_buf); in kvm_arch_destroy_vcpu()
2393 cpu->kvm_msr_buf = NULL; in kvm_arch_destroy_vcpu()
2395 g_free(env->nested_state); in kvm_arch_destroy_vcpu()
2396 env->nested_state = NULL; in kvm_arch_destroy_vcpu()
2398 qemu_del_vm_change_state_handler(cpu->vmsentry); in kvm_arch_destroy_vcpu()
2405 CPUX86State *env = &cpu->env; in kvm_arch_reset_vcpu()
2407 env->xcr0 = 1; in kvm_arch_reset_vcpu()
2409 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE : in kvm_arch_reset_vcpu()
2412 env->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_reset_vcpu()
2416 env->poll_control_msr = 1; in kvm_arch_reset_vcpu()
2425 CPUX86State *env = &cpu->env; in kvm_arch_after_reset_vcpu()
2433 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { in kvm_arch_after_reset_vcpu()
2434 env->msr_hv_synic_sint[i] = HV_SINT_MASKED; in kvm_arch_after_reset_vcpu()
2445 msrs = g_malloc0(sizeof(*msrs) + sizeof(msrs->entries[0])); in kvm_arch_reset_parked_vcpu()
2446 msrs->entries[0].index = MSR_IA32_TSC; in kvm_arch_reset_parked_vcpu()
2447 msrs->entries[0].data = 1; /* match the value in x86_cpu_reset() */ in kvm_arch_reset_parked_vcpu()
2448 msrs->nmsrs++; in kvm_arch_reset_parked_vcpu()
2458 CPUX86State *env = &cpu->env; in kvm_arch_do_init_vcpu()
2460 /* APs get directly into wait-for-SIPI state. */ in kvm_arch_do_init_vcpu()
2461 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) { in kvm_arch_do_init_vcpu()
2462 env->mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_do_init_vcpu()
2482 if (ret < 0 && ret != -E2BIG) { in kvm_get_supported_feature_msrs()
2484 strerror(-ret)); in kvm_get_supported_feature_msrs()
2492 kvm_feature_msrs->nmsrs = msr_list.nmsrs; in kvm_get_supported_feature_msrs()
2497 strerror(-ret)); in kvm_get_supported_feature_msrs()
2517 if (ret < 0 && ret != -E2BIG) { in kvm_get_supported_msrs()
2528 kvm_msr_list->nmsrs = msr_list.nmsrs; in kvm_get_supported_msrs()
2533 for (i = 0; i < kvm_msr_list->nmsrs; i++) { in kvm_get_supported_msrs()
2534 switch (kvm_msr_list->indices[i]) { in kvm_get_supported_msrs()
2655 *val = cs->kvm_state->msr_energy.msr_unit; in kvm_rdmsr_rapl_power_unit()
2667 *val = cs->kvm_state->msr_energy.msr_limit; in kvm_rdmsr_pkg_power_limit()
2679 *val = cs->kvm_state->msr_energy.msr_info; in kvm_rdmsr_pkg_power_info()
2690 *val = cs->kvm_state->msr_energy.msr_value[cs->cpu_index]; in kvm_rdmsr_pkg_energy_status()
2707 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull); in register_smram_listener()
2713 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram", in register_smram_listener()
2724 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); in register_smram_listener()
2726 &smram_address_space, 1, "kvm-smram"); in register_smram_listener()
2732 struct KVMMsrEnergy *vmsr = &s->msr_energy; in kvm_msr_energy_thread()
2745 pkg_stat = g_new0(vmsr_package_energy_stat, vmsr->host_topo.maxpkgs); in kvm_msr_energy_thread()
2751 vpkgs_energy_stat = g_new0(unsigned int, vmsr->guest_vsockets); in kvm_msr_energy_thread()
2754 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) { in kvm_msr_energy_thread()
2760 vmsr->host_topo.maxticks[i] = (MSR_ENERGY_THREAD_SLEEP_US / 1000000) in kvm_msr_energy_thread()
2762 * vmsr->host_topo.pkg_cpu_count[i]; in kvm_msr_energy_thread()
2768 = vmsr_get_thread_ids(vmsr->pid, &num_threads); in kvm_msr_energy_thread()
2783 vmsr_read_thread_stat(vmsr->pid, in kvm_msr_energy_thread()
2793 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) { in kvm_msr_energy_thread()
2804 s->msr_energy.sioc); in kvm_msr_energy_thread()
2817 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) { in kvm_msr_energy_thread()
2828 s->msr_energy.sioc); in kvm_msr_energy_thread()
2837 pkg_stat[i].e_end - pkg_stat[i].e_start; in kvm_msr_energy_thread()
2848 vmsr_read_thread_stat(vmsr->pid, in kvm_msr_energy_thread()
2854 if (vmsr->pid < 0) { in kvm_msr_energy_thread()
2872 if (cpu->thread_id == thd_stat[i].thread_id) { in kvm_msr_energy_thread()
2874 thd_stat[i].vcpu_id = cpu->cpu_index; in kvm_msr_energy_thread()
2883 for (int i = 0; i < vmsr->guest_cpu_list->len; i++) { in kvm_msr_energy_thread()
2886 vmsr->guest_cpu_list->cpus[i].arch_id) in kvm_msr_energy_thread()
2889 &vmsr->guest_topo_info, &topo_ids); in kvm_msr_energy_thread()
2895 /* Calculate the total energy of all non-vCPU threads */ in kvm_msr_energy_thread()
2902 vmsr->host_topo.maxticks[thd_stat[i].pkg_id]); in kvm_msr_energy_thread()
2908 /* Calculate the ratio per non-vCPU thread of each package */ in kvm_msr_energy_thread()
2909 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) { in kvm_msr_energy_thread()
2925 vmsr->host_topo.maxticks[thd_stat[i].pkg_id]); in kvm_msr_energy_thread()
2941 vmsr->msr_value[thd_stat[i].vcpu_id] = \ in kvm_msr_energy_thread()
2961 struct KVMMsrEnergy *r = &s->msr_energy; in kvm_msr_energy_thread_init()
2971 return -1; in kvm_msr_energy_thread_init()
2975 return -1; in kvm_msr_energy_thread_init()
2979 vmsr_init_topo_info(&r->guest_topo_info, ms); in kvm_msr_energy_thread_init()
2982 r->guest_vcpus = ms->smp.cpus; in kvm_msr_energy_thread_init()
2985 r->guest_vsockets = ms->smp.sockets; in kvm_msr_energy_thread_init()
2988 r->msr_value = g_new0(uint64_t, r->guest_vcpus); in kvm_msr_energy_thread_init()
2991 r->guest_cpu_list = mc->possible_cpu_arch_ids(ms); in kvm_msr_energy_thread_init()
2994 r->host_topo.maxcpus = vmsr_get_maxcpus(); in kvm_msr_energy_thread_init()
2995 if (r->host_topo.maxcpus == 0) { in kvm_msr_energy_thread_init()
2997 return -1; in kvm_msr_energy_thread_init()
3001 r->host_topo.maxpkgs = vmsr_get_max_physical_package(r->host_topo.maxcpus); in kvm_msr_energy_thread_init()
3002 if (r->host_topo.maxpkgs == 0) { in kvm_msr_energy_thread_init()
3004 return -1; in kvm_msr_energy_thread_init()
3008 r->host_topo.pkg_cpu_count = g_new0(unsigned int, r->host_topo.maxpkgs); in kvm_msr_energy_thread_init()
3009 r->host_topo.maxticks = g_new0(unsigned int, r->host_topo.maxpkgs); in kvm_msr_energy_thread_init()
3011 vmsr_count_cpus_per_package(r->host_topo.pkg_cpu_count, in kvm_msr_energy_thread_init()
3012 r->host_topo.maxpkgs); in kvm_msr_energy_thread_init()
3013 for (int i = 0; i < r->host_topo.maxpkgs; i++) { in kvm_msr_energy_thread_init()
3014 if (r->host_topo.pkg_cpu_count[i] == 0) { in kvm_msr_energy_thread_init()
3016 return -1; in kvm_msr_energy_thread_init()
3021 r->pid = getpid(); in kvm_msr_energy_thread_init()
3024 if (s->msr_energy.socket_path == NULL) { in kvm_msr_energy_thread_init()
3025 s->msr_energy.socket_path = vmsr_compute_default_paths(); in kvm_msr_energy_thread_init()
3029 s->msr_energy.sioc = vmsr_open_socket(s->msr_energy.socket_path); in kvm_msr_energy_thread_init()
3031 if (s->msr_energy.sioc == NULL) { in kvm_msr_energy_thread_init()
3033 return -1; in kvm_msr_energy_thread_init()
3037 r->msr_unit = vmsr_read_msr(MSR_RAPL_POWER_UNIT, 0, r->pid, in kvm_msr_energy_thread_init()
3038 s->msr_energy.sioc); in kvm_msr_energy_thread_init()
3039 r->msr_limit = vmsr_read_msr(MSR_PKG_POWER_LIMIT, 0, r->pid, in kvm_msr_energy_thread_init()
3040 s->msr_energy.sioc); in kvm_msr_energy_thread_init()
3041 r->msr_info = vmsr_read_msr(MSR_PKG_POWER_INFO, 0, r->pid, in kvm_msr_energy_thread_init()
3042 s->msr_energy.sioc); in kvm_msr_energy_thread_init()
3043 if (r->msr_unit == 0 || r->msr_limit == 0 || r->msr_info == 0) { in kvm_msr_energy_thread_init()
3045 return -1; in kvm_msr_energy_thread_init()
3048 qemu_thread_create(&r->msr_thr, "kvm-msr", in kvm_msr_energy_thread_init()
3067 strerror(-ret)); in kvm_vm_enable_exception_payload()
3084 strerror(-ret)); in kvm_vm_enable_triple_fault_event()
3100 "kvm-shadow-mem", in kvm_vm_set_nr_mmu_pages()
3102 if (shadow_mem != -1) { in kvm_vm_set_nr_mmu_pages()
3135 return -ENOTSUP; in kvm_vm_enable_bus_lock_exit()
3141 strerror(-ret)); in kvm_vm_enable_bus_lock_exit()
3150 if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE) { in kvm_vm_enable_notify_vmexit()
3152 ((uint64_t)s->notify_window << 32) | in kvm_vm_enable_notify_vmexit()
3159 strerror(-ret)); in kvm_vm_enable_notify_vmexit()
3173 strerror(-ret)); in kvm_vm_enable_userspace_msr()
3181 strerror(-ret)); in kvm_vm_enable_userspace_msr()
3192 if (s->msr_energy.enable == true) { in kvm_vm_enable_energy_msrs()
3197 strerror(-ret)); in kvm_vm_enable_energy_msrs()
3205 strerror(-ret)); in kvm_vm_enable_energy_msrs()
3213 strerror(-ret)); in kvm_vm_enable_energy_msrs()
3220 strerror(-ret)); in kvm_vm_enable_energy_msrs()
3236 if (ms->cgs) { in kvm_arch_init()
3237 ret = confidential_guest_kvm_init(ms->cgs, &local_err); in kvm_arch_init()
3259 if (s->xen_version) { in kvm_arch_init()
3263 return -ENOTSUP; in kvm_arch_init()
3273 return -ENOTSUP; in kvm_arch_init()
3320 strerror(-ret)); in kvm_arch_init()
3328 if (x86ms->bus_lock_ratelimit > 0) { in kvm_arch_init()
3335 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME); in kvm_arch_init()
3352 if (s->msr_energy.enable == true) { in kvm_arch_init()
3371 lhs->selector = rhs->selector; in set_v8086_seg()
3372 lhs->base = rhs->base; in set_v8086_seg()
3373 lhs->limit = rhs->limit; in set_v8086_seg()
3374 lhs->type = 3; in set_v8086_seg()
3375 lhs->present = 1; in set_v8086_seg()
3376 lhs->dpl = 3; in set_v8086_seg()
3377 lhs->db = 0; in set_v8086_seg()
3378 lhs->s = 1; in set_v8086_seg()
3379 lhs->l = 0; in set_v8086_seg()
3380 lhs->g = 0; in set_v8086_seg()
3381 lhs->avl = 0; in set_v8086_seg()
3382 lhs->unusable = 0; in set_v8086_seg()
3387 unsigned flags = rhs->flags; in set_seg()
3388 lhs->selector = rhs->selector; in set_seg()
3389 lhs->base = rhs->base; in set_seg()
3390 lhs->limit = rhs->limit; in set_seg()
3391 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; in set_seg()
3392 lhs->present = (flags & DESC_P_MASK) != 0; in set_seg()
3393 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3; in set_seg()
3394 lhs->db = (flags >> DESC_B_SHIFT) & 1; in set_seg()
3395 lhs->s = (flags & DESC_S_MASK) != 0; in set_seg()
3396 lhs->l = (flags >> DESC_L_SHIFT) & 1; in set_seg()
3397 lhs->g = (flags & DESC_G_MASK) != 0; in set_seg()
3398 lhs->avl = (flags & DESC_AVL_MASK) != 0; in set_seg()
3399 lhs->unusable = !lhs->present; in set_seg()
3400 lhs->padding = 0; in set_seg()
3405 lhs->selector = rhs->selector; in get_seg()
3406 lhs->base = rhs->base; in get_seg()
3407 lhs->limit = rhs->limit; in get_seg()
3408 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) | in get_seg()
3409 ((rhs->present && !rhs->unusable) * DESC_P_MASK) | in get_seg()
3410 (rhs->dpl << DESC_DPL_SHIFT) | in get_seg()
3411 (rhs->db << DESC_B_SHIFT) | in get_seg()
3412 (rhs->s * DESC_S_MASK) | in get_seg()
3413 (rhs->l << DESC_L_SHIFT) | in get_seg()
3414 (rhs->g * DESC_G_MASK) | in get_seg()
3415 (rhs->avl * DESC_AVL_MASK); in get_seg()
3429 CPUX86State *env = &cpu->env; in kvm_getput_regs()
3440 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set); in kvm_getput_regs()
3441 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set); in kvm_getput_regs()
3442 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set); in kvm_getput_regs()
3443 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set); in kvm_getput_regs()
3444 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set); in kvm_getput_regs()
3445 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set); in kvm_getput_regs()
3446 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set); in kvm_getput_regs()
3447 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set); in kvm_getput_regs()
3449 kvm_getput_reg(&regs.r8, &env->regs[8], set); in kvm_getput_regs()
3450 kvm_getput_reg(&regs.r9, &env->regs[9], set); in kvm_getput_regs()
3451 kvm_getput_reg(&regs.r10, &env->regs[10], set); in kvm_getput_regs()
3452 kvm_getput_reg(&regs.r11, &env->regs[11], set); in kvm_getput_regs()
3453 kvm_getput_reg(&regs.r12, &env->regs[12], set); in kvm_getput_regs()
3454 kvm_getput_reg(&regs.r13, &env->regs[13], set); in kvm_getput_regs()
3455 kvm_getput_reg(&regs.r14, &env->regs[14], set); in kvm_getput_regs()
3456 kvm_getput_reg(&regs.r15, &env->regs[15], set); in kvm_getput_regs()
3459 kvm_getput_reg(&regs.rflags, &env->eflags, set); in kvm_getput_regs()
3460 kvm_getput_reg(&regs.rip, &env->eip, set); in kvm_getput_regs()
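The kvm_getput_reg() helper that gives this function its get/put symmetry is not among the matched lines; a minimal sketch of the assumed form (target_ulong as used by QEMU):

    static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
    {
        if (set) {
            *kvm_reg = *qemu_reg;   /* QEMU state -> KVM_SET_REGS buffer */
        } else {
            *qemu_reg = *kvm_reg;   /* KVM_GET_REGS buffer -> QEMU state */
        }
    }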
3471 CPUX86State *env = &cpu->env; in kvm_put_xsave()
3472 void *xsave = env->xsave_buf; in kvm_put_xsave()
3474 x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len); in kvm_put_xsave()
3481 CPUX86State *env = &cpu->env; in kvm_put_xcrs()
3491 xcrs.xcrs[0].value = env->xcr0; in kvm_put_xcrs()
3497 CPUX86State *env = &cpu->env; in kvm_put_sregs()
3506 if ((env->eflags & VM_MASK)) { in kvm_put_sregs()
3507 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); in kvm_put_sregs()
3508 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); in kvm_put_sregs()
3509 set_v8086_seg(&sregs.es, &env->segs[R_ES]); in kvm_put_sregs()
3510 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); in kvm_put_sregs()
3511 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); in kvm_put_sregs()
3512 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); in kvm_put_sregs()
3514 set_seg(&sregs.cs, &env->segs[R_CS]); in kvm_put_sregs()
3515 set_seg(&sregs.ds, &env->segs[R_DS]); in kvm_put_sregs()
3516 set_seg(&sregs.es, &env->segs[R_ES]); in kvm_put_sregs()
3517 set_seg(&sregs.fs, &env->segs[R_FS]); in kvm_put_sregs()
3518 set_seg(&sregs.gs, &env->segs[R_GS]); in kvm_put_sregs()
3519 set_seg(&sregs.ss, &env->segs[R_SS]); in kvm_put_sregs()
3522 set_seg(&sregs.tr, &env->tr); in kvm_put_sregs()
3523 set_seg(&sregs.ldt, &env->ldt); in kvm_put_sregs()
3525 sregs.idt.limit = env->idt.limit; in kvm_put_sregs()
3526 sregs.idt.base = env->idt.base; in kvm_put_sregs()
3528 sregs.gdt.limit = env->gdt.limit; in kvm_put_sregs()
3529 sregs.gdt.base = env->gdt.base; in kvm_put_sregs()
3532 sregs.cr0 = env->cr[0]; in kvm_put_sregs()
3533 sregs.cr2 = env->cr[2]; in kvm_put_sregs()
3534 sregs.cr3 = env->cr[3]; in kvm_put_sregs()
3535 sregs.cr4 = env->cr[4]; in kvm_put_sregs()
3537 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); in kvm_put_sregs()
3538 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); in kvm_put_sregs()
3540 sregs.efer = env->efer; in kvm_put_sregs()
3547 CPUX86State *env = &cpu->env; in kvm_put_sregs2()
3553 if ((env->eflags & VM_MASK)) { in kvm_put_sregs2()
3554 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); in kvm_put_sregs2()
3555 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); in kvm_put_sregs2()
3556 set_v8086_seg(&sregs.es, &env->segs[R_ES]); in kvm_put_sregs2()
3557 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); in kvm_put_sregs2()
3558 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); in kvm_put_sregs2()
3559 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); in kvm_put_sregs2()
3561 set_seg(&sregs.cs, &env->segs[R_CS]); in kvm_put_sregs2()
3562 set_seg(&sregs.ds, &env->segs[R_DS]); in kvm_put_sregs2()
3563 set_seg(&sregs.es, &env->segs[R_ES]); in kvm_put_sregs2()
3564 set_seg(&sregs.fs, &env->segs[R_FS]); in kvm_put_sregs2()
3565 set_seg(&sregs.gs, &env->segs[R_GS]); in kvm_put_sregs2()
3566 set_seg(&sregs.ss, &env->segs[R_SS]); in kvm_put_sregs2()
3569 set_seg(&sregs.tr, &env->tr); in kvm_put_sregs2()
3570 set_seg(&sregs.ldt, &env->ldt); in kvm_put_sregs2()
3572 sregs.idt.limit = env->idt.limit; in kvm_put_sregs2()
3573 sregs.idt.base = env->idt.base; in kvm_put_sregs2()
3575 sregs.gdt.limit = env->gdt.limit; in kvm_put_sregs2()
3576 sregs.gdt.base = env->gdt.base; in kvm_put_sregs2()
3579 sregs.cr0 = env->cr[0]; in kvm_put_sregs2()
3580 sregs.cr2 = env->cr[2]; in kvm_put_sregs2()
3581 sregs.cr3 = env->cr[3]; in kvm_put_sregs2()
3582 sregs.cr4 = env->cr[4]; in kvm_put_sregs2()
3584 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); in kvm_put_sregs2()
3585 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); in kvm_put_sregs2()
3587 sregs.efer = env->efer; in kvm_put_sregs2()
3589 if (env->pdptrs_valid) { in kvm_put_sregs2()
3591 sregs.pdptrs[i] = env->pdptrs[i]; in kvm_put_sregs2()
3602 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); in kvm_msr_buf_reset()
3607 struct kvm_msrs *msrs = cpu->kvm_msr_buf; in kvm_msr_entry_add()
3609 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs]; in kvm_msr_entry_add()
3613 entry->index = index; in kvm_msr_entry_add()
3614 entry->reserved = 0; in kvm_msr_entry_add()
3615 entry->data = value; in kvm_msr_entry_add()
3616 msrs->nmsrs++; in kvm_msr_entry_add()
3624 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); in kvm_put_one_msr()
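kvm_msr_entry_add() only appends to cpu->kvm_msr_buf; nothing reaches the kernel until one KVM_SET_MSRS ioctl flushes the batch (kvm_buf_set_msrs() further down). A standalone sketch of the same batching pattern (vcpu_fd is an assumed raw vCPU fd, the 64-entry capacity is hypothetical):

    struct kvm_msrs *msrs =
        calloc(1, sizeof(*msrs) + 64 * sizeof(struct kvm_msr_entry));
    msrs->entries[msrs->nmsrs++] =
        (struct kvm_msr_entry){ .index = 0x10 /* IA32_TSC */, .data = 0 };
    /* ...append more entries, then flush the whole batch at once... */
    int done = ioctl(vcpu_fd, KVM_SET_MSRS, msrs);
    /* KVM returns the number of entries processed; a short count means
     * entry 'done' failed, mirroring the check in kvm_buf_set_msrs() */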
3656 CPUX86State *env = &cpu->env; in kvm_put_tscdeadline_msr()
3663 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline); in kvm_put_tscdeadline_msr()
3687 cpu->env.msr_ia32_feature_control); in kvm_put_msr_feature_control()
3729 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one). in make_vmx_msr_value()
3730 * Bit 32:63 -> 1 if the control bit can be one. in make_vmx_msr_value()
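A minimal sketch of the allowed-0/allowed-1 packing that comment describes (name and signature assumed for illustration, mirroring what make_vmx_msr_value presumably computes):

    static uint64_t vmx_ctl_msr(uint32_t must_be_one, uint32_t can_be_one)
    {
        /* low half: a 0 bit means the control may be 0 (so 1 = must be one);
         * high half: a 1 bit means the control may be 1 */
        return ((uint64_t)can_be_one << 32) | must_be_one;
    }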
3756 * If the guest is 64-bit, a value of 1 is allowed for the host address in kvm_msr_entry_add_vmx()
3763 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should in kvm_msr_entry_add_vmx()
3772 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can in kvm_msr_entry_add_vmx()
3774 * future proof. Bits 32-63 in theory could change, though KVM does in kvm_msr_entry_add_vmx()
3775 * not support dual-monitor treatment and probably never will; mask in kvm_msr_entry_add_vmx()
3826 /* FRED injected-event data (0x2052). */ in kvm_msr_entry_add_vmx()
3830 /* Secondary VM-exit controls (0x2044). */ in kvm_msr_entry_add_vmx()
3855 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); in kvm_buf_set_msrs()
3860 if (ret < cpu->kvm_msr_buf->nmsrs) { in kvm_buf_set_msrs()
3861 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; in kvm_buf_set_msrs()
3863 (uint32_t)e->index, (uint64_t)e->data); in kvm_buf_set_msrs()
3866 assert(ret == cpu->kvm_msr_buf->nmsrs); in kvm_buf_set_msrs()
3872 CPUX86State *env = &cpu->env; in kvm_init_msrs()
3879 env->features[FEAT_ARCH_CAPABILITIES]); in kvm_init_msrs()
3884 env->features[FEAT_CORE_CAPABILITY]); in kvm_init_msrs()
3887 if (has_msr_perf_capabs && cpu->enable_pmu) { in kvm_init_msrs()
3888 kvm_msr_entry_add_perf(cpu, env->features); in kvm_init_msrs()
3896 kvm_msr_entry_add_vmx(cpu, env->features); in kvm_init_msrs()
3901 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); in kvm_init_msrs()
3908 CPUX86State *env = &cpu->env; in kvm_put_msrs()
3913 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); in kvm_put_msrs()
3914 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); in kvm_put_msrs()
3915 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); in kvm_put_msrs()
3916 kvm_msr_entry_add(cpu, MSR_PAT, env->pat); in kvm_put_msrs()
3918 kvm_msr_entry_add(cpu, MSR_STAR, env->star); in kvm_put_msrs()
3921 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); in kvm_put_msrs()
3924 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); in kvm_put_msrs()
3927 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); in kvm_put_msrs()
3931 env->msr_ia32_misc_enable); in kvm_put_msrs()
3934 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); in kvm_put_msrs()
3937 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count); in kvm_put_msrs()
3940 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs); in kvm_put_msrs()
3943 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); in kvm_put_msrs()
3946 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); in kvm_put_msrs()
3949 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait); in kvm_put_msrs()
3952 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); in kvm_put_msrs()
3955 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr); in kvm_put_msrs()
3959 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl); in kvm_put_msrs()
3962 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd); in kvm_put_msrs()
3965 kvm_msr_entry_add(cpu, MSR_K7_HWCR, env->msr_hwcr); in kvm_put_msrs()
3970 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); in kvm_put_msrs()
3971 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); in kvm_put_msrs()
3972 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); in kvm_put_msrs()
3973 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); in kvm_put_msrs()
3974 if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { in kvm_put_msrs()
3975 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, env->fred_rsp0); in kvm_put_msrs()
3976 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, env->fred_rsp1); in kvm_put_msrs()
3977 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, env->fred_rsp2); in kvm_put_msrs()
3978 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, env->fred_rsp3); in kvm_put_msrs()
3979 kvm_msr_entry_add(cpu, MSR_IA32_FRED_STKLVLS, env->fred_stklvls); in kvm_put_msrs()
3980 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, env->fred_ssp1); in kvm_put_msrs()
3981 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, env->fred_ssp2); in kvm_put_msrs()
3982 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, env->fred_ssp3); in kvm_put_msrs()
3983 kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, env->fred_config); in kvm_put_msrs()
3993 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); in kvm_put_msrs()
3994 if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) { in kvm_put_msrs()
3995 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); in kvm_put_msrs()
3996 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); in kvm_put_msrs()
3998 if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) { in kvm_put_msrs()
3999 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr); in kvm_put_msrs()
4001 if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) { in kvm_put_msrs()
4002 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); in kvm_put_msrs()
4004 if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) { in kvm_put_msrs()
4005 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); in kvm_put_msrs()
4007 if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) { in kvm_put_msrs()
4008 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); in kvm_put_msrs()
4011 if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) { in kvm_put_msrs()
4012 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); in kvm_put_msrs()
4025 env->msr_fixed_counters[i]); in kvm_put_msrs()
4029 env->msr_gp_counters[i]); in kvm_put_msrs()
4031 env->msr_gp_evtsel[i]); in kvm_put_msrs()
4035 env->msr_global_status); in kvm_put_msrs()
4037 env->msr_global_ovf_ctrl); in kvm_put_msrs()
4041 env->msr_fixed_ctr_ctrl); in kvm_put_msrs()
4043 env->msr_global_ctrl); in kvm_put_msrs()
4047 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, in kvm_put_msrs()
4053 env->msr_hv_guest_os_id); in kvm_put_msrs()
4055 env->msr_hv_hypercall); in kvm_put_msrs()
4059 env->msr_hv_tsc); in kvm_put_msrs()
4063 env->msr_hv_reenlightenment_control); in kvm_put_msrs()
4065 env->msr_hv_tsc_emulation_control); in kvm_put_msrs()
4067 env->msr_hv_tsc_emulation_status); in kvm_put_msrs()
4077 env->msr_hv_vapic); in kvm_put_msrs()
4084 env->msr_hv_crash_params[j]); in kvm_put_msrs()
4089 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); in kvm_put_msrs()
4102 env->msr_hv_synic_control); in kvm_put_msrs()
4104 env->msr_hv_synic_evt_page); in kvm_put_msrs()
4106 env->msr_hv_synic_msg_page); in kvm_put_msrs()
4108 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { in kvm_put_msrs()
4110 env->msr_hv_synic_sint[j]); in kvm_put_msrs()
4116 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { in kvm_put_msrs()
4118 env->msr_hv_stimer_config[j]); in kvm_put_msrs()
4121 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { in kvm_put_msrs()
4123 env->msr_hv_stimer_count[j]); in kvm_put_msrs()
4126 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { in kvm_put_msrs()
4127 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); in kvm_put_msrs()
4129 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); in kvm_put_msrs()
4130 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); in kvm_put_msrs()
4131 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); in kvm_put_msrs()
4132 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); in kvm_put_msrs()
4133 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); in kvm_put_msrs()
4134 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]); in kvm_put_msrs()
4135 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]); in kvm_put_msrs()
4136 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]); in kvm_put_msrs()
4137 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]); in kvm_put_msrs()
4138 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]); in kvm_put_msrs()
4139 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]); in kvm_put_msrs()
4140 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]); in kvm_put_msrs()
4145 uint64_t mask = env->mtrr_var[i].mask; in kvm_put_msrs()
4149 env->mtrr_var[i].base); in kvm_put_msrs()
4153 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { in kvm_put_msrs()
4158 env->msr_rtit_ctrl); in kvm_put_msrs()
4160 env->msr_rtit_status); in kvm_put_msrs()
4162 env->msr_rtit_output_base); in kvm_put_msrs()
4164 env->msr_rtit_output_mask); in kvm_put_msrs()
4166 env->msr_rtit_cr3_match); in kvm_put_msrs()
4169 env->msr_rtit_addrs[i]); in kvm_put_msrs()
4173 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { in kvm_put_msrs()
4175 env->msr_ia32_sgxlepubkeyhash[0]); in kvm_put_msrs()
4177 env->msr_ia32_sgxlepubkeyhash[1]); in kvm_put_msrs()
4179 env->msr_ia32_sgxlepubkeyhash[2]); in kvm_put_msrs()
4181 env->msr_ia32_sgxlepubkeyhash[3]); in kvm_put_msrs()
4184 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { in kvm_put_msrs()
4186 env->msr_xfd); in kvm_put_msrs()
4188 env->msr_xfd_err); in kvm_put_msrs()
4191 if (kvm_enabled() && cpu->enable_pmu && in kvm_put_msrs()
4192 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { in kvm_put_msrs()
4204 if (ret == 1 && !!depth && depth == env->msr_lbr_depth) { in kvm_put_msrs()
4205 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl); in kvm_put_msrs()
4206 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth); in kvm_put_msrs()
4209 if (!env->lbr_records[i].from) { in kvm_put_msrs()
4213 env->lbr_records[i].from); in kvm_put_msrs()
4215 env->lbr_records[i].to); in kvm_put_msrs()
4217 env->lbr_records[i].info); in kvm_put_msrs()
4226 if (env->mcg_cap) { in kvm_put_msrs()
4227 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); in kvm_put_msrs()
4228 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); in kvm_put_msrs()
4230 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); in kvm_put_msrs()
4232 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { in kvm_put_msrs()
4233 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); in kvm_put_msrs()
4243 CPUX86State *env = &cpu->env; in kvm_get_xsave()
4244 void *xsave = env->xsave_buf; in kvm_get_xsave()
4253 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len); in kvm_get_xsave()
4260 CPUX86State *env = &cpu->env; in kvm_get_xcrs()
4276 env->xcr0 = xcrs.xcrs[i].value; in kvm_get_xcrs()
4285 CPUX86State *env = &cpu->env; in kvm_get_sregs()
4299 get_seg(&env->segs[R_CS], &sregs.cs); in kvm_get_sregs()
4300 get_seg(&env->segs[R_DS], &sregs.ds); in kvm_get_sregs()
4301 get_seg(&env->segs[R_ES], &sregs.es); in kvm_get_sregs()
4302 get_seg(&env->segs[R_FS], &sregs.fs); in kvm_get_sregs()
4303 get_seg(&env->segs[R_GS], &sregs.gs); in kvm_get_sregs()
4304 get_seg(&env->segs[R_SS], &sregs.ss); in kvm_get_sregs()
4306 get_seg(&env->tr, &sregs.tr); in kvm_get_sregs()
4307 get_seg(&env->ldt, &sregs.ldt); in kvm_get_sregs()
4309 env->idt.limit = sregs.idt.limit; in kvm_get_sregs()
4310 env->idt.base = sregs.idt.base; in kvm_get_sregs()
4311 env->gdt.limit = sregs.gdt.limit; in kvm_get_sregs()
4312 env->gdt.base = sregs.gdt.base; in kvm_get_sregs()
4314 env->cr[0] = sregs.cr0; in kvm_get_sregs()
4315 env->cr[2] = sregs.cr2; in kvm_get_sregs()
4316 env->cr[3] = sregs.cr3; in kvm_get_sregs()
4317 env->cr[4] = sregs.cr4; in kvm_get_sregs()
4319 env->efer = sregs.efer; in kvm_get_sregs()
4320 if (sev_es_enabled() && env->efer & MSR_EFER_LME && in kvm_get_sregs()
4321 env->cr[0] & CR0_PG_MASK) { in kvm_get_sregs()
4322 env->efer |= MSR_EFER_LMA; in kvm_get_sregs()
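    /* Architectural background for the SEV-ES special case above, not
     * taken from this file: IA-32e (long) mode is active exactly when
     * EFER.LME and CR0.PG are both set, so EFER.LMA can be re-derived
     * here even though the encrypted guest state keeps KVM from
     * reporting it directly. */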
4333 CPUX86State *env = &cpu->env; in kvm_get_sregs2()
4342 get_seg(&env->segs[R_CS], &sregs.cs); in kvm_get_sregs2()
4343 get_seg(&env->segs[R_DS], &sregs.ds); in kvm_get_sregs2()
4344 get_seg(&env->segs[R_ES], &sregs.es); in kvm_get_sregs2()
4345 get_seg(&env->segs[R_FS], &sregs.fs); in kvm_get_sregs2()
4346 get_seg(&env->segs[R_GS], &sregs.gs); in kvm_get_sregs2()
4347 get_seg(&env->segs[R_SS], &sregs.ss); in kvm_get_sregs2()
4349 get_seg(&env->tr, &sregs.tr); in kvm_get_sregs2()
4350 get_seg(&env->ldt, &sregs.ldt); in kvm_get_sregs2()
4352 env->idt.limit = sregs.idt.limit; in kvm_get_sregs2()
4353 env->idt.base = sregs.idt.base; in kvm_get_sregs2()
4354 env->gdt.limit = sregs.gdt.limit; in kvm_get_sregs2()
4355 env->gdt.base = sregs.gdt.base; in kvm_get_sregs2()
4357 env->cr[0] = sregs.cr0; in kvm_get_sregs2()
4358 env->cr[2] = sregs.cr2; in kvm_get_sregs2()
4359 env->cr[3] = sregs.cr3; in kvm_get_sregs2()
4360 env->cr[4] = sregs.cr4; in kvm_get_sregs2()
4362 env->efer = sregs.efer; in kvm_get_sregs2()
4363 if (sev_es_enabled() && env->efer & MSR_EFER_LME && in kvm_get_sregs2()
4364 env->cr[0] & CR0_PG_MASK) { in kvm_get_sregs2()
4365 env->efer |= MSR_EFER_LMA; in kvm_get_sregs2()
4368 env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; in kvm_get_sregs2()
4370 if (env->pdptrs_valid) { in kvm_get_sregs2()
4372 env->pdptrs[i] = sregs.pdptrs[i]; in kvm_get_sregs2()
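    /* Background: under PAE paging the four PDPTEs are loaded into
     * internal CPU registers when CR3 is written, so they are
     * architectural state that cannot always be re-read from guest
     * memory; KVM_GET_SREGS2 therefore hands them over explicitly when
     * the PDPTRS_VALID flag is set. */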
4384 CPUX86State *env = &cpu->env; in kvm_get_msrs()
4385 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; in kvm_get_msrs()
4447 if (!env->tsc_valid) { in kvm_get_msrs()
4449 env->tsc_valid = !runstate_is_running(); in kvm_get_msrs()
4461 if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { in kvm_get_msrs()
4474 if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) { in kvm_get_msrs()
4478 if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) { in kvm_get_msrs()
4481 if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) { in kvm_get_msrs()
4484 if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) { in kvm_get_msrs()
4487 if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) { in kvm_get_msrs()
4490 if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) { in kvm_get_msrs()
4509 if (env->mcg_cap) { in kvm_get_msrs()
4515 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { in kvm_get_msrs()
4566 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { in kvm_get_msrs()
4585 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { in kvm_get_msrs()
4599 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { in kvm_get_msrs()
4606 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { in kvm_get_msrs()
4611 if (kvm_enabled() && cpu->enable_pmu && in kvm_get_msrs()
4612 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { in kvm_get_msrs()
4628 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); in kvm_get_msrs()
4633 if (ret < cpu->kvm_msr_buf->nmsrs) { in kvm_get_msrs()
4634 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; in kvm_get_msrs()
4636 (uint32_t)e->index); in kvm_get_msrs()
4639 assert(ret == cpu->kvm_msr_buf->nmsrs); in kvm_get_msrs()
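For context on the short-count check above: KVM_GET_MSRS processes the
entries in order and returns how many it completed, so on a short count
entries[ret] names the first MSR the kernel refused. The payload,
paraphrased from the KVM uapi headers:

    struct kvm_msrs {
        __u32 nmsrs;                    /* number of msrs in entries */
        __u32 pad;
        struct kvm_msr_entry entries[]; /* index set by the caller,
                                           data filled in by KVM */
    };

    struct kvm_msr_entry {
        __u32 index;
        __u32 reserved;
        __u64 data;
    };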
4644 * c n-1.12: actual mask bits in kvm_get_msrs()
4656 if (cpu->fill_mtrr_mask) { in kvm_get_msrs()
4658 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS); in kvm_get_msrs()
4659 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits); in kvm_get_msrs()
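Given the mask layout sketched in the comment above (bits n-1..12 carry
the actual mask bits, with n the physical address width), the top-bits
value computed here can be read as the following hypothetical helper; it
is only a sketch of the OR applied at the MTRRphysMask case further down:

    #include <stdint.h>

    /* Set bits [51:phys_bits] of a variable-MTRR mask returned by KVM,
     * equivalent to mask | MAKE_64BIT_MASK(phys_bits, 52 - phys_bits),
     * so the guest sees a consistent mask regardless of the host's
     * physical address width. */
    static uint64_t fill_mtrr_mask_bits(uint64_t kvm_mask, unsigned int phys_bits)
    {
        uint64_t top_bits = ((1ULL << (52 - phys_bits)) - 1) << phys_bits;

        return kvm_mask | top_bits;
    }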
4668 env->sysenter_cs = msrs[i].data; in kvm_get_msrs()
4671 env->sysenter_esp = msrs[i].data; in kvm_get_msrs()
4674 env->sysenter_eip = msrs[i].data; in kvm_get_msrs()
4677 env->pat = msrs[i].data; in kvm_get_msrs()
4680 env->star = msrs[i].data; in kvm_get_msrs()
4684 env->cstar = msrs[i].data; in kvm_get_msrs()
4687 env->kernelgsbase = msrs[i].data; in kvm_get_msrs()
4690 env->fmask = msrs[i].data; in kvm_get_msrs()
4693 env->lstar = msrs[i].data; in kvm_get_msrs()
4696 env->fred_rsp0 = msrs[i].data; in kvm_get_msrs()
4699 env->fred_rsp1 = msrs[i].data; in kvm_get_msrs()
4702 env->fred_rsp2 = msrs[i].data; in kvm_get_msrs()
4705 env->fred_rsp3 = msrs[i].data; in kvm_get_msrs()
4708 env->fred_stklvls = msrs[i].data; in kvm_get_msrs()
4711 env->fred_ssp1 = msrs[i].data; in kvm_get_msrs()
4714 env->fred_ssp2 = msrs[i].data; in kvm_get_msrs()
4717 env->fred_ssp3 = msrs[i].data; in kvm_get_msrs()
4720 env->fred_config = msrs[i].data; in kvm_get_msrs()
4724 env->tsc = msrs[i].data; in kvm_get_msrs()
4727 env->tsc_aux = msrs[i].data; in kvm_get_msrs()
4730 env->tsc_adjust = msrs[i].data; in kvm_get_msrs()
4733 env->tsc_deadline = msrs[i].data; in kvm_get_msrs()
4736 env->vm_hsave = msrs[i].data; in kvm_get_msrs()
4739 env->system_time_msr = msrs[i].data; in kvm_get_msrs()
4742 env->wall_clock_msr = msrs[i].data; in kvm_get_msrs()
4745 env->mcg_status = msrs[i].data; in kvm_get_msrs()
4748 env->mcg_ctl = msrs[i].data; in kvm_get_msrs()
4751 env->mcg_ext_ctl = msrs[i].data; in kvm_get_msrs()
4754 env->msr_ia32_misc_enable = msrs[i].data; in kvm_get_msrs()
4757 env->smbase = msrs[i].data; in kvm_get_msrs()
4760 env->msr_smi_count = msrs[i].data; in kvm_get_msrs()
4763 env->msr_ia32_feature_control = msrs[i].data; in kvm_get_msrs()
4766 env->msr_bndcfgs = msrs[i].data; in kvm_get_msrs()
4769 env->xss = msrs[i].data; in kvm_get_msrs()
4772 env->umwait = msrs[i].data; in kvm_get_msrs()
4775 env->pkrs = msrs[i].data; in kvm_get_msrs()
4779 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { in kvm_get_msrs()
4780 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; in kvm_get_msrs()
4784 env->async_pf_en_msr = msrs[i].data; in kvm_get_msrs()
4787 env->async_pf_int_msr = msrs[i].data; in kvm_get_msrs()
4790 env->pv_eoi_en_msr = msrs[i].data; in kvm_get_msrs()
4793 env->steal_time_msr = msrs[i].data; in kvm_get_msrs()
4796 env->poll_control_msr = msrs[i].data; in kvm_get_msrs()
4800 env->msr_fixed_ctr_ctrl = msrs[i].data; in kvm_get_msrs()
4803 env->msr_global_ctrl = msrs[i].data; in kvm_get_msrs()
4806 env->msr_global_status = msrs[i].data; in kvm_get_msrs()
4809 env->msr_global_ovf_ctrl = msrs[i].data; in kvm_get_msrs()
4811 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: in kvm_get_msrs()
4812 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; in kvm_get_msrs()
4814 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: in kvm_get_msrs()
4815 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; in kvm_get_msrs()
4817 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: in kvm_get_msrs()
4818 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; in kvm_get_msrs()
4821 env->msr_hv_hypercall = msrs[i].data; in kvm_get_msrs()
4824 env->msr_hv_guest_os_id = msrs[i].data; in kvm_get_msrs()
4827 env->msr_hv_vapic = msrs[i].data; in kvm_get_msrs()
4830 env->msr_hv_tsc = msrs[i].data; in kvm_get_msrs()
4833 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; in kvm_get_msrs()
4836 env->msr_hv_runtime = msrs[i].data; in kvm_get_msrs()
4839 env->msr_hv_synic_control = msrs[i].data; in kvm_get_msrs()
4842 env->msr_hv_synic_evt_page = msrs[i].data; in kvm_get_msrs()
4845 env->msr_hv_synic_msg_page = msrs[i].data; in kvm_get_msrs()
4848 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data; in kvm_get_msrs()
4854 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] = in kvm_get_msrs()
4861 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] = in kvm_get_msrs()
4865 env->msr_hv_reenlightenment_control = msrs[i].data; in kvm_get_msrs()
4868 env->msr_hv_tsc_emulation_control = msrs[i].data; in kvm_get_msrs()
4871 env->msr_hv_tsc_emulation_status = msrs[i].data; in kvm_get_msrs()
4874 env->msr_hv_syndbg_options = msrs[i].data; in kvm_get_msrs()
4877 env->mtrr_deftype = msrs[i].data; in kvm_get_msrs()
4880 env->mtrr_fixed[0] = msrs[i].data; in kvm_get_msrs()
4883 env->mtrr_fixed[1] = msrs[i].data; in kvm_get_msrs()
4886 env->mtrr_fixed[2] = msrs[i].data; in kvm_get_msrs()
4889 env->mtrr_fixed[3] = msrs[i].data; in kvm_get_msrs()
4892 env->mtrr_fixed[4] = msrs[i].data; in kvm_get_msrs()
4895 env->mtrr_fixed[5] = msrs[i].data; in kvm_get_msrs()
4898 env->mtrr_fixed[6] = msrs[i].data; in kvm_get_msrs()
4901 env->mtrr_fixed[7] = msrs[i].data; in kvm_get_msrs()
4904 env->mtrr_fixed[8] = msrs[i].data; in kvm_get_msrs()
4907 env->mtrr_fixed[9] = msrs[i].data; in kvm_get_msrs()
4910 env->mtrr_fixed[10] = msrs[i].data; in kvm_get_msrs()
4912 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1): in kvm_get_msrs()
4914 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data | in kvm_get_msrs()
4917 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; in kvm_get_msrs()
4921 env->spec_ctrl = msrs[i].data; in kvm_get_msrs()
4924 env->amd_tsc_scale_msr = msrs[i].data; in kvm_get_msrs()
4927 env->tsx_ctrl = msrs[i].data; in kvm_get_msrs()
4930 env->virt_ssbd = msrs[i].data; in kvm_get_msrs()
4933 env->msr_rtit_ctrl = msrs[i].data; in kvm_get_msrs()
4936 env->msr_rtit_status = msrs[i].data; in kvm_get_msrs()
4939 env->msr_rtit_output_base = msrs[i].data; in kvm_get_msrs()
4942 env->msr_rtit_output_mask = msrs[i].data; in kvm_get_msrs()
4945 env->msr_rtit_cr3_match = msrs[i].data; in kvm_get_msrs()
4948 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data; in kvm_get_msrs()
4951 env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] = in kvm_get_msrs()
4955 env->msr_xfd = msrs[i].data; in kvm_get_msrs()
4958 env->msr_xfd_err = msrs[i].data; in kvm_get_msrs()
4961 env->msr_lbr_ctl = msrs[i].data; in kvm_get_msrs()
4964 env->msr_lbr_depth = msrs[i].data; in kvm_get_msrs()
4967 env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data; in kvm_get_msrs()
4970 env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data; in kvm_get_msrs()
4973 env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data; in kvm_get_msrs()
4976 env->msr_hwcr = msrs[i].data; in kvm_get_msrs()
4986 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state }; in kvm_put_mp_state()
4994 CPUX86State *env = &cpu->env; in kvm_get_mp_state()
5002 env->mp_state = mp_state.mp_state; in kvm_get_mp_state()
5004 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED); in kvm_get_mp_state()
5011 DeviceState *apic = cpu->apic_state; in kvm_get_apic()
5029 CPUX86State *env = &cpu->env; in kvm_put_vcpu_events()
5036 events.exception.pending = env->exception_pending; in kvm_put_vcpu_events()
5037 events.exception_has_payload = env->exception_has_payload; in kvm_put_vcpu_events()
5038 events.exception_payload = env->exception_payload; in kvm_put_vcpu_events()
5040 events.exception.nr = env->exception_nr; in kvm_put_vcpu_events()
5041 events.exception.injected = env->exception_injected; in kvm_put_vcpu_events()
5042 events.exception.has_error_code = env->has_error_code; in kvm_put_vcpu_events()
5043 events.exception.error_code = env->error_code; in kvm_put_vcpu_events()
5045 events.interrupt.injected = (env->interrupt_injected >= 0); in kvm_put_vcpu_events()
5046 events.interrupt.nr = env->interrupt_injected; in kvm_put_vcpu_events()
5047 events.interrupt.soft = env->soft_interrupt; in kvm_put_vcpu_events()
5049 events.nmi.injected = env->nmi_injected; in kvm_put_vcpu_events()
5050 events.nmi.pending = env->nmi_pending; in kvm_put_vcpu_events()
5051 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); in kvm_put_vcpu_events()
5053 events.sipi_vector = env->sipi_vector; in kvm_put_vcpu_events()
5057 events.smi.smm = !!(env->hflags & HF_SMM_MASK); in kvm_put_vcpu_events()
5058 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK); in kvm_put_vcpu_events()
5061 * from cs->interrupt_request. in kvm_put_vcpu_events()
5063 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI; in kvm_put_vcpu_events()
5064 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT; in kvm_put_vcpu_events()
5065 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); in kvm_put_vcpu_events()
5067 /* Keep these in cs->interrupt_request. */ in kvm_put_vcpu_events()
5075 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { in kvm_put_vcpu_events()
5082 events.triple_fault.pending = env->triple_fault_pending; in kvm_put_vcpu_events()
5090 CPUX86State *env = &cpu->env; in kvm_get_vcpu_events()
5101 env->exception_pending = events.exception.pending; in kvm_get_vcpu_events()
5102 env->exception_has_payload = events.exception_has_payload; in kvm_get_vcpu_events()
5103 env->exception_payload = events.exception_payload; in kvm_get_vcpu_events()
5105 env->exception_pending = 0; in kvm_get_vcpu_events()
5106 env->exception_has_payload = false; in kvm_get_vcpu_events()
5108 env->exception_injected = events.exception.injected; in kvm_get_vcpu_events()
5109 env->exception_nr = in kvm_get_vcpu_events()
5110 (env->exception_pending || env->exception_injected) ? in kvm_get_vcpu_events()
5111 events.exception.nr : -1; in kvm_get_vcpu_events()
5112 env->has_error_code = events.exception.has_error_code; in kvm_get_vcpu_events()
5113 env->error_code = events.exception.error_code; in kvm_get_vcpu_events()
5115 env->interrupt_injected = in kvm_get_vcpu_events()
5116 events.interrupt.injected ? events.interrupt.nr : -1; in kvm_get_vcpu_events()
5117 env->soft_interrupt = events.interrupt.soft; in kvm_get_vcpu_events()
5119 env->nmi_injected = events.nmi.injected; in kvm_get_vcpu_events()
5120 env->nmi_pending = events.nmi.pending; in kvm_get_vcpu_events()
5122 env->hflags2 |= HF2_NMI_MASK; in kvm_get_vcpu_events()
5124 env->hflags2 &= ~HF2_NMI_MASK; in kvm_get_vcpu_events()
5129 env->hflags |= HF_SMM_MASK; in kvm_get_vcpu_events()
5131 env->hflags &= ~HF_SMM_MASK; in kvm_get_vcpu_events()
5139 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK; in kvm_get_vcpu_events()
5141 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK; in kvm_get_vcpu_events()
5151 env->triple_fault_pending = events.triple_fault.pending; in kvm_get_vcpu_events()
5154 env->sipi_vector = events.sipi_vector; in kvm_get_vcpu_events()
5161 CPUX86State *env = &cpu->env; in kvm_put_debugregs()
5167 dbgregs.db[i] = env->dr[i]; in kvm_put_debugregs()
5169 dbgregs.dr6 = env->dr[6]; in kvm_put_debugregs()
5170 dbgregs.dr7 = env->dr[7]; in kvm_put_debugregs()
5178 CPUX86State *env = &cpu->env; in kvm_get_debugregs()
5187 env->dr[i] = dbgregs.db[i]; in kvm_get_debugregs()
5189 env->dr[4] = env->dr[6] = dbgregs.dr6; in kvm_get_debugregs()
5190 env->dr[5] = env->dr[7] = dbgregs.dr7; in kvm_get_debugregs()
5197 CPUX86State *env = &cpu->env; in kvm_put_nested_state()
5200 if (!env->nested_state) { in kvm_put_nested_state()
5205 * Copy flags that are affected by reset from env->hflags and env->hflags2. in kvm_put_nested_state()
5207 if (env->hflags & HF_GUEST_MASK) { in kvm_put_nested_state()
5208 env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE; in kvm_put_nested_state()
5210 env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE; in kvm_put_nested_state()
5214 if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) { in kvm_put_nested_state()
5215 env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET; in kvm_put_nested_state()
5217 env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET; in kvm_put_nested_state()
5220 assert(env->nested_state->size <= max_nested_state_len); in kvm_put_nested_state()
5221 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state); in kvm_put_nested_state()
5226 CPUX86State *env = &cpu->env; in kvm_get_nested_state()
5230 if (!env->nested_state) { in kvm_get_nested_state()
5236 * nested_state->hdr.size than what our kernel supports. in kvm_get_nested_state()
5237 * We preserve the migration origin's nested_state->hdr.size for in kvm_get_nested_state()
5241 env->nested_state->size = max_nested_state_len; in kvm_get_nested_state()
5243 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state); in kvm_get_nested_state()
5249 * Copy flags that are affected by reset to env->hflags and env->hflags2. in kvm_get_nested_state()
5251 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) { in kvm_get_nested_state()
5252 env->hflags |= HF_GUEST_MASK; in kvm_get_nested_state()
5254 env->hflags &= ~HF_GUEST_MASK; in kvm_get_nested_state()
5259 if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) { in kvm_get_nested_state()
5260 env->hflags2 |= HF2_GIF_MASK; in kvm_get_nested_state()
5262 env->hflags2 &= ~HF2_GIF_MASK; in kvm_get_nested_state()
5284 error_setg_errno(errp, -ret, "Failed to set feature control MSR"); in kvm_arch_put_registers()
5292 error_setg_errno(errp, -ret, "Failed to set special registers"); in kvm_arch_put_registers()
5299 error_setg_errno(errp, -ret, "Failed to set nested state"); in kvm_arch_put_registers()
5308 * setting (e.g. using an explicit "tsc-freq" option). in kvm_arch_put_registers()
5317 error_setg_errno(errp, -ret, "Failed to set Xen state"); in kvm_arch_put_registers()
5325 error_setg_errno(errp, -ret, "Failed to set general purpose registers"); in kvm_arch_put_registers()
5330 error_setg_errno(errp, -ret, "Failed to set XSAVE"); in kvm_arch_put_registers()
5335 error_setg_errno(errp, -ret, "Failed to set XCRs"); in kvm_arch_put_registers()
5340 error_setg_errno(errp, -ret, "Failed to set MSRs"); in kvm_arch_put_registers()
5345 error_setg_errno(errp, -ret, "Failed to set vCPU events"); in kvm_arch_put_registers()
5351 error_setg_errno(errp, -ret, "Failed to set MP state"); in kvm_arch_put_registers()
5358 error_setg_errno(errp, -ret, "Failed to set TSC deadline MSR"); in kvm_arch_put_registers()
5363 error_setg_errno(errp, -ret, "Failed to set debug registers"); in kvm_arch_put_registers()
5378 error_setg_errno(errp, -ret, "Failed to get vCPU events"); in kvm_arch_get_registers()
5387 error_setg_errno(errp, -ret, "Failed to get MP state"); in kvm_arch_get_registers()
5392 error_setg_errno(errp, -ret, "Failed to get general purpose registers"); in kvm_arch_get_registers()
5397 error_setg_errno(errp, -ret, "Failed to get XSAVE"); in kvm_arch_get_registers()
5402 error_setg_errno(errp, -ret, "Failed to get XCRs"); in kvm_arch_get_registers()
5407 error_setg_errno(errp, -ret, "Failed to get special registers"); in kvm_arch_get_registers()
5412 error_setg_errno(errp, -ret, "Failed to get MSRs"); in kvm_arch_get_registers()
5417 error_setg_errno(errp, -ret, "Failed to get APIC"); in kvm_arch_get_registers()
5422 error_setg_errno(errp, -ret, "Failed to get debug registers"); in kvm_arch_get_registers()
5427 error_setg_errno(errp, -ret, "Failed to get nested state"); in kvm_arch_get_registers()
5434 error_setg_errno(errp, -ret, "Failed to get Xen state"); in kvm_arch_get_registers()
5441 cpu_sync_bndcs_hflags(&cpu->env); in kvm_arch_get_registers()
5448 CPUX86State *env = &x86_cpu->env; in kvm_arch_pre_run()
5452 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { in kvm_arch_pre_run()
5453 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { in kvm_arch_pre_run()
5455 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; in kvm_arch_pre_run()
5461 strerror(-ret)); in kvm_arch_pre_run()
5464 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { in kvm_arch_pre_run()
5466 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; in kvm_arch_pre_run()
5472 strerror(-ret)); in kvm_arch_pre_run()
5485 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { in kvm_arch_pre_run()
5486 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && in kvm_arch_pre_run()
5487 !(env->hflags & HF_SMM_MASK)) { in kvm_arch_pre_run()
5488 cpu->exit_request = 1; in kvm_arch_pre_run()
5490 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { in kvm_arch_pre_run()
5491 cpu->exit_request = 1; in kvm_arch_pre_run()
5497 if (run->ready_for_interrupt_injection && in kvm_arch_pre_run()
5498 (cpu->interrupt_request & CPU_INTERRUPT_HARD) && in kvm_arch_pre_run()
5499 (env->eflags & IF_MASK)) { in kvm_arch_pre_run()
5502 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; in kvm_arch_pre_run()
5513 strerror(-ret)); in kvm_arch_pre_run()
5522 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) { in kvm_arch_pre_run()
5523 run->request_interrupt_window = 1; in kvm_arch_pre_run()
5525 run->request_interrupt_window = 0; in kvm_arch_pre_run()
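    /* Handshake with KVM here: userspace may only use KVM_INTERRUPT
     * while run->ready_for_interrupt_injection is set; when an
     * interrupt is still pending but injection is not possible yet,
     * setting run->request_interrupt_window asks KVM to exit as soon
     * as the guest can accept it. */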
5529 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); in kvm_arch_pre_run()
5547 CPUX86State *env = &x86_cpu->env; in kvm_arch_post_run()
5549 if (run->flags & KVM_RUN_X86_SMM) { in kvm_arch_post_run()
5550 env->hflags |= HF_SMM_MASK; in kvm_arch_post_run()
5552 env->hflags &= ~HF_SMM_MASK; in kvm_arch_post_run()
5554 if (run->if_flag) { in kvm_arch_post_run()
5555 env->eflags |= IF_MASK; in kvm_arch_post_run()
5557 env->eflags &= ~IF_MASK; in kvm_arch_post_run()
5559 if (run->flags & KVM_RUN_X86_BUS_LOCK) { in kvm_arch_post_run()
5566 * vcpu_info->evtchn_upcall_pending has been cleared, and deassert in kvm_arch_post_run()
5571 if (x86_cpu->env.xen_callback_asserted) { in kvm_arch_post_run()
5581 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); in kvm_arch_post_run()
5582 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); in kvm_arch_post_run()
5592 CPUX86State *env = &cpu->env; in kvm_arch_process_async_events()
5594 if (cs->interrupt_request & CPU_INTERRUPT_MCE) { in kvm_arch_process_async_events()
5596 assert(env->mcg_cap); in kvm_arch_process_async_events()
5598 cs->interrupt_request &= ~CPU_INTERRUPT_MCE; in kvm_arch_process_async_events()
5602 if (env->exception_nr == EXCP08_DBLE) { in kvm_arch_process_async_events()
5605 cs->exit_request = 1; in kvm_arch_process_async_events()
5609 env->has_error_code = 0; in kvm_arch_process_async_events()
5611 cs->halted = 0; in kvm_arch_process_async_events()
5612 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) { in kvm_arch_process_async_events()
5613 env->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_process_async_events()
5617 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) && in kvm_arch_process_async_events()
5618 !(env->hflags & HF_SMM_MASK)) { in kvm_arch_process_async_events()
5627 if (cs->interrupt_request & CPU_INTERRUPT_POLL) { in kvm_arch_process_async_events()
5628 cs->interrupt_request &= ~CPU_INTERRUPT_POLL; in kvm_arch_process_async_events()
5629 apic_poll_irq(cpu->apic_state); in kvm_arch_process_async_events()
5631 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && in kvm_arch_process_async_events()
5632 (env->eflags & IF_MASK)) || in kvm_arch_process_async_events()
5633 (cs->interrupt_request & CPU_INTERRUPT_NMI)) { in kvm_arch_process_async_events()
5634 cs->halted = 0; in kvm_arch_process_async_events()
5636 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { in kvm_arch_process_async_events()
5640 if (cs->interrupt_request & CPU_INTERRUPT_TPR) { in kvm_arch_process_async_events()
5641 cs->interrupt_request &= ~CPU_INTERRUPT_TPR; in kvm_arch_process_async_events()
5643 apic_handle_tpr_access_report(cpu->apic_state, env->eip, in kvm_arch_process_async_events()
5644 env->tpr_access_type); in kvm_arch_process_async_events()
5647 return cs->halted; in kvm_arch_process_async_events()
5653 CPUX86State *env = &cpu->env; in kvm_handle_halt()
5655 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) && in kvm_handle_halt()
5656 (env->eflags & IF_MASK)) && in kvm_handle_halt()
5657 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) { in kvm_handle_halt()
5658 cs->halted = 1; in kvm_handle_halt()
5668 struct kvm_run *run = cs->kvm_run; in kvm_handle_tpr_access()
5670 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip, in kvm_handle_tpr_access()
5671 run->tpr_access.is_write ? TPR_ACCESS_WRITE in kvm_handle_tpr_access()
5680 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || in kvm_arch_insert_sw_breakpoint()
5681 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) { in kvm_arch_insert_sw_breakpoint()
5682 return -EINVAL; in kvm_arch_insert_sw_breakpoint()
5691 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) { in kvm_arch_remove_sw_breakpoint()
5692 return -EINVAL; in kvm_arch_remove_sw_breakpoint()
5697 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) { in kvm_arch_remove_sw_breakpoint()
5698 return -EINVAL; in kvm_arch_remove_sw_breakpoint()
5717 (hw_breakpoint[n].len == len || len == -1)) { in find_hw_breakpoint()
5721 return -1; in find_hw_breakpoint()
5738 if (addr & (len - 1)) { in kvm_arch_insert_hw_breakpoint()
5739 return -EINVAL; in kvm_arch_insert_hw_breakpoint()
5743 return -EINVAL; in kvm_arch_insert_hw_breakpoint()
5747 return -ENOSYS; in kvm_arch_insert_hw_breakpoint()
5751 return -ENOBUFS; in kvm_arch_insert_hw_breakpoint()
5754 return -EEXIST; in kvm_arch_insert_hw_breakpoint()
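    /* A worked example of the alignment test above: a 4-byte watchpoint
     * at 0x1002 is rejected since 0x1002 & (4 - 1) == 0x2, while 0x1004
     * passes (0x1004 & 0x3 == 0). The remaining returns distinguish an
     * unsupported type (-ENOSYS), all four debug registers in use
     * (-ENOBUFS), and a duplicate breakpoint (-EEXIST). */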
5770 return -ENOENT; in kvm_arch_remove_hw_breakpoint()
5772 nb_hw_breakpoint--; in kvm_arch_remove_hw_breakpoint()
5789 CPUX86State *env = &cpu->env; in kvm_handle_debug()
5793 if (arch_info->exception == EXCP01_DB) { in kvm_handle_debug()
5794 if (arch_info->dr6 & DR6_BS) { in kvm_handle_debug()
5795 if (cs->singlestep_enabled) { in kvm_handle_debug()
5800 if (arch_info->dr6 & (1 << n)) { in kvm_handle_debug()
5801 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) { in kvm_handle_debug()
5807 cs->watchpoint_hit = &hw_watchpoint; in kvm_handle_debug()
5813 cs->watchpoint_hit = &hw_watchpoint; in kvm_handle_debug()
5821 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) { in kvm_handle_debug()
5826 assert(env->exception_nr == -1); in kvm_handle_debug()
5829 kvm_queue_exception(env, arch_info->exception, in kvm_handle_debug()
5830 arch_info->exception == EXCP01_DB, in kvm_handle_debug()
5831 arch_info->dr6); in kvm_handle_debug()
5832 env->has_error_code = 0; in kvm_handle_debug()
5851 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; in kvm_arch_update_guest_debug()
5854 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; in kvm_arch_update_guest_debug()
5855 dbg->arch.debugreg[7] = 0x0600; in kvm_arch_update_guest_debug()
5857 dbg->arch.debugreg[n] = hw_breakpoint[n].addr; in kvm_arch_update_guest_debug()
5858 dbg->arch.debugreg[7] |= (2 << (n * 2)) | in kvm_arch_update_guest_debug()
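A reference restatement of the DR7 encoding the lines above rely on
(standard x86 debug-register layout, not specific to this file):

    /*
     * DR7 fields per breakpoint n:
     *   bit 2n+1           Gn, global enable          -> (2 << (n * 2))
     *   bits 16+4n..17+4n  R/Wn: 0=exec, 1=write, 3=read/write
     *   bits 18+4n..19+4n  LENn: 0=1 byte, 1=2 bytes, 3=4 bytes
     *                      (2=8 bytes where supported)
     * The 0x0600 seed sets GE (bit 9) plus the always-one reserved
     * bit 10.
     */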
5876 if (handler->msr) { in kvm_install_msr_filters()
5882 .base = handler->msr, in kvm_install_msr_filters()
5886 if (handler->rdmsr) { in kvm_install_msr_filters()
5887 range->flags |= KVM_MSR_FILTER_READ; in kvm_install_msr_filters()
5890 if (handler->wrmsr) { in kvm_install_msr_filters()
5891 range->flags |= KVM_MSR_FILTER_WRITE; in kvm_install_msr_filters()
5922 return -EINVAL; in kvm_filter_msr()
5932 if (run->msr.index == handler->msr) { in kvm_handle_rdmsr()
5933 if (handler->rdmsr) { in kvm_handle_rdmsr()
5934 r = handler->rdmsr(cpu, handler->msr, in kvm_handle_rdmsr()
5935 (uint64_t *)&run->msr.data); in kvm_handle_rdmsr()
5936 run->msr.error = r ? 0 : 1; in kvm_handle_rdmsr()
5952 if (run->msr.index == handler->msr) { in kvm_handle_wrmsr()
5953 if (handler->wrmsr) { in kvm_handle_wrmsr()
5954 r = handler->wrmsr(cpu, handler->msr, run->msr.data); in kvm_handle_wrmsr()
5955 run->msr.error = r ? 0 : 1; in kvm_handle_wrmsr()
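The dispatch above reports success by clearing run->msr.error when a
handler returns true. A hedged sketch of such a handler pair; the names
are hypothetical and the signatures are inferred from the calls above:

    /* Hypothetical read/write handlers for one filtered MSR. */
    static bool dummy_rdmsr(X86CPU *cpu, uint32_t msr, uint64_t *val)
    {
        *val = 0;        /* read as zero */
        return true;     /* success -> run->msr.error = 0 */
    }

    static bool dummy_wrmsr(X86CPU *cpu, uint32_t msr, uint64_t val)
    {
        return true;     /* silently accept the write */
    }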
5981 error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret)); in __kvm_enable_sgx_provisioning()
6003 * to service guest-initiated memory attribute update requests so that
6006 * is only applicable to guest_memfd-backed guests (e.g. SNP/TDX).
6008 * Other use-cases for KVM_HC_MAP_GPA_RANGE, such as for SEV live
6011 * For the guest_memfd use-case, these exits will generally be synthesized
6012 * by KVM based on platform-specific hypercalls, like GHCB requests in the
6013 * case of SEV-SNP, and not issued directly within the guest through the
6017 * SEV live migration use-case would be useful for guest-memfd backed guests,
6019 * means, these 2 use-cases should be treated as being mutually-exclusive.
6028 return -EINVAL; in kvm_handle_hc_map_gpa_range()
6030 gpa = run->hypercall.args[0]; in kvm_handle_hc_map_gpa_range()
6031 size = run->hypercall.args[1] * TARGET_PAGE_SIZE; in kvm_handle_hc_map_gpa_range()
6032 attributes = run->hypercall.args[2]; in kvm_handle_hc_map_gpa_range()
6034 trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags); in kvm_handle_hc_map_gpa_range()
6042 * Opportunistically pre-fault memory in. Failures are ignored so that any in kvm_handle_hc_map_gpa_range()
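Putting the fragments above together, a sketch of how such an exit
decodes its arguments; the encryption-status bit is an assumption based
on the KVM_MAP_GPA_RANGE_ENCRYPTED flag (bit 4) in the KVM
para-virtualization headers, not something restated from this file:

    /* Sketch only: unpack a KVM_EXIT_HYPERCALL for KVM_HC_MAP_GPA_RANGE. */
    static void decode_map_gpa_range(struct kvm_run *run)
    {
        uint64_t gpa   = run->hypercall.args[0];  /* start GPA */
        uint64_t size  = run->hypercall.args[1] * TARGET_PAGE_SIZE;
        uint64_t attrs = run->hypercall.args[2];
        bool to_private = attrs & (1ULL << 4);    /* assumed: encrypted bit */

        (void)gpa; (void)size; (void)to_private;
    }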
6060 if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE) in kvm_handle_hypercall()
6063 return -EINVAL; in kvm_handle_hypercall()
6076 switch (run->exit_reason) { in kvm_arch_handle_exit()
6092 code = run->fail_entry.hardware_entry_failure_reason; in kvm_arch_handle_exit()
6106 ret = -1; in kvm_arch_handle_exit()
6110 run->ex.exception, run->ex.error_code); in kvm_arch_handle_exit()
6111 ret = -1; in kvm_arch_handle_exit()
6116 ret = kvm_handle_debug(cpu, &run->debug.arch); in kvm_arch_handle_exit()
6120 ret = kvm_hv_handle_exit(cpu, &run->hyperv); in kvm_arch_handle_exit()
6123 ioapic_eoi_broadcast(run->eoi.vector); in kvm_arch_handle_exit()
6131 ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID); in kvm_arch_handle_exit()
6134 state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) { in kvm_arch_handle_exit()
6137 ret = -1; in kvm_arch_handle_exit()
6147 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); in kvm_arch_handle_exit()
6152 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); in kvm_arch_handle_exit()
6157 ret = kvm_xen_handle_exit(cpu, &run->xen); in kvm_arch_handle_exit()
6164 switch (run->system_event.type) { in kvm_arch_handle_exit()
6169 ret = -1; in kvm_arch_handle_exit()
6174 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); in kvm_arch_handle_exit()
6175 ret = -1; in kvm_arch_handle_exit()
6185 CPUX86State *env = &cpu->env; in kvm_arch_stop_on_emulation_error()
6188 return !(env->cr[0] & CR0_PE_MASK) || in kvm_arch_stop_on_emulation_error()
6189 ((env->segs[R_CS].selector & 3) != 3); in kvm_arch_stop_on_emulation_error()
6194 /* We know at this point that we're using the in-kernel in kvm_arch_init_irq_routing()
6224 strerror(-ret)); in kvm_arch_irqchip_create()
6244 env = &X86_CPU(first_cpu)->env; in kvm_swizzle_msi_ext_dest_id()
6245 if (!(env->features[FEAT_KVM] & CPUID_KVM_MSI_EXT_DEST_ID)) { in kvm_swizzle_msi_ext_dest_id()
6272 if (class->int_remap) { in kvm_arch_fixup_msi_route()
6276 src.address = route->u.msi.address_hi; in kvm_arch_fixup_msi_route()
6278 src.address |= route->u.msi.address_lo; in kvm_arch_fixup_msi_route()
6279 src.data = route->u.msi.data; in kvm_arch_fixup_msi_route()
6281 ret = class->int_remap(iommu, &src, &dst, dev ? \ in kvm_arch_fixup_msi_route()
6285 trace_kvm_x86_fixup_msi_error(route->gsi); in kvm_arch_fixup_msi_route()
6291 * extended destination ID in the low bits 11-5. */ in kvm_arch_fixup_msi_route()
6294 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT; in kvm_arch_fixup_msi_route()
6295 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK; in kvm_arch_fixup_msi_route()
6296 route->u.msi.data = dst.data; in kvm_arch_fixup_msi_route()
6316 route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT; in kvm_arch_fixup_msi_route()
6317 route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK; in kvm_arch_fixup_msi_route()
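    /* Worked example of the split described above (bit positions per the
     * KVM MSI extended-destination-ID format; treat them as an
     * assumption here): for APIC ID 0x1234 without interrupt remapping,
     *   address bits 19:12 = 0x34 (destination bits 7:0, as usual)
     *   address bits 11:5  = 0x12 (destination bits 14:8 -> 0x12 << 5)
     * giving 15 usable destination bits, i.e. up to 32768 CPUs. */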
6337 int cnt = 0, vector; in kvm_update_msi_routes_all() local
6344 cnt++; in kvm_update_msi_routes_all()
6345 vector = entry->vector; in kvm_update_msi_routes_all()
6346 dev = entry->dev; in kvm_update_msi_routes_all()
6358 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev); in kvm_update_msi_routes_all()
6361 trace_kvm_x86_update_msi_routes(cnt); in kvm_update_msi_routes_all()
6378 entry->dev = dev; in kvm_arch_add_msi_route_post()
6379 entry->vector = vector; in kvm_arch_add_msi_route_post()
6380 entry->virq = route->gsi; in kvm_arch_add_msi_route_post()
6383 trace_kvm_x86_add_msi_route(route->gsi); in kvm_arch_add_msi_route_post()
6403 if (entry->virq == virq) { in kvm_arch_release_virq_post()
6462 return s->notify_vmexit; in kvm_arch_get_notify_vmexit()
6469 if (s->fd != -1) { in kvm_arch_set_notify_vmexit()
6474 s->notify_vmexit = value; in kvm_arch_set_notify_vmexit()
6478 const char *name, void *opaque, in kvm_arch_get_notify_window() argument
6482 uint32_t value = s->notify_window; in kvm_arch_get_notify_window()
6484 visit_type_uint32(v, name, &value, errp); in kvm_arch_get_notify_window()
6488 const char *name, void *opaque, in kvm_arch_set_notify_window() argument
6494 if (s->fd != -1) { in kvm_arch_set_notify_window()
6499 if (!visit_type_uint32(v, name, &value, errp)) { in kvm_arch_set_notify_window()
6503 s->notify_window = value; in kvm_arch_set_notify_window()
6507 const char *name, void *opaque, in kvm_arch_get_xen_version() argument
6511 uint32_t value = s->xen_version; in kvm_arch_get_xen_version()
6513 visit_type_uint32(v, name, &value, errp); in kvm_arch_get_xen_version()
6517 const char *name, void *opaque, in kvm_arch_set_xen_version() argument
6524 visit_type_uint32(v, name, &value, &error); in kvm_arch_set_xen_version()
6530 s->xen_version = value; in kvm_arch_set_xen_version()
6537 const char *name, void *opaque, in kvm_arch_get_xen_gnttab_max_frames() argument
6541 uint16_t value = s->xen_gnttab_max_frames; in kvm_arch_get_xen_gnttab_max_frames()
6543 visit_type_uint16(v, name, &value, errp); in kvm_arch_get_xen_gnttab_max_frames()
6547 const char *name, void *opaque, in kvm_arch_set_xen_gnttab_max_frames() argument
6554 visit_type_uint16(v, name, &value, &error); in kvm_arch_set_xen_gnttab_max_frames()
6560 s->xen_gnttab_max_frames = value; in kvm_arch_set_xen_gnttab_max_frames()
6564 const char *name, void *opaque, in kvm_arch_get_xen_evtchn_max_pirq() argument
6568 uint16_t value = s->xen_evtchn_max_pirq; in kvm_arch_get_xen_evtchn_max_pirq()
6570 visit_type_uint16(v, name, &value, errp); in kvm_arch_get_xen_evtchn_max_pirq()
6574 const char *name, void *opaque, in kvm_arch_set_xen_evtchn_max_pirq() argument
6581 visit_type_uint16(v, name, &value, &error); in kvm_arch_set_xen_evtchn_max_pirq()
6587 s->xen_evtchn_max_pirq = value; in kvm_arch_set_xen_evtchn_max_pirq()
6592 object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption", in kvm_arch_accel_class_init()
6596 object_class_property_set_description(oc, "notify-vmexit", in kvm_arch_accel_class_init()
6599 object_class_property_add(oc, "notify-window", "uint32", in kvm_arch_accel_class_init()
6603 object_class_property_set_description(oc, "notify-window", in kvm_arch_accel_class_init()
6607 object_class_property_add(oc, "xen-version", "uint32", in kvm_arch_accel_class_init()
6611 object_class_property_set_description(oc, "xen-version", in kvm_arch_accel_class_init()
6616 object_class_property_add(oc, "xen-gnttab-max-frames", "uint16", in kvm_arch_accel_class_init()
6620 object_class_property_set_description(oc, "xen-gnttab-max-frames", in kvm_arch_accel_class_init()
6623 object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16", in kvm_arch_accel_class_init()
6627 object_class_property_set_description(oc, "xen-evtchn-max-pirq", in kvm_arch_accel_class_init()
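For orientation, these class properties surface as -accel suboptions on
the command line; a usage sketch (option names match the registrations
above, while the example values are only illustrative):

    qemu-system-x86_64 -accel kvm,notify-vmexit=run,notify-window=0 ...
    qemu-system-x86_64 -accel kvm,xen-version=0x40011,kernel-irqchip=split ...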