24 #include "cpu-qom.h"
25 #include "kvm/hyperv-proto.h"
26 #include "exec/cpu-common.h"
27 #include "exec/cpu-defs.h"
28 #include "exec/cpu-interrupt.h"
31 #include "qapi/qapi-types-common.h"
32 #include "qemu/cpu-float.h"
34 #include "standard-headers/asm-x86/kvm_para.h"
137 /* hidden flags - used internally by qemu to represent additional cpu
169 #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
463 #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
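A worked example (not from the header; assuming the usual MTRR layout where the IA32_MTRR_PHYSBASE0/PHYSMASK0 pair starts at MSR 0x200): MSR 0x206 is the fourth physBase register, so MSR_MTRRphysIndex(0x206) = ((0x206 & ~1) - 0x200) / 2 = 3.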
846 /* Supervisor-mode Execution Prevention */
852 /* Invalidate Process-Context Identifier */
860 /* AVX-512 Foundation */
862 /* AVX-512 Doubleword & Quadword Instruction */
870 /* AVX-512 Integer Fused Multiply Add */
878 /* AVX-512 Prefetch */
880 /* AVX-512 Exponential and Reciprocal */
882 /* AVX-512 Conflict Detection */
886 /* AVX-512 Byte and Word Instructions */
888 /* AVX-512 Vector Length Extensions */
891 /* AVX-512 Vector Byte Manipulation Instruction */
893 /* User-Mode Instruction Prevention */
895 /* Protection Keys for User-mode Pages */
901 /* Additional AVX-512 Vector Byte Manipulation Instruction */
907 /* Carry-Less Multiplication Quadword */
915 /* 5-level Page Tables */
931 /* Protection Keys for Supervisor-mode Pages */
954 /* AMX tile (two-dimensional register) */
968 /* Speculative Store Bypass Disable */
995 /* Non-Serializing Write to Model Specific Register (WRMSRNS) */
1013 /* AVX-VNNI-INT16 Instructions */
1040 /* AVX10 128-bit vector support is present */
1042 /* AVX10 256-bit vector support is present */
1044 /* AVX10 512-bit vector support is present */
1063 /* KVM paravirtualized end-of-interrupt signaling */
1067 /* KVM host-side polling on HLT control from the guest */
1091 /* Speculative Store Bypass Disable */
1093 /* Paravirtualized Speculative Store Bypass Disable MSR */
1095 /* Predictive Store Forwarding Disable */
1100 /* WRMSR to FS_BASE, GS_BASE, or KERNEL_GS_BASE is non-serializing */
1118 /* Not vulnerable to SRSO at the user-kernel boundary */
1145 /* "ace" on-CPU crypto (xcrypt) */
1147 /* "ace_en" on-CPU crypto enabled */
1187 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
1188 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
1189 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
1190 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
1191 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
1192 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
1194 ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN1_1 && \
1195 (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN1_2 && \
1196 (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN1_3)
1198 ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN2_1 && \
1199 (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN2_2 && \
1200 (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN2_3)
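A minimal, hypothetical usage sketch (the helper below is not part of the header) showing how these vendor macros test the three CPUID vendor-string dwords together:

    /* Hypothetical helper, illustrative only. */
    static const char *cpu_vendor_name(const CPUX86State *env)
    {
        if (IS_INTEL_CPU(env)) {
            return "GenuineIntel";
        } else if (IS_AMD_CPU(env)) {
            return "AuthenticAMD";
        }
        return "unknown";
    }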
1357 /* Supported Hyper-V Enlightenments */
1407 /* i386-specific interrupt pending bits. */
1499 * to zero-extend CC_DST anyway.
1577 #define ZMM_B(n) _b_ZMMReg[63 - (n)]
1578 #define ZMM_W(n) _w_ZMMReg[31 - (n)]
1579 #define ZMM_L(n) _l_ZMMReg[15 - (n)]
1580 #define ZMM_H(n) _h_ZMMReg[31 - (n)]
1581 #define ZMM_S(n) _s_ZMMReg[15 - (n)]
1582 #define ZMM_Q(n) _q_ZMMReg[7 - (n)]
1583 #define ZMM_D(n) _d_ZMMReg[7 - (n)]
1584 #define ZMM_X(n) _x_ZMMReg[3 - (n)]
1585 #define ZMM_Y(n) _y_ZMMReg[1 - (n)]
1587 #define XMM_Q(n) _q_XMMReg[1 - (n)]
1589 #define YMM_Q(n) _q_YMMReg[3 - (n)]
1590 #define YMM_X(n) _x_YMMReg[1 - (n)]
1592 #define MMX_B(n) _b_MMXReg[7 - (n)]
1593 #define MMX_W(n) _w_MMXReg[3 - (n)]
1594 #define MMX_L(n) _l_MMXReg[1 - (n)]
1595 #define MMX_S(n) _s_MMXReg[1 - (n)]
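These mirrored indices appear to be the big-endian-host variants of the vector element accessors: element n is reflected across the array so that index 0 still names the architecturally lowest lane. A self-contained sketch (using a cut-down two-quadword union, not the real ZMMReg) of why the aliasing works out:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for ZMMReg, illustrative only. */
    typedef union {
        uint32_t _l[4];
        uint64_t _q[2];
    } DemoReg;

    /* Mirrored indices, as in the accessors above, for a big-endian host. */
    #define DEMO_L(r, n) ((r)._l[3 - (n)])
    #define DEMO_Q(r, n) ((r)._q[1 - (n)])

    int main(void)
    {
        DemoReg r;
        DEMO_Q(r, 0) = 0x1122334455667788ULL;
        /* On a big-endian host this prints 55667788: the 32-bit element 0
         * aliases the low half of quadword 0, exactly as the unmirrored
         * [n] indices do on a little-endian host. */
        printf("%08x\n", (unsigned)DEMO_L(r, 0));
        return 0;
    }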
1639 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
1791 * Note: representation of fully-associative caches is not implemented
1800 * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
1805 /* Self-initializing cache */
1809 * non-originating threads sharing this cache.
1853 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
1885 /* KVM-only so far */
1977 /* Partition-wide HV MSRs, will be updated only on the first vcpu */
1988 /* Per-VCPU HV MSRs */
2009 /* Per-VCPU XFD MSRs */
2013 /* Per-VCPU Arch LBR MSRs */
2067 /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
2081 /* Cache information for CPUID. When legacy-cache=on, the cache data
2259 * If true, present L1 cache as per-thread, not per-core.
2280 /* Enable auto level-increase for all CPUID leaves */
2289 /* Enable auto level-increase for Intel Processor Trace leaf */
2301 /* Forcefully disable KVM PV features not exposed in guest CPUIDs */
2309 * identical to host physical address bits. With NPT or EPT 4-level
2323 int32_t node_id; /* NUMA node this CPU belongs to */
2341 * @ordering: Ordering on the "-cpu help" CPU model list.
2354 * Should eventually be replaced by subclass-specific property defaults.
2365 * If unavailable, cpu_def->model_id is used.
2405 /* MS-DOS compatibility mode FPU exception support */
2426 sc = &env->segs[seg_reg]; in cpu_x86_load_seg_cache()
2427 sc->selector = selector; in cpu_x86_load_seg_cache()
2428 sc->base = base; in cpu_x86_load_seg_cache()
2429 sc->limit = limit; in cpu_x86_load_seg_cache()
2430 sc->flags = flags; in cpu_x86_load_seg_cache()
2436 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { in cpu_x86_load_seg_cache()
2438 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; in cpu_x86_load_seg_cache()
2439 env->hflags &= ~(HF_ADDSEG_MASK); in cpu_x86_load_seg_cache()
2444 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) in cpu_x86_load_seg_cache()
2445 >> (DESC_B_SHIFT - HF_CS32_SHIFT); in cpu_x86_load_seg_cache()
2446 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | in cpu_x86_load_seg_cache()
2455 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; in cpu_x86_load_seg_cache()
2459 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) in cpu_x86_load_seg_cache()
2460 >> (DESC_B_SHIFT - HF_SS32_SHIFT); in cpu_x86_load_seg_cache()
2461 if (env->hflags & HF_CS64_MASK) { in cpu_x86_load_seg_cache()
2463 } else if (!(env->cr[0] & CR0_PE_MASK) || in cpu_x86_load_seg_cache()
2464 (env->eflags & VM_MASK) || in cpu_x86_load_seg_cache()
2465 !(env->hflags & HF_CS32_MASK)) { in cpu_x86_load_seg_cache()
2470 translate-i386.c. */ in cpu_x86_load_seg_cache()
2473 new_hflags |= ((env->segs[R_DS].base | in cpu_x86_load_seg_cache()
2474 env->segs[R_ES].base | in cpu_x86_load_seg_cache()
2475 env->segs[R_SS].base) != 0) << in cpu_x86_load_seg_cache()
2478 env->hflags = (env->hflags & in cpu_x86_load_seg_cache()
2487 CPUX86State *env = &cpu->env; in cpu_x86_load_seg_cache_sipi()
2489 env->eip = 0; in cpu_x86_load_seg_cache_sipi()
2492 env->segs[R_CS].limit, in cpu_x86_load_seg_cache_sipi()
2493 env->segs[R_CS].flags); in cpu_x86_load_seg_cache_sipi()
2494 cs->halted = 0; in cpu_x86_load_seg_cache_sipi()
2506 /* cpu-exec.c */
2548 return cpu->enable_cpuid_0x1f || in x86_has_cpuid_0x1f()
2549 x86_has_extended_topo(cpu->env.avail_cpu_topo); in x86_has_cpuid_0x1f()
2634 #define CC_DST (env->cc_dst)
2635 #define CC_SRC (env->cc_src)
2636 #define CC_SRC2 (env->cc_src2)
2637 #define CC_OP (env->cc_op)
2658 uint32_t eflags = env->eflags; in cpu_compute_eflags()
2660 eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK); in cpu_compute_eflags()
2667 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 }); in cpu_get_mem_attrs()
2672 if (env->hflags & HF_SMM_MASK) { in x86_get_a20_mask()
2673 return -1; in x86_get_a20_mask()
2675 return env->a20_mask; in x86_get_a20_mask()
2681 return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; in cpu_has_vmx()
2686 return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM; in cpu_has_svm()
2708 ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); in cpu_vmx_maybe_enabled()
2716 /* Set all non-runtime-variable float_status fields to x86 handling */
2724 env->mxcsr = mxcsr; in cpu_set_mxcsr()
2732 env->fpuc = fpuc; in cpu_set_fpuc()
2743 { /* no-op */ } in cpu_svm_check_intercept_param()
2761 #define CPU_VERSION_LATEST -1
2767 #define CPU_VERSION_AUTO -2
2787 /* cpu-dump.c */
2802 return !!(cpu->hyperv_features & BIT(feat)); in hyperv_feat_enabled()
2808 if (!env->features[FEAT_XSAVE]) { in cr4_reserved_bits()
2811 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) { in cr4_reserved_bits()
2814 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { in cr4_reserved_bits()
2817 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) { in cr4_reserved_bits()
2820 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { in cr4_reserved_bits()
2823 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) { in cr4_reserved_bits()
2826 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) { in cr4_reserved_bits()
2829 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) { in cr4_reserved_bits()
2832 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) { in cr4_reserved_bits()
2835 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED)) { in cr4_reserved_bits()
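Each check above presumably ORs another bit into the reserved-bit mask when the matching CPUID feature is absent. A hypothetical caller (not shown in this listing) would use the result to validate a CR4 write:

    /* Hypothetical, illustrative only: reject a CR4 value that sets any
     * bit the guest's CPUID does not allow. */
    static bool cr4_write_is_valid(CPUX86State *env, uint64_t new_cr4)
    {
        return (new_cr4 & cr4_reserved_bits(env)) == 0;
    }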
2846 int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT; in ctl_has_irq()
2847 tpr = env->int_ctl & V_TPR_MASK; in ctl_has_irq()
2849 if (env->int_ctl & V_IGN_TPR_MASK) { in ctl_has_irq()
2850 return (env->int_ctl & V_IRQ_MASK); in ctl_has_irq()
2853 return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr); in ctl_has_irq()
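For instance, taking the logic above at face value: with V_IRQ set, int_prio = 5 and tpr = 3 the virtual interrupt is deliverable (5 >= 3); raising tpr to 6 holds it pending unless V_IGN_TPR is also set.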
2859 # define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
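As a sanity check on the constant: UINT64_C(-10) is 0xfffffffffffffff6, and shifting it left by 20 gives 0xffffffffff600000, the fixed x86-64 vsyscall page address.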
2876 * SUB_COUT_VEC(x, y) = NOT majority(x, NOT y, (x - y) ^ x ^ NOT y)
2877 * = majority(NOT x, y, (x - y) ^ x ^ y)
2881 * bit in (x-y)^x^y likewise does not matter. Hence, x^y can be replaced
2882 * with 0 in (x-y)^x^y, resulting in majority(NOT x, y, x-y)
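A standalone brute-force check of the simplified identity over all 8-bit operands (not part of the header; majority() here is the bitwise majority of three values):

    #include <assert.h>
    #include <stdint.h>

    /* Bitwise majority-of-three. */
    static uint8_t majority(uint8_t a, uint8_t b, uint8_t c)
    {
        return (a & b) | (a & c) | (b & c);
    }

    /* Reference borrow-out vector for x - y, computed bit by bit. */
    static uint8_t sub_borrow_out(uint8_t x, uint8_t y)
    {
        uint8_t out = 0;
        unsigned borrow = 0;
        for (int i = 0; i < 8; i++) {
            unsigned xb = (x >> i) & 1, yb = (y >> i) & 1;
            borrow = ((xb ^ 1) & yb) | ((xb ^ 1) & borrow) | (yb & borrow);
            out |= borrow << i;
        }
        return out;
    }

    int main(void)
    {
        for (unsigned x = 0; x < 256; x++) {
            for (unsigned y = 0; y < 256; y++) {
                uint8_t diff = (uint8_t)(x - y);
                /* SUB_COUT_VEC(x, y) == majority(NOT x, y, x - y) */
                assert(majority(~x & 0xff, y, diff) == sub_borrow_out(x, y));
            }
        }
        return 0;
    }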