1 /* 2 * i386 virtual CPU header 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #ifndef I386_CPU_H 21 #define I386_CPU_H 22 23 #include "system/tcg.h" 24 #include "cpu-qom.h" 25 #include "kvm/hyperv-proto.h" 26 #include "exec/cpu-defs.h" 27 #include "exec/memop.h" 28 #include "hw/i386/topology.h" 29 #include "qapi/qapi-types-common.h" 30 #include "qemu/cpu-float.h" 31 #include "qemu/timer.h" 32 #include "standard-headers/asm-x86/kvm_para.h" 33 34 #define XEN_NR_VIRQS 24 35 36 #define KVM_HAVE_MCE_INJECTION 1 37 38 /* support for self modifying code even if the modified instruction is 39 close to the modifying instruction */ 40 #define TARGET_HAS_PRECISE_SMC 41 42 #ifdef TARGET_X86_64 43 #define I386_ELF_MACHINE EM_X86_64 44 #define ELF_MACHINE_UNAME "x86_64" 45 #else 46 #define I386_ELF_MACHINE EM_386 47 #define ELF_MACHINE_UNAME "i686" 48 #endif 49 50 enum { 51 R_EAX = 0, 52 R_ECX = 1, 53 R_EDX = 2, 54 R_EBX = 3, 55 R_ESP = 4, 56 R_EBP = 5, 57 R_ESI = 6, 58 R_EDI = 7, 59 R_R8 = 8, 60 R_R9 = 9, 61 R_R10 = 10, 62 R_R11 = 11, 63 R_R12 = 12, 64 R_R13 = 13, 65 R_R14 = 14, 66 R_R15 = 15, 67 68 R_AL = 0, 69 R_CL = 1, 70 R_DL = 2, 71 R_BL = 3, 72 R_AH = 4, 73 R_CH = 5, 74 R_DH = 6, 75 R_BH = 7, 76 }; 77 78 typedef enum X86Seg { 79 R_ES = 0, 80 R_CS = 1, 81 R_SS = 2, 82 R_DS = 3, 83 R_FS = 4, 84 R_GS = 5, 85 R_LDTR = 6, 86 R_TR = 7, 87 } X86Seg; 88 89 /* segment descriptor fields */ 90 #define DESC_G_SHIFT 23 91 #define DESC_G_MASK (1 << DESC_G_SHIFT) 92 #define DESC_B_SHIFT 22 93 #define DESC_B_MASK (1 << DESC_B_SHIFT) 94 #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ 95 #define DESC_L_MASK (1 << DESC_L_SHIFT) 96 #define DESC_AVL_SHIFT 20 97 #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT) 98 #define DESC_P_SHIFT 15 99 #define DESC_P_MASK (1 << DESC_P_SHIFT) 100 #define DESC_DPL_SHIFT 13 101 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) 102 #define DESC_S_SHIFT 12 103 #define DESC_S_MASK (1 << DESC_S_SHIFT) 104 #define DESC_TYPE_SHIFT 8 105 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) 106 #define DESC_A_MASK (1 << 8) 107 108 #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ 109 #define DESC_C_MASK (1 << 10) /* code: conforming */ 110 #define DESC_R_MASK (1 << 9) /* code: readable */ 111 112 #define DESC_E_MASK (1 << 10) /* data: expansion direction */ 113 #define DESC_W_MASK (1 << 9) /* data: writable */ 114 115 #define DESC_TSS_BUSY_MASK (1 << 9) 116 117 /* eflags masks */ 118 #define CC_C 0x0001 119 #define CC_P 0x0004 120 #define CC_A 0x0010 121 #define CC_Z 0x0040 122 #define CC_S 0x0080 123 #define CC_O 0x0800 124 125 #define TF_SHIFT 8 126 #define IOPL_SHIFT 12 127 #define VM_SHIFT 17 128 129 #define TF_MASK 0x00000100 130 #define IF_MASK 0x00000200 131 #define DF_MASK 0x00000400 132 #define IOPL_MASK 0x00003000 133 #define NT_MASK 0x00004000 134 #define RF_MASK 0x00010000 135 #define VM_MASK 0x00020000 136 #define AC_MASK 0x00040000 137 #define VIF_MASK 0x00080000 138 #define VIP_MASK 0x00100000 139 #define ID_MASK 0x00200000 140 141 /* hidden flags - used internally by qemu to represent additional cpu 142 states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We 143 avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit 144 positions to ease oring with eflags. */ 145 /* current cpl */ 146 #define HF_CPL_SHIFT 0 147 /* true if hardware interrupts must be disabled for next instruction */ 148 #define HF_INHIBIT_IRQ_SHIFT 3 149 /* 16 or 32 segments */ 150 #define HF_CS32_SHIFT 4 151 #define HF_SS32_SHIFT 5 152 /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ 153 #define HF_ADDSEG_SHIFT 6 154 /* copy of CR0.PE (protected mode) */ 155 #define HF_PE_SHIFT 7 156 #define HF_TF_SHIFT 8 /* must be same as eflags */ 157 #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ 158 #define HF_EM_SHIFT 10 159 #define HF_TS_SHIFT 11 160 #define HF_IOPL_SHIFT 12 /* must be same as eflags */ 161 #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ 162 #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ 163 #define HF_RF_SHIFT 16 /* must be same as eflags */ 164 #define HF_VM_SHIFT 17 /* must be same as eflags */ 165 #define HF_AC_SHIFT 18 /* must be same as eflags */ 166 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ 167 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ 168 #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ 169 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ 170 #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ 171 #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ 172 #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ 173 #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ 174 #define HF_UMIP_SHIFT 27 /* CR4.UMIP */ 175 #define HF_AVX_EN_SHIFT 28 /* AVX Enabled (CR4+XCR0) */ 176 177 #define HF_CPL_MASK (3 << HF_CPL_SHIFT) 178 #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) 179 #define HF_CS32_MASK (1 << HF_CS32_SHIFT) 180 #define HF_SS32_MASK (1 << HF_SS32_SHIFT) 181 #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) 182 #define HF_PE_MASK (1 << HF_PE_SHIFT) 183 #define HF_TF_MASK (1 << HF_TF_SHIFT) 184 #define HF_MP_MASK (1 << HF_MP_SHIFT) 185 #define HF_EM_MASK (1 << HF_EM_SHIFT) 186 #define HF_TS_MASK (1 << HF_TS_SHIFT) 187 #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) 188 #define HF_LMA_MASK (1 << HF_LMA_SHIFT) 189 #define HF_CS64_MASK (1 << HF_CS64_SHIFT) 190 #define HF_RF_MASK (1 << HF_RF_SHIFT) 191 #define HF_VM_MASK (1 << HF_VM_SHIFT) 192 #define HF_AC_MASK (1 << HF_AC_SHIFT) 193 #define HF_SMM_MASK (1 << HF_SMM_SHIFT) 194 #define HF_SVME_MASK (1 << HF_SVME_SHIFT) 195 #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) 196 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) 197 #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) 198 #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) 199 #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) 200 #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) 201 #define HF_UMIP_MASK (1 << HF_UMIP_SHIFT) 202 #define HF_AVX_EN_MASK (1 << HF_AVX_EN_SHIFT) 203 204 /* hflags2 */ 205 206 #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ 207 #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ 208 #define HF2_NMI_SHIFT 2 /* CPU serving NMI */ 209 #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ 210 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */ 211 #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ 212 #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ 213 #define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */ 214 #define HF2_VGIF_SHIFT 8 /* Can take VIRQ*/ 215 216 #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) 217 #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) 218 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) 219 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) 220 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT) 221 #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) 222 #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) 223 #define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT) 224 #define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT) 225 226 #define CR0_PE_SHIFT 0 227 #define CR0_MP_SHIFT 1 228 229 #define CR0_PE_MASK (1U << 0) 230 #define CR0_MP_MASK (1U << 1) 231 #define CR0_EM_MASK (1U << 2) 232 #define CR0_TS_MASK (1U << 3) 233 #define CR0_ET_MASK (1U << 4) 234 #define CR0_NE_MASK (1U << 5) 235 #define CR0_WP_MASK (1U << 16) 236 #define CR0_AM_MASK (1U << 18) 237 #define CR0_NW_MASK (1U << 29) 238 #define CR0_CD_MASK (1U << 30) 239 #define CR0_PG_MASK (1U << 31) 240 241 #define CR4_VME_MASK (1U << 0) 242 #define CR4_PVI_MASK (1U << 1) 243 #define CR4_TSD_MASK (1U << 2) 244 #define CR4_DE_MASK (1U << 3) 245 #define CR4_PSE_MASK (1U << 4) 246 #define CR4_PAE_MASK (1U << 5) 247 #define CR4_MCE_MASK (1U << 6) 248 #define CR4_PGE_MASK (1U << 7) 249 #define CR4_PCE_MASK (1U << 8) 250 #define CR4_OSFXSR_SHIFT 9 251 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) 252 #define CR4_OSXMMEXCPT_MASK (1U << 10) 253 #define CR4_UMIP_MASK (1U << 11) 254 #define CR4_LA57_MASK (1U << 12) 255 #define CR4_VMXE_MASK (1U << 13) 256 #define CR4_SMXE_MASK (1U << 14) 257 #define CR4_FSGSBASE_MASK (1U << 16) 258 #define CR4_PCIDE_MASK (1U << 17) 259 #define CR4_OSXSAVE_MASK (1U << 18) 260 #define CR4_SMEP_MASK (1U << 20) 261 #define CR4_SMAP_MASK (1U << 21) 262 #define CR4_PKE_MASK (1U << 22) 263 #define CR4_PKS_MASK (1U << 24) 264 #define CR4_LAM_SUP_MASK (1U << 28) 265 266 #ifdef TARGET_X86_64 267 #define CR4_FRED_MASK (1ULL << 32) 268 #else 269 #define CR4_FRED_MASK 0 270 #endif 271 272 #define CR4_RESERVED_MASK \ 273 (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ 274 | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ 275 | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \ 276 | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \ 277 | CR4_LA57_MASK \ 278 | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \ 279 | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK \ 280 | CR4_LAM_SUP_MASK | CR4_FRED_MASK)) 281 282 #define DR6_BD (1 << 13) 283 #define DR6_BS (1 << 14) 284 #define DR6_BT (1 << 15) 285 #define DR6_FIXED_1 0xffff0ff0 286 287 #define DR7_GD (1 << 13) 288 #define DR7_TYPE_SHIFT 16 289 #define DR7_LEN_SHIFT 18 290 #define DR7_FIXED_1 0x00000400 291 #define DR7_GLOBAL_BP_MASK 0xaa 292 #define DR7_LOCAL_BP_MASK 0x55 293 #define DR7_MAX_BP 4 294 #define DR7_TYPE_BP_INST 0x0 295 #define DR7_TYPE_DATA_WR 0x1 296 #define DR7_TYPE_IO_RW 0x2 297 #define DR7_TYPE_DATA_RW 0x3 298 299 #define DR_RESERVED_MASK 0xffffffff00000000ULL 300 301 #define PG_PRESENT_BIT 0 302 #define PG_RW_BIT 1 303 #define PG_USER_BIT 2 304 #define PG_PWT_BIT 3 305 #define PG_PCD_BIT 4 306 #define PG_ACCESSED_BIT 5 307 #define PG_DIRTY_BIT 6 308 #define PG_PSE_BIT 7 309 #define PG_GLOBAL_BIT 8 310 #define PG_PSE_PAT_BIT 12 311 #define PG_PKRU_BIT 59 312 #define PG_NX_BIT 63 313 314 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) 315 #define PG_RW_MASK (1 << PG_RW_BIT) 316 #define PG_USER_MASK (1 << PG_USER_BIT) 317 #define PG_PWT_MASK (1 << PG_PWT_BIT) 318 #define PG_PCD_MASK (1 << PG_PCD_BIT) 319 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) 320 #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) 321 #define PG_PSE_MASK (1 << PG_PSE_BIT) 322 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) 323 #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) 324 #define PG_ADDRESS_MASK 0x000ffffffffff000LL 325 #define PG_HI_USER_MASK 0x7ff0000000000000LL 326 #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT) 327 #define PG_NX_MASK (1ULL << PG_NX_BIT) 328 329 #define PG_ERROR_W_BIT 1 330 331 #define PG_ERROR_P_MASK 0x01 332 #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) 333 #define PG_ERROR_U_MASK 0x04 334 #define PG_ERROR_RSVD_MASK 0x08 335 #define PG_ERROR_I_D_MASK 0x10 336 #define PG_ERROR_PK_MASK 0x20 337 338 #define PG_MODE_PAE (1 << 0) 339 #define PG_MODE_LMA (1 << 1) 340 #define PG_MODE_NXE (1 << 2) 341 #define PG_MODE_PSE (1 << 3) 342 #define PG_MODE_LA57 (1 << 4) 343 #define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15) 344 345 /* Bits of CR4 that do not affect the NPT page format. */ 346 #define PG_MODE_WP (1 << 16) 347 #define PG_MODE_PKE (1 << 17) 348 #define PG_MODE_PKS (1 << 18) 349 #define PG_MODE_SMEP (1 << 19) 350 #define PG_MODE_PG (1 << 20) 351 352 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ 353 #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ 354 #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ 355 356 #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) 357 #define MCE_BANKS_DEF 10 358 359 #define MCG_CAP_BANKS_MASK 0xff 360 361 #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ 362 #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ 363 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ 364 #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */ 365 366 #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */ 367 368 #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ 369 #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ 370 #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ 371 #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ 372 #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ 373 #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ 374 #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ 375 #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ 376 #define MCI_STATUS_AR (1ULL<<55) /* Action required */ 377 #define MCI_STATUS_DEFERRED (1ULL<<44) /* Deferred error */ 378 #define MCI_STATUS_POISON (1ULL<<43) /* Poisoned data consumed */ 379 380 /* MISC register defines */ 381 #define MCM_ADDR_SEGOFF 0 /* segment offset */ 382 #define MCM_ADDR_LINEAR 1 /* linear address */ 383 #define MCM_ADDR_PHYS 2 /* physical address */ 384 #define MCM_ADDR_MEM 3 /* memory address */ 385 #define MCM_ADDR_GENERIC 7 /* generic */ 386 387 #define MSR_IA32_TSC 0x10 388 #define MSR_IA32_APICBASE 0x1b 389 #define MSR_IA32_APICBASE_BSP (1<<8) 390 #define MSR_IA32_APICBASE_ENABLE (1<<11) 391 #define MSR_IA32_APICBASE_EXTD (1 << 10) 392 #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) 393 #define MSR_IA32_APICBASE_RESERVED \ 394 (~(uint64_t)(MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE \ 395 | MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_BASE)) 396 397 #define MSR_IA32_FEATURE_CONTROL 0x0000003a 398 #define MSR_TSC_ADJUST 0x0000003b 399 #define MSR_IA32_SPEC_CTRL 0x48 400 #define MSR_VIRT_SSBD 0xc001011f 401 #define MSR_IA32_PRED_CMD 0x49 402 #define MSR_IA32_UCODE_REV 0x8b 403 #define MSR_IA32_CORE_CAPABILITY 0xcf 404 405 #define MSR_IA32_ARCH_CAPABILITIES 0x10a 406 #define ARCH_CAP_TSX_CTRL_MSR (1<<7) 407 408 #define MSR_IA32_PERF_CAPABILITIES 0x345 409 #define PERF_CAP_LBR_FMT 0x3f 410 411 #define MSR_IA32_TSX_CTRL 0x122 412 #define MSR_IA32_TSCDEADLINE 0x6e0 413 #define MSR_IA32_PKRS 0x6e1 414 #define MSR_RAPL_POWER_UNIT 0x00000606 415 #define MSR_PKG_POWER_LIMIT 0x00000610 416 #define MSR_PKG_ENERGY_STATUS 0x00000611 417 #define MSR_PKG_POWER_INFO 0x00000614 418 #define MSR_ARCH_LBR_CTL 0x000014ce 419 #define MSR_ARCH_LBR_DEPTH 0x000014cf 420 #define MSR_ARCH_LBR_FROM_0 0x00001500 421 #define MSR_ARCH_LBR_TO_0 0x00001600 422 #define MSR_ARCH_LBR_INFO_0 0x00001200 423 424 #define FEATURE_CONTROL_LOCKED (1<<0) 425 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1) 426 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) 427 #define FEATURE_CONTROL_SGX_LC (1ULL << 17) 428 #define FEATURE_CONTROL_SGX (1ULL << 18) 429 #define FEATURE_CONTROL_LMCE (1<<20) 430 431 #define MSR_IA32_SGXLEPUBKEYHASH0 0x8c 432 #define MSR_IA32_SGXLEPUBKEYHASH1 0x8d 433 #define MSR_IA32_SGXLEPUBKEYHASH2 0x8e 434 #define MSR_IA32_SGXLEPUBKEYHASH3 0x8f 435 436 #define MSR_P6_PERFCTR0 0xc1 437 438 #define MSR_IA32_SMBASE 0x9e 439 #define MSR_SMI_COUNT 0x34 440 #define MSR_CORE_THREAD_COUNT 0x35 441 #define MSR_MTRRcap 0xfe 442 #define MSR_MTRRcap_VCNT 8 443 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) 444 #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) 445 446 #define MSR_IA32_SYSENTER_CS 0x174 447 #define MSR_IA32_SYSENTER_ESP 0x175 448 #define MSR_IA32_SYSENTER_EIP 0x176 449 450 #define MSR_MCG_CAP 0x179 451 #define MSR_MCG_STATUS 0x17a 452 #define MSR_MCG_CTL 0x17b 453 #define MSR_MCG_EXT_CTL 0x4d0 454 455 #define MSR_P6_EVNTSEL0 0x186 456 457 #define MSR_IA32_PERF_STATUS 0x198 458 459 #define MSR_IA32_MISC_ENABLE 0x1a0 460 /* Indicates good rep/movs microcode on some processors: */ 461 #define MSR_IA32_MISC_ENABLE_DEFAULT 1 462 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) 463 464 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) 465 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) 466 467 #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) 468 469 #define MSR_MTRRfix64K_00000 0x250 470 #define MSR_MTRRfix16K_80000 0x258 471 #define MSR_MTRRfix16K_A0000 0x259 472 #define MSR_MTRRfix4K_C0000 0x268 473 #define MSR_MTRRfix4K_C8000 0x269 474 #define MSR_MTRRfix4K_D0000 0x26a 475 #define MSR_MTRRfix4K_D8000 0x26b 476 #define MSR_MTRRfix4K_E0000 0x26c 477 #define MSR_MTRRfix4K_E8000 0x26d 478 #define MSR_MTRRfix4K_F0000 0x26e 479 #define MSR_MTRRfix4K_F8000 0x26f 480 481 #define MSR_PAT 0x277 482 483 #define MSR_MTRRdefType 0x2ff 484 485 #define MSR_CORE_PERF_FIXED_CTR0 0x309 486 #define MSR_CORE_PERF_FIXED_CTR1 0x30a 487 #define MSR_CORE_PERF_FIXED_CTR2 0x30b 488 #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d 489 #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e 490 #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f 491 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 492 493 #define MSR_MC0_CTL 0x400 494 #define MSR_MC0_STATUS 0x401 495 #define MSR_MC0_ADDR 0x402 496 #define MSR_MC0_MISC 0x403 497 498 #define MSR_IA32_RTIT_OUTPUT_BASE 0x560 499 #define MSR_IA32_RTIT_OUTPUT_MASK 0x561 500 #define MSR_IA32_RTIT_CTL 0x570 501 #define MSR_IA32_RTIT_STATUS 0x571 502 #define MSR_IA32_RTIT_CR3_MATCH 0x572 503 #define MSR_IA32_RTIT_ADDR0_A 0x580 504 #define MSR_IA32_RTIT_ADDR0_B 0x581 505 #define MSR_IA32_RTIT_ADDR1_A 0x582 506 #define MSR_IA32_RTIT_ADDR1_B 0x583 507 #define MSR_IA32_RTIT_ADDR2_A 0x584 508 #define MSR_IA32_RTIT_ADDR2_B 0x585 509 #define MSR_IA32_RTIT_ADDR3_A 0x586 510 #define MSR_IA32_RTIT_ADDR3_B 0x587 511 #define MAX_RTIT_ADDRS 8 512 513 #define MSR_EFER 0xc0000080 514 515 #define MSR_EFER_SCE (1 << 0) 516 #define MSR_EFER_LME (1 << 8) 517 #define MSR_EFER_LMA (1 << 10) 518 #define MSR_EFER_NXE (1 << 11) 519 #define MSR_EFER_SVME (1 << 12) 520 #define MSR_EFER_FFXSR (1 << 14) 521 522 #define MSR_EFER_RESERVED\ 523 (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\ 524 | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\ 525 | MSR_EFER_FFXSR)) 526 527 #define MSR_STAR 0xc0000081 528 #define MSR_LSTAR 0xc0000082 529 #define MSR_CSTAR 0xc0000083 530 #define MSR_FMASK 0xc0000084 531 #define MSR_FSBASE 0xc0000100 532 #define MSR_GSBASE 0xc0000101 533 #define MSR_KERNELGSBASE 0xc0000102 534 #define MSR_TSC_AUX 0xc0000103 535 #define MSR_AMD64_TSC_RATIO 0xc0000104 536 537 #define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL 538 539 #define MSR_K7_HWCR 0xc0010015 540 541 #define MSR_VM_HSAVE_PA 0xc0010117 542 543 #define MSR_IA32_XFD 0x000001c4 544 #define MSR_IA32_XFD_ERR 0x000001c5 545 546 /* FRED MSRs */ 547 #define MSR_IA32_FRED_RSP0 0x000001cc /* Stack level 0 regular stack pointer */ 548 #define MSR_IA32_FRED_RSP1 0x000001cd /* Stack level 1 regular stack pointer */ 549 #define MSR_IA32_FRED_RSP2 0x000001ce /* Stack level 2 regular stack pointer */ 550 #define MSR_IA32_FRED_RSP3 0x000001cf /* Stack level 3 regular stack pointer */ 551 #define MSR_IA32_FRED_STKLVLS 0x000001d0 /* FRED exception stack levels */ 552 #define MSR_IA32_FRED_SSP1 0x000001d1 /* Stack level 1 shadow stack pointer in ring 0 */ 553 #define MSR_IA32_FRED_SSP2 0x000001d2 /* Stack level 2 shadow stack pointer in ring 0 */ 554 #define MSR_IA32_FRED_SSP3 0x000001d3 /* Stack level 3 shadow stack pointer in ring 0 */ 555 #define MSR_IA32_FRED_CONFIG 0x000001d4 /* FRED Entrypoint and interrupt stack level */ 556 557 #define MSR_IA32_BNDCFGS 0x00000d90 558 #define MSR_IA32_XSS 0x00000da0 559 #define MSR_IA32_UMWAIT_CONTROL 0xe1 560 561 #define MSR_IA32_VMX_BASIC 0x00000480 562 #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 563 #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 564 #define MSR_IA32_VMX_EXIT_CTLS 0x00000483 565 #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 566 #define MSR_IA32_VMX_MISC 0x00000485 567 #define MSR_IA32_VMX_CR0_FIXED0 0x00000486 568 #define MSR_IA32_VMX_CR0_FIXED1 0x00000487 569 #define MSR_IA32_VMX_CR4_FIXED0 0x00000488 570 #define MSR_IA32_VMX_CR4_FIXED1 0x00000489 571 #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a 572 #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b 573 #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c 574 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d 575 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e 576 #define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f 577 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 578 #define MSR_IA32_VMX_VMFUNC 0x00000491 579 580 #define MSR_APIC_START 0x00000800 581 #define MSR_APIC_END 0x000008ff 582 583 #define XSTATE_FP_BIT 0 584 #define XSTATE_SSE_BIT 1 585 #define XSTATE_YMM_BIT 2 586 #define XSTATE_BNDREGS_BIT 3 587 #define XSTATE_BNDCSR_BIT 4 588 #define XSTATE_OPMASK_BIT 5 589 #define XSTATE_ZMM_Hi256_BIT 6 590 #define XSTATE_Hi16_ZMM_BIT 7 591 #define XSTATE_PKRU_BIT 9 592 #define XSTATE_ARCH_LBR_BIT 15 593 #define XSTATE_XTILE_CFG_BIT 17 594 #define XSTATE_XTILE_DATA_BIT 18 595 596 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) 597 #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) 598 #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT) 599 #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT) 600 #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT) 601 #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) 602 #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) 603 #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) 604 #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) 605 #define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT) 606 #define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT) 607 #define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT) 608 609 #define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK) 610 611 #define ESA_FEATURE_ALIGN64_BIT 1 612 #define ESA_FEATURE_XFD_BIT 2 613 614 #define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT) 615 #define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT) 616 617 618 /* CPUID feature bits available in XCR0 */ 619 #define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \ 620 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \ 621 XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \ 622 XSTATE_ZMM_Hi256_MASK | \ 623 XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \ 624 XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK) 625 626 /* CPUID feature words */ 627 typedef enum FeatureWord { 628 FEAT_1_EDX, /* CPUID[1].EDX */ 629 FEAT_1_ECX, /* CPUID[1].ECX */ 630 FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ 631 FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ 632 FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ 633 FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ 634 FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ 635 FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ 636 FEAT_8000_0007_EBX, /* CPUID[8000_0007].EBX */ 637 FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ 638 FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ 639 FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */ 640 FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */ 641 FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */ 642 FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ 643 FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ 644 FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ 645 FEAT_SVM, /* CPUID[8000_000A].EDX */ 646 FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ 647 FEAT_6_EAX, /* CPUID[6].EAX */ 648 FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ 649 FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ 650 FEAT_ARCH_CAPABILITIES, 651 FEAT_CORE_CAPABILITY, 652 FEAT_PERF_CAPABILITIES, 653 FEAT_VMX_PROCBASED_CTLS, 654 FEAT_VMX_SECONDARY_CTLS, 655 FEAT_VMX_PINBASED_CTLS, 656 FEAT_VMX_EXIT_CTLS, 657 FEAT_VMX_ENTRY_CTLS, 658 FEAT_VMX_MISC, 659 FEAT_VMX_EPT_VPID_CAPS, 660 FEAT_VMX_BASIC, 661 FEAT_VMX_VMFUNC, 662 FEAT_14_0_ECX, 663 FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */ 664 FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */ 665 FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ 666 FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */ 667 FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */ 668 FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */ 669 FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */ 670 FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */ 671 FEATURE_WORDS, 672 } FeatureWord; 673 674 typedef uint64_t FeatureWordArray[FEATURE_WORDS]; 675 uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); 676 677 /* cpuid_features bits */ 678 #define CPUID_FP87 (1U << 0) 679 #define CPUID_VME (1U << 1) 680 #define CPUID_DE (1U << 2) 681 #define CPUID_PSE (1U << 3) 682 #define CPUID_TSC (1U << 4) 683 #define CPUID_MSR (1U << 5) 684 #define CPUID_PAE (1U << 6) 685 #define CPUID_MCE (1U << 7) 686 #define CPUID_CX8 (1U << 8) 687 #define CPUID_APIC (1U << 9) 688 #define CPUID_SEP (1U << 11) /* sysenter/sysexit */ 689 #define CPUID_MTRR (1U << 12) 690 #define CPUID_PGE (1U << 13) 691 #define CPUID_MCA (1U << 14) 692 #define CPUID_CMOV (1U << 15) 693 #define CPUID_PAT (1U << 16) 694 #define CPUID_PSE36 (1U << 17) 695 #define CPUID_PN (1U << 18) 696 #define CPUID_CLFLUSH (1U << 19) 697 #define CPUID_DTS (1U << 21) 698 #define CPUID_ACPI (1U << 22) 699 #define CPUID_MMX (1U << 23) 700 #define CPUID_FXSR (1U << 24) 701 #define CPUID_SSE (1U << 25) 702 #define CPUID_SSE2 (1U << 26) 703 #define CPUID_SS (1U << 27) 704 #define CPUID_HT (1U << 28) 705 #define CPUID_TM (1U << 29) 706 #define CPUID_IA64 (1U << 30) 707 #define CPUID_PBE (1U << 31) 708 709 #define CPUID_EXT_SSE3 (1U << 0) 710 #define CPUID_EXT_PCLMULQDQ (1U << 1) 711 #define CPUID_EXT_DTES64 (1U << 2) 712 #define CPUID_EXT_MONITOR (1U << 3) 713 #define CPUID_EXT_DSCPL (1U << 4) 714 #define CPUID_EXT_VMX (1U << 5) 715 #define CPUID_EXT_SMX (1U << 6) 716 #define CPUID_EXT_EST (1U << 7) 717 #define CPUID_EXT_TM2 (1U << 8) 718 #define CPUID_EXT_SSSE3 (1U << 9) 719 #define CPUID_EXT_CID (1U << 10) 720 #define CPUID_EXT_FMA (1U << 12) 721 #define CPUID_EXT_CX16 (1U << 13) 722 #define CPUID_EXT_XTPR (1U << 14) 723 #define CPUID_EXT_PDCM (1U << 15) 724 #define CPUID_EXT_PCID (1U << 17) 725 #define CPUID_EXT_DCA (1U << 18) 726 #define CPUID_EXT_SSE41 (1U << 19) 727 #define CPUID_EXT_SSE42 (1U << 20) 728 #define CPUID_EXT_X2APIC (1U << 21) 729 #define CPUID_EXT_MOVBE (1U << 22) 730 #define CPUID_EXT_POPCNT (1U << 23) 731 #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) 732 #define CPUID_EXT_AES (1U << 25) 733 #define CPUID_EXT_XSAVE (1U << 26) 734 #define CPUID_EXT_OSXSAVE (1U << 27) 735 #define CPUID_EXT_AVX (1U << 28) 736 #define CPUID_EXT_F16C (1U << 29) 737 #define CPUID_EXT_RDRAND (1U << 30) 738 #define CPUID_EXT_HYPERVISOR (1U << 31) 739 740 #define CPUID_EXT2_FPU (1U << 0) 741 #define CPUID_EXT2_VME (1U << 1) 742 #define CPUID_EXT2_DE (1U << 2) 743 #define CPUID_EXT2_PSE (1U << 3) 744 #define CPUID_EXT2_TSC (1U << 4) 745 #define CPUID_EXT2_MSR (1U << 5) 746 #define CPUID_EXT2_PAE (1U << 6) 747 #define CPUID_EXT2_MCE (1U << 7) 748 #define CPUID_EXT2_CX8 (1U << 8) 749 #define CPUID_EXT2_APIC (1U << 9) 750 #define CPUID_EXT2_SYSCALL (1U << 11) 751 #define CPUID_EXT2_MTRR (1U << 12) 752 #define CPUID_EXT2_PGE (1U << 13) 753 #define CPUID_EXT2_MCA (1U << 14) 754 #define CPUID_EXT2_CMOV (1U << 15) 755 #define CPUID_EXT2_PAT (1U << 16) 756 #define CPUID_EXT2_PSE36 (1U << 17) 757 #define CPUID_EXT2_MP (1U << 19) 758 #define CPUID_EXT2_NX (1U << 20) 759 #define CPUID_EXT2_MMXEXT (1U << 22) 760 #define CPUID_EXT2_MMX (1U << 23) 761 #define CPUID_EXT2_FXSR (1U << 24) 762 #define CPUID_EXT2_FFXSR (1U << 25) 763 #define CPUID_EXT2_PDPE1GB (1U << 26) 764 #define CPUID_EXT2_RDTSCP (1U << 27) 765 #define CPUID_EXT2_LM (1U << 29) 766 #define CPUID_EXT2_3DNOWEXT (1U << 30) 767 #define CPUID_EXT2_3DNOW (1U << 31) 768 769 /* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */ 770 #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ 771 CPUID_EXT2_DE | CPUID_EXT2_PSE | \ 772 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ 773 CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ 774 CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ 775 CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ 776 CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ 777 CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ 778 CPUID_EXT2_MMX | CPUID_EXT2_FXSR) 779 780 #define CPUID_EXT3_LAHF_LM (1U << 0) 781 #define CPUID_EXT3_CMP_LEG (1U << 1) 782 #define CPUID_EXT3_SVM (1U << 2) 783 #define CPUID_EXT3_EXTAPIC (1U << 3) 784 #define CPUID_EXT3_CR8LEG (1U << 4) 785 #define CPUID_EXT3_ABM (1U << 5) 786 #define CPUID_EXT3_SSE4A (1U << 6) 787 #define CPUID_EXT3_MISALIGNSSE (1U << 7) 788 #define CPUID_EXT3_3DNOWPREFETCH (1U << 8) 789 #define CPUID_EXT3_OSVW (1U << 9) 790 #define CPUID_EXT3_IBS (1U << 10) 791 #define CPUID_EXT3_XOP (1U << 11) 792 #define CPUID_EXT3_SKINIT (1U << 12) 793 #define CPUID_EXT3_WDT (1U << 13) 794 #define CPUID_EXT3_LWP (1U << 15) 795 #define CPUID_EXT3_FMA4 (1U << 16) 796 #define CPUID_EXT3_TCE (1U << 17) 797 #define CPUID_EXT3_NODEID (1U << 19) 798 #define CPUID_EXT3_TBM (1U << 21) 799 #define CPUID_EXT3_TOPOEXT (1U << 22) 800 #define CPUID_EXT3_PERFCORE (1U << 23) 801 #define CPUID_EXT3_PERFNB (1U << 24) 802 803 #define CPUID_SVM_NPT (1U << 0) 804 #define CPUID_SVM_LBRV (1U << 1) 805 #define CPUID_SVM_SVMLOCK (1U << 2) 806 #define CPUID_SVM_NRIPSAVE (1U << 3) 807 #define CPUID_SVM_TSCSCALE (1U << 4) 808 #define CPUID_SVM_VMCBCLEAN (1U << 5) 809 #define CPUID_SVM_FLUSHASID (1U << 6) 810 #define CPUID_SVM_DECODEASSIST (1U << 7) 811 #define CPUID_SVM_PAUSEFILTER (1U << 10) 812 #define CPUID_SVM_PFTHRESHOLD (1U << 12) 813 #define CPUID_SVM_AVIC (1U << 13) 814 #define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15) 815 #define CPUID_SVM_VGIF (1U << 16) 816 #define CPUID_SVM_VNMI (1U << 25) 817 #define CPUID_SVM_SVME_ADDR_CHK (1U << 28) 818 819 /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ 820 #define CPUID_7_0_EBX_FSGSBASE (1U << 0) 821 /* Support TSC adjust MSR */ 822 #define CPUID_7_0_EBX_TSC_ADJUST (1U << 1) 823 /* Support SGX */ 824 #define CPUID_7_0_EBX_SGX (1U << 2) 825 /* 1st Group of Advanced Bit Manipulation Extensions */ 826 #define CPUID_7_0_EBX_BMI1 (1U << 3) 827 /* Hardware Lock Elision */ 828 #define CPUID_7_0_EBX_HLE (1U << 4) 829 /* Intel Advanced Vector Extensions 2 */ 830 #define CPUID_7_0_EBX_AVX2 (1U << 5) 831 /* FPU data pointer updated only on x87 exceptions */ 832 #define CPUID_7_0_EBX_FDP_EXCPTN_ONLY (1u << 6) 833 /* Supervisor-mode Execution Prevention */ 834 #define CPUID_7_0_EBX_SMEP (1U << 7) 835 /* 2nd Group of Advanced Bit Manipulation Extensions */ 836 #define CPUID_7_0_EBX_BMI2 (1U << 8) 837 /* Enhanced REP MOVSB/STOSB */ 838 #define CPUID_7_0_EBX_ERMS (1U << 9) 839 /* Invalidate Process-Context Identifier */ 840 #define CPUID_7_0_EBX_INVPCID (1U << 10) 841 /* Restricted Transactional Memory */ 842 #define CPUID_7_0_EBX_RTM (1U << 11) 843 /* Zero out FPU CS and FPU DS */ 844 #define CPUID_7_0_EBX_ZERO_FCS_FDS (1U << 13) 845 /* Memory Protection Extension */ 846 #define CPUID_7_0_EBX_MPX (1U << 14) 847 /* AVX-512 Foundation */ 848 #define CPUID_7_0_EBX_AVX512F (1U << 16) 849 /* AVX-512 Doubleword & Quadword Instruction */ 850 #define CPUID_7_0_EBX_AVX512DQ (1U << 17) 851 /* Read Random SEED */ 852 #define CPUID_7_0_EBX_RDSEED (1U << 18) 853 /* ADCX and ADOX instructions */ 854 #define CPUID_7_0_EBX_ADX (1U << 19) 855 /* Supervisor Mode Access Prevention */ 856 #define CPUID_7_0_EBX_SMAP (1U << 20) 857 /* AVX-512 Integer Fused Multiply Add */ 858 #define CPUID_7_0_EBX_AVX512IFMA (1U << 21) 859 /* Flush a Cache Line Optimized */ 860 #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) 861 /* Cache Line Write Back */ 862 #define CPUID_7_0_EBX_CLWB (1U << 24) 863 /* Intel Processor Trace */ 864 #define CPUID_7_0_EBX_INTEL_PT (1U << 25) 865 /* AVX-512 Prefetch */ 866 #define CPUID_7_0_EBX_AVX512PF (1U << 26) 867 /* AVX-512 Exponential and Reciprocal */ 868 #define CPUID_7_0_EBX_AVX512ER (1U << 27) 869 /* AVX-512 Conflict Detection */ 870 #define CPUID_7_0_EBX_AVX512CD (1U << 28) 871 /* SHA1/SHA256 Instruction Extensions */ 872 #define CPUID_7_0_EBX_SHA_NI (1U << 29) 873 /* AVX-512 Byte and Word Instructions */ 874 #define CPUID_7_0_EBX_AVX512BW (1U << 30) 875 /* AVX-512 Vector Length Extensions */ 876 #define CPUID_7_0_EBX_AVX512VL (1U << 31) 877 878 /* AVX-512 Vector Byte Manipulation Instruction */ 879 #define CPUID_7_0_ECX_AVX512_VBMI (1U << 1) 880 /* User-Mode Instruction Prevention */ 881 #define CPUID_7_0_ECX_UMIP (1U << 2) 882 /* Protection Keys for User-mode Pages */ 883 #define CPUID_7_0_ECX_PKU (1U << 3) 884 /* OS Enable Protection Keys */ 885 #define CPUID_7_0_ECX_OSPKE (1U << 4) 886 /* UMONITOR/UMWAIT/TPAUSE Instructions */ 887 #define CPUID_7_0_ECX_WAITPKG (1U << 5) 888 /* Additional AVX-512 Vector Byte Manipulation Instruction */ 889 #define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6) 890 /* Galois Field New Instructions */ 891 #define CPUID_7_0_ECX_GFNI (1U << 8) 892 /* Vector AES Instructions */ 893 #define CPUID_7_0_ECX_VAES (1U << 9) 894 /* Carry-Less Multiplication Quadword */ 895 #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10) 896 /* Vector Neural Network Instructions */ 897 #define CPUID_7_0_ECX_AVX512VNNI (1U << 11) 898 /* Support for VPOPCNT[B,W] and VPSHUFBITQMB */ 899 #define CPUID_7_0_ECX_AVX512BITALG (1U << 12) 900 /* POPCNT for vectors of DW/QW */ 901 #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) 902 /* 5-level Page Tables */ 903 #define CPUID_7_0_ECX_LA57 (1U << 16) 904 /* Read Processor ID */ 905 #define CPUID_7_0_ECX_RDPID (1U << 22) 906 /* Bus Lock Debug Exception */ 907 #define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24) 908 /* Cache Line Demote Instruction */ 909 #define CPUID_7_0_ECX_CLDEMOTE (1U << 25) 910 /* Move Doubleword as Direct Store Instruction */ 911 #define CPUID_7_0_ECX_MOVDIRI (1U << 27) 912 /* Move 64 Bytes as Direct Store Instruction */ 913 #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) 914 /* Support SGX Launch Control */ 915 #define CPUID_7_0_ECX_SGX_LC (1U << 30) 916 /* Protection Keys for Supervisor-mode Pages */ 917 #define CPUID_7_0_ECX_PKS (1U << 31) 918 919 /* AVX512 Neural Network Instructions */ 920 #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) 921 /* AVX512 Multiply Accumulation Single Precision */ 922 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) 923 /* Fast Short Rep Mov */ 924 #define CPUID_7_0_EDX_FSRM (1U << 4) 925 /* AVX512 Vector Pair Intersection to a Pair of Mask Registers */ 926 #define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8) 927 /* SERIALIZE instruction */ 928 #define CPUID_7_0_EDX_SERIALIZE (1U << 14) 929 /* TSX Suspend Load Address Tracking instruction */ 930 #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16) 931 /* Architectural LBRs */ 932 #define CPUID_7_0_EDX_ARCH_LBR (1U << 19) 933 /* AMX_BF16 instruction */ 934 #define CPUID_7_0_EDX_AMX_BF16 (1U << 22) 935 /* AVX512_FP16 instruction */ 936 #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23) 937 /* AMX tile (two-dimensional register) */ 938 #define CPUID_7_0_EDX_AMX_TILE (1U << 24) 939 /* AMX_INT8 instruction */ 940 #define CPUID_7_0_EDX_AMX_INT8 (1U << 25) 941 /* Speculation Control */ 942 #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) 943 /* Single Thread Indirect Branch Predictors */ 944 #define CPUID_7_0_EDX_STIBP (1U << 27) 945 /* Flush L1D cache */ 946 #define CPUID_7_0_EDX_FLUSH_L1D (1U << 28) 947 /* Arch Capabilities */ 948 #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) 949 /* Core Capability */ 950 #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) 951 /* Speculative Store Bypass Disable */ 952 #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) 953 954 /* SHA512 Instruction */ 955 #define CPUID_7_1_EAX_SHA512 (1U << 0) 956 /* SM3 Instruction */ 957 #define CPUID_7_1_EAX_SM3 (1U << 1) 958 /* SM4 Instruction */ 959 #define CPUID_7_1_EAX_SM4 (1U << 2) 960 /* AVX VNNI Instruction */ 961 #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) 962 /* AVX512 BFloat16 Instruction */ 963 #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) 964 /* CMPCCXADD Instructions */ 965 #define CPUID_7_1_EAX_CMPCCXADD (1U << 7) 966 /* Fast Zero REP MOVS */ 967 #define CPUID_7_1_EAX_FZRM (1U << 10) 968 /* Fast Short REP STOS */ 969 #define CPUID_7_1_EAX_FSRS (1U << 11) 970 /* Fast Short REP CMPS/SCAS */ 971 #define CPUID_7_1_EAX_FSRC (1U << 12) 972 /* Flexible return and event delivery (FRED) */ 973 #define CPUID_7_1_EAX_FRED (1U << 17) 974 /* Load into IA32_KERNEL_GS_BASE (LKGS) */ 975 #define CPUID_7_1_EAX_LKGS (1U << 18) 976 /* Non-Serializing Write to Model Specific Register (WRMSRNS) */ 977 #define CPUID_7_1_EAX_WRMSRNS (1U << 19) 978 /* Support Tile Computational Operations on FP16 Numbers */ 979 #define CPUID_7_1_EAX_AMX_FP16 (1U << 21) 980 /* Support for VPMADD52[H,L]UQ */ 981 #define CPUID_7_1_EAX_AVX_IFMA (1U << 23) 982 /* Linear Address Masking */ 983 #define CPUID_7_1_EAX_LAM (1U << 26) 984 985 /* Support for VPDPB[SU,UU,SS]D[,S] */ 986 #define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4) 987 /* AVX NE CONVERT Instructions */ 988 #define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5) 989 /* AMX COMPLEX Instructions */ 990 #define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8) 991 /* AVX-VNNI-INT16 Instructions */ 992 #define CPUID_7_1_EDX_AVX_VNNI_INT16 (1U << 10) 993 /* PREFETCHIT0/1 Instructions */ 994 #define CPUID_7_1_EDX_PREFETCHITI (1U << 14) 995 /* Support for Advanced Vector Extensions 10 */ 996 #define CPUID_7_1_EDX_AVX10 (1U << 19) 997 998 /* Indicate bit 7 of the IA32_SPEC_CTRL MSR is supported */ 999 #define CPUID_7_2_EDX_PSFD (1U << 0) 1000 /* Indicate bits 3 and 4 of the IA32_SPEC_CTRL MSR are supported */ 1001 #define CPUID_7_2_EDX_IPRED_CTRL (1U << 1) 1002 /* Indicate bits 5 and 6 of the IA32_SPEC_CTRL MSR are supported */ 1003 #define CPUID_7_2_EDX_RRSBA_CTRL (1U << 2) 1004 /* Indicate bit 8 of the IA32_SPEC_CTRL MSR is supported */ 1005 #define CPUID_7_2_EDX_DDPD_U (1U << 3) 1006 /* Indicate bit 10 of the IA32_SPEC_CTRL MSR is supported */ 1007 #define CPUID_7_2_EDX_BHI_CTRL (1U << 4) 1008 /* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */ 1009 #define CPUID_7_2_EDX_MCDT_NO (1U << 5) 1010 1011 /* XFD Extend Feature Disabled */ 1012 #define CPUID_D_1_EAX_XFD (1U << 4) 1013 1014 /* Packets which contain IP payload have LIP values */ 1015 #define CPUID_14_0_ECX_LIP (1U << 31) 1016 1017 /* AVX10 128-bit vector support is present */ 1018 #define CPUID_24_0_EBX_AVX10_128 (1U << 16) 1019 /* AVX10 256-bit vector support is present */ 1020 #define CPUID_24_0_EBX_AVX10_256 (1U << 17) 1021 /* AVX10 512-bit vector support is present */ 1022 #define CPUID_24_0_EBX_AVX10_512 (1U << 18) 1023 /* AVX10 vector length support mask */ 1024 #define CPUID_24_0_EBX_AVX10_VL_MASK (CPUID_24_0_EBX_AVX10_128 | \ 1025 CPUID_24_0_EBX_AVX10_256 | \ 1026 CPUID_24_0_EBX_AVX10_512) 1027 1028 /* RAS Features */ 1029 #define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0) 1030 #define CPUID_8000_0007_EBX_SUCCOR (1U << 1) 1031 1032 /* (Old) KVM paravirtualized clocksource */ 1033 #define CPUID_KVM_CLOCK (1U << KVM_FEATURE_CLOCKSOURCE) 1034 /* (New) KVM specific paravirtualized clocksource */ 1035 #define CPUID_KVM_CLOCK2 (1U << KVM_FEATURE_CLOCKSOURCE2) 1036 /* KVM asynchronous page fault */ 1037 #define CPUID_KVM_ASYNCPF (1U << KVM_FEATURE_ASYNC_PF) 1038 /* KVM stolen (when guest vCPU is not running) time accounting */ 1039 #define CPUID_KVM_STEAL_TIME (1U << KVM_FEATURE_STEAL_TIME) 1040 /* KVM paravirtualized end-of-interrupt signaling */ 1041 #define CPUID_KVM_PV_EOI (1U << KVM_FEATURE_PV_EOI) 1042 /* KVM paravirtualized spinlocks support */ 1043 #define CPUID_KVM_PV_UNHALT (1U << KVM_FEATURE_PV_UNHALT) 1044 /* KVM host-side polling on HLT control from the guest */ 1045 #define CPUID_KVM_POLL_CONTROL (1U << KVM_FEATURE_POLL_CONTROL) 1046 /* KVM interrupt based asynchronous page fault*/ 1047 #define CPUID_KVM_ASYNCPF_INT (1U << KVM_FEATURE_ASYNC_PF_INT) 1048 /* KVM 'Extended Destination ID' support for external interrupts */ 1049 #define CPUID_KVM_MSI_EXT_DEST_ID (1U << KVM_FEATURE_MSI_EXT_DEST_ID) 1050 1051 /* Hint to KVM that vCPUs expect never preempted for an unlimited time */ 1052 #define CPUID_KVM_HINTS_REALTIME (1U << KVM_HINTS_REALTIME) 1053 1054 /* CLZERO instruction */ 1055 #define CPUID_8000_0008_EBX_CLZERO (1U << 0) 1056 /* Always save/restore FP error pointers */ 1057 #define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2) 1058 /* Write back and do not invalidate cache */ 1059 #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) 1060 /* Indirect Branch Prediction Barrier */ 1061 #define CPUID_8000_0008_EBX_IBPB (1U << 12) 1062 /* Indirect Branch Restricted Speculation */ 1063 #define CPUID_8000_0008_EBX_IBRS (1U << 14) 1064 /* Single Thread Indirect Branch Predictors */ 1065 #define CPUID_8000_0008_EBX_STIBP (1U << 15) 1066 /* STIBP mode has enhanced performance and may be left always on */ 1067 #define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17) 1068 /* Speculative Store Bypass Disable */ 1069 #define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24) 1070 /* Paravirtualized Speculative Store Bypass Disable MSR */ 1071 #define CPUID_8000_0008_EBX_VIRT_SSBD (1U << 25) 1072 /* Predictive Store Forwarding Disable */ 1073 #define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28) 1074 1075 /* Processor ignores nested data breakpoints */ 1076 #define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0) 1077 /* LFENCE is always serializing */ 1078 #define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2) 1079 /* Null Selector Clears Base */ 1080 #define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6) 1081 /* Automatic IBRS */ 1082 #define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8) 1083 /* Enhanced Return Address Predictor Scurity */ 1084 #define CPUID_8000_0021_EAX_ERAPS (1U << 24) 1085 /* Selective Branch Predictor Barrier */ 1086 #define CPUID_8000_0021_EAX_SBPB (1U << 27) 1087 /* IBPB includes branch type prediction flushing */ 1088 #define CPUID_8000_0021_EAX_IBPB_BRTYPE (1U << 28) 1089 /* Not vulnerable to Speculative Return Stack Overflow */ 1090 #define CPUID_8000_0021_EAX_SRSO_NO (1U << 29) 1091 /* Not vulnerable to SRSO at the user-kernel boundary */ 1092 #define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO (1U << 30) 1093 1094 /* 1095 * Return Address Predictor size. RapSize x 8 is the minimum number of 1096 * CALL instructions software needs to execute to flush the RAP. 1097 */ 1098 #define CPUID_8000_0021_EBX_RAPSIZE (8U << 16) 1099 1100 /* Performance Monitoring Version 2 */ 1101 #define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0) 1102 1103 #define CPUID_XSAVE_XSAVEOPT (1U << 0) 1104 #define CPUID_XSAVE_XSAVEC (1U << 1) 1105 #define CPUID_XSAVE_XGETBV1 (1U << 2) 1106 #define CPUID_XSAVE_XSAVES (1U << 3) 1107 1108 #define CPUID_6_EAX_ARAT (1U << 2) 1109 1110 /* CPUID[0x80000007].EDX flags: */ 1111 #define CPUID_APM_INVTSC (1U << 8) 1112 1113 #define CPUID_VENDOR_SZ 12 1114 1115 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ 1116 #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ 1117 #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ 1118 #define CPUID_VENDOR_INTEL "GenuineIntel" 1119 1120 #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ 1121 #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ 1122 #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ 1123 #define CPUID_VENDOR_AMD "AuthenticAMD" 1124 1125 #define CPUID_VENDOR_VIA "CentaurHauls" 1126 1127 #define CPUID_VENDOR_HYGON "HygonGenuine" 1128 1129 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ 1130 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ 1131 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) 1132 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ 1133 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ 1134 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) 1135 1136 #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ 1137 #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ 1138 1139 /* CPUID[0xB].ECX level types */ 1140 #define CPUID_B_ECX_TOPO_LEVEL_INVALID 0 1141 #define CPUID_B_ECX_TOPO_LEVEL_SMT 1 1142 #define CPUID_B_ECX_TOPO_LEVEL_CORE 2 1143 1144 /* COUID[0x1F].ECX level types */ 1145 #define CPUID_1F_ECX_TOPO_LEVEL_INVALID CPUID_B_ECX_TOPO_LEVEL_INVALID 1146 #define CPUID_1F_ECX_TOPO_LEVEL_SMT CPUID_B_ECX_TOPO_LEVEL_SMT 1147 #define CPUID_1F_ECX_TOPO_LEVEL_CORE CPUID_B_ECX_TOPO_LEVEL_CORE 1148 #define CPUID_1F_ECX_TOPO_LEVEL_MODULE 3 1149 #define CPUID_1F_ECX_TOPO_LEVEL_DIE 5 1150 1151 /* MSR Feature Bits */ 1152 #define MSR_ARCH_CAP_RDCL_NO (1U << 0) 1153 #define MSR_ARCH_CAP_IBRS_ALL (1U << 1) 1154 #define MSR_ARCH_CAP_RSBA (1U << 2) 1155 #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3) 1156 #define MSR_ARCH_CAP_SSB_NO (1U << 4) 1157 #define MSR_ARCH_CAP_MDS_NO (1U << 5) 1158 #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) 1159 #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) 1160 #define MSR_ARCH_CAP_TAA_NO (1U << 8) 1161 #define MSR_ARCH_CAP_SBDR_SSDP_NO (1U << 13) 1162 #define MSR_ARCH_CAP_FBSDP_NO (1U << 14) 1163 #define MSR_ARCH_CAP_PSDP_NO (1U << 15) 1164 #define MSR_ARCH_CAP_FB_CLEAR (1U << 17) 1165 #define MSR_ARCH_CAP_BHI_NO (1U << 20) 1166 #define MSR_ARCH_CAP_PBRSB_NO (1U << 24) 1167 #define MSR_ARCH_CAP_GDS_NO (1U << 26) 1168 #define MSR_ARCH_CAP_RFDS_NO (1U << 27) 1169 1170 #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) 1171 1172 /* VMX MSR features */ 1173 #define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull 1174 #define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32) 1175 #define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32) 1176 #define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49) 1177 #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54) 1178 #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55) 1179 #define MSR_VMX_BASIC_ANY_ERRCODE (1ULL << 56) 1180 #define MSR_VMX_BASIC_NESTED_EXCEPTION (1ULL << 58) 1181 1182 #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full 1183 #define MSR_VMX_MISC_STORE_LMA (1ULL << 5) 1184 #define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6) 1185 #define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7) 1186 #define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8) 1187 #define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull 1188 #define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29) 1189 #define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30) 1190 1191 #define MSR_VMX_EPT_EXECONLY (1ULL << 0) 1192 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6) 1193 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7) 1194 #define MSR_VMX_EPT_UC (1ULL << 8) 1195 #define MSR_VMX_EPT_WB (1ULL << 14) 1196 #define MSR_VMX_EPT_2MB (1ULL << 16) 1197 #define MSR_VMX_EPT_1GB (1ULL << 17) 1198 #define MSR_VMX_EPT_INVEPT (1ULL << 20) 1199 #define MSR_VMX_EPT_AD_BITS (1ULL << 21) 1200 #define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22) 1201 #define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25) 1202 #define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26) 1203 #define MSR_VMX_EPT_INVVPID (1ULL << 32) 1204 #define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40) 1205 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41) 1206 #define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42) 1207 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43) 1208 1209 #define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0) 1210 1211 1212 /* VMX controls */ 1213 #define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 1214 #define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008 1215 #define VMX_CPU_BASED_HLT_EXITING 0x00000080 1216 #define VMX_CPU_BASED_INVLPG_EXITING 0x00000200 1217 #define VMX_CPU_BASED_MWAIT_EXITING 0x00000400 1218 #define VMX_CPU_BASED_RDPMC_EXITING 0x00000800 1219 #define VMX_CPU_BASED_RDTSC_EXITING 0x00001000 1220 #define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000 1221 #define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000 1222 #define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000 1223 #define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000 1224 #define VMX_CPU_BASED_TPR_SHADOW 0x00200000 1225 #define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 1226 #define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000 1227 #define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000 1228 #define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000 1229 #define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000 1230 #define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000 1231 #define VMX_CPU_BASED_MONITOR_EXITING 0x20000000 1232 #define VMX_CPU_BASED_PAUSE_EXITING 0x40000000 1233 #define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 1234 1235 #define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 1236 #define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002 1237 #define VMX_SECONDARY_EXEC_DESC 0x00000004 1238 #define VMX_SECONDARY_EXEC_RDTSCP 0x00000008 1239 #define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 1240 #define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020 1241 #define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040 1242 #define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 1243 #define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 1244 #define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 1245 #define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 1246 #define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800 1247 #define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 1248 #define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 1249 #define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000 1250 #define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000 1251 #define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000 1252 #define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000 1253 #define VMX_SECONDARY_EXEC_XSAVES 0x00100000 1254 #define VMX_SECONDARY_EXEC_TSC_SCALING 0x02000000 1255 #define VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE 0x04000000 1256 1257 #define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001 1258 #define VMX_PIN_BASED_NMI_EXITING 0x00000008 1259 #define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020 1260 #define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 1261 #define VMX_PIN_BASED_POSTED_INTR 0x00000080 1262 1263 #define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 1264 #define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 1265 #define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 1266 #define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 1267 #define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000 1268 #define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000 1269 #define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000 1270 #define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000 1271 #define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 1272 #define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000 1273 #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 1274 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 1275 #define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000 1276 #define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000 1277 1278 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 1279 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 1280 #define VMX_VM_ENTRY_SMM 0x00000400 1281 #define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 1282 #define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 1283 #define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000 1284 #define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000 1285 #define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000 1286 #define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000 1287 #define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000 1288 #define VMX_VM_ENTRY_LOAD_IA32_PKRS 0x00400000 1289 1290 /* Supported Hyper-V Enlightenments */ 1291 #define HYPERV_FEAT_RELAXED 0 1292 #define HYPERV_FEAT_VAPIC 1 1293 #define HYPERV_FEAT_TIME 2 1294 #define HYPERV_FEAT_CRASH 3 1295 #define HYPERV_FEAT_RESET 4 1296 #define HYPERV_FEAT_VPINDEX 5 1297 #define HYPERV_FEAT_RUNTIME 6 1298 #define HYPERV_FEAT_SYNIC 7 1299 #define HYPERV_FEAT_STIMER 8 1300 #define HYPERV_FEAT_FREQUENCIES 9 1301 #define HYPERV_FEAT_REENLIGHTENMENT 10 1302 #define HYPERV_FEAT_TLBFLUSH 11 1303 #define HYPERV_FEAT_EVMCS 12 1304 #define HYPERV_FEAT_IPI 13 1305 #define HYPERV_FEAT_STIMER_DIRECT 14 1306 #define HYPERV_FEAT_AVIC 15 1307 #define HYPERV_FEAT_SYNDBG 16 1308 #define HYPERV_FEAT_MSR_BITMAP 17 1309 #define HYPERV_FEAT_XMM_INPUT 18 1310 #define HYPERV_FEAT_TLBFLUSH_EXT 19 1311 #define HYPERV_FEAT_TLBFLUSH_DIRECT 20 1312 1313 #ifndef HYPERV_SPINLOCK_NEVER_NOTIFY 1314 #define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF 1315 #endif 1316 1317 #define EXCP00_DIVZ 0 1318 #define EXCP01_DB 1 1319 #define EXCP02_NMI 2 1320 #define EXCP03_INT3 3 1321 #define EXCP04_INTO 4 1322 #define EXCP05_BOUND 5 1323 #define EXCP06_ILLOP 6 1324 #define EXCP07_PREX 7 1325 #define EXCP08_DBLE 8 1326 #define EXCP09_XERR 9 1327 #define EXCP0A_TSS 10 1328 #define EXCP0B_NOSEG 11 1329 #define EXCP0C_STACK 12 1330 #define EXCP0D_GPF 13 1331 #define EXCP0E_PAGE 14 1332 #define EXCP10_COPR 16 1333 #define EXCP11_ALGN 17 1334 #define EXCP12_MCHK 18 1335 1336 #define EXCP_VMEXIT 0x100 /* only for system emulation */ 1337 #define EXCP_SYSCALL 0x101 /* only for user emulation */ 1338 #define EXCP_VSYSCALL 0x102 /* only for user emulation */ 1339 1340 /* i386-specific interrupt pending bits. */ 1341 #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 1342 #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 1343 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 1344 #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 1345 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 1346 #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 1347 #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 1348 1349 /* Use a clearer name for this. */ 1350 #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET 1351 1352 #define CC_OP_HAS_EFLAGS(op) ((op) >= CC_OP_EFLAGS && (op) <= CC_OP_ADCOX) 1353 1354 /* Instead of computing the condition codes after each x86 instruction, 1355 * QEMU just stores one operand (called CC_SRC), the result 1356 * (called CC_DST) and the type of operation (called CC_OP). When the 1357 * condition codes are needed, the condition codes can be calculated 1358 * using this information. Condition codes are not generated if they 1359 * are only needed for conditional branches. 1360 */ 1361 typedef enum { 1362 CC_OP_EFLAGS = 0, /* all cc are explicitly computed, CC_SRC = flags */ 1363 CC_OP_ADCX = 1, /* CC_DST = C, CC_SRC = rest. */ 1364 CC_OP_ADOX = 2, /* CC_SRC2 = O, CC_SRC = rest. */ 1365 CC_OP_ADCOX = 3, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ 1366 1367 /* Low 2 bits = MemOp constant for the size */ 1368 #define CC_OP_FIRST_BWLQ CC_OP_MULB 1369 CC_OP_MULB = 4, /* modify all flags, C, O = (CC_SRC != 0) */ 1370 CC_OP_MULW, 1371 CC_OP_MULL, 1372 CC_OP_MULQ, 1373 1374 CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1375 CC_OP_ADDW, 1376 CC_OP_ADDL, 1377 CC_OP_ADDQ, 1378 1379 CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1380 CC_OP_ADCW, 1381 CC_OP_ADCL, 1382 CC_OP_ADCQ, 1383 1384 CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1385 CC_OP_SUBW, 1386 CC_OP_SUBL, 1387 CC_OP_SUBQ, 1388 1389 CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ 1390 CC_OP_SBBW, 1391 CC_OP_SBBL, 1392 CC_OP_SBBQ, 1393 1394 CC_OP_LOGICB, /* modify all flags, CC_DST = res */ 1395 CC_OP_LOGICW, 1396 CC_OP_LOGICL, 1397 CC_OP_LOGICQ, 1398 1399 CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1400 CC_OP_INCW, 1401 CC_OP_INCL, 1402 CC_OP_INCQ, 1403 1404 CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ 1405 CC_OP_DECW, 1406 CC_OP_DECL, 1407 CC_OP_DECQ, 1408 1409 CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ 1410 CC_OP_SHLW, 1411 CC_OP_SHLL, 1412 CC_OP_SHLQ, 1413 1414 CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ 1415 CC_OP_SARW, 1416 CC_OP_SARL, 1417 CC_OP_SARQ, 1418 1419 CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ 1420 CC_OP_BMILGW, 1421 CC_OP_BMILGL, 1422 CC_OP_BMILGQ, 1423 1424 CC_OP_BLSIB, /* Z,S via CC_DST, C = SRC!=0; O=0; P,A undefined */ 1425 CC_OP_BLSIW, 1426 CC_OP_BLSIL, 1427 CC_OP_BLSIQ, 1428 1429 /* 1430 * Note that only CC_OP_POPCNT (i.e. the one with MO_TL size) 1431 * is used or implemented, because the translation needs 1432 * to zero-extend CC_DST anyway. 1433 */ 1434 CC_OP_POPCNTB__, /* Z via CC_DST, all other flags clear. */ 1435 CC_OP_POPCNTW__, 1436 CC_OP_POPCNTL__, 1437 CC_OP_POPCNTQ__, 1438 CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__, 1439 #define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__ 1440 1441 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ 1442 } CCOp; 1443 1444 /* See X86DecodedInsn.cc_op, using int8_t. */ 1445 QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX); 1446 1447 static inline MemOp cc_op_size(CCOp op) 1448 { 1449 MemOp size = op & 3; 1450 1451 QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3); 1452 assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ); 1453 assert(size <= MO_TL); 1454 1455 return size; 1456 } 1457 1458 typedef struct SegmentCache { 1459 uint32_t selector; 1460 target_ulong base; 1461 uint32_t limit; 1462 uint32_t flags; 1463 } SegmentCache; 1464 1465 typedef union MMXReg { 1466 uint8_t _b_MMXReg[64 / 8]; 1467 uint16_t _w_MMXReg[64 / 16]; 1468 uint32_t _l_MMXReg[64 / 32]; 1469 uint64_t _q_MMXReg[64 / 64]; 1470 float32 _s_MMXReg[64 / 32]; 1471 float64 _d_MMXReg[64 / 64]; 1472 } MMXReg; 1473 1474 typedef union XMMReg { 1475 uint64_t _q_XMMReg[128 / 64]; 1476 } XMMReg; 1477 1478 typedef union YMMReg { 1479 uint64_t _q_YMMReg[256 / 64]; 1480 XMMReg _x_YMMReg[256 / 128]; 1481 } YMMReg; 1482 1483 typedef union ZMMReg { 1484 uint8_t _b_ZMMReg[512 / 8]; 1485 uint16_t _w_ZMMReg[512 / 16]; 1486 uint32_t _l_ZMMReg[512 / 32]; 1487 uint64_t _q_ZMMReg[512 / 64]; 1488 float16 _h_ZMMReg[512 / 16]; 1489 float32 _s_ZMMReg[512 / 32]; 1490 float64 _d_ZMMReg[512 / 64]; 1491 XMMReg _x_ZMMReg[512 / 128]; 1492 YMMReg _y_ZMMReg[512 / 256]; 1493 } ZMMReg; 1494 1495 typedef struct BNDReg { 1496 uint64_t lb; 1497 uint64_t ub; 1498 } BNDReg; 1499 1500 typedef struct BNDCSReg { 1501 uint64_t cfgu; 1502 uint64_t sts; 1503 } BNDCSReg; 1504 1505 #define BNDCFG_ENABLE 1ULL 1506 #define BNDCFG_BNDPRESERVE 2ULL 1507 #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK 1508 1509 #if HOST_BIG_ENDIAN 1510 #define ZMM_B(n) _b_ZMMReg[63 - (n)] 1511 #define ZMM_W(n) _w_ZMMReg[31 - (n)] 1512 #define ZMM_L(n) _l_ZMMReg[15 - (n)] 1513 #define ZMM_H(n) _h_ZMMReg[31 - (n)] 1514 #define ZMM_S(n) _s_ZMMReg[15 - (n)] 1515 #define ZMM_Q(n) _q_ZMMReg[7 - (n)] 1516 #define ZMM_D(n) _d_ZMMReg[7 - (n)] 1517 #define ZMM_X(n) _x_ZMMReg[3 - (n)] 1518 #define ZMM_Y(n) _y_ZMMReg[1 - (n)] 1519 1520 #define XMM_Q(n) _q_XMMReg[1 - (n)] 1521 1522 #define YMM_Q(n) _q_YMMReg[3 - (n)] 1523 #define YMM_X(n) _x_YMMReg[1 - (n)] 1524 1525 #define MMX_B(n) _b_MMXReg[7 - (n)] 1526 #define MMX_W(n) _w_MMXReg[3 - (n)] 1527 #define MMX_L(n) _l_MMXReg[1 - (n)] 1528 #define MMX_S(n) _s_MMXReg[1 - (n)] 1529 #else 1530 #define ZMM_B(n) _b_ZMMReg[n] 1531 #define ZMM_W(n) _w_ZMMReg[n] 1532 #define ZMM_L(n) _l_ZMMReg[n] 1533 #define ZMM_H(n) _h_ZMMReg[n] 1534 #define ZMM_S(n) _s_ZMMReg[n] 1535 #define ZMM_Q(n) _q_ZMMReg[n] 1536 #define ZMM_D(n) _d_ZMMReg[n] 1537 #define ZMM_X(n) _x_ZMMReg[n] 1538 #define ZMM_Y(n) _y_ZMMReg[n] 1539 1540 #define XMM_Q(n) _q_XMMReg[n] 1541 1542 #define YMM_Q(n) _q_YMMReg[n] 1543 #define YMM_X(n) _x_YMMReg[n] 1544 1545 #define MMX_B(n) _b_MMXReg[n] 1546 #define MMX_W(n) _w_MMXReg[n] 1547 #define MMX_L(n) _l_MMXReg[n] 1548 #define MMX_S(n) _s_MMXReg[n] 1549 #endif 1550 #define MMX_Q(n) _q_MMXReg[n] 1551 1552 typedef union { 1553 floatx80 d __attribute__((aligned(16))); 1554 MMXReg mmx; 1555 } FPReg; 1556 1557 typedef struct { 1558 uint64_t base; 1559 uint64_t mask; 1560 } MTRRVar; 1561 1562 #define CPU_NB_REGS64 16 1563 #define CPU_NB_REGS32 8 1564 1565 #ifdef TARGET_X86_64 1566 #define CPU_NB_REGS CPU_NB_REGS64 1567 #else 1568 #define CPU_NB_REGS CPU_NB_REGS32 1569 #endif 1570 1571 #define MAX_FIXED_COUNTERS 3 1572 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) 1573 1574 #define TARGET_INSN_START_EXTRA_WORDS 1 1575 1576 #define NB_OPMASK_REGS 8 1577 1578 /* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish 1579 * that APIC ID hasn't been set yet 1580 */ 1581 #define UNASSIGNED_APIC_ID 0xFFFFFFFF 1582 1583 typedef struct X86LegacyXSaveArea { 1584 uint16_t fcw; 1585 uint16_t fsw; 1586 uint8_t ftw; 1587 uint8_t reserved; 1588 uint16_t fpop; 1589 union { 1590 struct { 1591 uint64_t fpip; 1592 uint64_t fpdp; 1593 }; 1594 struct { 1595 uint32_t fip; 1596 uint32_t fcs; 1597 uint32_t foo; 1598 uint32_t fos; 1599 }; 1600 }; 1601 uint32_t mxcsr; 1602 uint32_t mxcsr_mask; 1603 FPReg fpregs[8]; 1604 uint8_t xmm_regs[16][16]; 1605 uint32_t hw_reserved[12]; 1606 uint32_t sw_reserved[12]; 1607 } X86LegacyXSaveArea; 1608 1609 QEMU_BUILD_BUG_ON(sizeof(X86LegacyXSaveArea) != 512); 1610 1611 typedef struct X86XSaveHeader { 1612 uint64_t xstate_bv; 1613 uint64_t xcomp_bv; 1614 uint64_t reserve0; 1615 uint8_t reserved[40]; 1616 } X86XSaveHeader; 1617 1618 /* Ext. save area 2: AVX State */ 1619 typedef struct XSaveAVX { 1620 uint8_t ymmh[16][16]; 1621 } XSaveAVX; 1622 1623 /* Ext. save area 3: BNDREG */ 1624 typedef struct XSaveBNDREG { 1625 BNDReg bnd_regs[4]; 1626 } XSaveBNDREG; 1627 1628 /* Ext. save area 4: BNDCSR */ 1629 typedef union XSaveBNDCSR { 1630 BNDCSReg bndcsr; 1631 uint8_t data[64]; 1632 } XSaveBNDCSR; 1633 1634 /* Ext. save area 5: Opmask */ 1635 typedef struct XSaveOpmask { 1636 uint64_t opmask_regs[NB_OPMASK_REGS]; 1637 } XSaveOpmask; 1638 1639 /* Ext. save area 6: ZMM_Hi256 */ 1640 typedef struct XSaveZMM_Hi256 { 1641 uint8_t zmm_hi256[16][32]; 1642 } XSaveZMM_Hi256; 1643 1644 /* Ext. save area 7: Hi16_ZMM */ 1645 typedef struct XSaveHi16_ZMM { 1646 uint8_t hi16_zmm[16][64]; 1647 } XSaveHi16_ZMM; 1648 1649 /* Ext. save area 9: PKRU state */ 1650 typedef struct XSavePKRU { 1651 uint32_t pkru; 1652 uint32_t padding; 1653 } XSavePKRU; 1654 1655 /* Ext. save area 17: AMX XTILECFG state */ 1656 typedef struct XSaveXTILECFG { 1657 uint8_t xtilecfg[64]; 1658 } XSaveXTILECFG; 1659 1660 /* Ext. save area 18: AMX XTILEDATA state */ 1661 typedef struct XSaveXTILEDATA { 1662 uint8_t xtiledata[8][1024]; 1663 } XSaveXTILEDATA; 1664 1665 typedef struct { 1666 uint64_t from; 1667 uint64_t to; 1668 uint64_t info; 1669 } LBREntry; 1670 1671 #define ARCH_LBR_NR_ENTRIES 32 1672 1673 /* Ext. save area 19: Supervisor mode Arch LBR state */ 1674 typedef struct XSavesArchLBR { 1675 uint64_t lbr_ctl; 1676 uint64_t lbr_depth; 1677 uint64_t ler_from; 1678 uint64_t ler_to; 1679 uint64_t ler_info; 1680 LBREntry lbr_records[ARCH_LBR_NR_ENTRIES]; 1681 } XSavesArchLBR; 1682 1683 QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); 1684 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); 1685 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); 1686 QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); 1687 QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); 1688 QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); 1689 QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); 1690 QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40); 1691 QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000); 1692 QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328); 1693 1694 typedef struct ExtSaveArea { 1695 uint32_t feature, bits; 1696 uint32_t offset, size; 1697 uint32_t ecx; 1698 } ExtSaveArea; 1699 1700 #define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1) 1701 1702 extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT]; 1703 1704 typedef enum TPRAccess { 1705 TPR_ACCESS_READ, 1706 TPR_ACCESS_WRITE, 1707 } TPRAccess; 1708 1709 /* Cache information data structures: */ 1710 1711 enum CacheType { 1712 DATA_CACHE, 1713 INSTRUCTION_CACHE, 1714 UNIFIED_CACHE 1715 }; 1716 1717 typedef struct CPUCacheInfo { 1718 enum CacheType type; 1719 uint8_t level; 1720 /* Size in bytes */ 1721 uint32_t size; 1722 /* Line size, in bytes */ 1723 uint16_t line_size; 1724 /* 1725 * Associativity. 1726 * Note: representation of fully-associative caches is not implemented 1727 */ 1728 uint8_t associativity; 1729 /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ 1730 uint8_t partitions; 1731 /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ 1732 uint32_t sets; 1733 /* 1734 * Lines per tag. 1735 * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. 1736 * (Is this synonym to @partitions?) 1737 */ 1738 uint8_t lines_per_tag; 1739 1740 /* Self-initializing cache */ 1741 bool self_init; 1742 /* 1743 * WBINVD/INVD is not guaranteed to act upon lower level caches of 1744 * non-originating threads sharing this cache. 1745 * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] 1746 */ 1747 bool no_invd_sharing; 1748 /* 1749 * Cache is inclusive of lower cache levels. 1750 * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. 1751 */ 1752 bool inclusive; 1753 /* 1754 * A complex function is used to index the cache, potentially using all 1755 * address bits. CPUID[4].EDX[bit 2]. 1756 */ 1757 bool complex_indexing; 1758 1759 /* 1760 * Cache Topology. The level that cache is shared in. 1761 * Used to encode CPUID[4].EAX[bits 25:14] or 1762 * CPUID[0x8000001D].EAX[bits 25:14]. 1763 */ 1764 CpuTopologyLevel share_level; 1765 } CPUCacheInfo; 1766 1767 1768 typedef struct CPUCaches { 1769 CPUCacheInfo *l1d_cache; 1770 CPUCacheInfo *l1i_cache; 1771 CPUCacheInfo *l2_cache; 1772 CPUCacheInfo *l3_cache; 1773 } CPUCaches; 1774 1775 typedef struct HVFX86LazyFlags { 1776 target_ulong result; 1777 target_ulong auxbits; 1778 } HVFX86LazyFlags; 1779 1780 typedef struct CPUArchState { 1781 /* standard registers */ 1782 target_ulong regs[CPU_NB_REGS]; 1783 target_ulong eip; 1784 target_ulong eflags; /* eflags register. During CPU emulation, CC 1785 flags and DF are set to zero because they are 1786 stored elsewhere */ 1787 1788 /* emulator internal eflags handling */ 1789 target_ulong cc_dst; 1790 target_ulong cc_src; 1791 target_ulong cc_src2; 1792 uint32_t cc_op; 1793 int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ 1794 uint32_t hflags; /* TB flags, see HF_xxx constants. These flags 1795 are known at translation time. */ 1796 uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ 1797 1798 /* segments */ 1799 SegmentCache segs[6]; /* selector values */ 1800 SegmentCache ldt; 1801 SegmentCache tr; 1802 SegmentCache gdt; /* only base and limit are used */ 1803 SegmentCache idt; /* only base and limit are used */ 1804 1805 target_ulong cr[5]; /* NOTE: cr1 is unused */ 1806 1807 bool pdptrs_valid; 1808 uint64_t pdptrs[4]; 1809 int32_t a20_mask; 1810 1811 BNDReg bnd_regs[4]; 1812 BNDCSReg bndcs_regs; 1813 uint64_t msr_bndcfgs; 1814 uint64_t efer; 1815 1816 /* Beginning of state preserved by INIT (dummy marker). */ 1817 struct {} start_init_save; 1818 1819 /* FPU state */ 1820 unsigned int fpstt; /* top of stack index */ 1821 uint16_t fpus; 1822 uint16_t fpuc; 1823 uint8_t fptags[8]; /* 0 = valid, 1 = empty */ 1824 FPReg fpregs[8]; 1825 /* KVM-only so far */ 1826 uint16_t fpop; 1827 uint16_t fpcs; 1828 uint16_t fpds; 1829 uint64_t fpip; 1830 uint64_t fpdp; 1831 1832 /* emulator internal variables */ 1833 float_status fp_status; 1834 floatx80 ft0; 1835 1836 float_status mmx_status; /* for 3DNow! float ops */ 1837 float_status sse_status; 1838 uint32_t mxcsr; 1839 ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16); 1840 ZMMReg xmm_t0 QEMU_ALIGNED(16); 1841 MMXReg mmx_t0; 1842 1843 uint64_t opmask_regs[NB_OPMASK_REGS]; 1844 #ifdef TARGET_X86_64 1845 uint8_t xtilecfg[64]; 1846 uint8_t xtiledata[8192]; 1847 #endif 1848 1849 /* sysenter registers */ 1850 uint32_t sysenter_cs; 1851 target_ulong sysenter_esp; 1852 target_ulong sysenter_eip; 1853 uint64_t star; 1854 1855 uint64_t vm_hsave; 1856 1857 #ifdef TARGET_X86_64 1858 target_ulong lstar; 1859 target_ulong cstar; 1860 target_ulong fmask; 1861 target_ulong kernelgsbase; 1862 1863 /* FRED MSRs */ 1864 uint64_t fred_rsp0; 1865 uint64_t fred_rsp1; 1866 uint64_t fred_rsp2; 1867 uint64_t fred_rsp3; 1868 uint64_t fred_stklvls; 1869 uint64_t fred_ssp1; 1870 uint64_t fred_ssp2; 1871 uint64_t fred_ssp3; 1872 uint64_t fred_config; 1873 #endif 1874 1875 uint64_t tsc_adjust; 1876 uint64_t tsc_deadline; 1877 uint64_t tsc_aux; 1878 1879 uint64_t xcr0; 1880 1881 uint64_t mcg_status; 1882 uint64_t msr_ia32_misc_enable; 1883 uint64_t msr_ia32_feature_control; 1884 uint64_t msr_ia32_sgxlepubkeyhash[4]; 1885 1886 uint64_t msr_fixed_ctr_ctrl; 1887 uint64_t msr_global_ctrl; 1888 uint64_t msr_global_status; 1889 uint64_t msr_global_ovf_ctrl; 1890 uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; 1891 uint64_t msr_gp_counters[MAX_GP_COUNTERS]; 1892 uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; 1893 1894 uint64_t pat; 1895 uint32_t smbase; 1896 uint64_t msr_smi_count; 1897 1898 uint32_t pkru; 1899 uint32_t pkrs; 1900 uint32_t tsx_ctrl; 1901 1902 uint64_t spec_ctrl; 1903 uint64_t amd_tsc_scale_msr; 1904 uint64_t virt_ssbd; 1905 1906 /* End of state preserved by INIT (dummy marker). */ 1907 struct {} end_init_save; 1908 1909 uint64_t system_time_msr; 1910 uint64_t wall_clock_msr; 1911 uint64_t steal_time_msr; 1912 uint64_t async_pf_en_msr; 1913 uint64_t async_pf_int_msr; 1914 uint64_t pv_eoi_en_msr; 1915 uint64_t poll_control_msr; 1916 1917 /* Partition-wide HV MSRs, will be updated only on the first vcpu */ 1918 uint64_t msr_hv_hypercall; 1919 uint64_t msr_hv_guest_os_id; 1920 uint64_t msr_hv_tsc; 1921 uint64_t msr_hv_syndbg_control; 1922 uint64_t msr_hv_syndbg_status; 1923 uint64_t msr_hv_syndbg_send_page; 1924 uint64_t msr_hv_syndbg_recv_page; 1925 uint64_t msr_hv_syndbg_pending_page; 1926 uint64_t msr_hv_syndbg_options; 1927 1928 /* Per-VCPU HV MSRs */ 1929 uint64_t msr_hv_vapic; 1930 uint64_t msr_hv_crash_params[HV_CRASH_PARAMS]; 1931 uint64_t msr_hv_runtime; 1932 uint64_t msr_hv_synic_control; 1933 uint64_t msr_hv_synic_evt_page; 1934 uint64_t msr_hv_synic_msg_page; 1935 uint64_t msr_hv_synic_sint[HV_SINT_COUNT]; 1936 uint64_t msr_hv_stimer_config[HV_STIMER_COUNT]; 1937 uint64_t msr_hv_stimer_count[HV_STIMER_COUNT]; 1938 uint64_t msr_hv_reenlightenment_control; 1939 uint64_t msr_hv_tsc_emulation_control; 1940 uint64_t msr_hv_tsc_emulation_status; 1941 1942 uint64_t msr_rtit_ctrl; 1943 uint64_t msr_rtit_status; 1944 uint64_t msr_rtit_output_base; 1945 uint64_t msr_rtit_output_mask; 1946 uint64_t msr_rtit_cr3_match; 1947 uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; 1948 1949 /* Per-VCPU XFD MSRs */ 1950 uint64_t msr_xfd; 1951 uint64_t msr_xfd_err; 1952 1953 /* Per-VCPU Arch LBR MSRs */ 1954 uint64_t msr_lbr_ctl; 1955 uint64_t msr_lbr_depth; 1956 LBREntry lbr_records[ARCH_LBR_NR_ENTRIES]; 1957 1958 /* AMD MSRC001_0015 Hardware Configuration */ 1959 uint64_t msr_hwcr; 1960 1961 /* exception/interrupt handling */ 1962 int error_code; 1963 int exception_is_int; 1964 target_ulong exception_next_eip; 1965 target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ 1966 union { 1967 struct CPUBreakpoint *cpu_breakpoint[4]; 1968 struct CPUWatchpoint *cpu_watchpoint[4]; 1969 }; /* break/watchpoints for dr[0..3] */ 1970 int old_exception; /* exception in flight */ 1971 1972 uint64_t vm_vmcb; 1973 uint64_t tsc_offset; 1974 uint64_t intercept; 1975 uint16_t intercept_cr_read; 1976 uint16_t intercept_cr_write; 1977 uint16_t intercept_dr_read; 1978 uint16_t intercept_dr_write; 1979 uint32_t intercept_exceptions; 1980 uint64_t nested_cr3; 1981 uint32_t nested_pg_mode; 1982 uint8_t v_tpr; 1983 uint32_t int_ctl; 1984 1985 /* KVM states, automatically cleared on reset */ 1986 uint8_t nmi_injected; 1987 uint8_t nmi_pending; 1988 1989 uintptr_t retaddr; 1990 1991 /* RAPL MSR */ 1992 uint64_t msr_rapl_power_unit; 1993 uint64_t msr_pkg_energy_status; 1994 1995 /* Fields up to this point are cleared by a CPU reset */ 1996 struct {} end_reset_fields; 1997 1998 /* Fields after this point are preserved across CPU reset. */ 1999 2000 /* processor features (e.g. for CPUID insn) */ 2001 /* Minimum cpuid leaf 7 value */ 2002 uint32_t cpuid_level_func7; 2003 /* Actual cpuid leaf 7 value */ 2004 uint32_t cpuid_min_level_func7; 2005 /* Minimum level/xlevel/xlevel2, based on CPU model + features */ 2006 uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; 2007 /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ 2008 uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; 2009 /* Actual level/xlevel/xlevel2 value: */ 2010 uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; 2011 uint32_t cpuid_vendor1; 2012 uint32_t cpuid_vendor2; 2013 uint32_t cpuid_vendor3; 2014 uint32_t cpuid_version; 2015 FeatureWordArray features; 2016 /* AVX10 version */ 2017 uint8_t avx10_version; 2018 /* Features that were explicitly enabled/disabled */ 2019 FeatureWordArray user_features; 2020 uint32_t cpuid_model[12]; 2021 /* Cache information for CPUID. When legacy-cache=on, the cache data 2022 * on each CPUID leaf will be different, because we keep compatibility 2023 * with old QEMU versions. 2024 */ 2025 CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; 2026 2027 /* MTRRs */ 2028 uint64_t mtrr_fixed[11]; 2029 uint64_t mtrr_deftype; 2030 MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; 2031 2032 /* For KVM */ 2033 uint32_t mp_state; 2034 int32_t exception_nr; 2035 int32_t interrupt_injected; 2036 uint8_t soft_interrupt; 2037 uint8_t exception_pending; 2038 uint8_t exception_injected; 2039 uint8_t has_error_code; 2040 uint8_t exception_has_payload; 2041 uint64_t exception_payload; 2042 uint8_t triple_fault_pending; 2043 uint32_t ins_len; 2044 uint32_t sipi_vector; 2045 bool tsc_valid; 2046 int64_t tsc_khz; 2047 int64_t user_tsc_khz; /* for sanity check only */ 2048 uint64_t apic_bus_freq; 2049 uint64_t tsc; 2050 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 2051 void *xsave_buf; 2052 uint32_t xsave_buf_len; 2053 #endif 2054 #if defined(CONFIG_KVM) 2055 struct kvm_nested_state *nested_state; 2056 MemoryRegion *xen_vcpu_info_mr; 2057 void *xen_vcpu_info_hva; 2058 uint64_t xen_vcpu_info_gpa; 2059 uint64_t xen_vcpu_info_default_gpa; 2060 uint64_t xen_vcpu_time_info_gpa; 2061 uint64_t xen_vcpu_runstate_gpa; 2062 uint8_t xen_vcpu_callback_vector; 2063 bool xen_callback_asserted; 2064 uint16_t xen_virq[XEN_NR_VIRQS]; 2065 uint64_t xen_singleshot_timer_ns; 2066 QEMUTimer *xen_singleshot_timer; 2067 uint64_t xen_periodic_timer_period; 2068 QEMUTimer *xen_periodic_timer; 2069 QemuMutex xen_timers_lock; 2070 #endif 2071 #if defined(CONFIG_HVF) 2072 HVFX86LazyFlags hvf_lflags; 2073 void *hvf_mmio_buf; 2074 #endif 2075 2076 uint64_t mcg_cap; 2077 uint64_t mcg_ctl; 2078 uint64_t mcg_ext_ctl; 2079 uint64_t mce_banks[MCE_BANKS_DEF*4]; 2080 uint64_t xstate_bv; 2081 2082 /* vmstate */ 2083 uint16_t fpus_vmstate; 2084 uint16_t fptag_vmstate; 2085 uint16_t fpregs_format_vmstate; 2086 2087 uint64_t xss; 2088 uint32_t umwait; 2089 2090 TPRAccess tpr_access_type; 2091 2092 X86CPUTopoInfo topo_info; 2093 2094 /* Bitmap of available CPU topology levels for this CPU. */ 2095 DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX); 2096 } CPUX86State; 2097 2098 struct kvm_msrs; 2099 2100 /** 2101 * X86CPU: 2102 * @env: #CPUX86State 2103 * @migratable: If set, only migratable flags will be accepted when "enforce" 2104 * mode is used, and only migratable flags will be included in the "host" 2105 * CPU model. 2106 * 2107 * An x86 CPU. 2108 */ 2109 struct ArchCPU { 2110 CPUState parent_obj; 2111 2112 CPUX86State env; 2113 VMChangeStateEntry *vmsentry; 2114 2115 uint64_t ucode_rev; 2116 2117 uint32_t hyperv_spinlock_attempts; 2118 char *hyperv_vendor; 2119 bool hyperv_synic_kvm_only; 2120 uint64_t hyperv_features; 2121 bool hyperv_passthrough; 2122 OnOffAuto hyperv_no_nonarch_cs; 2123 uint32_t hyperv_vendor_id[3]; 2124 uint32_t hyperv_interface_id[4]; 2125 uint32_t hyperv_limits[3]; 2126 bool hyperv_enforce_cpuid; 2127 uint32_t hyperv_ver_id_build; 2128 uint16_t hyperv_ver_id_major; 2129 uint16_t hyperv_ver_id_minor; 2130 uint32_t hyperv_ver_id_sp; 2131 uint8_t hyperv_ver_id_sb; 2132 uint32_t hyperv_ver_id_sn; 2133 2134 bool check_cpuid; 2135 bool enforce_cpuid; 2136 /* 2137 * Force features to be enabled even if the host doesn't support them. 2138 * This is dangerous and should be done only for testing CPUID 2139 * compatibility. 2140 */ 2141 bool force_features; 2142 bool expose_kvm; 2143 bool expose_tcg; 2144 bool migratable; 2145 bool migrate_smi_count; 2146 bool max_features; /* Enable all supported features automatically */ 2147 uint32_t apic_id; 2148 2149 /* Enables publishing of TSC increment and Local APIC bus frequencies to 2150 * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */ 2151 bool vmware_cpuid_freq; 2152 2153 /* if true the CPUID code directly forward host cache leaves to the guest */ 2154 bool cache_info_passthrough; 2155 2156 /* if true the CPUID code directly forwards 2157 * host monitor/mwait leaves to the guest */ 2158 struct { 2159 uint32_t eax; 2160 uint32_t ebx; 2161 uint32_t ecx; 2162 uint32_t edx; 2163 } mwait; 2164 2165 /* Features that were filtered out because of missing host capabilities */ 2166 FeatureWordArray filtered_features; 2167 2168 /* Enable PMU CPUID bits. This can't be enabled by default yet because 2169 * it doesn't have ABI stability guarantees, as it passes all PMU CPUID 2170 * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel 2171 * capabilities) directly to the guest. 2172 */ 2173 bool enable_pmu; 2174 2175 /* 2176 * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR. 2177 * This can't be initialized with a default because it doesn't have 2178 * stable ABI support yet. It is only allowed to pass all LBR_FMT bits 2179 * returned by kvm_arch_get_supported_msr_feature()(which depends on both 2180 * host CPU and kernel capabilities) to the guest. 2181 */ 2182 uint64_t lbr_fmt; 2183 2184 /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is 2185 * disabled by default to avoid breaking migration between QEMU with 2186 * different LMCE configurations. 2187 */ 2188 bool enable_lmce; 2189 2190 /* Compatibility bits for old machine types. 2191 * If true present virtual l3 cache for VM, the vcpus in the same virtual 2192 * socket share an virtual l3 cache. 2193 */ 2194 bool enable_l3_cache; 2195 2196 /* Compatibility bits for old machine types. 2197 * If true present L1 cache as per-thread, not per-core. 2198 */ 2199 bool l1_cache_per_core; 2200 2201 /* Compatibility bits for old machine types. 2202 * If true present the old cache topology information 2203 */ 2204 bool legacy_cache; 2205 2206 /* Compatibility bits for old machine types. 2207 * If true decode the CPUID Function 0x8000001E_ECX to support multiple 2208 * nodes per processor 2209 */ 2210 bool legacy_multi_node; 2211 2212 /* Compatibility bits for old machine types: */ 2213 bool enable_cpuid_0xb; 2214 2215 /* Enable auto level-increase for all CPUID leaves */ 2216 bool full_cpuid_auto_level; 2217 2218 /* Only advertise CPUID leaves defined by the vendor */ 2219 bool vendor_cpuid_only; 2220 2221 /* Only advertise TOPOEXT features that AMD defines */ 2222 bool amd_topoext_features_only; 2223 2224 /* Enable auto level-increase for Intel Processor Trace leave */ 2225 bool intel_pt_auto_level; 2226 2227 /* if true fill the top bits of the MTRR_PHYSMASKn variable range */ 2228 bool fill_mtrr_mask; 2229 2230 /* if true override the phys_bits value with a value read from the host */ 2231 bool host_phys_bits; 2232 2233 /* if set, limit maximum value for phys_bits when host_phys_bits is true */ 2234 uint8_t host_phys_bits_limit; 2235 2236 /* Forcefully disable KVM PV features not exposed in guest CPUIDs */ 2237 bool kvm_pv_enforce_cpuid; 2238 2239 /* Number of physical address bits supported */ 2240 uint32_t phys_bits; 2241 2242 /* 2243 * Number of guest physical address bits available. Usually this is 2244 * identical to host physical address bits. With NPT or EPT 4-level 2245 * paging, guest physical address space might be restricted to 48 bits 2246 * even if the host cpu supports more physical address bits. 2247 */ 2248 uint32_t guest_phys_bits; 2249 2250 /* in order to simplify APIC support, we leave this pointer to the 2251 user */ 2252 struct DeviceState *apic_state; 2253 struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram; 2254 Notifier machine_done; 2255 2256 struct kvm_msrs *kvm_msr_buf; 2257 2258 int32_t node_id; /* NUMA node this CPU belongs to */ 2259 int32_t socket_id; 2260 int32_t die_id; 2261 int32_t module_id; 2262 int32_t core_id; 2263 int32_t thread_id; 2264 2265 int32_t hv_max_vps; 2266 2267 bool xen_vapic; 2268 }; 2269 2270 typedef struct X86CPUModel X86CPUModel; 2271 2272 /** 2273 * X86CPUClass: 2274 * @cpu_def: CPU model definition 2275 * @host_cpuid_required: Whether CPU model requires cpuid from host. 2276 * @ordering: Ordering on the "-cpu help" CPU model list. 2277 * @migration_safe: See CpuDefinitionInfo::migration_safe 2278 * @static_model: See CpuDefinitionInfo::static 2279 * @parent_realize: The parent class' realize handler. 2280 * @parent_phases: The parent class' reset phase handlers. 2281 * 2282 * An x86 CPU model or family. 2283 */ 2284 struct X86CPUClass { 2285 CPUClass parent_class; 2286 2287 /* 2288 * CPU definition, automatically loaded by instance_init if not NULL. 2289 * Should be eventually replaced by subclass-specific property defaults. 2290 */ 2291 X86CPUModel *model; 2292 2293 bool host_cpuid_required; 2294 int ordering; 2295 bool migration_safe; 2296 bool static_model; 2297 2298 /* 2299 * Optional description of CPU model. 2300 * If unavailable, cpu_def->model_id is used. 2301 */ 2302 const char *model_description; 2303 2304 DeviceRealize parent_realize; 2305 DeviceUnrealize parent_unrealize; 2306 ResettablePhases parent_phases; 2307 }; 2308 2309 #ifndef CONFIG_USER_ONLY 2310 extern const VMStateDescription vmstate_x86_cpu; 2311 #endif 2312 2313 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); 2314 2315 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, 2316 int cpuid, DumpState *s); 2317 int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, 2318 int cpuid, DumpState *s); 2319 int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 2320 DumpState *s); 2321 int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, 2322 DumpState *s); 2323 2324 bool x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, 2325 Error **errp); 2326 2327 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); 2328 2329 int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); 2330 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 2331 void x86_cpu_gdb_init(CPUState *cs); 2332 2333 void x86_cpu_list(void); 2334 int cpu_x86_support_mca_broadcast(CPUX86State *env); 2335 2336 #ifndef CONFIG_USER_ONLY 2337 hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, 2338 MemTxAttrs *attrs); 2339 int cpu_get_pic_interrupt(CPUX86State *s); 2340 2341 /* MS-DOS compatibility mode FPU exception support */ 2342 void x86_register_ferr_irq(qemu_irq irq); 2343 void fpu_check_raise_ferr_irq(CPUX86State *s); 2344 void cpu_set_ignne(void); 2345 void cpu_clear_ignne(void); 2346 #endif 2347 2348 /* mpx_helper.c */ 2349 void cpu_sync_bndcs_hflags(CPUX86State *env); 2350 2351 /* this function must always be used to load data in the segment 2352 cache: it synchronizes the hflags with the segment cache values */ 2353 static inline void cpu_x86_load_seg_cache(CPUX86State *env, 2354 X86Seg seg_reg, unsigned int selector, 2355 target_ulong base, 2356 unsigned int limit, 2357 unsigned int flags) 2358 { 2359 SegmentCache *sc; 2360 unsigned int new_hflags; 2361 2362 sc = &env->segs[seg_reg]; 2363 sc->selector = selector; 2364 sc->base = base; 2365 sc->limit = limit; 2366 sc->flags = flags; 2367 2368 /* update the hidden flags */ 2369 { 2370 if (seg_reg == R_CS) { 2371 #ifdef TARGET_X86_64 2372 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { 2373 /* long mode */ 2374 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 2375 env->hflags &= ~(HF_ADDSEG_MASK); 2376 } else 2377 #endif 2378 { 2379 /* legacy / compatibility case */ 2380 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) 2381 >> (DESC_B_SHIFT - HF_CS32_SHIFT); 2382 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | 2383 new_hflags; 2384 } 2385 } 2386 if (seg_reg == R_SS) { 2387 int cpl = (flags >> DESC_DPL_SHIFT) & 3; 2388 #if HF_CPL_MASK != 3 2389 #error HF_CPL_MASK is hardcoded 2390 #endif 2391 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; 2392 /* Possibly switch between BNDCFGS and BNDCFGU */ 2393 cpu_sync_bndcs_hflags(env); 2394 } 2395 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) 2396 >> (DESC_B_SHIFT - HF_SS32_SHIFT); 2397 if (env->hflags & HF_CS64_MASK) { 2398 /* zero base assumed for DS, ES and SS in long mode */ 2399 } else if (!(env->cr[0] & CR0_PE_MASK) || 2400 (env->eflags & VM_MASK) || 2401 !(env->hflags & HF_CS32_MASK)) { 2402 /* XXX: try to avoid this test. The problem comes from the 2403 fact that is real mode or vm86 mode we only modify the 2404 'base' and 'selector' fields of the segment cache to go 2405 faster. A solution may be to force addseg to one in 2406 translate-i386.c. */ 2407 new_hflags |= HF_ADDSEG_MASK; 2408 } else { 2409 new_hflags |= ((env->segs[R_DS].base | 2410 env->segs[R_ES].base | 2411 env->segs[R_SS].base) != 0) << 2412 HF_ADDSEG_SHIFT; 2413 } 2414 env->hflags = (env->hflags & 2415 ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; 2416 } 2417 } 2418 2419 static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, 2420 uint8_t sipi_vector) 2421 { 2422 CPUState *cs = CPU(cpu); 2423 CPUX86State *env = &cpu->env; 2424 2425 env->eip = 0; 2426 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, 2427 sipi_vector << 12, 2428 env->segs[R_CS].limit, 2429 env->segs[R_CS].flags); 2430 cs->halted = 0; 2431 } 2432 2433 uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu); 2434 2435 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 2436 target_ulong *base, unsigned int *limit, 2437 unsigned int *flags); 2438 2439 /* op_helper.c */ 2440 /* used for debug or cpu save/restore */ 2441 2442 /* cpu-exec.c */ 2443 /* 2444 * The following helpers are only usable in user mode simulation. 2445 * The host pointers should come from lock_user(). 2446 */ 2447 void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector); 2448 void cpu_x86_fsave(CPUX86State *s, void *host, size_t len); 2449 void cpu_x86_frstor(CPUX86State *s, void *host, size_t len); 2450 void cpu_x86_fxsave(CPUX86State *s, void *host, size_t len); 2451 void cpu_x86_fxrstor(CPUX86State *s, void *host, size_t len); 2452 void cpu_x86_xsave(CPUX86State *s, void *host, size_t len, uint64_t rbfm); 2453 bool cpu_x86_xrstor(CPUX86State *s, void *host, size_t len, uint64_t rbfm); 2454 2455 /* cpu.c */ 2456 void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 2457 uint32_t vendor2, uint32_t vendor3); 2458 typedef struct PropValue { 2459 const char *prop, *value; 2460 } PropValue; 2461 void x86_cpu_apply_props(X86CPU *cpu, PropValue *props); 2462 2463 void x86_cpu_after_reset(X86CPU *cpu); 2464 2465 uint32_t cpu_x86_virtual_addr_width(CPUX86State *env); 2466 2467 /* cpu.c other functions (cpuid) */ 2468 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 2469 uint32_t *eax, uint32_t *ebx, 2470 uint32_t *ecx, uint32_t *edx); 2471 void cpu_clear_apic_feature(CPUX86State *env); 2472 void cpu_set_apic_feature(CPUX86State *env); 2473 void host_cpuid(uint32_t function, uint32_t count, 2474 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); 2475 bool cpu_has_x2apic_feature(CPUX86State *env); 2476 2477 /* helper.c */ 2478 void x86_cpu_set_a20(X86CPU *cpu, int a20_state); 2479 void cpu_sync_avx_hflag(CPUX86State *env); 2480 2481 #ifndef CONFIG_USER_ONLY 2482 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) 2483 { 2484 return !!attrs.secure; 2485 } 2486 2487 static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) 2488 { 2489 return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); 2490 } 2491 2492 /* 2493 * load efer and update the corresponding hflags. XXX: do consistency 2494 * checks with cpuid bits? 2495 */ 2496 void cpu_load_efer(CPUX86State *env, uint64_t val); 2497 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); 2498 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); 2499 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); 2500 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); 2501 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); 2502 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); 2503 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); 2504 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); 2505 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); 2506 #endif 2507 2508 /* will be suppressed */ 2509 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 2510 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 2511 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 2512 void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); 2513 2514 /* hw/pc.c */ 2515 uint64_t cpu_get_tsc(CPUX86State *env); 2516 2517 #define CPU_RESOLVING_TYPE TYPE_X86_CPU 2518 2519 #ifdef TARGET_X86_64 2520 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64") 2521 #else 2522 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") 2523 #endif 2524 2525 #define cpu_list x86_cpu_list 2526 2527 /* MMU modes definitions */ 2528 #define MMU_KSMAP64_IDX 0 2529 #define MMU_KSMAP32_IDX 1 2530 #define MMU_USER64_IDX 2 2531 #define MMU_USER32_IDX 3 2532 #define MMU_KNOSMAP64_IDX 4 2533 #define MMU_KNOSMAP32_IDX 5 2534 #define MMU_PHYS_IDX 6 2535 #define MMU_NESTED_IDX 7 2536 2537 #ifdef CONFIG_USER_ONLY 2538 #ifdef TARGET_X86_64 2539 #define MMU_USER_IDX MMU_USER64_IDX 2540 #else 2541 #define MMU_USER_IDX MMU_USER32_IDX 2542 #endif 2543 #endif 2544 2545 static inline bool is_mmu_index_smap(int mmu_index) 2546 { 2547 return (mmu_index & ~1) == MMU_KSMAP64_IDX; 2548 } 2549 2550 static inline bool is_mmu_index_user(int mmu_index) 2551 { 2552 return (mmu_index & ~1) == MMU_USER64_IDX; 2553 } 2554 2555 static inline bool is_mmu_index_32(int mmu_index) 2556 { 2557 assert(mmu_index < MMU_PHYS_IDX); 2558 return mmu_index & 1; 2559 } 2560 2561 int x86_mmu_index_pl(CPUX86State *env, unsigned pl); 2562 int cpu_mmu_index_kernel(CPUX86State *env); 2563 2564 #define CC_DST (env->cc_dst) 2565 #define CC_SRC (env->cc_src) 2566 #define CC_SRC2 (env->cc_src2) 2567 #define CC_OP (env->cc_op) 2568 2569 #include "exec/cpu-all.h" 2570 #include "svm.h" 2571 2572 #if !defined(CONFIG_USER_ONLY) 2573 #include "hw/i386/apic.h" 2574 #endif 2575 2576 static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc, 2577 uint64_t *cs_base, uint32_t *flags) 2578 { 2579 *flags = env->hflags | 2580 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); 2581 if (env->hflags & HF_CS64_MASK) { 2582 *cs_base = 0; 2583 *pc = env->eip; 2584 } else { 2585 *cs_base = env->segs[R_CS].base; 2586 *pc = (uint32_t)(*cs_base + env->eip); 2587 } 2588 } 2589 2590 void do_cpu_init(X86CPU *cpu); 2591 2592 #define MCE_INJECT_BROADCAST 1 2593 #define MCE_INJECT_UNCOND_AO 2 2594 2595 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, 2596 uint64_t status, uint64_t mcg_status, uint64_t addr, 2597 uint64_t misc, int flags); 2598 2599 uint32_t cpu_cc_compute_all(CPUX86State *env1); 2600 2601 static inline uint32_t cpu_compute_eflags(CPUX86State *env) 2602 { 2603 uint32_t eflags = env->eflags; 2604 if (tcg_enabled()) { 2605 eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK); 2606 } 2607 return eflags; 2608 } 2609 2610 static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env) 2611 { 2612 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 }); 2613 } 2614 2615 static inline int32_t x86_get_a20_mask(CPUX86State *env) 2616 { 2617 if (env->hflags & HF_SMM_MASK) { 2618 return -1; 2619 } else { 2620 return env->a20_mask; 2621 } 2622 } 2623 2624 static inline bool cpu_has_vmx(CPUX86State *env) 2625 { 2626 return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; 2627 } 2628 2629 static inline bool cpu_has_svm(CPUX86State *env) 2630 { 2631 return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM; 2632 } 2633 2634 /* 2635 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set. 2636 * Since it was set, CR4.VMXE must remain set as long as vCPU is in 2637 * VMX operation. This is because CR4.VMXE is one of the bits set 2638 * in MSR_IA32_VMX_CR4_FIXED1. 2639 * 2640 * There is one exception to above statement when vCPU enters SMM mode. 2641 * When a vCPU enters SMM mode, it temporarily exit VMX operation and 2642 * may also reset CR4.VMXE during execution in SMM mode. 2643 * When vCPU exits SMM mode, vCPU state is restored to be in VMX operation 2644 * and CR4.VMXE is restored to it's original value of being set. 2645 * 2646 * Therefore, when vCPU is not in SMM mode, we can infer whether 2647 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot 2648 * know for certain. 2649 */ 2650 static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) 2651 { 2652 return cpu_has_vmx(env) && 2653 ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); 2654 } 2655 2656 /* excp_helper.c */ 2657 int get_pg_mode(CPUX86State *env); 2658 2659 /* fpu_helper.c */ 2660 2661 /* Set all non-runtime-variable float_status fields to x86 handling */ 2662 void cpu_init_fp_statuses(CPUX86State *env); 2663 void update_fp_status(CPUX86State *env); 2664 void update_mxcsr_status(CPUX86State *env); 2665 void update_mxcsr_from_sse_status(CPUX86State *env); 2666 2667 static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) 2668 { 2669 env->mxcsr = mxcsr; 2670 if (tcg_enabled()) { 2671 update_mxcsr_status(env); 2672 } 2673 } 2674 2675 static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) 2676 { 2677 env->fpuc = fpuc; 2678 if (tcg_enabled()) { 2679 update_fp_status(env); 2680 } 2681 } 2682 2683 /* svm_helper.c */ 2684 #ifdef CONFIG_USER_ONLY 2685 static inline void 2686 cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, 2687 uint64_t param, uintptr_t retaddr) 2688 { /* no-op */ } 2689 static inline bool 2690 cpu_svm_has_intercept(CPUX86State *env, uint32_t type) 2691 { return false; } 2692 #else 2693 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, 2694 uint64_t param, uintptr_t retaddr); 2695 bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type); 2696 #endif 2697 2698 /* apic.c */ 2699 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); 2700 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, 2701 TPRAccess access); 2702 2703 /* Special values for X86CPUVersion: */ 2704 2705 /* Resolve to latest CPU version */ 2706 #define CPU_VERSION_LATEST -1 2707 2708 /* 2709 * Resolve to version defined by current machine type. 2710 * See x86_cpu_set_default_version() 2711 */ 2712 #define CPU_VERSION_AUTO -2 2713 2714 /* Don't resolve to any versioned CPU models, like old QEMU versions */ 2715 #define CPU_VERSION_LEGACY 0 2716 2717 typedef int X86CPUVersion; 2718 2719 /* 2720 * Set default CPU model version for CPU models having 2721 * version == CPU_VERSION_AUTO. 2722 */ 2723 void x86_cpu_set_default_version(X86CPUVersion version); 2724 2725 #ifndef CONFIG_USER_ONLY 2726 2727 void do_cpu_sipi(X86CPU *cpu); 2728 2729 #define APIC_DEFAULT_ADDRESS 0xfee00000 2730 #define APIC_SPACE_SIZE 0x100000 2731 2732 /* cpu-dump.c */ 2733 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags); 2734 2735 #endif 2736 2737 /* cpu.c */ 2738 bool cpu_is_bsp(X86CPU *cpu); 2739 2740 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen); 2741 void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen); 2742 uint32_t xsave_area_size(uint64_t mask, bool compacted); 2743 void x86_update_hflags(CPUX86State* env); 2744 2745 static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat) 2746 { 2747 return !!(cpu->hyperv_features & BIT(feat)); 2748 } 2749 2750 static inline uint64_t cr4_reserved_bits(CPUX86State *env) 2751 { 2752 uint64_t reserved_bits = CR4_RESERVED_MASK; 2753 if (!env->features[FEAT_XSAVE]) { 2754 reserved_bits |= CR4_OSXSAVE_MASK; 2755 } 2756 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) { 2757 reserved_bits |= CR4_SMEP_MASK; 2758 } 2759 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { 2760 reserved_bits |= CR4_SMAP_MASK; 2761 } 2762 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) { 2763 reserved_bits |= CR4_FSGSBASE_MASK; 2764 } 2765 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) { 2766 reserved_bits |= CR4_PKE_MASK; 2767 } 2768 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) { 2769 reserved_bits |= CR4_LA57_MASK; 2770 } 2771 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) { 2772 reserved_bits |= CR4_UMIP_MASK; 2773 } 2774 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) { 2775 reserved_bits |= CR4_PKS_MASK; 2776 } 2777 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) { 2778 reserved_bits |= CR4_LAM_SUP_MASK; 2779 } 2780 if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED)) { 2781 reserved_bits |= CR4_FRED_MASK; 2782 } 2783 return reserved_bits; 2784 } 2785 2786 static inline bool ctl_has_irq(CPUX86State *env) 2787 { 2788 uint32_t int_prio; 2789 uint32_t tpr; 2790 2791 int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT; 2792 tpr = env->int_ctl & V_TPR_MASK; 2793 2794 if (env->int_ctl & V_IGN_TPR_MASK) { 2795 return (env->int_ctl & V_IRQ_MASK); 2796 } 2797 2798 return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr); 2799 } 2800 2801 #if defined(TARGET_X86_64) && \ 2802 defined(CONFIG_USER_ONLY) && \ 2803 defined(CONFIG_LINUX) 2804 # define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20) 2805 #endif 2806 2807 #endif /* I386_CPU_H */ 2808