/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef I386_CPU_H
#define I386_CPU_H

#include "system/tcg.h"
#include "cpu-qom.h"
#include "kvm/hyperv-proto.h"
#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
#include "exec/cpu-interrupt.h"
#include "exec/memop.h"
#include "hw/i386/topology.h"
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"
#include "qemu/timer.h"
#include "standard-headers/asm-x86/kvm_para.h"

#define XEN_NR_VIRQS 24

#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
#else
#define I386_ELF_MACHINE EM_386
#define ELF_MACHINE_UNAME "i686"
#endif

enum {
    R_EAX = 0,
    R_ECX = 1,
    R_EDX = 2,
    R_EBX = 3,
    R_ESP = 4,
    R_EBP = 5,
    R_ESI = 6,
    R_EDI = 7,
    R_R8 = 8,
    R_R9 = 9,
    R_R10 = 10,
    R_R11 = 11,
    R_R12 = 12,
    R_R13 = 13,
    R_R14 = 14,
    R_R15 = 15,

    R_AL = 0,
    R_CL = 1,
    R_DL = 2,
    R_BL = 3,
    R_AH = 4,
    R_CH = 5,
    R_DH = 6,
    R_BH = 7,
};

typedef enum X86Seg {
    R_ES = 0,
    R_CS = 1,
    R_SS = 2,
    R_DS = 3,
    R_FS = 4,
    R_GS = 5,
    R_LDTR = 6,
    R_TR = 7,
} X86Seg;

/* segment descriptor fields */
#define DESC_G_SHIFT 23
#define DESC_G_MASK (1 << DESC_G_SHIFT)
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT 21 /* x86_64 only: 64-bit code segment */
#define DESC_L_MASK (1 << DESC_L_SHIFT)
#define DESC_AVL_SHIFT 20
#define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
#define DESC_P_SHIFT 15
#define DESC_P_MASK (1 << DESC_P_SHIFT)
#define DESC_DPL_SHIFT 13
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
#define DESC_S_SHIFT 12
#define DESC_S_MASK (1 << DESC_S_SHIFT)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK (1 << 8)

#define DESC_CS_MASK (1 << 11) /* 1 = code segment, 0 = data segment */
#define DESC_C_MASK (1 << 10) /* code: conforming */
#define DESC_R_MASK (1 << 9)  /* code: readable */

#define DESC_E_MASK (1 << 10) /* data: expansion direction */
#define DESC_W_MASK (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
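
/*
 * Illustrative composition (a sketch, not used elsewhere in this header):
 * these masks apply to the high 32 bits of a segment descriptor.  A flat
 * ring-0 32-bit code segment (base 0, limit 0xfffff, 4K granularity) has
 * a high word of 0x00cf9a00, i.e. DESC_G_MASK | DESC_B_MASK | DESC_P_MASK
 * | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK plus limit bits 19:16.
 */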

/* eflags masks */
#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800

#define TF_SHIFT 8
#define IOPL_SHIFT 12
#define VM_SHIFT 17

#define TF_MASK 0x00000100
#define IF_MASK 0x00000200
#define DF_MASK 0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK 0x00004000
#define RF_MASK 0x00010000
#define VM_MASK 0x00020000
#define AC_MASK 0x00040000
#define VIF_MASK 0x00080000
#define VIP_MASK 0x00100000
#define ID_MASK 0x00200000
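
/*
 * A minimal sketch of how the shift/mask pairs above combine; "eflags"
 * here stands for any value holding the guest EFLAGS:
 *
 *     int iopl = (eflags >> IOPL_SHIFT) & 3;
 *     bool vm86 = (eflags & VM_MASK) != 0;
 */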

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only INHIBIT_IRQ, SMM and SVME are not redundant with eflags.
   We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16-bit or 32-bit segments */
#define HF_CS32_SHIFT 4
#define HF_SS32_SHIFT 5
/* zero base for DS, ES and SS: can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT 6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT 7
#define HF_TF_SHIFT 8 /* must be same as eflags */
#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT 10
#define HF_TS_SHIFT 11
#define HF_IOPL_SHIFT 12 /* must be same as eflags */
#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
#define HF_AC_SHIFT 18 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_GUEST_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
#define HF_UMIP_SHIFT 27 /* CR4.UMIP */
#define HF_AVX_EN_SHIFT 28 /* AVX Enabled (CR4+XCR0) */

#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK (1 << HF_PE_SHIFT)
#define HF_TF_MASK (1 << HF_TF_SHIFT)
#define HF_MP_MASK (1 << HF_MP_SHIFT)
#define HF_EM_MASK (1 << HF_EM_SHIFT)
#define HF_TS_MASK (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
#define HF_AC_MASK (1 << HF_AC_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_GUEST_MASK (1 << HF_GUEST_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
#define HF_UMIP_MASK (1 << HF_UMIP_SHIFT)
#define HF_AVX_EN_MASK (1 << HF_AVX_EN_SHIFT)

/* hflags2 */

#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
#define HF2_NPT_SHIFT 6 /* Nested Paging enabled */
#define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */
#define HF2_VGIF_SHIFT 8 /* Can take VIRQ */

#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
#define HF2_NPT_MASK (1 << HF2_NPT_SHIFT)
#define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT)
#define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK (1U << 0)
#define CR0_MP_MASK (1U << 1)
#define CR0_EM_MASK (1U << 2)
#define CR0_TS_MASK (1U << 3)
#define CR0_ET_MASK (1U << 4)
#define CR0_NE_MASK (1U << 5)
#define CR0_WP_MASK (1U << 16)
#define CR0_AM_MASK (1U << 18)
#define CR0_NW_MASK (1U << 29)
#define CR0_CD_MASK (1U << 30)
#define CR0_PG_MASK (1U << 31)

#define CR4_VME_MASK (1U << 0)
#define CR4_PVI_MASK (1U << 1)
#define CR4_TSD_MASK (1U << 2)
#define CR4_DE_MASK (1U << 3)
#define CR4_PSE_MASK (1U << 4)
#define CR4_PAE_MASK (1U << 5)
#define CR4_MCE_MASK (1U << 6)
#define CR4_PGE_MASK (1U << 7)
#define CR4_PCE_MASK (1U << 8)
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
#define CR4_UMIP_MASK (1U << 11)
#define CR4_LA57_MASK (1U << 12)
#define CR4_VMXE_MASK (1U << 13)
#define CR4_SMXE_MASK (1U << 14)
#define CR4_FSGSBASE_MASK (1U << 16)
#define CR4_PCIDE_MASK (1U << 17)
#define CR4_OSXSAVE_MASK (1U << 18)
#define CR4_SMEP_MASK (1U << 20)
#define CR4_SMAP_MASK (1U << 21)
#define CR4_PKE_MASK (1U << 22)
#define CR4_PKS_MASK (1U << 24)
#define CR4_LAM_SUP_MASK (1U << 28)

#ifdef TARGET_X86_64
#define CR4_FRED_MASK (1ULL << 32)
#else
#define CR4_FRED_MASK 0
#endif

#define CR4_RESERVED_MASK \
    (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
                     | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
                     | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \
                     | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \
                     | CR4_LA57_MASK \
                     | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \
                     | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK \
                     | CR4_LAM_SUP_MASK | CR4_FRED_MASK))

#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
#define DR6_FIXED_1 0xffff0ff0

#define DR7_GD (1 << 13)
#define DR7_TYPE_SHIFT 16
#define DR7_LEN_SHIFT 18
#define DR7_FIXED_1 0x00000400
#define DR7_GLOBAL_BP_MASK 0xaa
#define DR7_LOCAL_BP_MASK 0x55
#define DR7_MAX_BP 4
#define DR7_TYPE_BP_INST 0x0
#define DR7_TYPE_DATA_WR 0x1
#define DR7_TYPE_IO_RW 0x2
#define DR7_TYPE_DATA_RW 0x3
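
/*
 * Sketch of the DR7 layout these constants describe: breakpoint n
 * (0 <= n < DR7_MAX_BP) has a 2-bit type field at DR7_TYPE_SHIFT + n * 4
 * and a 2-bit length field at DR7_LEN_SHIFT + n * 4, so its type can be
 * extracted as
 *
 *     int type = (dr7 >> (DR7_TYPE_SHIFT + n * 4)) & 3;
 *
 * which yields one of the DR7_TYPE_* values above.
 */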

#define DR_RESERVED_MASK 0xffffffff00000000ULL

#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
#define PG_USER_BIT 2
#define PG_PWT_BIT 3
#define PG_PCD_BIT 4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT 6
#define PG_PSE_BIT 7
#define PG_GLOBAL_BIT 8
#define PG_PSE_PAT_BIT 12
#define PG_PKRU_BIT 59
#define PG_NX_BIT 63

#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
#define PG_RW_MASK (1 << PG_RW_BIT)
#define PG_USER_MASK (1 << PG_USER_BIT)
#define PG_PWT_MASK (1 << PG_PWT_BIT)
#define PG_PCD_MASK (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK 0x000ffffffffff000LL
#define PG_HI_USER_MASK 0x7ff0000000000000LL
#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK (1ULL << PG_NX_BIT)

#define PG_ERROR_W_BIT 1

#define PG_ERROR_P_MASK 0x01
#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK 0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK 0x10
#define PG_ERROR_PK_MASK 0x20

#define PG_MODE_PAE (1 << 0)
#define PG_MODE_LMA (1 << 1)
#define PG_MODE_NXE (1 << 2)
#define PG_MODE_PSE (1 << 3)
#define PG_MODE_LA57 (1 << 4)
#define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15)

/* Bits of CR4 that do not affect the NPT page format. */
#define PG_MODE_WP (1 << 16)
#define PG_MODE_PKE (1 << 17)
#define PG_MODE_PKS (1 << 18)
#define PG_MODE_SMEP (1 << 19)
#define PG_MODE_PG (1 << 20)

#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */

#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF 10

#define MCG_CAP_BANKS_MASK 0xff

#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
#define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */

#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */

#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
#define MCI_STATUS_DEFERRED (1ULL<<44) /* Deferred error */
#define MCI_STATUS_POISON (1ULL<<43) /* Poisoned data consumed */

/* MISC register defines */
#define MCM_ADDR_SEGOFF 0 /* segment offset */
#define MCM_ADDR_LINEAR 1 /* linear address */
#define MCM_ADDR_PHYS 2 /* physical address */
#define MCM_ADDR_MEM 3 /* memory address */
#define MCM_ADDR_GENERIC 7 /* generic */

#define MSR_IA32_TSC 0x10
#define MSR_IA32_APICBASE 0x1b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_EXTD (1 << 10)
#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
#define MSR_IA32_APICBASE_RESERVED \
    (~(uint64_t)(MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE \
                 | MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_BASE))

#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_SPEC_CTRL 0x48
#define MSR_VIRT_SSBD 0xc001011f
#define MSR_IA32_PRED_CMD 0x49
#define MSR_IA32_UCODE_REV 0x8b
#define MSR_IA32_CORE_CAPABILITY 0xcf

#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define ARCH_CAP_TSX_CTRL_MSR (1<<7)

#define MSR_IA32_PERF_CAPABILITIES 0x345
#define PERF_CAP_LBR_FMT 0x3f

#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_IA32_PKRS 0x6e1
#define MSR_RAPL_POWER_UNIT 0x00000606
#define MSR_PKG_POWER_LIMIT 0x00000610
#define MSR_PKG_ENERGY_STATUS 0x00000611
#define MSR_PKG_POWER_INFO 0x00000614
#define MSR_ARCH_LBR_CTL 0x000014ce
#define MSR_ARCH_LBR_DEPTH 0x000014cf
#define MSR_ARCH_LBR_FROM_0 0x00001500
#define MSR_ARCH_LBR_TO_0 0x00001600
#define MSR_ARCH_LBR_INFO_0 0x00001200

#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_SGX_LC (1ULL << 17)
#define FEATURE_CONTROL_SGX (1ULL << 18)
#define FEATURE_CONTROL_LMCE (1<<20)

#define MSR_IA32_SGXLEPUBKEYHASH0 0x8c
#define MSR_IA32_SGXLEPUBKEYHASH1 0x8d
#define MSR_IA32_SGXLEPUBKEYHASH2 0x8e
#define MSR_IA32_SGXLEPUBKEYHASH3 0x8f

#define MSR_P6_PERFCTR0 0xc1

#define MSR_IA32_SMBASE 0x9e
#define MSR_SMI_COUNT 0x34
#define MSR_CORE_THREAD_COUNT 0x35
#define MSR_MTRRcap 0xfe
#define MSR_MTRRcap_VCNT 8
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)

#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

#define MSR_MCG_CAP 0x179
#define MSR_MCG_STATUS 0x17a
#define MSR_MCG_CTL 0x17b
#define MSR_MCG_EXT_CTL 0x4d0

#define MSR_P6_EVNTSEL0 0x186

#define MSR_IA32_PERF_STATUS 0x198

#define MSR_IA32_MISC_ENABLE 0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT 1
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)

#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)

#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
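
/*
 * Worked example of the variable-range MTRR MSR numbering: register 1
 * uses MSR_MTRRphysBase(1) == 0x202 and MSR_MTRRphysMask(1) == 0x203,
 * and MSR_MTRRphysIndex() inverts the mapping for either MSR of the
 * pair, e.g. MSR_MTRRphysIndex(0x203) == 1.
 */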

#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
#define MSR_MTRRfix4K_C8000 0x269
#define MSR_MTRRfix4K_D0000 0x26a
#define MSR_MTRRfix4K_D8000 0x26b
#define MSR_MTRRfix4K_E0000 0x26c
#define MSR_MTRRfix4K_E8000 0x26d
#define MSR_MTRRfix4K_F0000 0x26e
#define MSR_MTRRfix4K_F8000 0x26f

#define MSR_PAT 0x277

#define MSR_MTRRdefType 0x2ff

#define MSR_CORE_PERF_FIXED_CTR0 0x309
#define MSR_CORE_PERF_FIXED_CTR1 0x30a
#define MSR_CORE_PERF_FIXED_CTR2 0x30b
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390

#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
#define MSR_MC0_MISC 0x403

#define MSR_IA32_RTIT_OUTPUT_BASE 0x560
#define MSR_IA32_RTIT_OUTPUT_MASK 0x561
#define MSR_IA32_RTIT_CTL 0x570
#define MSR_IA32_RTIT_STATUS 0x571
#define MSR_IA32_RTIT_CR3_MATCH 0x572
#define MSR_IA32_RTIT_ADDR0_A 0x580
#define MSR_IA32_RTIT_ADDR0_B 0x581
#define MSR_IA32_RTIT_ADDR1_A 0x582
#define MSR_IA32_RTIT_ADDR1_B 0x583
#define MSR_IA32_RTIT_ADDR2_A 0x584
#define MSR_IA32_RTIT_ADDR2_B 0x585
#define MSR_IA32_RTIT_ADDR3_A 0x586
#define MSR_IA32_RTIT_ADDR3_B 0x587
#define MAX_RTIT_ADDRS 8

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE (1 << 0)
#define MSR_EFER_LME (1 << 8)
#define MSR_EFER_LMA (1 << 10)
#define MSR_EFER_NXE (1 << 11)
#define MSR_EFER_SVME (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_EFER_RESERVED \
    (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME \
                     | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME \
                     | MSR_EFER_FFXSR))

#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
#define MSR_FMASK 0xc0000084
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
#define MSR_TSC_AUX 0xc0000103
#define MSR_AMD64_TSC_RATIO 0xc0000104

#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL

#define MSR_K7_HWCR 0xc0010015

#define MSR_VM_HSAVE_PA 0xc0010117

#define MSR_IA32_XFD 0x000001c4
#define MSR_IA32_XFD_ERR 0x000001c5

/* FRED MSRs */
#define MSR_IA32_FRED_RSP0 0x000001cc /* Stack level 0 regular stack pointer */
#define MSR_IA32_FRED_RSP1 0x000001cd /* Stack level 1 regular stack pointer */
#define MSR_IA32_FRED_RSP2 0x000001ce /* Stack level 2 regular stack pointer */
#define MSR_IA32_FRED_RSP3 0x000001cf /* Stack level 3 regular stack pointer */
#define MSR_IA32_FRED_STKLVLS 0x000001d0 /* FRED exception stack levels */
#define MSR_IA32_FRED_SSP1 0x000001d1 /* Stack level 1 shadow stack pointer in ring 0 */
#define MSR_IA32_FRED_SSP2 0x000001d2 /* Stack level 2 shadow stack pointer in ring 0 */
#define MSR_IA32_FRED_SSP3 0x000001d3 /* Stack level 3 shadow stack pointer in ring 0 */
#define MSR_IA32_FRED_CONFIG 0x000001d4 /* FRED Entrypoint and interrupt stack level */

#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS 0x00000da0
#define MSR_IA32_UMWAIT_CONTROL 0xe1

#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
#define MSR_IA32_VMX_MISC 0x00000485
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
#define MSR_IA32_VMX_VMFUNC 0x00000491

#define MSR_APIC_START 0x00000800
#define MSR_APIC_END 0x000008ff

#define XSTATE_FP_BIT 0
#define XSTATE_SSE_BIT 1
#define XSTATE_YMM_BIT 2
#define XSTATE_BNDREGS_BIT 3
#define XSTATE_BNDCSR_BIT 4
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
#define XSTATE_PKRU_BIT 9
#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
#define XSTATE_XTILE_DATA_BIT 18

#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT)

#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK)
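
/*
 * Example composition: a guest enabling x87, SSE and AVX state sets XCR0
 * to XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK (i.e. 0x7); AMX
 * additionally needs XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK, of
 * which only the latter is a dynamic (XFD-controllable) component.
 */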

#define ESA_FEATURE_ALIGN64_BIT 1
#define ESA_FEATURE_XFD_BIT 2

#define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT)
#define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT)


/* CPUID feature bits available in XCR0 */
#define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \
                                XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \
                                XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \
                                XSTATE_ZMM_Hi256_MASK | \
                                XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
                                XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)

/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX, /* CPUID[1].EDX */
    FEAT_1_ECX, /* CPUID[1].ECX */
    FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
    FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EBX, /* CPUID[8000_0007].EBX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
    FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
    FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */
    FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
    FEAT_SVM, /* CPUID[8000_000A].EDX */
    FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
    FEAT_6_EAX, /* CPUID[6].EAX */
    FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
    FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
    FEAT_ARCH_CAPABILITIES,
    FEAT_CORE_CAPABILITY,
    FEAT_PERF_CAPABILITIES,
    FEAT_VMX_PROCBASED_CTLS,
    FEAT_VMX_SECONDARY_CTLS,
    FEAT_VMX_PINBASED_CTLS,
    FEAT_VMX_EXIT_CTLS,
    FEAT_VMX_ENTRY_CTLS,
    FEAT_VMX_MISC,
    FEAT_VMX_EPT_VPID_CAPS,
    FEAT_VMX_BASIC,
    FEAT_VMX_VMFUNC,
    FEAT_14_0_ECX,
    FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */
    FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */
    FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
    FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
    FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
    FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
    FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
    FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */
    FEATURE_WORDS,
} FeatureWord;

typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
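
/*
 * Typical usage (a sketch; the FeatureWordArray lives in the features
 * field of CPUX86State, defined later in this header):
 *
 *     if (env->features[FEAT_1_EDX] & CPUID_SSE2) {
 *         ...
 *     }
 *
 * x86_cpu_get_supported_feature_word() returns the accelerator-supported
 * bits of the same word, so unavailable guest features can be masked off.
 */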

/* cpuid_features bits */
#define CPUID_FP87 (1U << 0)
#define CPUID_VME (1U << 1)
#define CPUID_DE (1U << 2)
#define CPUID_PSE (1U << 3)
#define CPUID_TSC (1U << 4)
#define CPUID_MSR (1U << 5)
#define CPUID_PAE (1U << 6)
#define CPUID_MCE (1U << 7)
#define CPUID_CX8 (1U << 8)
#define CPUID_APIC (1U << 9)
#define CPUID_SEP (1U << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1U << 12)
#define CPUID_PGE (1U << 13)
#define CPUID_MCA (1U << 14)
#define CPUID_CMOV (1U << 15)
#define CPUID_PAT (1U << 16)
#define CPUID_PSE36 (1U << 17)
#define CPUID_PN (1U << 18)
#define CPUID_CLFLUSH (1U << 19)
#define CPUID_DTS (1U << 21)
#define CPUID_ACPI (1U << 22)
#define CPUID_MMX (1U << 23)
#define CPUID_FXSR (1U << 24)
#define CPUID_SSE (1U << 25)
#define CPUID_SSE2 (1U << 26)
#define CPUID_SS (1U << 27)
#define CPUID_HT (1U << 28)
#define CPUID_TM (1U << 29)
#define CPUID_IA64 (1U << 30)
#define CPUID_PBE (1U << 31)

#define CPUID_EXT_SSE3 (1U << 0)
#define CPUID_EXT_PCLMULQDQ (1U << 1)
#define CPUID_EXT_DTES64 (1U << 2)
#define CPUID_EXT_MONITOR (1U << 3)
#define CPUID_EXT_DSCPL (1U << 4)
#define CPUID_EXT_VMX (1U << 5)
#define CPUID_EXT_SMX (1U << 6)
#define CPUID_EXT_EST (1U << 7)
#define CPUID_EXT_TM2 (1U << 8)
#define CPUID_EXT_SSSE3 (1U << 9)
#define CPUID_EXT_CID (1U << 10)
#define CPUID_EXT_FMA (1U << 12)
#define CPUID_EXT_CX16 (1U << 13)
#define CPUID_EXT_XTPR (1U << 14)
#define CPUID_EXT_PDCM (1U << 15)
#define CPUID_EXT_PCID (1U << 17)
#define CPUID_EXT_DCA (1U << 18)
#define CPUID_EXT_SSE41 (1U << 19)
#define CPUID_EXT_SSE42 (1U << 20)
#define CPUID_EXT_X2APIC (1U << 21)
#define CPUID_EXT_MOVBE (1U << 22)
#define CPUID_EXT_POPCNT (1U << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
#define CPUID_EXT_AES (1U << 25)
#define CPUID_EXT_XSAVE (1U << 26)
#define CPUID_EXT_OSXSAVE (1U << 27)
#define CPUID_EXT_AVX (1U << 28)
#define CPUID_EXT_F16C (1U << 29)
#define CPUID_EXT_RDRAND (1U << 30)
#define CPUID_EXT_HYPERVISOR (1U << 31)

#define CPUID_EXT2_FPU (1U << 0)
#define CPUID_EXT2_VME (1U << 1)
#define CPUID_EXT2_DE (1U << 2)
#define CPUID_EXT2_PSE (1U << 3)
#define CPUID_EXT2_TSC (1U << 4)
#define CPUID_EXT2_MSR (1U << 5)
#define CPUID_EXT2_PAE (1U << 6)
#define CPUID_EXT2_MCE (1U << 7)
#define CPUID_EXT2_CX8 (1U << 8)
#define CPUID_EXT2_APIC (1U << 9)
#define CPUID_EXT2_SYSCALL (1U << 11)
#define CPUID_EXT2_MTRR (1U << 12)
#define CPUID_EXT2_PGE (1U << 13)
#define CPUID_EXT2_MCA (1U << 14)
#define CPUID_EXT2_CMOV (1U << 15)
#define CPUID_EXT2_PAT (1U << 16)
#define CPUID_EXT2_PSE36 (1U << 17)
#define CPUID_EXT2_MP (1U << 19)
#define CPUID_EXT2_NX (1U << 20)
#define CPUID_EXT2_MMXEXT (1U << 22)
#define CPUID_EXT2_MMX (1U << 23)
#define CPUID_EXT2_FXSR (1U << 24)
#define CPUID_EXT2_FFXSR (1U << 25)
#define CPUID_EXT2_PDPE1GB (1U << 26)
#define CPUID_EXT2_RDTSCP (1U << 27)
#define CPUID_EXT2_LM (1U << 29)
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW (1U << 31)

/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)

#define CPUID_EXT3_LAHF_LM (1U << 0)
#define CPUID_EXT3_CMP_LEG (1U << 1)
#define CPUID_EXT3_SVM (1U << 2)
#define CPUID_EXT3_EXTAPIC (1U << 3)
#define CPUID_EXT3_CR8LEG (1U << 4)
#define CPUID_EXT3_ABM (1U << 5)
#define CPUID_EXT3_SSE4A (1U << 6)
#define CPUID_EXT3_MISALIGNSSE (1U << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
#define CPUID_EXT3_OSVW (1U << 9)
#define CPUID_EXT3_IBS (1U << 10)
#define CPUID_EXT3_XOP (1U << 11)
#define CPUID_EXT3_SKINIT (1U << 12)
#define CPUID_EXT3_WDT (1U << 13)
#define CPUID_EXT3_LWP (1U << 15)
#define CPUID_EXT3_FMA4 (1U << 16)
#define CPUID_EXT3_TCE (1U << 17)
#define CPUID_EXT3_NODEID (1U << 19)
#define CPUID_EXT3_TBM (1U << 21)
#define CPUID_EXT3_TOPOEXT (1U << 22)
#define CPUID_EXT3_PERFCORE (1U << 23)
#define CPUID_EXT3_PERFNB (1U << 24)

#define CPUID_SVM_NPT (1U << 0)
#define CPUID_SVM_LBRV (1U << 1)
#define CPUID_SVM_SVMLOCK (1U << 2)
#define CPUID_SVM_NRIPSAVE (1U << 3)
#define CPUID_SVM_TSCSCALE (1U << 4)
#define CPUID_SVM_VMCBCLEAN (1U << 5)
#define CPUID_SVM_FLUSHASID (1U << 6)
#define CPUID_SVM_DECODEASSIST (1U << 7)
#define CPUID_SVM_PAUSEFILTER (1U << 10)
#define CPUID_SVM_PFTHRESHOLD (1U << 12)
#define CPUID_SVM_AVIC (1U << 13)
#define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15)
#define CPUID_SVM_VGIF (1U << 16)
#define CPUID_SVM_VNMI (1U << 25)
#define CPUID_SVM_SVME_ADDR_CHK (1U << 28)

/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
/* Support TSC adjust MSR */
#define CPUID_7_0_EBX_TSC_ADJUST (1U << 1)
/* Support SGX */
#define CPUID_7_0_EBX_SGX (1U << 2)
/* 1st Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI1 (1U << 3)
/* Hardware Lock Elision */
#define CPUID_7_0_EBX_HLE (1U << 4)
/* Intel Advanced Vector Extensions 2 */
#define CPUID_7_0_EBX_AVX2 (1U << 5)
/* FPU data pointer updated only on x87 exceptions */
#define CPUID_7_0_EBX_FDP_EXCPTN_ONLY (1U << 6)
/* Supervisor-mode Execution Prevention */
#define CPUID_7_0_EBX_SMEP (1U << 7)
/* 2nd Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI2 (1U << 8)
/* Enhanced REP MOVSB/STOSB */
#define CPUID_7_0_EBX_ERMS (1U << 9)
/* Invalidate Process-Context Identifier */
#define CPUID_7_0_EBX_INVPCID (1U << 10)
/* Restricted Transactional Memory */
#define CPUID_7_0_EBX_RTM (1U << 11)
/* Zero out FPU CS and FPU DS */
#define CPUID_7_0_EBX_ZERO_FCS_FDS (1U << 13)
/* Memory Protection Extension */
#define CPUID_7_0_EBX_MPX (1U << 14)
/* AVX-512 Foundation */
#define CPUID_7_0_EBX_AVX512F (1U << 16)
/* AVX-512 Doubleword & Quadword Instruction */
#define CPUID_7_0_EBX_AVX512DQ (1U << 17)
/* Read Random SEED */
#define CPUID_7_0_EBX_RDSEED (1U << 18)
/* ADCX and ADOX instructions */
#define CPUID_7_0_EBX_ADX (1U << 19)
/* Supervisor Mode Access Prevention */
#define CPUID_7_0_EBX_SMAP (1U << 20)
/* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_AVX512IFMA (1U << 21)
/* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23)
/* Cache Line Write Back */
#define CPUID_7_0_EBX_CLWB (1U << 24)
/* Intel Processor Trace */
#define CPUID_7_0_EBX_INTEL_PT (1U << 25)
/* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512PF (1U << 26)
/* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512ER (1U << 27)
/* AVX-512 Conflict Detection */
#define CPUID_7_0_EBX_AVX512CD (1U << 28)
/* SHA1/SHA256 Instruction Extensions */
#define CPUID_7_0_EBX_SHA_NI (1U << 29)
/* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512BW (1U << 30)
/* AVX-512 Vector Length Extensions */
#define CPUID_7_0_EBX_AVX512VL (1U << 31)

/* AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI (1U << 1)
/* User-Mode Instruction Prevention */
#define CPUID_7_0_ECX_UMIP (1U << 2)
/* Protection Keys for User-mode Pages */
#define CPUID_7_0_ECX_PKU (1U << 3)
/* OS Enable Protection Keys */
#define CPUID_7_0_ECX_OSPKE (1U << 4)
/* UMONITOR/UMWAIT/TPAUSE Instructions */
#define CPUID_7_0_ECX_WAITPKG (1U << 5)
/* Additional AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6)
/* Galois Field New Instructions */
#define CPUID_7_0_ECX_GFNI (1U << 8)
/* Vector AES Instructions */
#define CPUID_7_0_ECX_VAES (1U << 9)
/* Carry-Less Multiplication Quadword */
#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
/* Vector Neural Network Instructions */
#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
/* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14)
/* 5-level Page Tables */
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
/* Cache Line Demote Instruction */
#define CPUID_7_0_ECX_CLDEMOTE (1U << 25)
/* Move Doubleword as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIRI (1U << 27)
/* Move 64 Bytes as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIR64B (1U << 28)
/* Support SGX Launch Control */
#define CPUID_7_0_ECX_SGX_LC (1U << 30)
/* Protection Keys for Supervisor-mode Pages */
#define CPUID_7_0_ECX_PKS (1U << 31)

/* AVX512 Neural Network Instructions */
#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
/* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
/* Fast Short Rep Mov */
#define CPUID_7_0_EDX_FSRM (1U << 4)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
/* SERIALIZE instruction */
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
#define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
/* Architectural LBRs */
#define CPUID_7_0_EDX_ARCH_LBR (1U << 19)
/* AMX_BF16 instruction */
#define CPUID_7_0_EDX_AMX_BF16 (1U << 22)
/* AVX512_FP16 instruction */
#define CPUID_7_0_EDX_AVX512_FP16 (1U << 23)
/* AMX tile (two-dimensional register) */
#define CPUID_7_0_EDX_AMX_TILE (1U << 24)
/* AMX_INT8 instruction */
#define CPUID_7_0_EDX_AMX_INT8 (1U << 25)
/* Speculation Control */
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
/* Single Thread Indirect Branch Predictors */
#define CPUID_7_0_EDX_STIBP (1U << 27)
/* Flush L1D cache */
#define CPUID_7_0_EDX_FLUSH_L1D (1U << 28)
/* Arch Capabilities */
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
/* Core Capability */
#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30)
/* Speculative Store Bypass Disable */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)

/* SHA512 Instruction */
#define CPUID_7_1_EAX_SHA512 (1U << 0)
/* SM3 Instruction */
#define CPUID_7_1_EAX_SM3 (1U << 1)
/* SM4 Instruction */
#define CPUID_7_1_EAX_SM4 (1U << 2)
/* AVX VNNI Instruction */
#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
/* CMPCCXADD Instructions */
#define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
/* Fast Zero REP MOVS */
#define CPUID_7_1_EAX_FZRM (1U << 10)
/* Fast Short REP STOS */
#define CPUID_7_1_EAX_FSRS (1U << 11)
/* Fast Short REP CMPS/SCAS */
#define CPUID_7_1_EAX_FSRC (1U << 12)
/* Flexible return and event delivery (FRED) */
#define CPUID_7_1_EAX_FRED (1U << 17)
/* Load into IA32_KERNEL_GS_BASE (LKGS) */
#define CPUID_7_1_EAX_LKGS (1U << 18)
/* Non-Serializing Write to Model Specific Register (WRMSRNS) */
#define CPUID_7_1_EAX_WRMSRNS (1U << 19)
/* Support Tile Computational Operations on FP16 Numbers */
#define CPUID_7_1_EAX_AMX_FP16 (1U << 21)
/* Support for VPMADD52[H,L]UQ */
#define CPUID_7_1_EAX_AVX_IFMA (1U << 23)
/* Linear Address Masking */
#define CPUID_7_1_EAX_LAM (1U << 26)

/* Support for VPDPB[SU,UU,SS]D[,S] */
#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4)
/* AVX NE CONVERT Instructions */
#define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5)
/* AMX COMPLEX Instructions */
#define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8)
/* AVX-VNNI-INT16 Instructions */
#define CPUID_7_1_EDX_AVX_VNNI_INT16 (1U << 10)
/* PREFETCHIT0/1 Instructions */
#define CPUID_7_1_EDX_PREFETCHITI (1U << 14)
/* Support for Advanced Vector Extensions 10 */
#define CPUID_7_1_EDX_AVX10 (1U << 19)

/* Indicate bit 7 of the IA32_SPEC_CTRL MSR is supported */
#define CPUID_7_2_EDX_PSFD (1U << 0)
/* Indicate bits 3 and 4 of the IA32_SPEC_CTRL MSR are supported */
#define CPUID_7_2_EDX_IPRED_CTRL (1U << 1)
/* Indicate bits 5 and 6 of the IA32_SPEC_CTRL MSR are supported */
#define CPUID_7_2_EDX_RRSBA_CTRL (1U << 2)
/* Indicate bit 8 of the IA32_SPEC_CTRL MSR is supported */
#define CPUID_7_2_EDX_DDPD_U (1U << 3)
/* Indicate bit 10 of the IA32_SPEC_CTRL MSR is supported */
#define CPUID_7_2_EDX_BHI_CTRL (1U << 4)
/* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */
#define CPUID_7_2_EDX_MCDT_NO (1U << 5)

/* XFD Extend Feature Disabled */
#define CPUID_D_1_EAX_XFD (1U << 4)

/* Packets which contain IP payload have LIP values */
#define CPUID_14_0_ECX_LIP (1U << 31)

/* AVX10 128-bit vector support is present */
#define CPUID_24_0_EBX_AVX10_128 (1U << 16)
/* AVX10 256-bit vector support is present */
#define CPUID_24_0_EBX_AVX10_256 (1U << 17)
/* AVX10 512-bit vector support is present */
#define CPUID_24_0_EBX_AVX10_512 (1U << 18)
/* AVX10 vector length support mask */
#define CPUID_24_0_EBX_AVX10_VL_MASK (CPUID_24_0_EBX_AVX10_128 | \
                                      CPUID_24_0_EBX_AVX10_256 | \
                                      CPUID_24_0_EBX_AVX10_512)

/* RAS Features */
#define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0)
#define CPUID_8000_0007_EBX_SUCCOR (1U << 1)

/* (Old) KVM paravirtualized clocksource */
#define CPUID_KVM_CLOCK (1U << KVM_FEATURE_CLOCKSOURCE)
/* (New) KVM specific paravirtualized clocksource */
#define CPUID_KVM_CLOCK2 (1U << KVM_FEATURE_CLOCKSOURCE2)
/* KVM asynchronous page fault */
#define CPUID_KVM_ASYNCPF (1U << KVM_FEATURE_ASYNC_PF)
/* KVM stolen (when guest vCPU is not running) time accounting */
#define CPUID_KVM_STEAL_TIME (1U << KVM_FEATURE_STEAL_TIME)
/* KVM paravirtualized end-of-interrupt signaling */
#define CPUID_KVM_PV_EOI (1U << KVM_FEATURE_PV_EOI)
/* KVM paravirtualized spinlocks support */
#define CPUID_KVM_PV_UNHALT (1U << KVM_FEATURE_PV_UNHALT)
/* KVM host-side polling on HLT control from the guest */
#define CPUID_KVM_POLL_CONTROL (1U << KVM_FEATURE_POLL_CONTROL)
/* KVM interrupt-based asynchronous page fault */
#define CPUID_KVM_ASYNCPF_INT (1U << KVM_FEATURE_ASYNC_PF_INT)
/* KVM 'Extended Destination ID' support for external interrupts */
#define CPUID_KVM_MSI_EXT_DEST_ID (1U << KVM_FEATURE_MSI_EXT_DEST_ID)

/* Hint to KVM that vCPUs expect to never be preempted for an unlimited time */
#define CPUID_KVM_HINTS_REALTIME (1U << KVM_HINTS_REALTIME)

/* CLZERO instruction */
#define CPUID_8000_0008_EBX_CLZERO (1U << 0)
/* Always save/restore FP error pointers */
#define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2)
/* Write back and do not invalidate cache */
#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)
/* Indirect Branch Prediction Barrier */
#define CPUID_8000_0008_EBX_IBPB (1U << 12)
/* Indirect Branch Restricted Speculation */
#define CPUID_8000_0008_EBX_IBRS (1U << 14)
/* Single Thread Indirect Branch Predictors */
#define CPUID_8000_0008_EBX_STIBP (1U << 15)
/* STIBP mode has enhanced performance and may be left always on */
#define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17)
/* Speculative Store Bypass Disable */
#define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24)
/* Paravirtualized Speculative Store Bypass Disable MSR */
#define CPUID_8000_0008_EBX_VIRT_SSBD (1U << 25)
/* Predictive Store Forwarding Disable */
#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28)

/* Processor ignores nested data breakpoints */
#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
/* Null Selector Clears Base */
#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
/* Enhanced Return Address Predictor Security */
#define CPUID_8000_0021_EAX_ERAPS (1U << 24)
/* Selective Branch Predictor Barrier */
#define CPUID_8000_0021_EAX_SBPB (1U << 27)
/* IBPB includes branch type prediction flushing */
#define CPUID_8000_0021_EAX_IBPB_BRTYPE (1U << 28)
/* Not vulnerable to Speculative Return Stack Overflow */
#define CPUID_8000_0021_EAX_SRSO_NO (1U << 29)
/* Not vulnerable to SRSO at the user-kernel boundary */
#define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO (1U << 30)

/*
 * Return Address Predictor size. RapSize x 8 is the minimum number of
 * CALL instructions software needs to execute to flush the RAP.
 */
#define CPUID_8000_0021_EBX_RAPSIZE (8U << 16)

/* Performance Monitoring Version 2 */
#define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0)

#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)

#define CPUID_6_EAX_ARAT (1U << 2)

/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)

/* "rng" RNG present (xstore) */
#define CPUID_C000_0001_EDX_XSTORE (1U << 2)
/* "rng_en" RNG enabled */
#define CPUID_C000_0001_EDX_XSTORE_EN (1U << 3)
/* "ace" on-CPU crypto (xcrypt) */
#define CPUID_C000_0001_EDX_XCRYPT (1U << 6)
/* "ace_en" on-CPU crypto enabled */
#define CPUID_C000_0001_EDX_XCRYPT_EN (1U << 7)
/* Advanced Cryptography Engine v2 */
#define CPUID_C000_0001_EDX_ACE2 (1U << 8)
/* ACE v2 enabled */
#define CPUID_C000_0001_EDX_ACE2_EN (1U << 9)
/* PadLock Hash Engine */
#define CPUID_C000_0001_EDX_PHE (1U << 10)
/* PHE enabled */
#define CPUID_C000_0001_EDX_PHE_EN (1U << 11)
/* PadLock Montgomery Multiplier */
#define CPUID_C000_0001_EDX_PMM (1U << 12)
/* PMM enabled */
#define CPUID_C000_0001_EDX_PMM_EN (1U << 13)

#define CPUID_VENDOR_SZ 12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
#define CPUID_VENDOR_INTEL "GenuineIntel"

#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD "AuthenticAMD"

#define CPUID_VENDOR_ZHAOXIN1_1 0x746E6543 /* "Cent" */
#define CPUID_VENDOR_ZHAOXIN1_2 0x48727561 /* "aurH" */
#define CPUID_VENDOR_ZHAOXIN1_3 0x736C7561 /* "auls" */

#define CPUID_VENDOR_ZHAOXIN2_1 0x68532020 /* "  Sh" */
#define CPUID_VENDOR_ZHAOXIN2_2 0x68676E61 /* "angh" */
#define CPUID_VENDOR_ZHAOXIN2_3 0x20206961 /* "ai  " */

#define CPUID_VENDOR_ZHAOXIN1 "CentaurHauls"
#define CPUID_VENDOR_ZHAOXIN2 "  Shanghai  "

#define CPUID_VENDOR_HYGON "HygonGenuine"

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
#define IS_ZHAOXIN1_CPU(env) \
    ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN1_1 && \
     (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN1_2 && \
     (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN1_3)
#define IS_ZHAOXIN2_CPU(env) \
    ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN2_1 && \
     (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN2_2 && \
     (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN2_3)
#define IS_ZHAOXIN_CPU(env) (IS_ZHAOXIN1_CPU(env) || IS_ZHAOXIN2_CPU(env))

#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */

/* CPUID[0xB].ECX level types */
#define CPUID_B_ECX_TOPO_LEVEL_INVALID 0
#define CPUID_B_ECX_TOPO_LEVEL_SMT 1
#define CPUID_B_ECX_TOPO_LEVEL_CORE 2

/* CPUID[0x1F].ECX level types */
#define CPUID_1F_ECX_TOPO_LEVEL_INVALID CPUID_B_ECX_TOPO_LEVEL_INVALID
#define CPUID_1F_ECX_TOPO_LEVEL_SMT CPUID_B_ECX_TOPO_LEVEL_SMT
#define CPUID_1F_ECX_TOPO_LEVEL_CORE CPUID_B_ECX_TOPO_LEVEL_CORE
#define CPUID_1F_ECX_TOPO_LEVEL_MODULE 3
#define CPUID_1F_ECX_TOPO_LEVEL_DIE 5

/* MSR Feature Bits */
#define MSR_ARCH_CAP_RDCL_NO (1U << 0)
#define MSR_ARCH_CAP_IBRS_ALL (1U << 1)
#define MSR_ARCH_CAP_RSBA (1U << 2)
#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
#define MSR_ARCH_CAP_SSB_NO (1U << 4)
#define MSR_ARCH_CAP_MDS_NO (1U << 5)
#define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6)
#define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7)
#define MSR_ARCH_CAP_TAA_NO (1U << 8)
#define MSR_ARCH_CAP_SBDR_SSDP_NO (1U << 13)
#define MSR_ARCH_CAP_FBSDP_NO (1U << 14)
#define MSR_ARCH_CAP_PSDP_NO (1U << 15)
#define MSR_ARCH_CAP_FB_CLEAR (1U << 17)
#define MSR_ARCH_CAP_BHI_NO (1U << 20)
#define MSR_ARCH_CAP_PBRSB_NO (1U << 24)
#define MSR_ARCH_CAP_GDS_NO (1U << 26)
#define MSR_ARCH_CAP_RFDS_NO (1U << 27)

#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)

/* VMX MSR features */
#define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull
#define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32)
#define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32)
#define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49)
#define MSR_VMX_BASIC_INS_OUTS (1ULL << 54)
#define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55)
#define MSR_VMX_BASIC_ANY_ERRCODE (1ULL << 56)
#define MSR_VMX_BASIC_NESTED_EXCEPTION (1ULL << 58)

#define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
#define MSR_VMX_MISC_STORE_LMA (1ULL << 5)
#define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6)
#define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7)
#define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8)
#define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull
#define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29)
#define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30)

#define MSR_VMX_EPT_EXECONLY (1ULL << 0)
#define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6)
#define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7)
#define MSR_VMX_EPT_UC (1ULL << 8)
#define MSR_VMX_EPT_WB (1ULL << 14)
#define MSR_VMX_EPT_2MB (1ULL << 16)
#define MSR_VMX_EPT_1GB (1ULL << 17)
#define MSR_VMX_EPT_INVEPT (1ULL << 20)
#define MSR_VMX_EPT_AD_BITS (1ULL << 21)
#define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22)
#define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25)
#define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26)
#define MSR_VMX_EPT_INVVPID (1ULL << 32)
#define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40)
#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41)
#define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42)
#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43)

#define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0)


/* VMX controls */
#define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
#define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008
#define VMX_CPU_BASED_HLT_EXITING 0x00000080
#define VMX_CPU_BASED_INVLPG_EXITING 0x00000200
#define VMX_CPU_BASED_MWAIT_EXITING 0x00000400
#define VMX_CPU_BASED_RDPMC_EXITING 0x00000800
#define VMX_CPU_BASED_RDTSC_EXITING 0x00001000
#define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000
#define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000
#define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000
#define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000
#define VMX_CPU_BASED_TPR_SHADOW 0x00200000
#define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
#define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000
#define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000
#define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
#define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000
#define VMX_CPU_BASED_MONITOR_EXITING 0x20000000
#define VMX_CPU_BASED_PAUSE_EXITING 0x40000000
#define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000

#define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002
#define VMX_SECONDARY_EXEC_DESC 0x00000004
#define VMX_SECONDARY_EXEC_RDTSCP 0x00000008
#define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
#define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
#define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
#define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
#define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
#define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800
#define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
#define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000
#define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000
#define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000
#define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000
#define VMX_SECONDARY_EXEC_XSAVES 0x00100000
#define VMX_SECONDARY_EXEC_TSC_SCALING 0x02000000
#define VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE 0x04000000

#define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001
#define VMX_PIN_BASED_NMI_EXITING 0x00000008
#define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020
#define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
#define VMX_PIN_BASED_POSTED_INTR 0x00000080

#define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
#define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
#define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
#define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
#define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000
#define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000
#define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000
#define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000
#define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
#define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000
#define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
#define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000
#define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000

#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
#define VMX_VM_ENTRY_IA32E_MODE 0x00000200
#define VMX_VM_ENTRY_SMM 0x00000400
#define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
#define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
#define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000
#define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000
#define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000
#define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000
#define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
#define VMX_VM_ENTRY_LOAD_IA32_PKRS 0x00400000

/* Supported Hyper-V Enlightenments */
#define HYPERV_FEAT_RELAXED 0
#define HYPERV_FEAT_VAPIC 1
#define HYPERV_FEAT_TIME 2
#define HYPERV_FEAT_CRASH 3
#define HYPERV_FEAT_RESET 4
#define HYPERV_FEAT_VPINDEX 5
#define HYPERV_FEAT_RUNTIME 6
#define HYPERV_FEAT_SYNIC 7
#define HYPERV_FEAT_STIMER 8
#define HYPERV_FEAT_FREQUENCIES 9
#define HYPERV_FEAT_REENLIGHTENMENT 10
#define HYPERV_FEAT_TLBFLUSH 11
#define HYPERV_FEAT_EVMCS 12
#define HYPERV_FEAT_IPI 13
#define HYPERV_FEAT_STIMER_DIRECT 14
#define HYPERV_FEAT_AVIC 15
#define HYPERV_FEAT_SYNDBG 16
#define HYPERV_FEAT_MSR_BITMAP 17
#define HYPERV_FEAT_XMM_INPUT 18
#define HYPERV_FEAT_TLBFLUSH_EXT 19
#define HYPERV_FEAT_TLBFLUSH_DIRECT 20

#ifndef HYPERV_SPINLOCK_NEVER_NOTIFY
#define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF
#endif

#define EXCP00_DIVZ 0
#define EXCP01_DB 1
#define EXCP02_NMI 2
#define EXCP03_INT3 3
#define EXCP04_INTO 4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX 7
#define EXCP08_DBLE 8
#define EXCP09_XERR 9
#define EXCP0A_TSS 10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF 13
#define EXCP0E_PAGE 14
#define EXCP10_COPR 16
#define EXCP11_ALGN 17
#define EXCP12_MCHK 18

#define EXCP_VMEXIT 0x100 /* only for system emulation */
#define EXCP_SYSCALL 0x101 /* only for user emulation */
#define EXCP_VSYSCALL 0x102 /* only for user emulation */

/* i386-specific interrupt pending bits. */
#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2

/* Use a clearer name for this. */
#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET

#define CC_OP_HAS_EFLAGS(op) ((op) >= CC_OP_EFLAGS && (op) <= CC_OP_ADCOX)

/* Instead of computing the condition codes after each x86 instruction,
 * QEMU just stores one operand (called CC_SRC), the result
 * (called CC_DST) and the type of operation (called CC_OP). When the
 * condition codes are needed, the condition codes can be calculated
 * using this information. Condition codes are not generated if they
 * are only needed for conditional branches.
 */
typedef enum {
    CC_OP_EFLAGS = 0, /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_ADCX = 1, /* CC_DST = C, CC_SRC = rest. */
    CC_OP_ADOX = 2, /* CC_SRC2 = O, CC_SRC = rest. */
    CC_OP_ADCOX = 3, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */

    /* Low 2 bits = MemOp constant for the size */
#define CC_OP_FIRST_BWLQ CC_OP_MULB
    CC_OP_MULB = 4, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
    CC_OP_BMILGW,
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_BLSIB, /* Z,S via CC_DST, C = SRC!=0; O=0; P,A undefined */
    CC_OP_BLSIW,
    CC_OP_BLSIL,
    CC_OP_BLSIQ,

    /*
     * Note that only CC_OP_POPCNT (i.e. the one with MO_TL size)
     * is used or implemented, because the translation needs
     * to zero-extend CC_DST anyway.
     */
    CC_OP_POPCNTB__, /* Z via CC_DST, all other flags clear. */
    CC_OP_POPCNTW__,
    CC_OP_POPCNTL__,
    CC_OP_POPCNTQ__,
    CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__,
#define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__

    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
} CCOp;
1478
1479 /* See X86DecodedInsn.cc_op, using int8_t. */
1480 QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX);
1481
cc_op_size(CCOp op)1482 static inline MemOp cc_op_size(CCOp op)
1483 {
1484 MemOp size = op & 3;
1485
1486 QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3);
1487 assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ);
1488 assert(size <= MO_TL);
1489
1490 return size;
1491 }
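
/*
 * An illustrative sketch, not part of the ABI: with the lazy-flags
 * scheme above, individual flags can be recovered from CC_OP, CC_DST
 * and CC_SRC without materialising the whole EFLAGS. For example,
 * after a 32-bit ADD (CC_OP_ADDL):
 *
 *     uint32_t res  = (uint32_t)env->cc_dst;
 *     uint32_t src1 = (uint32_t)env->cc_src;
 *     bool zf = (res == 0);       // ZF: result is zero
 *     bool cf = (res < src1);     // CF: unsigned wrap on addition
 *
 * cc_op_size(CC_OP_ADDL) == MO_32 tells the caller which truncation to
 * apply; the authoritative computation lives in the cc helpers.
 */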

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union MMXReg {
    uint8_t _b_MMXReg[64 / 8];
    uint16_t _w_MMXReg[64 / 16];
    uint32_t _l_MMXReg[64 / 32];
    uint64_t _q_MMXReg[64 / 64];
    float32 _s_MMXReg[64 / 32];
    float64 _d_MMXReg[64 / 64];
} MMXReg;

typedef union XMMReg {
    uint64_t _q_XMMReg[128 / 64];
} XMMReg;

typedef union YMMReg {
    uint64_t _q_YMMReg[256 / 64];
    XMMReg _x_YMMReg[256 / 128];
} YMMReg;

typedef union ZMMReg {
    uint8_t _b_ZMMReg[512 / 8];
    uint16_t _w_ZMMReg[512 / 16];
    uint32_t _l_ZMMReg[512 / 32];
    uint64_t _q_ZMMReg[512 / 64];
    float16 _h_ZMMReg[512 / 16];
    float32 _s_ZMMReg[512 / 32];
    float64 _d_ZMMReg[512 / 64];
    XMMReg _x_ZMMReg[512 / 128];
    YMMReg _y_ZMMReg[512 / 256];
} ZMMReg;

typedef struct BNDReg {
    uint64_t lb;
    uint64_t ub;
} BNDReg;

typedef struct BNDCSReg {
    uint64_t cfgu;
    uint64_t sts;
} BNDCSReg;

#define BNDCFG_ENABLE 1ULL
#define BNDCFG_BNDPRESERVE 2ULL
#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK

#if HOST_BIG_ENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
#define ZMM_H(n) _h_ZMMReg[31 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]
#define ZMM_X(n) _x_ZMMReg[3 - (n)]
#define ZMM_Y(n) _y_ZMMReg[1 - (n)]

#define XMM_Q(n) _q_XMMReg[1 - (n)]

#define YMM_Q(n) _q_YMMReg[3 - (n)]
#define YMM_X(n) _x_YMMReg[1 - (n)]

#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
#define MMX_L(n) _l_MMXReg[1 - (n)]
#define MMX_S(n) _s_MMXReg[1 - (n)]
#else
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
#define ZMM_H(n) _h_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]
#define ZMM_X(n) _x_ZMMReg[n]
#define ZMM_Y(n) _y_ZMMReg[n]

#define XMM_Q(n) _q_XMMReg[n]

#define YMM_Q(n) _q_YMMReg[n]
#define YMM_X(n) _x_YMMReg[n]

#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
#define MMX_L(n) _l_MMXReg[n]
#define MMX_S(n) _s_MMXReg[n]
#endif
#define MMX_Q(n) _q_MMXReg[n]
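
/*
 * A minimal usage sketch (assuming a CPUX86State *env in scope): the
 * ZMM_*()/MMX_*() accessors hide the host byte order, so element 0 is
 * always the architecturally lowest lane:
 *
 *     ZMMReg *r = &env->xmm_regs[0];
 *     r->ZMM_L(0) = 0x11223344;     // low 32-bit lane of xmm0
 *     uint64_t lo = r->ZMM_Q(0);    // low 64 bits, regardless of host
 *
 * On a big-endian host ZMM_L(0) expands to _l_ZMMReg[15], on a
 * little-endian host to _l_ZMMReg[0]; callers should never index the
 * arrays directly.
 */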

typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)

#define NB_OPMASK_REGS 8

/* A CPU can't have APIC ID 0xFFFFFFFF; that value is used to indicate
 * that the APIC ID hasn't been set yet.
 */
#define UNASSIGNED_APIC_ID 0xFFFFFFFF

typedef struct X86LegacyXSaveArea {
    uint16_t fcw;
    uint16_t fsw;
    uint8_t ftw;
    uint8_t reserved;
    uint16_t fpop;
    union {
        struct {
            uint64_t fpip;
            uint64_t fpdp;
        };
        struct {
            uint32_t fip;
            uint32_t fcs;
            uint32_t foo;
            uint32_t fos;
        };
    };
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    FPReg fpregs[8];
    uint8_t xmm_regs[16][16];
    uint32_t hw_reserved[12];
    uint32_t sw_reserved[12];
} X86LegacyXSaveArea;

QEMU_BUILD_BUG_ON(sizeof(X86LegacyXSaveArea) != 512);

typedef struct X86XSaveHeader {
    uint64_t xstate_bv;
    uint64_t xcomp_bv;
    uint64_t reserve0;
    uint8_t reserved[40];
} X86XSaveHeader;

/* Ext. save area 2: AVX State */
typedef struct XSaveAVX {
    uint8_t ymmh[16][16];
} XSaveAVX;

/* Ext. save area 3: BNDREG */
typedef struct XSaveBNDREG {
    BNDReg bnd_regs[4];
} XSaveBNDREG;

/* Ext. save area 4: BNDCSR */
typedef union XSaveBNDCSR {
    BNDCSReg bndcsr;
    uint8_t data[64];
} XSaveBNDCSR;

/* Ext. save area 5: Opmask */
typedef struct XSaveOpmask {
    uint64_t opmask_regs[NB_OPMASK_REGS];
} XSaveOpmask;

/* Ext. save area 6: ZMM_Hi256 */
typedef struct XSaveZMM_Hi256 {
    uint8_t zmm_hi256[16][32];
} XSaveZMM_Hi256;

/* Ext. save area 7: Hi16_ZMM */
typedef struct XSaveHi16_ZMM {
    uint8_t hi16_zmm[16][64];
} XSaveHi16_ZMM;

/* Ext. save area 9: PKRU state */
typedef struct XSavePKRU {
    uint32_t pkru;
    uint32_t padding;
} XSavePKRU;

/* Ext. save area 17: AMX XTILECFG state */
typedef struct XSaveXTILECFG {
    uint8_t xtilecfg[64];
} XSaveXTILECFG;

/* Ext. save area 18: AMX XTILEDATA state */
typedef struct XSaveXTILEDATA {
    uint8_t xtiledata[8][1024];
} XSaveXTILEDATA;

typedef struct {
    uint64_t from;
    uint64_t to;
    uint64_t info;
} LBREntry;

#define ARCH_LBR_NR_ENTRIES 32

/* Ext. save area 19: Supervisor mode Arch LBR state */
typedef struct XSavesArchLBR {
    uint64_t lbr_ctl;
    uint64_t lbr_depth;
    uint64_t ler_from;
    uint64_t ler_to;
    uint64_t ler_info;
    LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
} XSavesArchLBR;

QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000);
QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328);

typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
    uint32_t ecx;
} ExtSaveArea;

#define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1)

extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
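
/*
 * Sketch of how the table can be consumed (hypothetical snippet,
 * assuming the standard, non-compacted XSAVE format): each ExtSaveArea
 * gives the offset and size of one component inside an XSAVE buffer,
 * so locating e.g. the AVX high halves is a bounds-checked pointer add:
 *
 *     const ExtSaveArea *e = &x86_ext_save_areas[XSTATE_YMM_BIT];
 *     XSaveAVX *avx = NULL;
 *     if (e->size && e->offset + e->size <= buflen) {
 *         avx = (XSaveAVX *)((uint8_t *)xsave_buf + e->offset);
 *     }
 *
 * x86_cpu_xsave_all_areas()/x86_cpu_xrstor_all_areas(), declared later
 * in this header, walk the table in a similar fashion.
 */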

typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

/* Cache information data structures: */

enum CacheType {
    DATA_CACHE,
    INSTRUCTION_CACHE,
    UNIFIED_CACHE
};

typedef struct CPUCacheInfo {
    enum CacheType type;
    uint8_t level;
    /* Size in bytes */
    uint32_t size;
    /* Line size, in bytes */
    uint16_t line_size;
    /*
     * Associativity.
     * Note: representation of fully-associative caches is not implemented
     */
    uint8_t associativity;
    /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
    uint8_t partitions;
    /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
    uint32_t sets;
    /*
     * Lines per tag.
     * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
     * (Is this a synonym for @partitions?)
     */
    uint8_t lines_per_tag;

    /* Self-initializing cache */
    bool self_init;
    /*
     * WBINVD/INVD is not guaranteed to act upon lower level caches of
     * non-originating threads sharing this cache.
     * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
     */
    bool no_invd_sharing;
    /*
     * Cache is inclusive of lower cache levels.
     * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
     */
    bool inclusive;
    /*
     * A complex function is used to index the cache, potentially using all
     * address bits. CPUID[4].EDX[bit 2].
     */
    bool complex_indexing;

    /*
     * Cache topology: the topology level at which the cache is shared.
     * Used to encode CPUID[4].EAX[bits 25:14] or
     * CPUID[0x8000001D].EAX[bits 25:14].
     */
    CpuTopologyLevel share_level;
} CPUCacheInfo;


typedef struct CPUCaches {
    CPUCacheInfo *l1d_cache;
    CPUCacheInfo *l1i_cache;
    CPUCacheInfo *l2_cache;
    CPUCacheInfo *l3_cache;
} CPUCaches;

typedef struct X86LazyFlags {
    target_ulong result;
    target_ulong auxbits;
} X86LazyFlags;

typedef struct CPUArchState {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_dst;
    target_ulong cc_src;
    target_ulong cc_src2;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */

    bool pdptrs_valid;
    uint64_t pdptrs[4];
    int32_t a20_mask;

    BNDReg bnd_regs[4];
    BNDCSReg bndcs_regs;
    uint64_t msr_bndcfgs;
    uint64_t efer;

    /* Beginning of state preserved by INIT (dummy marker). */
    struct {} start_init_save;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    uint16_t fpus;
    uint16_t fpuc;
    uint8_t fptags[8]; /* 0 = valid, 1 = empty */
    FPReg fpregs[8];
    /* KVM-only so far */
    uint16_t fpop;
    uint16_t fpcs;
    uint16_t fpds;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator internal variables */
    float_status fp_status;
    floatx80 ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16);
    ZMMReg xmm_t0 QEMU_ALIGNED(16);
    MMXReg mmx_t0;

    uint64_t opmask_regs[NB_OPMASK_REGS];
#ifdef TARGET_X86_64
    uint8_t xtilecfg[64];
    uint8_t xtiledata[8192];
#endif

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t star;

    uint64_t vm_hsave;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;

    /* FRED MSRs */
    uint64_t fred_rsp0;
    uint64_t fred_rsp1;
    uint64_t fred_rsp2;
    uint64_t fred_rsp3;
    uint64_t fred_stklvls;
    uint64_t fred_ssp1;
    uint64_t fred_ssp2;
    uint64_t fred_ssp3;
    uint64_t fred_config;
#endif

    uint64_t tsc_adjust;
    uint64_t tsc_deadline;
    uint64_t tsc_aux;

    uint64_t xcr0;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;
    uint64_t msr_ia32_feature_control;
    uint64_t msr_ia32_sgxlepubkeyhash[4];

    uint64_t msr_fixed_ctr_ctrl;
    uint64_t msr_global_ctrl;
    uint64_t msr_global_status;
    uint64_t msr_global_ovf_ctrl;
    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];

    uint64_t pat;
    uint32_t smbase;
    uint64_t msr_smi_count;

    uint32_t pkru;
    uint32_t pkrs;
    uint32_t tsx_ctrl;

    uint64_t spec_ctrl;
    uint64_t amd_tsc_scale_msr;
    uint64_t virt_ssbd;

    /* End of state preserved by INIT (dummy marker). */
    struct {} end_init_save;

    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t steal_time_msr;
    uint64_t async_pf_en_msr;
    uint64_t async_pf_int_msr;
    uint64_t pv_eoi_en_msr;
    uint64_t poll_control_msr;

    /* Partition-wide HV MSRs, will be updated only on the first vcpu */
    uint64_t msr_hv_hypercall;
    uint64_t msr_hv_guest_os_id;
    uint64_t msr_hv_tsc;
    uint64_t msr_hv_syndbg_control;
    uint64_t msr_hv_syndbg_status;
    uint64_t msr_hv_syndbg_send_page;
    uint64_t msr_hv_syndbg_recv_page;
    uint64_t msr_hv_syndbg_pending_page;
    uint64_t msr_hv_syndbg_options;

    /* Per-VCPU HV MSRs */
    uint64_t msr_hv_vapic;
    uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
    uint64_t msr_hv_runtime;
    uint64_t msr_hv_synic_control;
    uint64_t msr_hv_synic_evt_page;
    uint64_t msr_hv_synic_msg_page;
    uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
    uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
    uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
    uint64_t msr_hv_reenlightenment_control;
    uint64_t msr_hv_tsc_emulation_control;
    uint64_t msr_hv_tsc_emulation_status;

    uint64_t msr_rtit_ctrl;
    uint64_t msr_rtit_status;
    uint64_t msr_rtit_output_base;
    uint64_t msr_rtit_output_mask;
    uint64_t msr_rtit_cr3_match;
    uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];

    /* Per-VCPU XFD MSRs */
    uint64_t msr_xfd;
    uint64_t msr_xfd_err;

    /* Per-VCPU Arch LBR MSRs */
    uint64_t msr_lbr_ctl;
    uint64_t msr_lbr_depth;
    LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];

    /* AMD MSRC001_0015 Hardware Configuration */
    uint64_t msr_hwcr;

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
    union {
        struct CPUBreakpoint *cpu_breakpoint[4];
        struct CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    int old_exception; /* exception in flight */

    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint64_t nested_cr3;
    uint32_t nested_pg_mode;
    uint8_t v_tpr;
    uint32_t int_ctl;

    /* KVM states, automatically cleared on reset */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    uintptr_t retaddr;

    /* RAPL MSR */
    uint64_t msr_rapl_power_unit;
    uint64_t msr_pkg_energy_status;

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    /* Fields after this point are preserved across CPU reset. */

    /* processor features (e.g. for CPUID insn) */
    /* Actual cpuid leaf 7 value */
    uint32_t cpuid_level_func7;
    /* Minimum cpuid leaf 7 value */
    uint32_t cpuid_min_level_func7;
    /* Minimum level/xlevel/xlevel2, based on CPU model + features */
    uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
    /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
    uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
    /* Actual level/xlevel/xlevel2 value: */
    uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    FeatureWordArray features;
    /* AVX10 version */
    uint8_t avx10_version;
    /* Features that were explicitly enabled/disabled */
    FeatureWordArray user_features;
    uint32_t cpuid_model[12];
    /* Cache information for CPUID. When legacy-cache=on, the cache data
     * on each CPUID leaf will be different, because we keep compatibility
     * with old QEMU versions.
     */
    CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[MSR_MTRRcap_VCNT];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_nr;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t exception_pending;
    uint8_t exception_injected;
    uint8_t has_error_code;
    uint8_t exception_has_payload;
    uint64_t exception_payload;
    uint8_t triple_fault_pending;
    uint32_t ins_len;
    uint32_t sipi_vector;
    bool tsc_valid;
    int64_t tsc_khz;
    int64_t user_tsc_khz; /* for sanity check only */
    uint64_t apic_bus_freq;
    uint64_t tsc;
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    void *xsave_buf;
    uint32_t xsave_buf_len;
#endif
#if defined(CONFIG_KVM)
    struct kvm_nested_state *nested_state;
    MemoryRegion *xen_vcpu_info_mr;
    void *xen_vcpu_info_hva;
    uint64_t xen_vcpu_info_gpa;
    uint64_t xen_vcpu_info_default_gpa;
    uint64_t xen_vcpu_time_info_gpa;
    uint64_t xen_vcpu_runstate_gpa;
    uint8_t xen_vcpu_callback_vector;
    bool xen_callback_asserted;
    uint16_t xen_virq[XEN_NR_VIRQS];
    uint64_t xen_singleshot_timer_ns;
    QEMUTimer *xen_singleshot_timer;
    uint64_t xen_periodic_timer_period;
    QEMUTimer *xen_periodic_timer;
    QemuMutex xen_timers_lock;
#endif
#if defined(CONFIG_HVF)
    X86LazyFlags lflags;
    void *emu_mmio_buf;
#endif

    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mcg_ext_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF * 4];
    uint64_t xstate_bv;

    /* vmstate */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    uint64_t xss;
    uint32_t umwait;

    TPRAccess tpr_access_type;

    X86CPUTopoInfo topo_info;

    /* Bitmap of available CPU topology levels for this CPU. */
    DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX);
} CPUX86State;

struct kvm_msrs;

/**
 * X86CPU:
 * @env: #CPUX86State
 * @migratable: If set, only migratable flags will be accepted when "enforce"
 * mode is used, and only migratable flags will be included in the "host"
 * CPU model.
 *
 * An x86 CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPUX86State env;
    VMChangeStateEntry *vmsentry;

    uint64_t ucode_rev;

    uint32_t hyperv_spinlock_attempts;
    char *hyperv_vendor;
    bool hyperv_synic_kvm_only;
    uint64_t hyperv_features;
    bool hyperv_passthrough;
    OnOffAuto hyperv_no_nonarch_cs;
    uint32_t hyperv_vendor_id[3];
    uint32_t hyperv_interface_id[4];
    uint32_t hyperv_limits[3];
    bool hyperv_enforce_cpuid;
    uint32_t hyperv_ver_id_build;
    uint16_t hyperv_ver_id_major;
    uint16_t hyperv_ver_id_minor;
    uint32_t hyperv_ver_id_sp;
    uint8_t hyperv_ver_id_sb;
    uint32_t hyperv_ver_id_sn;

    bool check_cpuid;
    bool enforce_cpuid;
    /*
     * Force features to be enabled even if the host doesn't support them.
     * This is dangerous and should be done only for testing CPUID
     * compatibility.
     */
    bool force_features;
    bool expose_kvm;
    bool expose_tcg;
    bool migratable;
    bool migrate_smi_count;
    bool max_features; /* Enable all supported features automatically */
    uint32_t apic_id;

    /* Enables publishing of TSC increment and Local APIC bus frequencies to
     * the guest OS in CPUID leaf 0x40000010, the same way that VMware does. */
    bool vmware_cpuid_freq;

    /* if true, the CPUID code directly forwards host cache leaves to the
     * guest */
    bool cache_info_passthrough;

    /* if true, the CPUID code directly forwards
     * host monitor/mwait leaves to the guest */
    struct {
        uint32_t eax;
        uint32_t ebx;
        uint32_t ecx;
        uint32_t edx;
    } mwait;

    /* Features that were filtered out because of missing host capabilities */
    FeatureWordArray filtered_features;

    /* Enable PMU CPUID bits. This can't be enabled by default yet because
     * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
     * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
     * capabilities) directly to the guest.
     */
    bool enable_pmu;

    /*
     * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR.
     * This can't be initialized with a default because it doesn't have
     * stable ABI support yet. It is only allowed to pass all LBR_FMT bits
     * returned by kvm_arch_get_supported_msr_feature() (which depends on both
     * host CPU and kernel capabilities) to the guest.
     */
    uint64_t lbr_fmt;

    /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
     * disabled by default to avoid breaking migration between QEMU with
     * different LMCE configurations.
     */
    bool enable_lmce;

    /* Compatibility bits for old machine types.
     * If true, present a virtual L3 cache for the VM; the vCPUs in the same
     * virtual socket share one virtual L3 cache.
     */
    bool enable_l3_cache;

    /* Compatibility bits for old machine types.
     * If true, present the L1 cache as per-thread rather than per-core.
     */
    bool l1_cache_per_core;

    /* Compatibility bits for old machine types.
     * If true, present the old cache topology information.
     */
    bool legacy_cache;

    /* Compatibility bits for old machine types.
     * If true, decode CPUID leaf 0x8000001E ECX to support multiple
     * nodes per processor.
     */
    bool legacy_multi_node;

    /* Compatibility bits for old machine types: */
    bool enable_cpuid_0xb;

    /* Enable auto level-increase for all CPUID leaves */
    bool full_cpuid_auto_level;

    /* Only advertise CPUID leaves defined by the vendor */
    bool vendor_cpuid_only;

    /* Only advertise TOPOEXT features that AMD defines */
    bool amd_topoext_features_only;

    /* Enable auto level-increase for the Intel Processor Trace leaf */
    bool intel_pt_auto_level;

    /* if true, fill the top bits of the MTRR_PHYSMASKn variable range */
    bool fill_mtrr_mask;

    /* if true, override the phys_bits value with a value read from the host */
    bool host_phys_bits;

    /* if set, limit maximum value for phys_bits when host_phys_bits is true */
    uint8_t host_phys_bits_limit;

    /* Forcefully disable KVM PV features not exposed in guest CPUIDs */
    bool kvm_pv_enforce_cpuid;

    /* Number of physical address bits supported */
    uint32_t phys_bits;

    /*
     * Number of guest physical address bits available. Usually this is
     * identical to host physical address bits. With NPT or EPT 4-level
     * paging, guest physical address space might be restricted to 48 bits
     * even if the host cpu supports more physical address bits.
     */
    uint32_t guest_phys_bits;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;
    struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
    Notifier machine_done;

    struct kvm_msrs *kvm_msr_buf;

    int32_t node_id; /* NUMA node this CPU belongs to */
    int32_t socket_id;
    int32_t die_id;
    int32_t module_id;
    int32_t core_id;
    int32_t thread_id;

    int32_t hv_max_vps;

    bool xen_vapic;
};

typedef struct X86CPUModel X86CPUModel;

/**
 * X86CPUClass:
 * @cpu_def: CPU model definition
 * @host_cpuid_required: Whether CPU model requires cpuid from host.
 * @ordering: Ordering on the "-cpu help" CPU model list.
 * @migration_safe: See CpuDefinitionInfo::migration_safe
 * @static_model: See CpuDefinitionInfo::static
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * An x86 CPU model or family.
 */
struct X86CPUClass {
    CPUClass parent_class;

    /*
     * CPU definition, automatically loaded by instance_init if not NULL.
     * Should eventually be replaced by subclass-specific property defaults.
     */
    const X86CPUModel *model;

    bool host_cpuid_required;
    int ordering;
    bool migration_safe;
    bool static_model;

    /*
     * Optional description of CPU model.
     * If unavailable, cpu_def->model_id is used.
     */
    const char *model_description;

    DeviceRealize parent_realize;
    DeviceUnrealize parent_unrealize;
    ResettablePhases parent_phases;
};

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_x86_cpu;
#endif

int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, DumpState *s);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, DumpState *s);
int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 DumpState *s);
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 DumpState *s);

bool x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                                Error **errp);

void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);

int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void x86_cpu_gdb_init(CPUState *cs);

int cpu_x86_support_mca_broadcast(CPUX86State *env);

#ifndef CONFIG_USER_ONLY
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);

hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);
int cpu_get_pic_interrupt(CPUX86State *s);

/* MS-DOS compatibility mode FPU exception support */
void x86_register_ferr_irq(qemu_irq irq);
void fpu_check_raise_ferr_irq(CPUX86State *s);
void cpu_set_ignne(void);
void cpu_clear_ignne(void);
#endif

/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);

/* This function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values. */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          X86Seg seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        if (seg_reg == R_SS) {
            int cpl = (flags >> DESC_DPL_SHIFT) & 3;
#if HF_CPL_MASK != 3
#error HF_CPL_MASK is hardcoded
#endif
            env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
            /* Possibly switch between BNDCFGS and BNDCFGU */
            cpu_sync_bndcs_hflags(env);
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
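
/*
 * A usage sketch (hedged; the values follow the real-mode segmentation
 * rules, with limit/flags handling elided): in real mode a segment load
 * only changes the selector and the base, with base = selector << 4:
 *
 *     cpu_x86_load_seg_cache(env, R_DS, 0x2000, 0x2000 << 4,
 *                            env->segs[R_DS].limit,
 *                            env->segs[R_DS].flags);
 *
 * cpu_x86_load_seg_cache_sipi() below is the same idea for the SIPI
 * start-up vector, where base = vector << 12.
 */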

static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                                               uint8_t sipi_vector)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    cs->halted = 0;
}

uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu);

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);

/* op_helper.c */
/* used for debug or cpu save/restore */

/* cpu-exec.c */
/*
 * The following helpers are only usable in user mode simulation.
 * The host pointers should come from lock_user().
 */
void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, void *host, size_t len);
void cpu_x86_frstor(CPUX86State *s, void *host, size_t len);
void cpu_x86_fxsave(CPUX86State *s, void *host, size_t len);
void cpu_x86_fxrstor(CPUX86State *s, void *host, size_t len);
void cpu_x86_xsave(CPUX86State *s, void *host, size_t len, uint64_t rbfm);
bool cpu_x86_xrstor(CPUX86State *s, void *host, size_t len, uint64_t rbfm);

/* cpu.c */
void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                              uint32_t vendor2, uint32_t vendor3);
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);

void x86_cpu_after_reset(X86CPU *cpu);

uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);

/* cpu.c other functions (cpuid) */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
void cpu_clear_apic_feature(CPUX86State *env);
void cpu_set_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
bool cpu_has_x2apic_feature(CPUX86State *env);

/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
void cpu_sync_avx_hflag(CPUX86State *env);

#ifndef CONFIG_USER_ONLY
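/*
 * Note: SMM uses a distinct CPU address space (index 1, selected via
 * MemTxAttrs.secure) so that SMRAM is only visible while in SMM; the
 * helpers below map transaction attributes to that address-space index.
 */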
static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return !!attrs.secure;
}

static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
}

/*
 * load efer and update the corresponding hflags. XXX: do consistency
 * checks with cpuid bits?
 */
void cpu_load_efer(CPUX86State *env, uint64_t val);
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
#endif

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);

/* hw/pc.c */
uint64_t cpu_get_tsc(CPUX86State *env);

#define CPU_RESOLVING_TYPE TYPE_X86_CPU

#ifdef TARGET_X86_64
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
#else
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
#endif

/* MMU modes definitions */
#define MMU_KSMAP64_IDX 0
#define MMU_KSMAP32_IDX 1
#define MMU_USER64_IDX 2
#define MMU_USER32_IDX 3
#define MMU_KNOSMAP64_IDX 4
#define MMU_KNOSMAP32_IDX 5
#define MMU_PHYS_IDX 6
#define MMU_NESTED_IDX 7

#ifdef CONFIG_USER_ONLY
#ifdef TARGET_X86_64
#define MMU_USER_IDX MMU_USER64_IDX
#else
#define MMU_USER_IDX MMU_USER32_IDX
#endif
#endif

static inline bool is_mmu_index_smap(int mmu_index)
{
    return (mmu_index & ~1) == MMU_KSMAP64_IDX;
}

static inline bool is_mmu_index_user(int mmu_index)
{
    return (mmu_index & ~1) == MMU_USER64_IDX;
}

static inline bool is_mmu_index_32(int mmu_index)
{
    assert(mmu_index < MMU_PHYS_IDX);
    return mmu_index & 1;
}
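
/*
 * The encoding relied on above, spelled out with the MMU_*_IDX values:
 * each 64/32-bit pair differs only in bit 0, so clearing bit 0
 * identifies the pair while bit 0 alone gives the width. For example:
 *
 *     is_mmu_index_user(MMU_USER32_IDX)   // (3 & ~1) == 2 -> true
 *     is_mmu_index_smap(MMU_KSMAP32_IDX)  // (1 & ~1) == 0 -> true
 *     is_mmu_index_32(MMU_KNOSMAP32_IDX)  // 5 & 1 -> true
 */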

#define CC_DST (env->cc_dst)
#define CC_SRC (env->cc_src)
#define CC_SRC2 (env->cc_src2)
#define CC_OP (env->cc_op)

#include "svm.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif

void do_cpu_init(X86CPU *cpu);

#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);

uint32_t cpu_cc_compute_all(CPUX86State *env1);

static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
    uint32_t eflags = env->eflags;
    if (tcg_enabled()) {
        eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK);
    }
    return eflags;
}
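
/*
 * A worked example of the DF handling (a sketch, assuming TCG):
 * env->df is stored as +1/-1 so string ops can step by it directly,
 * which means the architectural DF bit has to be re-derived on read:
 *
 *     env->df = -1;                          // DF set (count downward)
 *     uint32_t fl = cpu_compute_eflags(env);
 *     assert(fl & DF_MASK);                  // -1 & 0x400 == 0x400
 *
 * Under KVM/HVF, env->eflags already holds the full register, so the
 * TCG-only fixups are skipped.
 */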

static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
    return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}

static inline int32_t x86_get_a20_mask(CPUX86State *env)
{
    if (env->hflags & HF_SMM_MASK) {
        return -1;
    } else {
        return env->a20_mask;
    }
}
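
/*
 * Example of applying the mask (a sketch): with the A20 gate disabled,
 * bit 20 of every physical address is forced to zero, emulating the
 * 8086 1 MiB wrap-around; inside SMM the mask is forced to -1
 * (no masking):
 *
 *     hwaddr addr = 0x100000;                // 1 MiB
 *     addr &= x86_get_a20_mask(env);         // 0x0 when A20 is disabled
 *
 * env->a20_mask itself is either ~(1 << 20) or -1 depending on the gate.
 */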

static inline bool cpu_has_vmx(CPUX86State *env)
{
    return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}

static inline bool cpu_has_svm(CPUX86State *env)
{
    return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
}

/*
 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
 * Once set, CR4.VMXE must remain set as long as the vCPU is in
 * VMX operation, because CR4.VMXE is one of the bits set
 * in MSR_IA32_VMX_CR4_FIXED1.
 *
 * There is one exception to the above statement: when a vCPU enters SMM
 * mode, it temporarily exits VMX operation and may also clear CR4.VMXE
 * during execution in SMM mode. When the vCPU exits SMM mode, its state
 * is restored to VMX operation and CR4.VMXE is restored to its original
 * set value.
 *
 * Therefore, when the vCPU is not in SMM mode, we can infer whether
 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
 * know for certain.
 */
static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
{
    return cpu_has_vmx(env) &&
        ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
}

/* excp_helper.c */
int get_pg_mode(CPUX86State *env);

/* fpu_helper.c */

/* Set all non-runtime-variable float_status fields to x86 handling */
void cpu_init_fp_statuses(CPUX86State *env);
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
void update_mxcsr_from_sse_status(CPUX86State *env);

static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
{
    env->mxcsr = mxcsr;
    if (tcg_enabled()) {
        update_mxcsr_status(env);
    }
}

static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
{
    env->fpuc = fpuc;
    if (tcg_enabled()) {
        update_fp_status(env);
    }
}

/* svm_helper.c */
#ifdef CONFIG_USER_ONLY
static inline void
cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                              uint64_t param, uintptr_t retaddr)
{ /* no-op */ }
static inline bool
cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{ return false; }
#else
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param, uintptr_t retaddr);
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type);
#endif

/* apic.c */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
                                   TPRAccess access);

/* Special values for X86CPUVersion: */

/* Resolve to latest CPU version */
#define CPU_VERSION_LATEST -1

/*
 * Resolve to version defined by current machine type.
 * See x86_cpu_set_default_version()
 */
#define CPU_VERSION_AUTO -2

/* Don't resolve to any versioned CPU models, like old QEMU versions */
#define CPU_VERSION_LEGACY 0

typedef int X86CPUVersion;

/*
 * Set default CPU model version for CPU models having
 * version == CPU_VERSION_AUTO.
 */
void x86_cpu_set_default_version(X86CPUVersion version);

#ifndef CONFIG_USER_ONLY

void do_cpu_sipi(X86CPU *cpu);

#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE 0x100000

/* cpu-dump.c */
void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);

#endif

/* cpu.c */
bool cpu_is_bsp(X86CPU *cpu);

void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
uint32_t xsave_area_size(uint64_t mask, bool compacted);
void x86_update_hflags(CPUX86State *env);

static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
{
    return !!(cpu->hyperv_features & BIT(feat));
}

static inline uint64_t cr4_reserved_bits(CPUX86State *env)
{
    uint64_t reserved_bits = CR4_RESERVED_MASK;
    if (!env->features[FEAT_XSAVE]) {
        reserved_bits |= CR4_OSXSAVE_MASK;
    }
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) {
        reserved_bits |= CR4_SMEP_MASK;
    }
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        reserved_bits |= CR4_SMAP_MASK;
    }
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) {
        reserved_bits |= CR4_FSGSBASE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        reserved_bits |= CR4_PKE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) {
        reserved_bits |= CR4_LA57_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
        reserved_bits |= CR4_UMIP_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
        reserved_bits |= CR4_PKS_MASK;
    }
    if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) {
        reserved_bits |= CR4_LAM_SUP_MASK;
    }
    if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED)) {
        reserved_bits |= CR4_FRED_MASK;
    }
    return reserved_bits;
}
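
/*
 * Usage sketch (hedged; this mirrors how a CR4 write would be
 * validated): any attempt to set a bit whose backing CPUID feature is
 * absent must fault:
 *
 *     if (new_cr4 & cr4_reserved_bits(env)) {
 *         // reserved bit set: raise #GP(0), don't write CR4
 *     }
 *
 * For instance, with PKU unavailable CR4_PKE_MASK is in the reserved
 * set, so a guest setting CR4.PKE takes a general-protection fault.
 */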

static inline bool ctl_has_irq(CPUX86State *env)
{
    uint32_t int_prio;
    uint32_t tpr;

    int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
    tpr = env->int_ctl & V_TPR_MASK;

    if (env->int_ctl & V_IGN_TPR_MASK) {
        return (env->int_ctl & V_IRQ_MASK);
    }

    return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
}

#if defined(TARGET_X86_64) && \
    defined(CONFIG_USER_ONLY) && \
    defined(CONFIG_LINUX)
# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
#endif

/* majority(NOT a, b, c) = (a ^ b) ? b : c */
#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))

/*
 * ADD_COUT_VEC(x, y) = majority((x + y) ^ x ^ y, x, y)
 *
 * If two corresponding bits in x and y are the same, that's the carry
 * independent of the value (x+y)^x^y. Hence x^y can be replaced with
 * 1 in (x+y)^x^y, resulting in majority(NOT (x+y), x, y)
 */
#define ADD_COUT_VEC(op1, op2, result) \
    MAJ_INV1(result, op1, op2)

/*
 * SUB_COUT_VEC(x, y) = NOT majority(x, NOT y, (x - y) ^ x ^ NOT y)
 *                    = majority(NOT x, y, (x - y) ^ x ^ y)
 *
 * Note that the carry out is actually a borrow, i.e. it is inverted.
 * If two corresponding bits in x and y are different, the value of the
 * bit in (x-y)^x^y likewise does not matter. Hence, x^y can be replaced
 * with 0 in (x-y)^x^y, resulting in majority(NOT x, y, x-y)
 */
#define SUB_COUT_VEC(op1, op2, result) \
    MAJ_INV1(op1, op2, result)
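
/*
 * Worked example, 8-bit: for op1 = 0xFF, op2 = 0x01,
 * result = op1 + op2 = 0x00 (mod 256):
 *
 *     ADD_COUT_VEC(0xFF, 0x01, 0x00)
 *       = MAJ_INV1(0x00, 0xFF, 0x01)
 *       = (((0x00 ^ 0xFF) & (0xFF ^ 0x01)) ^ 0x01)
 *       = ((0xFF & 0xFE) ^ 0x01) = 0xFF
 *
 * i.e. every bit position generated a carry, so bit 7 of the vector
 * (CF) is 1 and bit 3 (AF) is 1, matching 0xFF + 0x01.
 */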

#endif /* I386_CPU_H */