#ifndef _X86_PROCESSOR_H_
#define _X86_PROCESSOR_H_

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <bitops.h>
#include <stdint.h>

#define CANONICAL_48_VAL 0xffffaaaaaaaaaaaaull
#define CANONICAL_57_VAL 0xffaaaaaaaaaaaaaaull
#define NONCANONICAL	 0xaaaaaaaaaaaaaaaaull

#define LAM57_MASK	GENMASK_ULL(62, 57)
#define LAM48_MASK	GENMASK_ULL(62, 48)

/*
 * Get a linear address by combining @addr with a non-canonical pattern in the
 * @mask bits.
 */
static inline u64 get_non_canonical(u64 addr, u64 mask)
{
	return (addr & ~mask) | (NONCANONICAL & mask);
}
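
/*
 * Illustrative usage (not part of the original header): a LAM test might tag
 * a pointer by injecting the non-canonical pattern into the bits that LAM57
 * masks off (62:57):
 *
 *	u64 tagged = get_non_canonical((u64)ptr, LAM57_MASK);
 *
 * Without LAM enabled, dereferencing such an address is non-canonical and
 * faults; see generate_non_canonical_gp() below.
 */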

/*
 * Register name prefix ("r"/"e"), instruction width suffix ("q"/"l"), and
 * operand size in bytes for 64-bit vs. 32-bit inline assembly.
 */
#ifdef __x86_64__
#  define R "r"
#  define W "q"
#  define S "8"
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif

#define DE_VECTOR 0
#define DB_VECTOR 1
#define NMI_VECTOR 2
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define XF_VECTOR XM_VECTOR /* AMD */
#define VE_VECTOR 20 /* Intel only */
#define CP_VECTOR 21
#define HV_VECTOR 28 /* AMD only */
#define VC_VECTOR 29 /* AMD only */
#define SX_VECTOR 30 /* AMD only */

#define X86_CR0_PE_BIT		(0)
#define X86_CR0_PE		BIT(X86_CR0_PE_BIT)
#define X86_CR0_MP_BIT		(1)
#define X86_CR0_MP		BIT(X86_CR0_MP_BIT)
#define X86_CR0_EM_BIT		(2)
#define X86_CR0_EM		BIT(X86_CR0_EM_BIT)
#define X86_CR0_TS_BIT		(3)
#define X86_CR0_TS		BIT(X86_CR0_TS_BIT)
#define X86_CR0_ET_BIT		(4)
#define X86_CR0_ET		BIT(X86_CR0_ET_BIT)
#define X86_CR0_NE_BIT		(5)
#define X86_CR0_NE		BIT(X86_CR0_NE_BIT)
#define X86_CR0_WP_BIT		(16)
#define X86_CR0_WP		BIT(X86_CR0_WP_BIT)
#define X86_CR0_AM_BIT		(18)
#define X86_CR0_AM		BIT(X86_CR0_AM_BIT)
#define X86_CR0_NW_BIT		(29)
#define X86_CR0_NW		BIT(X86_CR0_NW_BIT)
#define X86_CR0_CD_BIT		(30)
#define X86_CR0_CD		BIT(X86_CR0_CD_BIT)
#define X86_CR0_PG_BIT		(31)
#define X86_CR0_PG		BIT(X86_CR0_PG_BIT)

#define X86_CR3_PCID_MASK	GENMASK(11, 0)
#define X86_CR3_LAM_U57_BIT	(61)
#define X86_CR3_LAM_U57		BIT_ULL(X86_CR3_LAM_U57_BIT)
#define X86_CR3_LAM_U48_BIT	(62)
#define X86_CR3_LAM_U48		BIT_ULL(X86_CR3_LAM_U48_BIT)

#define X86_CR4_VME_BIT		(0)
#define X86_CR4_VME		BIT(X86_CR4_VME_BIT)
#define X86_CR4_PVI_BIT		(1)
#define X86_CR4_PVI		BIT(X86_CR4_PVI_BIT)
#define X86_CR4_TSD_BIT		(2)
#define X86_CR4_TSD		BIT(X86_CR4_TSD_BIT)
#define X86_CR4_DE_BIT		(3)
#define X86_CR4_DE		BIT(X86_CR4_DE_BIT)
#define X86_CR4_PSE_BIT		(4)
#define X86_CR4_PSE		BIT(X86_CR4_PSE_BIT)
#define X86_CR4_PAE_BIT		(5)
#define X86_CR4_PAE		BIT(X86_CR4_PAE_BIT)
#define X86_CR4_MCE_BIT		(6)
#define X86_CR4_MCE		BIT(X86_CR4_MCE_BIT)
#define X86_CR4_PGE_BIT		(7)
#define X86_CR4_PGE		BIT(X86_CR4_PGE_BIT)
#define X86_CR4_PCE_BIT		(8)
#define X86_CR4_PCE		BIT(X86_CR4_PCE_BIT)
#define X86_CR4_OSFXSR_BIT	(9)
#define X86_CR4_OSFXSR		BIT(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT	(10)
#define X86_CR4_OSXMMEXCPT	BIT(X86_CR4_OSXMMEXCPT_BIT)
#define X86_CR4_UMIP_BIT	(11)
#define X86_CR4_UMIP		BIT(X86_CR4_UMIP_BIT)
#define X86_CR4_LA57_BIT	(12)
#define X86_CR4_LA57		BIT(X86_CR4_LA57_BIT)
#define X86_CR4_VMXE_BIT	(13)
#define X86_CR4_VMXE		BIT(X86_CR4_VMXE_BIT)
#define X86_CR4_SMXE_BIT	(14)
#define X86_CR4_SMXE		BIT(X86_CR4_SMXE_BIT)
/* UNUSED			(15) */
#define X86_CR4_FSGSBASE_BIT	(16)
#define X86_CR4_FSGSBASE	BIT(X86_CR4_FSGSBASE_BIT)
#define X86_CR4_PCIDE_BIT	(17)
#define X86_CR4_PCIDE		BIT(X86_CR4_PCIDE_BIT)
#define X86_CR4_OSXSAVE_BIT	(18)
#define X86_CR4_OSXSAVE		BIT(X86_CR4_OSXSAVE_BIT)
#define X86_CR4_KL_BIT		(19)
#define X86_CR4_KL		BIT(X86_CR4_KL_BIT)
#define X86_CR4_SMEP_BIT	(20)
#define X86_CR4_SMEP		BIT(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT	(21)
#define X86_CR4_SMAP		BIT(X86_CR4_SMAP_BIT)
#define X86_CR4_PKE_BIT		(22)
#define X86_CR4_PKE		BIT(X86_CR4_PKE_BIT)
#define X86_CR4_CET_BIT		(23)
#define X86_CR4_CET		BIT(X86_CR4_CET_BIT)
#define X86_CR4_PKS_BIT		(24)
#define X86_CR4_PKS		BIT(X86_CR4_PKS_BIT)
#define X86_CR4_LAM_SUP_BIT	(28)
#define X86_CR4_LAM_SUP		BIT(X86_CR4_LAM_SUP_BIT)

#define X86_EFLAGS_CF_BIT	(0)
#define X86_EFLAGS_CF		BIT(X86_EFLAGS_CF_BIT)
#define X86_EFLAGS_FIXED_BIT	(1)
#define X86_EFLAGS_FIXED	BIT(X86_EFLAGS_FIXED_BIT)
#define X86_EFLAGS_PF_BIT	(2)
#define X86_EFLAGS_PF		BIT(X86_EFLAGS_PF_BIT)
/* RESERVED 0			(3) */
#define X86_EFLAGS_AF_BIT	(4)
#define X86_EFLAGS_AF		BIT(X86_EFLAGS_AF_BIT)
/* RESERVED 0			(5) */
#define X86_EFLAGS_ZF_BIT	(6)
#define X86_EFLAGS_ZF		BIT(X86_EFLAGS_ZF_BIT)
#define X86_EFLAGS_SF_BIT	(7)
#define X86_EFLAGS_SF		BIT(X86_EFLAGS_SF_BIT)
#define X86_EFLAGS_TF_BIT	(8)
#define X86_EFLAGS_TF		BIT(X86_EFLAGS_TF_BIT)
#define X86_EFLAGS_IF_BIT	(9)
#define X86_EFLAGS_IF		BIT(X86_EFLAGS_IF_BIT)
#define X86_EFLAGS_DF_BIT	(10)
#define X86_EFLAGS_DF		BIT(X86_EFLAGS_DF_BIT)
#define X86_EFLAGS_OF_BIT	(11)
#define X86_EFLAGS_OF		BIT(X86_EFLAGS_OF_BIT)
#define X86_EFLAGS_IOPL		GENMASK(13, 12)
#define X86_EFLAGS_NT_BIT	(14)
#define X86_EFLAGS_NT		BIT(X86_EFLAGS_NT_BIT)
/* RESERVED 0			(15) */
#define X86_EFLAGS_RF_BIT	(16)
#define X86_EFLAGS_RF		BIT(X86_EFLAGS_RF_BIT)
#define X86_EFLAGS_VM_BIT	(17)
#define X86_EFLAGS_VM		BIT(X86_EFLAGS_VM_BIT)
#define X86_EFLAGS_AC_BIT	(18)
#define X86_EFLAGS_AC		BIT(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT	(19)
#define X86_EFLAGS_VIF		BIT(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT	(20)
#define X86_EFLAGS_VIP		BIT(X86_EFLAGS_VIP_BIT)
#define X86_EFLAGS_ID_BIT	(21)
#define X86_EFLAGS_ID		BIT(X86_EFLAGS_ID_BIT)

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
	struct cpuid r;
	asm volatile ("cpuid"
		      : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
		      : "0"(function), "2"(index));
	return r;
}

static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
	u32 level = raw_cpuid(function & 0xf0000000, 0).a;

	if (level < function)
		return (struct cpuid) { 0, 0, 0, 0 };
	return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
	return cpuid_indexed(function, 0);
}

static inline u8 cpuid_maxphyaddr(void)
{
	if (raw_cpuid(0x80000000, 0).a < 0x80000008)
		return 36;
	return raw_cpuid(0x80000008, 0).a & 0xff;
}

static inline bool is_intel(void)
{
	struct cpuid c = cpuid(0);
	/* name[3] stays zero-initialized to NUL-terminate the vendor string. */
	u32 name[4] = {c.b, c.d, c.c };

	return strcmp((char *)name, "GenuineIntel") == 0;
}

#define	CPUID(a, b, c, d) ((((unsigned long long) a) << 32) | (b << 16) | \
			  (c << 8) | d)

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 * 	[63:32] :  input value for EAX
 * 	[31:16] :  input value for ECX
 * 	[15:8]  :  output register
 * 	[7:0]   :  bit position in output register
 */
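
/*
 * For example (illustrative), X86_FEATURE_LAM below packs leaf 0x7,
 * subleaf 1, output register EAX (enum value 0), bit 26:
 *
 *	CPUID(0x7, 1, EAX, 26) == (0x7ull << 32) | (1 << 16) | (0 << 8) | 26
 */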

/*
 * Basic leaves, a.k.a. Intel defined
 */
#define	X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define	X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define	X86_FEATURE_PDCM		(CPUID(0x1, 0, ECX, 15))
#define	X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define X86_FEATURE_X2APIC		(CPUID(0x1, 0, ECX, 21))
#define	X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define	X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define	X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define	X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define	X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define	X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define	X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define	X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define	X86_FEATURE_DS			(CPUID(0x1, 0, EDX, 21))
#define	X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
#define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define	X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define	X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define	X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define X86_FEATURE_INTEL_PT		(CPUID(0x7, 0, EBX, 25))
#define	X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define	X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define	X86_FEATURE_SHSTK		(CPUID(0x7, 0, ECX, 7))
#define	X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define	X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))
#define	X86_FEATURE_IBT			(CPUID(0x7, 0, EDX, 20))
#define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define	X86_FEATURE_FLUSH_L1D		(CPUID(0x7, 0, EDX, 28))
#define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))
#define	X86_FEATURE_LAM			(CPUID(0x7, 1, EAX, 26))

/*
 * KVM defined leaves
 */
#define	KVM_FEATURE_ASYNC_PF		(CPUID(0x40000001, 0, EAX, 4))
#define	KVM_FEATURE_ASYNC_PF_INT	(CPUID(0x40000001, 0, EAX, 14))

/*
 * Extended leaves, a.k.a. AMD defined
 */
#define	X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define	X86_FEATURE_PERFCTR_CORE	(CPUID(0x80000001, 0, ECX, 23))
#define	X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define	X86_FEATURE_GBPAGES		(CPUID(0x80000001, 0, EDX, 26))
#define	X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define	X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
#define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
#define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define	X86_FEATURE_LBRV		(CPUID(0x8000000A, 0, EDX, 1))
#define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
#define X86_FEATURE_TSCRATEMSR		(CPUID(0x8000000A, 0, EDX, 4))
#define X86_FEATURE_PAUSEFILTER		(CPUID(0x8000000A, 0, EDX, 10))
#define X86_FEATURE_PFTHRESHOLD		(CPUID(0x8000000A, 0, EDX, 12))
#define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))
#define X86_FEATURE_VNMI		(CPUID(0x8000000A, 0, EDX, 25))
#define	X86_FEATURE_AMD_PMU_V2		(CPUID(0x80000022, 0, EAX, 0))

static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);
	tmp = (u32 *)&c;

	return ((*(tmp + (output_reg % 32))) & (1 << bit));
}
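
/*
 * Illustrative usage (not part of the original header): gate feature-specific
 * test code on the CPUID bit decoded by this_cpu_has():
 *
 *	if (!this_cpu_has(X86_FEATURE_XSAVE))
 *		return;	// skip XSAVE-dependent checks
 */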

struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
	u16 limit;
	ulong base;
} __attribute__((packed));

/* CLAC (opcode 0f 01 ca), emitted as raw bytes. */
static inline void clac(void)
{
	asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

/* STAC (opcode 0f 01 cb), emitted as raw bytes. */
static inline void stac(void)
{
	asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
	unsigned val;

	asm volatile ("mov %%cs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ds(void)
{
	unsigned val;

	asm volatile ("mov %%ds, %0" : "=mr"(val));
	return val;
}

static inline u16 read_es(void)
{
	unsigned val;

	asm volatile ("mov %%es, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ss(void)
{
	unsigned val;

	asm volatile ("mov %%ss, %0" : "=mr"(val));
	return val;
}

static inline u16 read_fs(void)
{
	unsigned val;

	asm volatile ("mov %%fs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_gs(void)
{
	unsigned val;

	asm volatile ("mov %%gs, %0" : "=mr"(val));
	return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
	asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
	asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
	asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
	asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
	asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
	asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;

	/* X86_EFLAGS_IOPL / 3 == BIT(12), the unit of the 2-bit IOPL field. */
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

/*
 * Don't use the safe variants for rdmsr() or wrmsr().  The exception fixup
 * infrastructure uses per-CPU data and thus consumes GS.base.  Various tests
 * temporarily modify MSR_GS_BASE and will explode when trying to determine
 * whether or not RDMSR/WRMSR faulted.
 */
static inline u64 rdmsr(u32 index)
{
	u32 a, d;
	asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
	return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;
	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

#define __rdreg64_safe(fep, insn, index, val)				\
({									\
	uint32_t a, d;							\
	int vector;							\
									\
	vector = __asm_safe_out2(fep, insn, "=a"(a), "=d"(d), "c"(index));\
									\
	if (vector)							\
		*(val) = 0;						\
	else								\
		*(val) = (uint64_t)a | ((uint64_t)d << 32);		\
	vector;								\
})

#define rdreg64_safe(insn, index, val)					\
	__rdreg64_safe("", insn, index, val)

#define __wrreg64_safe(fep, insn, index, val)				\
({									\
	uint32_t eax = (val), edx = (val) >> 32;			\
									\
	__asm_safe(fep, insn, "a" (eax), "d" (edx), "c" (index));	\
})

#define wrreg64_safe(insn, index, val)					\
	__wrreg64_safe("", insn, index, val)

static inline int rdmsr_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdmsr", index, val);
}

static inline int rdmsr_fep_safe(u32 index, uint64_t *val)
{
	return __rdreg64_safe(KVM_FEP, "rdmsr", index, val);
}

static inline int wrmsr_safe(u32 index, u64 val)
{
	return wrreg64_safe("wrmsr", index, val);
}

static inline int wrmsr_fep_safe(u32 index, u64 val)
{
	return __wrreg64_safe(KVM_FEP, "wrmsr", index, val);
}

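/*
 * Illustrative usage (not part of the original header): the *_safe() variants
 * return the exception vector, or 0 on success, so callers can probe MSRs
 * that may not exist:
 *
 *	uint64_t val;
 *
 *	if (rdmsr_safe(index, &val) == GP_VECTOR)
 *		...MSR is unsupported, skip the test...
 */
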
static inline int rdpmc_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdpmc", index, val);
}

static inline uint64_t rdpmc(uint32_t index)
{
	uint64_t val;
	int vector = rdpmc_safe(index, &val);

	assert_msg(!vector, "Unexpected %s on RDPMC(%" PRId32 ")",
		   exception_mnemonic(vector), index);
	return val;
}

/* XGETBV (0f 01 d0) and XSETBV (0f 01 d1), emitted as raw bytes. */
static inline int xgetbv_safe(u32 index, u64 *result)
{
	return rdreg64_safe(".byte 0x0f,0x01,0xd0", index, result);
}

static inline int xsetbv_safe(u32 index, u64 value)
{
	return wrreg64_safe(".byte 0x0f,0x01,0xd1", index, value);
}

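/*
 * Illustrative (not part of the original header): enable XSAVE, then set XCR0
 * (index 0), tolerating a #GP if a requested feature bit is unsupported.  The
 * literal bits are XCR0.x87 (0x1) and XCR0.SSE (0x2):
 *
 *	write_cr4(read_cr4() | X86_CR4_OSXSAVE);
 *	vector = xsetbv_safe(0, 0x3);
 */
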
static inline int write_cr0_safe(ulong val)
{
	return asm_safe("mov %0,%%cr0", "r" (val));
}

static inline void write_cr0(ulong val)
{
	int vector = write_cr0_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR0 = %lx",
		   vector, val);
}

static inline ulong read_cr0(void)
{
	ulong val;
	asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr2(ulong val)
{
	asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
	ulong val;
	asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
	return val;
}

static inline int write_cr3_safe(ulong val)
{
	return asm_safe("mov %0,%%cr3", "r" (val));
}

static inline void write_cr3(ulong val)
{
	int vector = write_cr3_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR3 = %lx",
		   vector, val);
}

static inline ulong read_cr3(void)
{
	ulong val;
	asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void update_cr3(void *cr3)
{
	write_cr3((ulong)cr3);
}

static inline int write_cr4_safe(ulong val)
{
	return asm_safe("mov %0,%%cr4", "r" (val));
}

static inline void write_cr4(ulong val)
{
	int vector = write_cr4_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR4 = %lx",
		   vector, val);
}

static inline ulong read_cr4(void)
{
	ulong val;
	asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr8(ulong val)
{
	asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
	ulong val;
	asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline int lgdt_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_safe("lgdt %0", "m"(*ptr));
}

static inline int lgdt_fep_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_fep_safe("lgdt %0", "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline int lidt_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_safe("lidt %0", "m"(*ptr));
}

static inline int lidt_fep_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_fep_safe("lidt %0", "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(u16 val)
{
	asm volatile ("lldt %0" : : "rm"(val));
}

static inline int lldt_safe(u16 val)
{
	return asm_safe("lldt %0", "rm"(val));
}

static inline int lldt_fep_safe(u16 val)
{
	return asm_fep_safe("lldt %0", "rm"(val));
}

static inline u16 sldt(void)
{
	u16 val;
	asm volatile ("sldt %0" : "=rm"(val));
	return val;
}

static inline void ltr(u16 val)
{
	asm volatile ("ltr %0" : : "rm"(val));
}

static inline int ltr_safe(u16 val)
{
	return asm_safe("ltr %0", "rm"(val));
}

static inline int ltr_fep_safe(u16 val)
{
	return asm_fep_safe("ltr %0", "rm"(val));
}

static inline u16 str(void)
{
	u16 val;
	asm volatile ("str %0" : "=rm"(val));
	return val;
}

static inline void write_dr0(void *val)
{
	asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
}

static inline void write_dr1(void *val)
{
	asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
}

static inline void write_dr2(void *val)
{
	asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
}

static inline void write_dr3(void *val)
{
	asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
}

static inline void write_dr6(ulong val)
{
	asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
	ulong val;
	asm volatile ("mov %%dr6, %0" : "=r"(val));
	return val;
}

static inline void write_dr7(ulong val)
{
	asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
	ulong val;
	asm volatile ("mov %%dr7, %0" : "=r"(val));
	return val;
}

static inline void pause(void)
{
	asm volatile ("pause");
}

static inline void cli(void)
{
	asm volatile ("cli");
}

/*
 * See also safe_halt().
 */
static inline void sti(void)
{
	asm volatile ("sti");
}

/*
 * Enable interrupts and ensure that interrupts are evaluated upon return from
 * this function, i.e. execute a nop to consume the STI interrupt shadow.
 */
static inline void sti_nop(void)
{
	asm volatile ("sti; nop");
}

/*
 * Enable interrupts for one instruction (nop), to allow the CPU to process
 * all interrupts that are already pending.
 */
static inline void sti_nop_cli(void)
{
	asm volatile ("sti; nop; cli");
}

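/*
 * Illustrative (not part of the original header): drain a pending IRQ, e.g. a
 * self-IPI, before checking its side effects:
 *
 *	cli();
 *	...queue the IRQ...
 *	sti_nop_cli();	// the pending IRQ is delivered during the nop
 */
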
static inline unsigned long long rdrand(void)
{
	long long r;

	asm volatile("rdrand %0\n\t"
		     "jc 1f\n\t"
		     "mov $0, %0\n\t"
		     "1:\n\t" : "=r" (r));
	return r;
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}
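
/*
 * Illustrative usage (not part of the original header): bracket a sequence
 * with fenced_rdtsc() to measure it in TSC cycles without the reads being
 * reordered around the measured code:
 *
 *	u64 start = fenced_rdtsc();
 *	...code under measurement...
 *	u64 cycles = fenced_rdtsc() - start;
 */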

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	wrmsr(MSR_IA32_TSC, tsc);
}

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

struct invpcid_desc {
	u64 pcid : 12;
	u64 rsv  : 52;
	u64 addr : 64;
};

static inline int invpcid_safe(unsigned long type, struct invpcid_desc *desc)
{
	/* invpcid (%rax), %rbx */
	return asm_safe(".byte 0x66,0x0f,0x38,0x82,0x18", "a" (desc), "b" (type));
}
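
/*
 * Illustrative usage (not part of the original header): individual-address
 * invalidation (INVPCID type 0) for a single PCID+VA pair:
 *
 *	struct invpcid_desc desc = { .pcid = 1, .addr = (u64)va };
 *
 *	vector = invpcid_safe(0, &desc);
 */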

/*
 * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
 * intended to be a wake event arrives *after* HLT is executed.  Modern CPUs,
 * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
 * instruction after STI, *if* RFLAGS.IF=0 before STI.  Note, Intel CPUs may
 * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
 */
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}
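
/*
 * Illustrative (not part of the original header): arm a wake event with
 * interrupts disabled, then wait for it without racing the HLT:
 *
 *	cli();
 *	...arm a timer interrupt or IPI...
 *	safe_halt();	// the IRQ wakes the CPU after HLT retires
 */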

static inline u32 read_pkru(void)
{
	unsigned int eax, edx;
	unsigned int ecx = 0;
	unsigned int pkru;

	/* RDPKRU (0f 01 ee), emitted as raw bytes. */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (eax), "=d" (edx)
		     : "c" (ecx));
	pkru = eax;
	return pkru;
}

static inline void write_pkru(u32 pkru)
{
	unsigned int eax = pkru;
	unsigned int ecx = 0;
	unsigned int edx = 0;

	/* WRPKRU (0f 01 ef), emitted as raw bytes. */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline bool is_canonical(u64 addr)
{
	int va_width = (raw_cpuid(0x80000008, 0).a & 0xff00) >> 8;
	int shift_amt = 64 - va_width;

	return (s64)(addr << shift_amt) >> shift_amt == addr;
}
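
/*
 * Illustrative (not part of the original header): is_canonical() checks that
 * the upper address bits sign-extend bit (va_width - 1), so for example:
 *
 *	is_canonical(CANONICAL_48_VAL);	// true, bits 63:47 are all ones
 *	is_canonical(NONCANONICAL);	// false for 48- and 57-bit VA widths
 */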

static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("lock; btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("lock; bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

/* Toggling CR4.PGE flushes all TLB entries, including global translations. */
static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline void generate_non_canonical_gp(void)
{
	*(volatile u64 *)NONCANONICAL = 0;
}

static inline void generate_ud(void)
{
	asm volatile ("ud2");
}

static inline void generate_de(void)
{
	asm volatile (
		"xor %%eax, %%eax\n\t"
		"xor %%ebx, %%ebx\n\t"
		"xor %%edx, %%edx\n\t"
		"idiv %%ebx\n\t"
		::: "eax", "ebx", "edx");
}

static inline void generate_bp(void)
{
	asm volatile ("int3");
}

static inline void generate_single_step_db(void)
{
	write_rflags(read_rflags() | X86_EFLAGS_TF);
	asm volatile("nop");
}

static inline uint64_t generate_usermode_ac(void)
{
	/*
	 * Trigger an #AC by writing 8 bytes to a 4-byte aligned address.
	 * Disclaimer: It is assumed that the stack pointer is aligned
	 * on a 16-byte boundary as x86_64 stacks should be.
	 */
	asm volatile("movq $0, -0x4(%rsp)");

	return 0;
}

/*
 * Switch from 64-bit to 32-bit mode and generate #OF via INTO.  Note, if RIP
 * or RSP holds a 64-bit value, this helper will NOT generate #OF.
 */
static inline void generate_of(void)
{
	struct far_pointer32 fp = {
		.offset = (uintptr_t)&&into,
		.selector = KERNEL_CS32,
	};
	uintptr_t rsp;

	asm volatile ("mov %%rsp, %0" : "=r"(rsp));

	if (fp.offset != (uintptr_t)&&into) {
		printf("Code address too high.\n");
		return;
	}
	if ((u32)rsp != rsp) {
		printf("Stack address too high.\n");
		return;
	}

	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
	return;
into:
	asm volatile (".code32;"
		      "movl $0x7fffffff, %eax;"
		      "addl %eax, %eax;"
		      "into;"
		      "lret;"
		      ".code64");
	__builtin_unreachable();
}

static inline void fnop(void)
{
	asm volatile("fnop");
}

/* If CR0.TS is set in L2, #NM is generated. */
static inline void generate_cr0_ts_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_EM) | X86_CR0_TS);
	fnop();
}

/* If CR0.TS is cleared and CR0.EM is set, #NM is generated. */
static inline void generate_cr0_em_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_TS) | X86_CR0_EM);
	fnop();
}

static inline bool is_la57_enabled(void)
{
	return !!(read_cr4() & X86_CR4_LA57);
}

static inline bool is_lam_sup_enabled(void)
{
	return !!(read_cr4() & X86_CR4_LAM_SUP);
}

static inline bool is_lam_u48_enabled(void)
{
	return (read_cr3() & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57)) == X86_CR3_LAM_U48;
}

static inline bool is_lam_u57_enabled(void)
{
	return !!(read_cr3() & X86_CR3_LAM_U57);
}

#endif