#ifndef _X86_PROCESSOR_H_
#define _X86_PROCESSOR_H_

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <bitops.h>
#include <stdint.h>
#include <util.h>

#define CANONICAL_48_VAL 0xffffaaaaaaaaaaaaull
#define CANONICAL_57_VAL 0xffaaaaaaaaaaaaaaull
#define NONCANONICAL 0xaaaaaaaaaaaaaaaaull

#define LAM57_MASK GENMASK_ULL(62, 57)
#define LAM48_MASK GENMASK_ULL(62, 48)

/*
 * Get a linear address by combining @addr with a non-canonical pattern in the
 * @mask bits.
 */
static inline u64 get_non_canonical(u64 addr, u64 mask)
{
	return (addr & ~mask) | (NONCANONICAL & mask);
}
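
/*
 * Illustrative sketch (not part of the original API): craft a tagged pointer
 * to verify that LAM57 strips bits 62:57 before the canonicality check.
 * "vaddr" is a hypothetical, canonical supervisor address.
 *
 *	u64 tagged = get_non_canonical(vaddr, LAM57_MASK);
 *	*(volatile u64 *)tagged = 0;	// #GP unless CR4.LAM_SUP masks the tag
 */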

#ifdef __x86_64__
# define R "r"
# define W "q"
# define S "8"
#else
# define R "e"
# define W "l"
# define S "4"
#endif

#define DE_VECTOR 0
#define DB_VECTOR 1
#define NMI_VECTOR 2
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define XF_VECTOR XM_VECTOR /* AMD */
#define VE_VECTOR 20 /* Intel only */
#define CP_VECTOR 21
#define HV_VECTOR 28 /* AMD only */
#define VC_VECTOR 29 /* AMD only */
#define SX_VECTOR 30 /* AMD only */

#define X86_CR0_PE_BIT (0)
#define X86_CR0_PE BIT(X86_CR0_PE_BIT)
#define X86_CR0_MP_BIT (1)
#define X86_CR0_MP BIT(X86_CR0_MP_BIT)
#define X86_CR0_EM_BIT (2)
#define X86_CR0_EM BIT(X86_CR0_EM_BIT)
#define X86_CR0_TS_BIT (3)
#define X86_CR0_TS BIT(X86_CR0_TS_BIT)
#define X86_CR0_ET_BIT (4)
#define X86_CR0_ET BIT(X86_CR0_ET_BIT)
#define X86_CR0_NE_BIT (5)
#define X86_CR0_NE BIT(X86_CR0_NE_BIT)
#define X86_CR0_WP_BIT (16)
#define X86_CR0_WP BIT(X86_CR0_WP_BIT)
#define X86_CR0_AM_BIT (18)
#define X86_CR0_AM BIT(X86_CR0_AM_BIT)
#define X86_CR0_NW_BIT (29)
#define X86_CR0_NW BIT(X86_CR0_NW_BIT)
#define X86_CR0_CD_BIT (30)
#define X86_CR0_CD BIT(X86_CR0_CD_BIT)
#define X86_CR0_PG_BIT (31)
#define X86_CR0_PG BIT(X86_CR0_PG_BIT)

#define X86_CR3_PCID_MASK GENMASK(11, 0)
#define X86_CR3_LAM_U57_BIT (61)
#define X86_CR3_LAM_U57 BIT_ULL(X86_CR3_LAM_U57_BIT)
#define X86_CR3_LAM_U48_BIT (62)
#define X86_CR3_LAM_U48 BIT_ULL(X86_CR3_LAM_U48_BIT)

#define X86_CR4_VME_BIT (0)
#define X86_CR4_VME BIT(X86_CR4_VME_BIT)
#define X86_CR4_PVI_BIT (1)
#define X86_CR4_PVI BIT(X86_CR4_PVI_BIT)
#define X86_CR4_TSD_BIT (2)
#define X86_CR4_TSD BIT(X86_CR4_TSD_BIT)
#define X86_CR4_DE_BIT (3)
#define X86_CR4_DE BIT(X86_CR4_DE_BIT)
#define X86_CR4_PSE_BIT (4)
#define X86_CR4_PSE BIT(X86_CR4_PSE_BIT)
#define X86_CR4_PAE_BIT (5)
#define X86_CR4_PAE BIT(X86_CR4_PAE_BIT)
#define X86_CR4_MCE_BIT (6)
#define X86_CR4_MCE BIT(X86_CR4_MCE_BIT)
#define X86_CR4_PGE_BIT (7)
#define X86_CR4_PGE BIT(X86_CR4_PGE_BIT)
#define X86_CR4_PCE_BIT (8)
#define X86_CR4_PCE BIT(X86_CR4_PCE_BIT)
#define X86_CR4_OSFXSR_BIT (9)
#define X86_CR4_OSFXSR BIT(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT (10)
#define X86_CR4_OSXMMEXCPT BIT(X86_CR4_OSXMMEXCPT_BIT)
#define X86_CR4_UMIP_BIT (11)
#define X86_CR4_UMIP BIT(X86_CR4_UMIP_BIT)
#define X86_CR4_LA57_BIT (12)
#define X86_CR4_LA57 BIT(X86_CR4_LA57_BIT)
#define X86_CR4_VMXE_BIT (13)
#define X86_CR4_VMXE BIT(X86_CR4_VMXE_BIT)
#define X86_CR4_SMXE_BIT (14)
#define X86_CR4_SMXE BIT(X86_CR4_SMXE_BIT)
/* UNUSED (15) */
#define X86_CR4_FSGSBASE_BIT (16)
#define X86_CR4_FSGSBASE BIT(X86_CR4_FSGSBASE_BIT)
#define X86_CR4_PCIDE_BIT (17)
#define X86_CR4_PCIDE BIT(X86_CR4_PCIDE_BIT)
#define X86_CR4_OSXSAVE_BIT (18)
#define X86_CR4_OSXSAVE BIT(X86_CR4_OSXSAVE_BIT)
#define X86_CR4_KL_BIT (19)
#define X86_CR4_KL BIT(X86_CR4_KL_BIT)
#define X86_CR4_SMEP_BIT (20)
#define X86_CR4_SMEP BIT(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT (21)
#define X86_CR4_SMAP BIT(X86_CR4_SMAP_BIT)
#define X86_CR4_PKE_BIT (22)
#define X86_CR4_PKE BIT(X86_CR4_PKE_BIT)
#define X86_CR4_CET_BIT (23)
#define X86_CR4_CET BIT(X86_CR4_CET_BIT)
#define X86_CR4_PKS_BIT (24)
#define X86_CR4_PKS BIT(X86_CR4_PKS_BIT)
#define X86_CR4_LAM_SUP_BIT (28)
#define X86_CR4_LAM_SUP BIT(X86_CR4_LAM_SUP_BIT)

#define X86_EFLAGS_CF_BIT (0)
#define X86_EFLAGS_CF BIT(X86_EFLAGS_CF_BIT)
#define X86_EFLAGS_FIXED_BIT (1)
#define X86_EFLAGS_FIXED BIT(X86_EFLAGS_FIXED_BIT)
#define X86_EFLAGS_PF_BIT (2)
#define X86_EFLAGS_PF BIT(X86_EFLAGS_PF_BIT)
/* RESERVED 0 (3) */
#define X86_EFLAGS_AF_BIT (4)
#define X86_EFLAGS_AF BIT(X86_EFLAGS_AF_BIT)
/* RESERVED 0 (5) */
#define X86_EFLAGS_ZF_BIT (6)
#define X86_EFLAGS_ZF BIT(X86_EFLAGS_ZF_BIT)
#define X86_EFLAGS_SF_BIT (7)
#define X86_EFLAGS_SF BIT(X86_EFLAGS_SF_BIT)
#define X86_EFLAGS_TF_BIT (8)
#define X86_EFLAGS_TF BIT(X86_EFLAGS_TF_BIT)
#define X86_EFLAGS_IF_BIT (9)
#define X86_EFLAGS_IF BIT(X86_EFLAGS_IF_BIT)
#define X86_EFLAGS_DF_BIT (10)
#define X86_EFLAGS_DF BIT(X86_EFLAGS_DF_BIT)
#define X86_EFLAGS_OF_BIT (11)
#define X86_EFLAGS_OF BIT(X86_EFLAGS_OF_BIT)
#define X86_EFLAGS_IOPL GENMASK(13, 12)
#define X86_EFLAGS_NT_BIT (14)
#define X86_EFLAGS_NT BIT(X86_EFLAGS_NT_BIT)
/* RESERVED 0 (15) */
#define X86_EFLAGS_RF_BIT (16)
#define X86_EFLAGS_RF BIT(X86_EFLAGS_RF_BIT)
#define X86_EFLAGS_VM_BIT (17)
#define X86_EFLAGS_VM BIT(X86_EFLAGS_VM_BIT)
#define X86_EFLAGS_AC_BIT (18)
#define X86_EFLAGS_AC BIT(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT (19)
#define X86_EFLAGS_VIF BIT(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT (20)
#define X86_EFLAGS_VIP BIT(X86_EFLAGS_VIP_BIT)
#define X86_EFLAGS_ID_BIT (21)
#define X86_EFLAGS_ID BIT(X86_EFLAGS_ID_BIT)

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)


/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
	struct cpuid r;
	asm volatile ("cpuid"
		      : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
		      : "0"(function), "2"(index));
	return r;
}

static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
	u32 level = raw_cpuid(function & 0xf0000000, 0).a;
	if (level < function)
		return (struct cpuid) { 0, 0, 0, 0 };
	return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
	return cpuid_indexed(function, 0);
}

static inline bool is_intel(void)
{
	struct cpuid c = cpuid(0);
	u32 name[4] = { c.b, c.d, c.c };

	return strcmp((char *)name, "GenuineIntel") == 0;
}

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct x86_cpu_feature {
	u32 function;
	u16 index;
	u8 reg;
	u8 bit;
};

#define X86_CPU_FEATURE(fn, idx, gpr, __bit)				\
({									\
	struct x86_cpu_feature feature = {				\
		.function = fn,						\
		.index = idx,						\
		.reg = gpr,						\
		.bit = __bit,						\
	};								\
									\
	static_assert((fn & 0xc0000000) == 0 ||				\
		      (fn & 0xc0000000) == 0x40000000 ||		\
		      (fn & 0xc0000000) == 0x80000000 ||		\
		      (fn & 0xc0000000) == 0xc0000000);			\
	static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));\
	feature;							\
})
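
/*
 * Example (illustrative): X86_FEATURE_XXX values are consumed via
 * this_cpu_has(), defined below, e.g. a test requiring VMX might do:
 *
 *	if (!this_cpu_has(X86_FEATURE_VMX))
 *		report_skip("VMX not supported");
 */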

/*
 * Basic Leafs, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_PDCM X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_MCE X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_DS X86_CPU_FEATURE(0x1, 0, EDX, 21)
#define X86_FEATURE_XMM X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2 X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_TSC_ADJUST X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_HLE X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_SMAP X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_INTEL_PT X86_CPU_FEATURE(0x7, 0, EBX, 25)
#define X86_FEATURE_UMIP X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_SHSTK X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_LA57 X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_PKS X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_IBT X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_SPEC_CTRL X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_STIBP X86_CPU_FEATURE(0x7, 0, EDX, 27)
#define X86_FEATURE_FLUSH_L1D X86_CPU_FEATURE(0x7, 0, EDX, 28)
#define X86_FEATURE_ARCH_CAPABILITIES X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_SSBD X86_CPU_FEATURE(0x7, 0, EDX, 31)
#define X86_FEATURE_LAM X86_CPU_FEATURE(0x7, 1, EAX, 26)

/*
 * KVM defined leafs
 */
#define KVM_FEATURE_ASYNC_PF X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define KVM_FEATURE_ASYNC_PF_INT X86_CPU_FEATURE(0x40000001, 0, EAX, 14)

/*
 * Extended Leafs, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_PERFCTR_CORE X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
#define X86_FEATURE_NX X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_RDPRU X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_AMD_IBRS X86_CPU_FEATURE(0x80000008, 0, EBX, 14)
#define X86_FEATURE_AMD_STIBP X86_CPU_FEATURE(0x80000008, 0, EBX, 15)
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON X86_CPU_FEATURE(0x80000008, 0, EBX, 17)
#define X86_FEATURE_AMD_IBRS_SAME_MODE X86_CPU_FEATURE(0x80000008, 0, EBX, 19)
#define X86_FEATURE_AMD_SSBD X86_CPU_FEATURE(0x80000008, 0, EBX, 24)
#define X86_FEATURE_NPT X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_VNMI X86_CPU_FEATURE(0x8000000A, 0, EDX, 25)
#define X86_FEATURE_SME X86_CPU_FEATURE(0x8000001F, 0, EAX, 0)
#define X86_FEATURE_SEV X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_VM_PAGE_FLUSH X86_CPU_FEATURE(0x8000001F, 0, EAX, 2)
#define X86_FEATURE_SEV_ES X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
#define X86_FEATURE_SEV_SNP X86_CPU_FEATURE(0x8000001F, 0, EAX, 4)
#define X86_FEATURE_V_TSC_AUX X86_CPU_FEATURE(0x8000001F, 0, EAX, 9)
#define X86_FEATURE_SME_COHERENT X86_CPU_FEATURE(0x8000001F, 0, EAX, 10)
#define X86_FEATURE_DEBUG_SWAP X86_CPU_FEATURE(0x8000001F, 0, EAX, 14)
#define X86_FEATURE_SVSM X86_CPU_FEATURE(0x8000001F, 0, EAX, 28)
#define X86_FEATURE_SBPB X86_CPU_FEATURE(0x80000021, 0, EAX, 27)
#define X86_FEATURE_AMD_PMU_V2 X86_CPU_FEATURE(0x80000022, 0, EAX, 0)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature. Again, pack the info
 * into a 64-bit value to pass by value with no overhead on 64-bit builds.
 */
struct x86_cpu_property {
	u32 function;
	u8 index;
	u8 reg;
	u8 lo_bit;
	u8 hi_bit;
};
#define X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)		\
({									\
	struct x86_cpu_property property = {				\
		.function = fn,						\
		.index = idx,						\
		.reg = gpr,						\
		.lo_bit = low_bit,					\
		.hi_bit = high_bit,					\
	};								\
									\
	static_assert(low_bit < high_bit);				\
	static_assert((fn & 0xc0000000) == 0 ||				\
		      (fn & 0xc0000000) == 0x40000000 ||		\
		      (fn & 0xc0000000) == 0x80000000 ||		\
		      (fn & 0xc0000000) == 0xc0000000);			\
	static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));\
	property;							\
})
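
/*
 * Example (illustrative): multi-bit fields are pulled out with
 * this_cpu_property(), defined below:
 *
 *	u8 pmu_version = this_cpu_property(X86_PROPERTY_PMU_VERSION);
 */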

#define X86_PROPERTY_MAX_BASIC_LEAF X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)

#define X86_PROPERTY_SUPPORTED_XCR0_LO X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
#define X86_PROPERTY_SUPPORTED_XCR0_HI X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)

#define X86_PROPERTY_XSTATE_TILE_SIZE X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)

#define X86_PROPERTY_INTEL_PT_NR_RANGES X86_CPU_PROPERTY(0x14, 1, EAX, 0, 2)

#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_GUEST_MAX_PHY_ADDR X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
#define X86_PROPERTY_NR_PERFCTR_CORE X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)
#define X86_PROPERTY_NR_PERFCTR_NB X86_CPU_PROPERTY(0x80000022, 0, EBX, 10, 15)

#define X86_PROPERTY_MAX_CENTAUR_LEAF X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi)
{
	union {
		struct cpuid cpuid;
		u32 gprs[4];
	} c;

	c.cpuid = cpuid_indexed(function, index);

	return (c.gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct x86_cpu_feature feature)
{
	return __this_cpu_has(feature.function, feature.index,
			      feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct x86_cpu_property property)
{
	return __this_cpu_has(property.function, property.index,
			      property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}
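
/*
 * Illustrative usage: guard property reads on the existence of the leaf, as
 * cpuid_maxphyaddr() below does:
 *
 *	if (this_cpu_has_p(X86_PROPERTY_INTEL_PT_NR_RANGES))
 *		nr = this_cpu_property(X86_PROPERTY_INTEL_PT_NR_RANGES);
 */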

static inline u8 cpuid_maxphyaddr(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
		return 36;

	return this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
}

static inline u64 this_cpu_supported_xcr0(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return (u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
	u16 limit;
	ulong base;
} __attribute__((packed));

static inline void clac(void)
{
	/* CLAC, encoded as raw bytes for compatibility with old assemblers */
	asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
	/* STAC, encoded as raw bytes for compatibility with old assemblers */
	asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
	unsigned val;

	asm volatile ("mov %%cs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ds(void)
{
	unsigned val;

	asm volatile ("mov %%ds, %0" : "=mr"(val));
	return val;
}

static inline u16 read_es(void)
{
	unsigned val;

	asm volatile ("mov %%es, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ss(void)
{
	unsigned val;

	asm volatile ("mov %%ss, %0" : "=mr"(val));
	return val;
}

static inline u16 read_fs(void)
{
	unsigned val;

	asm volatile ("mov %%fs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_gs(void)
{
	unsigned val;

	asm volatile ("mov %%gs, %0" : "=mr"(val));
	return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
	asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
	asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
	asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
	asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
	asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
	asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

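/*
 * X86_EFLAGS_IOPL spans bits 13:12, so X86_EFLAGS_IOPL / 3 == BIT(12) and
 * multiplying by @iopl (0-3) drops the desired privilege level straight into
 * the IOPL field.
 */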
static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

/*
 * Don't use the safe variants for rdmsr() or wrmsr(). The exception fixup
 * infrastructure uses per-CPU data and thus consumes GS.base. Various tests
 * temporarily modify MSR_GS_BASE and will explode when trying to determine
 * whether or not RDMSR/WRMSR faulted.
 */
static inline u64 rdmsr(u32 index)
{
	u32 a, d;
	asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
	return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;
	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

#define __rdreg64_safe(fep, insn, index, val)				\
({									\
	uint32_t a, d;							\
	int vector;							\
									\
	vector = __asm_safe_out2(fep, insn, "=a"(a), "=d"(d), "c"(index));\
									\
	if (vector)							\
		*(val) = 0;						\
	else								\
		*(val) = (uint64_t)a | ((uint64_t)d << 32);		\
	vector;								\
})

#define rdreg64_safe(insn, index, val)					\
	__rdreg64_safe("", insn, index, val)

#define __wrreg64_safe(fep, insn, index, val)				\
({									\
	uint32_t eax = (val), edx = (val) >> 32;			\
									\
	__asm_safe(fep, insn, "a" (eax), "d" (edx), "c" (index));	\
})

#define wrreg64_safe(insn, index, val)					\
	__wrreg64_safe("", insn, index, val)

static inline int rdmsr_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdmsr", index, val);
}

static inline int rdmsr_fep_safe(u32 index, uint64_t *val)
{
	return __rdreg64_safe(KVM_FEP, "rdmsr", index, val);
}

static inline int wrmsr_safe(u32 index, u64 val)
{
	return wrreg64_safe("wrmsr", index, val);
}

static inline int wrmsr_fep_safe(u32 index, u64 val)
{
	return __wrreg64_safe(KVM_FEP, "wrmsr", index, val);
}
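
/*
 * Illustrative usage: probe an MSR without crashing the test on #GP.
 * MSR_IA32_SPEC_CTRL is used purely as an example index here.
 *
 *	if (wrmsr_safe(MSR_IA32_SPEC_CTRL, 0) == GP_VECTOR)
 *		report_skip("IA32_SPEC_CTRL not supported");
 */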

static inline int rdpmc_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdpmc", index, val);
}

static inline uint64_t rdpmc(uint32_t index)
{
	uint64_t val;
	int vector = rdpmc_safe(index, &val);

	assert_msg(!vector, "Unexpected %s on RDPMC(%" PRId32 ")",
		   exception_mnemonic(vector), index);
	return val;
}

static inline int xgetbv_safe(u32 index, u64 *result)
{
	/* XGETBV, encoded as raw bytes for compatibility with old assemblers */
	return rdreg64_safe(".byte 0x0f,0x01,0xd0", index, result);
}

static inline int xsetbv_safe(u32 index, u64 value)
{
	/* XSETBV, encoded as raw bytes for compatibility with old assemblers */
	return wrreg64_safe(".byte 0x0f,0x01,0xd1", index, value);
}

static inline int write_cr0_safe(ulong val)
{
	return asm_safe("mov %0,%%cr0", "r" (val));
}

static inline void write_cr0(ulong val)
{
	int vector = write_cr0_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR0 = %lx",
		   vector, val);
}

static inline ulong read_cr0(void)
{
	ulong val;
	asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr2(ulong val)
{
	asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
	ulong val;
	asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
	return val;
}

static inline int write_cr3_safe(ulong val)
{
	return asm_safe("mov %0,%%cr3", "r" (val));
}

static inline void write_cr3(ulong val)
{
	int vector = write_cr3_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR3 = %lx",
		   vector, val);
}

static inline ulong read_cr3(void)
{
	ulong val;
	asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void update_cr3(void *cr3)
{
	write_cr3((ulong)cr3);
}

static inline int write_cr4_safe(ulong val)
{
	return asm_safe("mov %0,%%cr4", "r" (val));
}

static inline void write_cr4(ulong val)
{
	int vector = write_cr4_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR4 = %lx",
		   vector, val);
}

static inline ulong read_cr4(void)
{
	ulong val;
	asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr8(ulong val)
{
	asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
	ulong val;
	asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline int lgdt_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_safe("lgdt %0", "m"(*ptr));
}

static inline int lgdt_fep_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_fep_safe("lgdt %0", "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline int lidt_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_safe("lidt %0", "m"(*ptr));
}

static inline int lidt_fep_safe(const struct descriptor_table_ptr *ptr)
{
	return asm_fep_safe("lidt %0", "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(u16 val)
{
	asm volatile ("lldt %0" : : "rm"(val));
}

static inline int lldt_safe(u16 val)
{
	return asm_safe("lldt %0", "rm"(val));
}

static inline int lldt_fep_safe(u16 val)
{
	return asm_fep_safe("lldt %0", "rm"(val));
}

static inline u16 sldt(void)
{
	u16 val;
	asm volatile ("sldt %0" : "=rm"(val));
	return val;
}

static inline void ltr(u16 val)
{
	asm volatile ("ltr %0" : : "rm"(val));
}

static inline int ltr_safe(u16 val)
{
	return asm_safe("ltr %0", "rm"(val));
}

static inline int ltr_fep_safe(u16 val)
{
	return asm_fep_safe("ltr %0", "rm"(val));
}

static inline u16 str(void)
{
	u16 val;
	asm volatile ("str %0" : "=rm"(val));
	return val;
}

static inline void write_dr0(void *val)
{
	asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
}

static inline void write_dr1(void *val)
{
	asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
}

static inline void write_dr2(void *val)
{
	asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
}

static inline void write_dr3(void *val)
{
	asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
}

static inline void write_dr6(ulong val)
{
	asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
	ulong val;
	asm volatile ("mov %%dr6, %0" : "=r"(val));
	return val;
}

static inline void write_dr7(ulong val)
{
	asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
	ulong val;
	asm volatile ("mov %%dr7, %0" : "=r"(val));
	return val;
}

static inline void pause(void)
{
	asm volatile ("pause");
}

static inline void cli(void)
{
	asm volatile ("cli");
}

/*
 * See also safe_halt().
 */
static inline void sti(void)
{
	asm volatile ("sti");
}

/*
 * Enable interrupts and ensure that interrupts are evaluated upon return from
 * this function, i.e. execute a nop to consume the STI interrupt shadow.
 */
static inline void sti_nop(void)
{
	asm volatile ("sti; nop");
}

/*
 * Enable interrupts for one instruction (nop), to allow the CPU to process all
 * interrupts that are already pending.
 */
static inline void sti_nop_cli(void)
{
	asm volatile ("sti; nop; cli");
}
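
/*
 * Illustrative usage: drain already-pending IRQs before sampling state that
 * an interrupt handler updates:
 *
 *	while (!irq_seen)	// hypothetical volatile flag set by the handler
 *		sti_nop_cli();
 */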

static inline unsigned long long rdrand(void)
{
	long long r;

	asm volatile("rdrand %0\n\t"
		     "jc 1f\n\t"
		     "mov $0, %0\n\t"
		     "1:\n\t" : "=r" (r));
	return r;
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	wrmsr(MSR_IA32_TSC, tsc);
}

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

struct invpcid_desc {
	u64 pcid : 12;
	u64 rsv : 52;
	u64 addr : 64;
};

static inline int invpcid_safe(unsigned long type, struct invpcid_desc *desc)
{
	/* invpcid (%rax), %rbx */
	return asm_safe(".byte 0x66,0x0f,0x38,0x82,0x18", "a" (desc), "b" (type));
}

/*
 * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
 * intended to be a wake event arrives *after* HLT is executed. Modern CPUs,
 * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
 * instruction after STI, *if* RFLAGS.IF=0 before STI. Note, Intel CPUs may
 * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
 */
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}
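
/*
 * Illustrative usage, assuming a helper that arms an IRQ-based wake event:
 *
 *	cli();
 *	arm_wake_irq();		// hypothetical, e.g. an APIC timer shot
 *	safe_halt();		// the IRQ cannot arrive before HLT
 */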

static inline u32 read_pkru(void)
{
	unsigned int eax, edx;
	unsigned int ecx = 0;
	unsigned int pkru;

	/* RDPKRU, encoded as raw bytes for compatibility with old assemblers */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (eax), "=d" (edx)
		     : "c" (ecx));
	pkru = eax;
	return pkru;
}

static inline void write_pkru(u32 pkru)
{
	unsigned int eax = pkru;
	unsigned int ecx = 0;
	unsigned int edx = 0;

	/* WRPKRU, encoded as raw bytes for compatibility with old assemblers */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline bool is_canonical(u64 addr)
{
	int va_width, shift_amt;

	if (this_cpu_has_p(X86_PROPERTY_MAX_VIRT_ADDR))
		va_width = this_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR);
	else
		va_width = 48;

	shift_amt = 64 - va_width;
	return (s64)(addr << shift_amt) >> shift_amt == addr;
}
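
/*
 * Example: with a 48-bit virtual address width, shift_amt == 16, so
 * 0xffff800000000000 survives the sign-extending round-trip (canonical),
 * while NONCANONICAL (0xaaaaaaaaaaaaaaaa) does not.
 */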

static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline void generate_non_canonical_gp(void)
{
	*(volatile u64 *)NONCANONICAL = 0;
}

static inline void generate_ud(void)
{
	asm volatile ("ud2");
}

static inline void generate_de(void)
{
	asm volatile (
		"xor %%eax, %%eax\n\t"
		"xor %%ebx, %%ebx\n\t"
		"xor %%edx, %%edx\n\t"
		"idiv %%ebx\n\t"
		::: "eax", "ebx", "edx");
}

static inline void generate_bp(void)
{
	asm volatile ("int3");
}

static inline void generate_single_step_db(void)
{
	write_rflags(read_rflags() | X86_EFLAGS_TF);
	asm volatile("nop");
}

static inline uint64_t generate_usermode_ac(void)
{
	/*
	 * Trigger an #AC by writing 8 bytes to a 4-byte aligned address.
	 * Disclaimer: It is assumed that the stack pointer is aligned
	 * on a 16-byte boundary as x86_64 stacks should be.
	 */
	asm volatile("movq $0, -0x4(%rsp)");

	return 0;
}

/*
 * Switch from 64-bit to 32-bit mode and generate #OF via INTO. Note, if RIP
 * or RSP holds a 64-bit value, this helper will NOT generate #OF.
 */
static inline void generate_of(void)
{
	struct far_pointer32 fp = {
		.offset = (uintptr_t)&&into,
		.selector = KERNEL_CS32,
	};
	uintptr_t rsp;

	asm volatile ("mov %%rsp, %0" : "=r"(rsp));

	if (fp.offset != (uintptr_t)&&into) {
		printf("Code address too high.\n");
		return;
	}
	if ((u32)rsp != rsp) {
		printf("Stack address too high.\n");
		return;
	}

	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
	return;
into:
	asm volatile (".code32;"
		      "movl $0x7fffffff, %eax;"
		      "addl %eax, %eax;"
		      "into;"
		      "lret;"
		      ".code64");
	__builtin_unreachable();
}

static inline void fnop(void)
{
	asm volatile("fnop");
}

/* If CR0.TS is set in L2, #NM is generated. */
static inline void generate_cr0_ts_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_EM) | X86_CR0_TS);
	fnop();
}

/* If CR0.TS is cleared and CR0.EM is set, #NM is generated. */
static inline void generate_cr0_em_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_TS) | X86_CR0_EM);
	fnop();
}

static inline bool is_la57_enabled(void)
{
	return !!(read_cr4() & X86_CR4_LA57);
}

static inline bool is_lam_sup_enabled(void)
{
	return !!(read_cr4() & X86_CR4_LAM_SUP);
}

static inline bool is_lam_u48_enabled(void)
{
	return (read_cr3() & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57)) == X86_CR3_LAM_U48;
}

static inline bool is_lam_u57_enabled(void)
{
	return !!(read_cr3() & X86_CR3_LAM_U57);
}
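
/*
 * Illustrative sketch tying the LAM helpers together: choose the mask that
 * matches the active user-mode LAM configuration before tagging a pointer,
 * where "uaddr" is a hypothetical user linear address.
 *
 *	u64 mask = is_lam_u57_enabled() ? LAM57_MASK :
 *		   is_lam_u48_enabled() ? LAM48_MASK : 0;
 *	u64 tagged = get_non_canonical(uaddr, mask);
 */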

#endif /* _X86_PROCESSOR_H_ */