/* kvm-unit-tests: lib/x86/processor.h (revision 2f3c02862e03fc1965b2bb54861a505fd4f6d1e5) */
#ifndef _X86_PROCESSOR_H_
#define _X86_PROCESSOR_H_

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <bitops.h>
#include <stdint.h>

#define NONCANONICAL	0xaaaaaaaaaaaaaaaaull

#ifdef __x86_64__
#  define R "r"
#  define W "q"
#  define S "8"
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif

#define DE_VECTOR 0
#define DB_VECTOR 1
#define NMI_VECTOR 2
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define XF_VECTOR XM_VECTOR /* AMD */
#define VE_VECTOR 20 /* Intel only */
#define CP_VECTOR 21
#define HV_VECTOR 28 /* AMD only */
#define VC_VECTOR 29 /* AMD only */
#define SX_VECTOR 30 /* AMD only */

#define X86_CR0_PE_BIT		(0)
#define X86_CR0_PE		BIT(X86_CR0_PE_BIT)
#define X86_CR0_MP_BIT		(1)
#define X86_CR0_MP		BIT(X86_CR0_MP_BIT)
#define X86_CR0_EM_BIT		(2)
#define X86_CR0_EM		BIT(X86_CR0_EM_BIT)
#define X86_CR0_TS_BIT		(3)
#define X86_CR0_TS		BIT(X86_CR0_TS_BIT)
#define X86_CR0_ET_BIT		(4)
#define X86_CR0_ET		BIT(X86_CR0_ET_BIT)
#define X86_CR0_NE_BIT		(5)
#define X86_CR0_NE		BIT(X86_CR0_NE_BIT)
#define X86_CR0_WP_BIT		(16)
#define X86_CR0_WP		BIT(X86_CR0_WP_BIT)
#define X86_CR0_AM_BIT		(18)
#define X86_CR0_AM		BIT(X86_CR0_AM_BIT)
#define X86_CR0_NW_BIT		(29)
#define X86_CR0_NW		BIT(X86_CR0_NW_BIT)
#define X86_CR0_CD_BIT		(30)
#define X86_CR0_CD		BIT(X86_CR0_CD_BIT)
#define X86_CR0_PG_BIT		(31)
#define X86_CR0_PG		BIT(X86_CR0_PG_BIT)

#define X86_CR3_PCID_MASK	GENMASK(11, 0)

#define X86_CR4_VME_BIT		(0)
#define X86_CR4_VME		BIT(X86_CR4_VME_BIT)
#define X86_CR4_PVI_BIT		(1)
#define X86_CR4_PVI		BIT(X86_CR4_PVI_BIT)
#define X86_CR4_TSD_BIT		(2)
#define X86_CR4_TSD		BIT(X86_CR4_TSD_BIT)
#define X86_CR4_DE_BIT		(3)
#define X86_CR4_DE		BIT(X86_CR4_DE_BIT)
#define X86_CR4_PSE_BIT		(4)
#define X86_CR4_PSE		BIT(X86_CR4_PSE_BIT)
#define X86_CR4_PAE_BIT		(5)
#define X86_CR4_PAE		BIT(X86_CR4_PAE_BIT)
#define X86_CR4_MCE_BIT		(6)
#define X86_CR4_MCE		BIT(X86_CR4_MCE_BIT)
#define X86_CR4_PGE_BIT		(7)
#define X86_CR4_PGE		BIT(X86_CR4_PGE_BIT)
#define X86_CR4_PCE_BIT		(8)
#define X86_CR4_PCE		BIT(X86_CR4_PCE_BIT)
#define X86_CR4_OSFXSR_BIT	(9)
#define X86_CR4_OSFXSR		BIT(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT	(10)
#define X86_CR4_OSXMMEXCPT	BIT(X86_CR4_OSXMMEXCPT_BIT)
#define X86_CR4_UMIP_BIT	(11)
#define X86_CR4_UMIP		BIT(X86_CR4_UMIP_BIT)
#define X86_CR4_LA57_BIT	(12)
#define X86_CR4_LA57		BIT(X86_CR4_LA57_BIT)
#define X86_CR4_VMXE_BIT	(13)
#define X86_CR4_VMXE		BIT(X86_CR4_VMXE_BIT)
#define X86_CR4_SMXE_BIT	(14)
#define X86_CR4_SMXE		BIT(X86_CR4_SMXE_BIT)
/* UNUSED			(15) */
#define X86_CR4_FSGSBASE_BIT	(16)
#define X86_CR4_FSGSBASE	BIT(X86_CR4_FSGSBASE_BIT)
#define X86_CR4_PCIDE_BIT	(17)
#define X86_CR4_PCIDE		BIT(X86_CR4_PCIDE_BIT)
#define X86_CR4_OSXSAVE_BIT	(18)
#define X86_CR4_OSXSAVE		BIT(X86_CR4_OSXSAVE_BIT)
#define X86_CR4_KL_BIT		(19)
#define X86_CR4_KL		BIT(X86_CR4_KL_BIT)
#define X86_CR4_SMEP_BIT	(20)
#define X86_CR4_SMEP		BIT(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT	(21)
#define X86_CR4_SMAP		BIT(X86_CR4_SMAP_BIT)
#define X86_CR4_PKE_BIT		(22)
#define X86_CR4_PKE		BIT(X86_CR4_PKE_BIT)
#define X86_CR4_CET_BIT		(23)
#define X86_CR4_CET		BIT(X86_CR4_CET_BIT)
#define X86_CR4_PKS_BIT		(24)
#define X86_CR4_PKS		BIT(X86_CR4_PKS_BIT)

#define X86_EFLAGS_CF_BIT	(0)
#define X86_EFLAGS_CF		BIT(X86_EFLAGS_CF_BIT)
#define X86_EFLAGS_FIXED_BIT	(1)
#define X86_EFLAGS_FIXED	BIT(X86_EFLAGS_FIXED_BIT)
#define X86_EFLAGS_PF_BIT	(2)
#define X86_EFLAGS_PF		BIT(X86_EFLAGS_PF_BIT)
/* RESERVED 0			(3) */
#define X86_EFLAGS_AF_BIT	(4)
#define X86_EFLAGS_AF		BIT(X86_EFLAGS_AF_BIT)
/* RESERVED 0			(5) */
#define X86_EFLAGS_ZF_BIT	(6)
#define X86_EFLAGS_ZF		BIT(X86_EFLAGS_ZF_BIT)
#define X86_EFLAGS_SF_BIT	(7)
#define X86_EFLAGS_SF		BIT(X86_EFLAGS_SF_BIT)
#define X86_EFLAGS_TF_BIT	(8)
#define X86_EFLAGS_TF		BIT(X86_EFLAGS_TF_BIT)
#define X86_EFLAGS_IF_BIT	(9)
#define X86_EFLAGS_IF		BIT(X86_EFLAGS_IF_BIT)
#define X86_EFLAGS_DF_BIT	(10)
#define X86_EFLAGS_DF		BIT(X86_EFLAGS_DF_BIT)
#define X86_EFLAGS_OF_BIT	(11)
#define X86_EFLAGS_OF		BIT(X86_EFLAGS_OF_BIT)
#define X86_EFLAGS_IOPL		GENMASK(13, 12)
#define X86_EFLAGS_NT_BIT	(14)
#define X86_EFLAGS_NT		BIT(X86_EFLAGS_NT_BIT)
/* RESERVED 0			(15) */
#define X86_EFLAGS_RF_BIT	(16)
#define X86_EFLAGS_RF		BIT(X86_EFLAGS_RF_BIT)
#define X86_EFLAGS_VM_BIT	(17)
#define X86_EFLAGS_VM		BIT(X86_EFLAGS_VM_BIT)
#define X86_EFLAGS_AC_BIT	(18)
#define X86_EFLAGS_AC		BIT(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT	(19)
#define X86_EFLAGS_VIF		BIT(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT	(20)
#define X86_EFLAGS_VIP		BIT(X86_EFLAGS_VIP_BIT)
#define X86_EFLAGS_ID_BIT	(21)
#define X86_EFLAGS_ID		BIT(X86_EFLAGS_ID_BIT)

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)


/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
	struct cpuid r;
	asm volatile ("cpuid"
		      : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
		      : "0"(function), "2"(index));
	return r;
}

static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
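	/*
	 * EAX of the range's base leaf (e.g. 0x0 or 0x80000000) reports the
	 * highest leaf supported within that range.
	 */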
	u32 level = raw_cpuid(function & 0xf0000000, 0).a;
	if (level < function)
		return (struct cpuid) { 0, 0, 0, 0 };
	return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
	return cpuid_indexed(function, 0);
}

static inline u8 cpuid_maxphyaddr(void)
{
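	/*
	 * Without leaf 0x80000008, assume the architectural default of 36
	 * physical address bits.
	 */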
	if (raw_cpuid(0x80000000, 0).a < 0x80000008)
		return 36;
	return raw_cpuid(0x80000008, 0).a & 0xff;
}

static inline bool is_intel(void)
{
	struct cpuid c = cpuid(0);
	u32 name[4] = {c.b, c.d, c.c };

	return strcmp((char *)name, "GenuineIntel") == 0;
}

#define	CPUID(a, b, c, d) ((((unsigned long long) a) << 32) | (b << 16) | \
			  (c << 8) | d)

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 * 	[63:32] :  input value for EAX
 * 	[31:16] :  input value for ECX
 * 	[15:8]  :  output register
 * 	[7:0]   :  bit position in output register
 */
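
/*
 * For example, X86_FEATURE_XSAVE below is CPUID(0x1, 0, ECX, 26): the feature
 * is reported in bit 26 of ECX when CPUID is executed with EAX=0x1, ECX=0.
 */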

/*
 * Basic leaves, a.k.a. Intel-defined
 */
#define	X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define	X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define	X86_FEATURE_PDCM		(CPUID(0x1, 0, ECX, 15))
#define	X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define X86_FEATURE_X2APIC		(CPUID(0x1, 0, ECX, 21))
#define	X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define	X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define	X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define	X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define	X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define	X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define	X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define	X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define	X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
#define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define	X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define	X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define	X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define	X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define	X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define	X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define	X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define	X86_FEATURE_SHSTK		(CPUID(0x7, 0, ECX, 7))
#define	X86_FEATURE_IBT			(CPUID(0x7, 0, EDX, 20))
#define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define	X86_FEATURE_FLUSH_L1D		(CPUID(0x7, 0, EDX, 28))
#define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))
#define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))

/*
 * KVM-defined leaves
 */
#define	KVM_FEATURE_ASYNC_PF		(CPUID(0x40000001, 0, EAX, 4))
#define	KVM_FEATURE_ASYNC_PF_INT	(CPUID(0x40000001, 0, EAX, 14))

/*
 * Extended leaves, a.k.a. AMD-defined
 */
#define	X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define	X86_FEATURE_PERFCTR_CORE	(CPUID(0x80000001, 0, ECX, 23))
#define	X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define	X86_FEATURE_GBPAGES		(CPUID(0x80000001, 0, EDX, 26))
#define	X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define	X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
#define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
#define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define	X86_FEATURE_LBRV		(CPUID(0x8000000A, 0, EDX, 1))
#define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
#define X86_FEATURE_TSCRATEMSR		(CPUID(0x8000000A, 0, EDX, 4))
#define X86_FEATURE_PAUSEFILTER		(CPUID(0x8000000A, 0, EDX, 10))
#define X86_FEATURE_PFTHRESHOLD		(CPUID(0x8000000A, 0, EDX, 12))
#define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))
#define X86_FEATURE_VNMI		(CPUID(0x8000000A, 0, EDX, 25))
#define	X86_FEATURE_AMD_PMU_V2		(CPUID(0x80000022, 0, EAX, 0))

static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);
	tmp = (u32 *)&c;

	return ((*(tmp + (output_reg % 32))) & (1 << bit));
}
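
/*
 * Illustrative usage sketch (example_* is a hypothetical helper, not part of
 * the original header): tests typically gate feature-specific paths on the
 * CPUID bits above via this_cpu_has().
 */
static inline bool example_cpu_supports_xsave(void)
{
	/* True only when CPUID reports both XSAVE and OSXSAVE. */
	return this_cpu_has(X86_FEATURE_XSAVE) &&
	       this_cpu_has(X86_FEATURE_OSXSAVE);
}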

struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
	u16 limit;
	ulong base;
} __attribute__((packed));

static inline void clac(void)
{
	asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
	asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
	unsigned val;

	asm volatile ("mov %%cs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ds(void)
{
	unsigned val;

	asm volatile ("mov %%ds, %0" : "=mr"(val));
	return val;
}

static inline u16 read_es(void)
{
	unsigned val;

	asm volatile ("mov %%es, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ss(void)
{
	unsigned val;

	asm volatile ("mov %%ss, %0" : "=mr"(val));
	return val;
}

static inline u16 read_fs(void)
{
	unsigned val;

	asm volatile ("mov %%fs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_gs(void)
{
	unsigned val;

	asm volatile ("mov %%gs, %0" : "=mr"(val));
	return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
	asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
	asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
	asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
	asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
	asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
	asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;
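	/* X86_EFLAGS_IOPL / 3 == BIT(12), i.e. this scales 'iopl' into bits 13:12. */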
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

/*
 * Don't use the safe variants for rdmsr() or wrmsr().  The exception fixup
 * infrastructure uses per-CPU data and thus consumes GS.base.  Various tests
 * temporarily modify MSR_GS_BASE and will explode when trying to determine
 * whether or not RDMSR/WRMSR faulted.
 */
static inline u64 rdmsr(u32 index)
{
	u32 a, d;
	asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
	return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;
	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

#define __rdreg64_safe(fep, insn, index, val)				\
({									\
	uint32_t a, d;							\
	int vector;							\
									\
	vector = __asm_safe_out2(fep, insn, "=a"(a), "=d"(d), "c"(index));\
									\
	if (vector)							\
		*(val) = 0;						\
	else								\
		*(val) = (uint64_t)a | ((uint64_t)d << 32);		\
	vector;								\
})

#define rdreg64_safe(insn, index, val)					\
	__rdreg64_safe("", insn, index, val)

#define __wrreg64_safe(fep, insn, index, val)				\
({									\
	uint32_t eax = (val), edx = (val) >> 32;			\
									\
	__asm_safe(fep, insn, "a" (eax), "d" (edx), "c" (index));	\
})

#define wrreg64_safe(insn, index, val)					\
	__wrreg64_safe("", insn, index, val)

static inline int rdmsr_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdmsr", index, val);
}

static inline int wrmsr_safe(u32 index, u64 val)
{
	return wrreg64_safe("wrmsr", index, val);
}

static inline int wrmsr_fep_safe(u32 index, u64 val)
{
	return __wrreg64_safe(KVM_FEP, "wrmsr", index, val);
}

static inline int rdpmc_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdpmc", index, val);
}

static inline uint64_t rdpmc(uint32_t index)
{
	uint64_t val;
	int vector = rdpmc_safe(index, &val);

	assert_msg(!vector, "Unexpected %s on RDPMC(%" PRId32 ")",
		   exception_mnemonic(vector), index);
	return val;
}

static inline int xgetbv_safe(u32 index, u64 *result)
{
	return rdreg64_safe(".byte 0x0f,0x01,0xd0", index, result);
}

static inline int xsetbv_safe(u32 index, u64 value)
{
	return wrreg64_safe(".byte 0x0f,0x01,0xd1", index, value);
}

static inline int write_cr0_safe(ulong val)
{
	return asm_safe("mov %0,%%cr0", "r" (val));
}

static inline void write_cr0(ulong val)
{
	int vector = write_cr0_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR0 = %lx",
		   vector, val);
}

static inline ulong read_cr0(void)
{
	ulong val;
	asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr2(ulong val)
{
	asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
	ulong val;
	asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
	return val;
}

static inline int write_cr3_safe(ulong val)
{
	return asm_safe("mov %0,%%cr3", "r" (val));
}

static inline void write_cr3(ulong val)
{
	int vector = write_cr3_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR3 = %lx",
		   vector, val);
}

static inline ulong read_cr3(void)
{
	ulong val;
	asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void update_cr3(void *cr3)
{
	write_cr3((ulong)cr3);
}

static inline int write_cr4_safe(ulong val)
{
	return asm_safe("mov %0,%%cr4", "r" (val));
}

static inline void write_cr4(ulong val)
{
	int vector = write_cr4_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR4 = %lx",
		   vector, val);
}

static inline ulong read_cr4(void)
{
	ulong val;
	asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr8(ulong val)
{
	asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
	ulong val;
	asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(u16 val)
{
	asm volatile ("lldt %0" : : "rm"(val));
}

static inline u16 sldt(void)
{
	u16 val;
	asm volatile ("sldt %0" : "=rm"(val));
	return val;
}

static inline void ltr(u16 val)
{
	asm volatile ("ltr %0" : : "rm"(val));
}

static inline u16 str(void)
{
	u16 val;
	asm volatile ("str %0" : "=rm"(val));
	return val;
}

static inline void write_dr0(void *val)
{
	asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
}

static inline void write_dr1(void *val)
{
	asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
}

static inline void write_dr2(void *val)
{
	asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
}

static inline void write_dr3(void *val)
{
	asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
}

static inline void write_dr6(ulong val)
{
	asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
	ulong val;
	asm volatile ("mov %%dr6, %0" : "=r"(val));
	return val;
}

static inline void write_dr7(ulong val)
{
	asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
	ulong val;
	asm volatile ("mov %%dr7, %0" : "=r"(val));
	return val;
}

static inline void pause(void)
{
	asm volatile ("pause");
}

static inline void cli(void)
{
	asm volatile ("cli");
}

/*
 * See also safe_halt().
 */
static inline void sti(void)
{
	asm volatile ("sti");
}

/*
 * Enable interrupts and ensure that interrupts are evaluated upon return from
 * this function, i.e. execute a nop to consume the STI interrupt shadow.
 */
static inline void sti_nop(void)
{
	asm volatile ("sti; nop");
}

/*
 * Enable interrupts for one instruction (nop), to allow the CPU to process all
 * interrupts that are already pending.
 */
static inline void sti_nop_cli(void)
{
	asm volatile ("sti; nop; cli");
}

static inline unsigned long long rdrand(void)
{
	long long r;

	asm volatile("rdrand %0\n\t"
		     "jc 1f\n\t"
		     "mov $0, %0\n\t"
		     "1:\n\t" : "=r" (r));
	return r;
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}
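
/*
 * Illustrative sketch (example_* is a hypothetical helper, not part of the
 * original header): time a code region with the fenced variant so that
 * neither TSC read is reordered around the measured work.
 */
static inline unsigned long long example_cycles_elapsed(void (*work)(void))
{
	unsigned long long start = fenced_rdtsc();

	work();
	return fenced_rdtsc() - start;
}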

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	wrmsr(MSR_IA32_TSC, tsc);
}


static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}


static inline int invpcid_safe(unsigned long type, void *desc)
{
	/* invpcid (%rax), %rbx */
	return asm_safe(".byte 0x66,0x0f,0x38,0x82,0x18", "a" (desc), "b" (type));
}

/*
 * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
 * intended to be a wake event arrives *after* HLT is executed.  Modern CPUs,
 * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
 * instruction after STI, *if* RFLAGS.IF=0 before STI.  Note, Intel CPUs may
 * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
 */
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}
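
/*
 * Illustrative usage sketch (example_* and its callback are hypothetical, not
 * part of the original header): with IRQs masked, arm a wake event and halt.
 * The STI shadow in safe_halt() guarantees the event cannot be delivered, and
 * thus cannot be lost, before HLT executes.
 */
static inline void example_halt_until_irq(void (*arm_wake_event)(void))
{
	cli();
	arm_wake_event();
	safe_halt();
}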

static inline u32 read_pkru(void)
{
	unsigned int eax, edx;
	unsigned int ecx = 0;
	unsigned int pkru;

	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (eax), "=d" (edx)
		     : "c" (ecx));
	pkru = eax;
	return pkru;
}

static inline void write_pkru(u32 pkru)
{
	unsigned int eax = pkru;
	unsigned int ecx = 0;
	unsigned int edx = 0;

	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline bool is_canonical(u64 addr)
{
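	/* CPUID.80000008H:EAX[15:8] reports the number of implemented virtual address bits. */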
	int va_width = (raw_cpuid(0x80000008, 0).a & 0xff00) >> 8;
	int shift_amt = 64 - va_width;

	return (s64)(addr << shift_amt) >> shift_amt == addr;
}

static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("lock; btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("lock; bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
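	/* Toggling CR4.PGE flushes the entire TLB, including global translations. */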
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline void generate_non_canonical_gp(void)
{
	*(volatile u64 *)NONCANONICAL = 0;
}

static inline void generate_ud(void)
{
	asm volatile ("ud2");
}

static inline void generate_de(void)
{
	asm volatile (
		"xor %%eax, %%eax\n\t"
		"xor %%ebx, %%ebx\n\t"
		"xor %%edx, %%edx\n\t"
		"idiv %%ebx\n\t"
		::: "eax", "ebx", "edx");
}

static inline void generate_bp(void)
{
	asm volatile ("int3");
}

static inline void generate_single_step_db(void)
{
	write_rflags(read_rflags() | X86_EFLAGS_TF);
	asm volatile("nop");
}

static inline uint64_t generate_usermode_ac(void)
{
	/*
	 * Trigger an #AC by writing 8 bytes to a 4-byte aligned address.
	 * Disclaimer: It is assumed that the stack pointer is aligned
	 * on a 16-byte boundary as x86_64 stacks should be.
	 */
	asm volatile("movq $0, -0x4(%rsp)");

	return 0;
}

/*
 * Switch from 64-bit to 32-bit mode and generate #OF via INTO.  Note, if RIP
 * or RSP holds a 64-bit value, this helper will NOT generate #OF.
 */
static inline void generate_of(void)
{
	struct far_pointer32 fp = {
		.offset = (uintptr_t)&&into,
		.selector = KERNEL_CS32,
	};
	uintptr_t rsp;

	asm volatile ("mov %%rsp, %0" : "=r"(rsp));

	if (fp.offset != (uintptr_t)&&into) {
		printf("Code address too high.\n");
		return;
	}
	if ((u32)rsp != rsp) {
		printf("Stack address too high.\n");
		return;
	}

	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
	return;
into:
	asm volatile (".code32;"
		      "movl $0x7fffffff, %eax;"
		      "addl %eax, %eax;"
		      "into;"
		      "lret;"
		      ".code64");
	__builtin_unreachable();
}

static inline void fnop(void)
{
	asm volatile("fnop");
}

/* If CR0.TS is set in L2, #NM is generated. */
static inline void generate_cr0_ts_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_EM) | X86_CR0_TS);
	fnop();
}

/* If CR0.TS is cleared and CR0.EM is set, #NM is generated. */
static inline void generate_cr0_em_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_TS) | X86_CR0_EM);
	fnop();
}

#endif