/* kvm-unit-tests: lib/x86/processor.h */
#ifndef _X86_PROCESSOR_H_
#define _X86_PROCESSOR_H_

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <bitops.h>
#include <stdint.h>

#define NONCANONICAL	0xaaaaaaaaaaaaaaaaull

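/*
 * Register-name prefix, operand-size suffix, and word size (in bytes) for
 * inline assembly that must build for both 64-bit and 32-bit targets.
 */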
#ifdef __x86_64__
#  define R "r"
#  define W "q"
#  define S "8"
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif

#define DB_VECTOR 1
#define BP_VECTOR 3
#define UD_VECTOR 6
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define AC_VECTOR 17
#define CP_VECTOR 21

#define X86_CR0_PE_BIT		(0)
#define X86_CR0_PE		BIT(X86_CR0_PE_BIT)
#define X86_CR0_MP_BIT		(1)
#define X86_CR0_MP		BIT(X86_CR0_MP_BIT)
#define X86_CR0_EM_BIT		(2)
#define X86_CR0_EM		BIT(X86_CR0_EM_BIT)
#define X86_CR0_TS_BIT		(3)
#define X86_CR0_TS		BIT(X86_CR0_TS_BIT)
#define X86_CR0_ET_BIT		(4)
#define X86_CR0_ET		BIT(X86_CR0_ET_BIT)
#define X86_CR0_NE_BIT		(5)
#define X86_CR0_NE		BIT(X86_CR0_NE_BIT)
#define X86_CR0_WP_BIT		(16)
#define X86_CR0_WP		BIT(X86_CR0_WP_BIT)
#define X86_CR0_AM_BIT		(18)
#define X86_CR0_AM		BIT(X86_CR0_AM_BIT)
#define X86_CR0_NW_BIT		(29)
#define X86_CR0_NW		BIT(X86_CR0_NW_BIT)
#define X86_CR0_CD_BIT		(30)
#define X86_CR0_CD		BIT(X86_CR0_CD_BIT)
#define X86_CR0_PG_BIT		(31)
#define X86_CR0_PG		BIT(X86_CR0_PG_BIT)

#define X86_CR3_PCID_MASK	GENMASK(11, 0)

#define X86_CR4_VME_BIT		(0)
#define X86_CR4_VME		BIT(X86_CR4_VME_BIT)
#define X86_CR4_PVI_BIT		(1)
#define X86_CR4_PVI		BIT(X86_CR4_PVI_BIT)
#define X86_CR4_TSD_BIT		(2)
#define X86_CR4_TSD		BIT(X86_CR4_TSD_BIT)
#define X86_CR4_DE_BIT		(3)
#define X86_CR4_DE		BIT(X86_CR4_DE_BIT)
#define X86_CR4_PSE_BIT		(4)
#define X86_CR4_PSE		BIT(X86_CR4_PSE_BIT)
#define X86_CR4_PAE_BIT		(5)
#define X86_CR4_PAE		BIT(X86_CR4_PAE_BIT)
#define X86_CR4_MCE_BIT		(6)
#define X86_CR4_MCE		BIT(X86_CR4_MCE_BIT)
#define X86_CR4_PGE_BIT		(7)
#define X86_CR4_PGE		BIT(X86_CR4_PGE_BIT)
#define X86_CR4_PCE_BIT		(8)
#define X86_CR4_PCE		BIT(X86_CR4_PCE_BIT)
#define X86_CR4_OSFXSR_BIT	(9)
#define X86_CR4_OSFXSR		BIT(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT	(10)
#define X86_CR4_OSXMMEXCPT	BIT(X86_CR4_OSXMMEXCPT_BIT)
#define X86_CR4_UMIP_BIT	(11)
#define X86_CR4_UMIP		BIT(X86_CR4_UMIP_BIT)
#define X86_CR4_LA57_BIT	(12)
#define X86_CR4_LA57		BIT(X86_CR4_LA57_BIT)
#define X86_CR4_VMXE_BIT	(13)
#define X86_CR4_VMXE		BIT(X86_CR4_VMXE_BIT)
#define X86_CR4_SMXE_BIT	(14)
#define X86_CR4_SMXE		BIT(X86_CR4_SMXE_BIT)
/* UNUSED			(15) */
#define X86_CR4_FSGSBASE_BIT	(16)
#define X86_CR4_FSGSBASE	BIT(X86_CR4_FSGSBASE_BIT)
#define X86_CR4_PCIDE_BIT	(17)
#define X86_CR4_PCIDE		BIT(X86_CR4_PCIDE_BIT)
#define X86_CR4_OSXSAVE_BIT	(18)
#define X86_CR4_OSXSAVE		BIT(X86_CR4_OSXSAVE_BIT)
#define X86_CR4_KL_BIT		(19)
#define X86_CR4_KL		BIT(X86_CR4_KL_BIT)
#define X86_CR4_SMEP_BIT	(20)
#define X86_CR4_SMEP		BIT(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT	(21)
#define X86_CR4_SMAP		BIT(X86_CR4_SMAP_BIT)
#define X86_CR4_PKE_BIT		(22)
#define X86_CR4_PKE		BIT(X86_CR4_PKE_BIT)
#define X86_CR4_CET_BIT		(23)
#define X86_CR4_CET		BIT(X86_CR4_CET_BIT)
#define X86_CR4_PKS_BIT		(24)
#define X86_CR4_PKS		BIT(X86_CR4_PKS_BIT)

#define X86_EFLAGS_CF_BIT	(0)
#define X86_EFLAGS_CF		BIT(X86_EFLAGS_CF_BIT)
#define X86_EFLAGS_FIXED_BIT	(1)
#define X86_EFLAGS_FIXED	BIT(X86_EFLAGS_FIXED_BIT)
#define X86_EFLAGS_PF_BIT	(2)
#define X86_EFLAGS_PF		BIT(X86_EFLAGS_PF_BIT)
/* RESERVED 0			(3) */
#define X86_EFLAGS_AF_BIT	(4)
#define X86_EFLAGS_AF		BIT(X86_EFLAGS_AF_BIT)
/* RESERVED 0			(5) */
#define X86_EFLAGS_ZF_BIT	(6)
#define X86_EFLAGS_ZF		BIT(X86_EFLAGS_ZF_BIT)
#define X86_EFLAGS_SF_BIT	(7)
#define X86_EFLAGS_SF		BIT(X86_EFLAGS_SF_BIT)
#define X86_EFLAGS_TF_BIT	(8)
#define X86_EFLAGS_TF		BIT(X86_EFLAGS_TF_BIT)
#define X86_EFLAGS_IF_BIT	(9)
#define X86_EFLAGS_IF		BIT(X86_EFLAGS_IF_BIT)
#define X86_EFLAGS_DF_BIT	(10)
#define X86_EFLAGS_DF		BIT(X86_EFLAGS_DF_BIT)
#define X86_EFLAGS_OF_BIT	(11)
#define X86_EFLAGS_OF		BIT(X86_EFLAGS_OF_BIT)
#define X86_EFLAGS_IOPL		GENMASK(13, 12)
#define X86_EFLAGS_NT_BIT	(14)
#define X86_EFLAGS_NT		BIT(X86_EFLAGS_NT_BIT)
/* RESERVED 0			(15) */
#define X86_EFLAGS_RF_BIT	(16)
#define X86_EFLAGS_RF		BIT(X86_EFLAGS_RF_BIT)
#define X86_EFLAGS_VM_BIT	(17)
#define X86_EFLAGS_VM		BIT(X86_EFLAGS_VM_BIT)
#define X86_EFLAGS_AC_BIT	(18)
#define X86_EFLAGS_AC		BIT(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT	(19)
#define X86_EFLAGS_VIF		BIT(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT	(20)
#define X86_EFLAGS_VIP		BIT(X86_EFLAGS_VIP_BIT)
#define X86_EFLAGS_ID_BIT	(21)
#define X86_EFLAGS_ID		BIT(X86_EFLAGS_ID_BIT)

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
	struct cpuid r;
	asm volatile ("cpuid"
		      : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
		      : "0"(function), "2"(index));
	return r;
}

static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
	u32 level = raw_cpuid(function & 0xf0000000, 0).a;

	if (level < function)
		return (struct cpuid) { 0, 0, 0, 0 };
	return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
	return cpuid_indexed(function, 0);
}

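/*
 * Per the SDM, software may assume a MAXPHYADDR of 36 bits on PAE-capable
 * CPUs that do not report it via CPUID leaf 0x80000008.
 */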
static inline u8 cpuid_maxphyaddr(void)
{
	if (raw_cpuid(0x80000000, 0).a < 0x80000008)
		return 36;
	return raw_cpuid(0x80000008, 0).a & 0xff;
}

static inline bool is_intel(void)
{
	struct cpuid c = cpuid(0);
	u32 name[4] = {c.b, c.d, c.c };

	return strcmp((char *)name, "GenuineIntel") == 0;
}

#define	CPUID(a, b, c, d) ((((unsigned long long) a) << 32) | (b << 16) | \
			  (c << 8) | d)

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 *	[63:32] :  input value for EAX
 *	[31:16] :  input value for ECX
 *	[15:8]  :  output register
 *	[7:0]   :  bit position in output register
 */
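
/*
 * For example, X86_FEATURE_VMX == CPUID(0x1, 0, ECX, 5) encodes "execute
 * CPUID with EAX=1 (and ECX=0), then test bit 5 of the ECX output".
 */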

/*
 * Basic leaves, a.k.a. Intel-defined
 */
#define	X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define	X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define	X86_FEATURE_PDCM		(CPUID(0x1, 0, ECX, 15))
#define	X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define X86_FEATURE_X2APIC		(CPUID(0x1, 0, ECX, 21))
#define	X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define	X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define	X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define	X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define	X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define	X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define	X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define	X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define	X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
#define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define	X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define	X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define	X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define	X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define	X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define	X86_FEATURE_SHSTK		(CPUID(0x7, 0, ECX, 7))
#define	X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define	X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))
#define	X86_FEATURE_IBT			(CPUID(0x7, 0, EDX, 20))
#define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define	X86_FEATURE_FLUSH_L1D		(CPUID(0x7, 0, EDX, 28))
#define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))

/*
 * Extended leaves, a.k.a. AMD-defined
 */
#define	X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define	X86_FEATURE_PERFCTR_CORE	(CPUID(0x80000001, 0, ECX, 23))
#define	X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define	X86_FEATURE_GBPAGES		(CPUID(0x80000001, 0, EDX, 26))
#define	X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define	X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
#define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
#define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define	X86_FEATURE_LBRV		(CPUID(0x8000000A, 0, EDX, 1))
#define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
#define X86_FEATURE_TSCRATEMSR		(CPUID(0x8000000A, 0, EDX, 4))
#define X86_FEATURE_PAUSEFILTER		(CPUID(0x8000000A, 0, EDX, 10))
#define X86_FEATURE_PFTHRESHOLD		(CPUID(0x8000000A, 0, EDX, 12))
#define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))
#define X86_FEATURE_VNMI		(CPUID(0x8000000A, 0, EDX, 25))
#define	X86_FEATURE_AMD_PMU_V2		(CPUID(0x80000022, 0, EAX, 0))

static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);
	tmp = (u32 *)&c;

	return tmp[output_reg] & (1u << bit);
}
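
/*
 * Typical usage (sketch): gate a test on a feature bit before touching the
 * associated architectural state, e.g.:
 *
 *	if (this_cpu_has(X86_FEATURE_XSAVE))
 *		write_cr4(read_cr4() | X86_CR4_OSXSAVE);
 */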

struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
	u16 limit;
	ulong base;
} __attribute__((packed));

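/*
 * CLAC and STAC are emitted as raw opcode bytes (0F 01 CA/CB) so that the
 * file assembles even with toolchains that predate SMAP support.
 */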
static inline void clac(void)
{
	asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
	asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
	unsigned val;

	asm volatile ("mov %%cs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ds(void)
{
	unsigned val;

	asm volatile ("mov %%ds, %0" : "=mr"(val));
	return val;
}

static inline u16 read_es(void)
{
	unsigned val;

	asm volatile ("mov %%es, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ss(void)
{
	unsigned val;

	asm volatile ("mov %%ss, %0" : "=mr"(val));
	return val;
}

static inline u16 read_fs(void)
{
	unsigned val;

	asm volatile ("mov %%fs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_gs(void)
{
	unsigned val;

	asm volatile ("mov %%gs, %0" : "=mr"(val));
	return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
	asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
	asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
	asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
	asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
	asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
	asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

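/*
 * Set EFLAGS.IOPL.  X86_EFLAGS_IOPL is a two-bit field (bits 13:12), and
 * X86_EFLAGS_IOPL / 3 == BIT(12), so multiplying by iopl (0-3) shifts the
 * requested privilege level into place.
 */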
static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

/*
 * Don't use the safe variants for rdmsr() or wrmsr().  The exception fixup
 * infrastructure uses per-CPU data and thus consumes GS.base.  Various tests
 * temporarily modify MSR_GS_BASE and will explode when trying to determine
 * whether or not RDMSR/WRMSR faulted.
 */
static inline u64 rdmsr(u32 index)
{
	u32 a, d;
	asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
	return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;
	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

#define rdreg64_safe(insn, index, val)					\
({									\
	uint32_t a, d;							\
	int vector;							\
									\
	vector = asm_safe_out2(insn, "=a"(a), "=d"(d), "c"(index));	\
									\
	if (vector)							\
		*(val) = 0;						\
	else								\
		*(val) = (uint64_t)a | ((uint64_t)d << 32);		\
	vector;								\
})

static inline int rdmsr_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdmsr", index, val);
}

static inline int wrmsr_safe(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;

	return asm_safe("wrmsr", "a"(a), "d"(d), "c"(index));
}
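
/*
 * Usage sketch (hypothetical test code): probe an MSR without risking an
 * unhandled fault; accessing a non-existent MSR delivers #GP.
 *
 *	u64 val;
 *	if (rdmsr_safe(MSR_IA32_SPEC_CTRL, &val) == GP_VECTOR)
 *		report_skip("IA32_SPEC_CTRL not supported");
 */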

static inline int rdpmc_safe(u32 index, uint64_t *val)
{
	return rdreg64_safe("rdpmc", index, val);
}

static inline uint64_t rdpmc(uint32_t index)
{
	uint64_t val;
	int vector = rdpmc_safe(index, &val);

	assert_msg(!vector, "Unexpected %s on RDPMC(%" PRId32 ")",
		   exception_mnemonic(vector), index);
	return val;
}

static inline int write_cr0_safe(ulong val)
{
	return asm_safe("mov %0,%%cr0", "r" (val));
}

static inline void write_cr0(ulong val)
{
	int vector = write_cr0_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR0 = %lx",
		   vector, val);
}

static inline ulong read_cr0(void)
{
	ulong val;
	asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr2(ulong val)
{
	asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
	ulong val;
	asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
	return val;
}

static inline int write_cr3_safe(ulong val)
{
	return asm_safe("mov %0,%%cr3", "r" (val));
}

static inline void write_cr3(ulong val)
{
	int vector = write_cr3_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR3 = %lx",
		   vector, val);
}

static inline ulong read_cr3(void)
{
	ulong val;
	asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void update_cr3(void *cr3)
{
	write_cr3((ulong)cr3);
}

static inline int write_cr4_safe(ulong val)
{
	return asm_safe("mov %0,%%cr4", "r" (val));
}

static inline void write_cr4(ulong val)
{
	int vector = write_cr4_safe(val);

	assert_msg(!vector, "Unexpected fault '%d' writing CR4 = %lx",
		   vector, val);
}

static inline ulong read_cr4(void)
{
	ulong val;
	asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr8(ulong val)
{
	asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
	ulong val;
	asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(u16 val)
{
	asm volatile ("lldt %0" : : "rm"(val));
}

static inline u16 sldt(void)
{
	u16 val;
	asm volatile ("sldt %0" : "=rm"(val));
	return val;
}

static inline void ltr(u16 val)
{
	asm volatile ("ltr %0" : : "rm"(val));
}

static inline u16 str(void)
{
	u16 val;
	asm volatile ("str %0" : "=rm"(val));
	return val;
}

static inline void write_dr0(void *val)
{
	asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
}

static inline void write_dr1(void *val)
{
	asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
}

static inline void write_dr2(void *val)
{
	asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
}

static inline void write_dr3(void *val)
{
	asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
}

static inline void write_dr6(ulong val)
{
	asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
	ulong val;
	asm volatile ("mov %%dr6, %0" : "=r"(val));
	return val;
}

static inline void write_dr7(ulong val)
{
	asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
	ulong val;
	asm volatile ("mov %%dr7, %0" : "=r"(val));
	return val;
}

static inline void pause(void)
{
	asm volatile ("pause");
}

static inline void cli(void)
{
	asm volatile ("cli");
}

static inline void sti(void)
{
	asm volatile ("sti");
}

static inline unsigned long long rdrand(void)
{
	long long r;

	asm volatile("rdrand %0\n\t"
		     "jc 1f\n\t"
		     "mov $0, %0\n\t"
		     "1:\n\t" : "=r" (r));
	return r;
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}
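
/*
 * Timing sketch: bracket the region of interest with fenced_rdtsc() so the
 * measurement is not skewed by out-of-order execution, e.g.:
 *
 *	u64 start = fenced_rdtsc();
 *	... code under test ...
 *	u64 cycles = fenced_rdtsc() - start;
 */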

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	wrmsr(MSR_IA32_TSC, tsc);
}

static inline void irq_disable(void)
{
	asm volatile("cli");
}

/*
 * Note that irq_enable() does not guarantee that the instruction meant to
 * run in the STI interrupt shadow actually does so, due to the vagaries of
 * compiler optimizations.  If you need the shadow, use a single asm with
 * "sti" and the instruction after it.
 */
static inline void irq_enable(void)
{
	asm volatile("sti");
}

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}

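/*
 * RDPKRU (0F 01 EE) and WRPKRU (0F 01 EF) are emitted as raw opcode bytes
 * for the benefit of older assemblers; both require ECX=0, and WRPKRU also
 * requires EDX=0.
 */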
static inline u32 read_pkru(void)
{
	unsigned int eax, edx;
	unsigned int ecx = 0;
	unsigned int pkru;

	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (eax), "=d" (edx)
		     : "c" (ecx));
	pkru = eax;
	return pkru;
}

static inline void write_pkru(u32 pkru)
{
	unsigned int eax = pkru;
	unsigned int ecx = 0;
	unsigned int edx = 0;

	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline bool is_canonical(u64 addr)
{
	int va_width = (raw_cpuid(0x80000008, 0).a & 0xff00) >> 8;
	int shift_amt = 64 - va_width;

	return (s64)(addr << shift_amt) >> shift_amt == addr;
}
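
/*
 * For example, with a 48-bit virtual address width the upper 16 bits must
 * be a sign-extension of bit 47, so NONCANONICAL (0xaaaa...) fails the
 * check while 0xffff800000000000 passes.
 */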

static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

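/*
 * Flush the entire TLB, including global entries, by toggling CR4.PGE;
 * per the SDM, any write that changes CR4.PGE invalidates all TLB entries.
 */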
static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline void generate_non_canonical_gp(void)
{
	*(volatile u64 *)NONCANONICAL = 0;
}

static inline void generate_ud(void)
{
	asm volatile ("ud2");
}

static inline void generate_de(void)
{
	asm volatile (
		"xor %%eax, %%eax\n\t"
		"xor %%ebx, %%ebx\n\t"
		"xor %%edx, %%edx\n\t"
		"idiv %%ebx\n\t"
		::: "eax", "ebx", "edx");
}

static inline void generate_bp(void)
{
	asm volatile ("int3");
}

static inline void generate_single_step_db(void)
{
	write_rflags(read_rflags() | X86_EFLAGS_TF);
	asm volatile("nop");
}

static inline uint64_t generate_usermode_ac(void)
{
	/*
	 * Trigger an #AC by writing 8 bytes to a 4-byte aligned address.
	 * Disclaimer: It is assumed that the stack pointer is aligned
	 * on a 16-byte boundary as x86_64 stacks should be.
	 */
	asm volatile("movq $0, -0x4(%rsp)");

	return 0;
}

/*
 * Switch from 64-bit to 32-bit mode and generate #OF via INTO.  Note, if RIP
 * or RSP holds a 64-bit value, this helper will NOT generate #OF.
 */
static inline void generate_of(void)
{
	struct far_pointer32 fp = {
		.offset = (uintptr_t)&&into,
		.selector = KERNEL_CS32,
	};
	uintptr_t rsp;

	asm volatile ("mov %%rsp, %0" : "=r"(rsp));

	if (fp.offset != (uintptr_t)&&into) {
		printf("Code address too high.\n");
		return;
	}
	if ((u32)rsp != rsp) {
		printf("Stack address too high.\n");
		return;
	}

	asm goto ("lcall *%0" : : "m" (fp) : "rax" : into);
	return;
into:
	asm volatile (".code32;"
		      "movl $0x7fffffff, %eax;"
		      "addl %eax, %eax;"
		      "into;"
		      "lret;"
		      ".code64");
	__builtin_unreachable();
}

static inline void fnop(void)
{
	asm volatile("fnop");
}

/* If CR0.TS is set (e.g. in L2), executing an FPU instruction generates #NM. */
static inline void generate_cr0_ts_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_EM) | X86_CR0_TS);
	fnop();
}

/* If CR0.TS is cleared and CR0.EM is set, an FPU instruction generates #NM. */
static inline void generate_cr0_em_nm(void)
{
	write_cr0((read_cr0() & ~X86_CR0_TS) | X86_CR0_EM);
	fnop();
}

#endif /* _X86_PROCESSOR_H_ */