/* kvm-unit-tests: lib/x86/processor.h */

#ifndef LIBCFLAT_PROCESSOR_H
#define LIBCFLAT_PROCESSOR_H

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <stdint.h>

#define NONCANONICAL            0xaaaaaaaaaaaaaaaaull

/*
 * Width-agnostic assembly helpers: R is the native register prefix
 * ("r" for 64-bit, "e" for 32-bit), W the instruction width suffix
 * ("q" or "l"), and S the word size in bytes, so inline asm can be
 * shared between 32-bit and 64-bit builds.
 */
#ifdef __x86_64__
#  define R "r"
#  define W "q"
#  define S "8"
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif

#define DB_VECTOR 1
#define BP_VECTOR 3
#define UD_VECTOR 6
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define AC_VECTOR 17

#define X86_CR0_PE	0x00000001
#define X86_CR0_MP	0x00000002
#define X86_CR0_EM	0x00000004
#define X86_CR0_TS	0x00000008
#define X86_CR0_WP	0x00010000
#define X86_CR0_AM	0x00040000
#define X86_CR0_NW	0x20000000
#define X86_CR0_CD	0x40000000
#define X86_CR0_PG	0x80000000

#define X86_CR3_PCID_MASK 0x00000fff

#define X86_CR4_TSD	0x00000004
#define X86_CR4_DE	0x00000008
#define X86_CR4_PSE	0x00000010
#define X86_CR4_PAE	0x00000020
#define X86_CR4_MCE	0x00000040
#define X86_CR4_PGE	0x00000080
#define X86_CR4_PCE	0x00000100
#define X86_CR4_UMIP	0x00000800
#define X86_CR4_LA57	0x00001000
#define X86_CR4_VMXE	0x00002000
#define X86_CR4_PCIDE	0x00020000
#define X86_CR4_OSXSAVE	0x00040000
#define X86_CR4_SMEP	0x00100000
#define X86_CR4_SMAP	0x00200000
#define X86_CR4_PKE	0x00400000
#define X86_CR4_CET	0x00800000
#define X86_CR4_PKS	0x01000000

#define X86_EFLAGS_CF    0x00000001
#define X86_EFLAGS_FIXED 0x00000002
#define X86_EFLAGS_PF    0x00000004
#define X86_EFLAGS_AF    0x00000010
#define X86_EFLAGS_ZF    0x00000040
#define X86_EFLAGS_SF    0x00000080
#define X86_EFLAGS_TF    0x00000100
#define X86_EFLAGS_IF    0x00000200
#define X86_EFLAGS_DF    0x00000400
#define X86_EFLAGS_OF    0x00000800
#define X86_EFLAGS_IOPL  0x00003000
#define X86_EFLAGS_NT    0x00004000
#define X86_EFLAGS_VM    0x00020000
#define X86_EFLAGS_AC    0x00040000

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
    struct cpuid r;
    asm volatile ("cpuid"
                  : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
                  : "0"(function), "2"(index));
    return r;
}

/* Like raw_cpuid(), but returns all zeroes if the leaf is unsupported. */
static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
    u32 level = raw_cpuid(function & 0xf0000000, 0).a;
    if (level < function)
        return (struct cpuid) { 0, 0, 0, 0 };
    return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
    return cpuid_indexed(function, 0);
}

/* Physical address width (MAXPHYADDR); 36 if CPUID.80000008h is absent. */
static inline u8 cpuid_maxphyaddr(void)
{
    if (raw_cpuid(0x80000000, 0).a < 0x80000008)
        return 36;
    return raw_cpuid(0x80000008, 0).a & 0xff;
}
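
/*
 * Usage sketch (illustrative, not part of the original API): tests
 * commonly derive the first reserved physical-address bit in a
 * paging-structure entry from MAXPHYADDR:
 *
 *	u64 first_rsvd_bit = 1ull << cpuid_maxphyaddr();
 */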

#define	CPUID(a, b, c, d) ((((unsigned long long)(a)) << 32) | ((b) << 16) | \
			  ((c) << 8) | (d))

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 * 	[63:32] :  input value for EAX
 * 	[31:16] :  input value for ECX
 * 	[15:8]  :  output register
 * 	[7:0]   :  bit position in output register
 */
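
/*
 * Worked example: X86_FEATURE_XSAVE below is CPUID(0x1, 0, ECX, 26),
 * which packs to 0x000000010000021a (EAX input 0x1, ECX input 0,
 * output register ECX = 2, bit 26).
 */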

/*
 * Intel CPUID features
 */
#define	X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define	X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define	X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define	X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define	X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define	X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define	X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define	X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define	X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define	X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define	X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define	X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
#define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define	X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define	X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define	X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define	X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define	X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define	X86_FEATURE_SHSTK		(CPUID(0x7, 0, ECX, 7))
#define	X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define	X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))
#define	X86_FEATURE_IBT			(CPUID(0x7, 0, EDX, 20))
#define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))
#define	X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define	X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
#define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))

/*
 * AMD CPUID features
 */
#define	X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define	X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))

static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);

	/* Treat the struct as an array indexed by enum cpuid_output_regs. */
	tmp = (u32 *)&c;

	return ((*(tmp + output_reg)) & (1u << bit));
}
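
/*
 * Usage sketch (illustrative):
 *
 *	if (this_cpu_has(X86_FEATURE_PCID))
 *		write_cr4(read_cr4() | X86_CR4_PCIDE);
 */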

struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
    u16 limit;
    ulong base;
} __attribute__((packed));

static inline void barrier(void)
{
    asm volatile ("" : : : "memory");
}

static inline void clac(void)
{
    /* 0f 01 ca = clac; encoded as bytes for old assemblers */
    asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
    /* 0f 01 cb = stac */
    asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
    unsigned val;

    asm volatile ("mov %%cs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ds(void)
{
    unsigned val;

    asm volatile ("mov %%ds, %0" : "=mr"(val));
    return val;
}

static inline u16 read_es(void)
{
    unsigned val;

    asm volatile ("mov %%es, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ss(void)
{
    unsigned val;

    asm volatile ("mov %%ss, %0" : "=mr"(val));
    return val;
}

static inline u16 read_fs(void)
{
    unsigned val;

    asm volatile ("mov %%fs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_gs(void)
{
    unsigned val;

    asm volatile ("mov %%gs, %0" : "=mr"(val));
    return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
    asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
    asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
    asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
    asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
    asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
    asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;

	/* X86_EFLAGS_IOPL / 3 == 0x1000, the low bit of the IOPL field */
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}
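
/*
 * Worked example: set_iopl(3) sets EFLAGS.IOPL to 3, i.e.
 * flags |= 3 * 0x1000 = 0x3000, which is both bits 12 and 13.
 */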

static inline u64 rdmsr(u32 index)
{
    u32 a, d;
    asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
    return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
    u32 a = val, d = val >> 32;
    asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

/* Returns the vector of the exception raised by rdmsr, or 0 on success. */
static inline int rdmsr_checking(u32 index)
{
	asm volatile (ASM_TRY("1f")
		      "rdmsr\n\t"
		      "1:"
		      : : "c"(index) : "memory", "eax", "edx");
	return exception_vector();
}

/* Returns the vector of the exception raised by wrmsr, or 0 on success. */
static inline int wrmsr_checking(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;

	asm volatile (ASM_TRY("1f")
		      "wrmsr\n\t"
		      "1:"
		      : : "a"(a), "d"(d), "c"(index) : "memory");
	return exception_vector();
}
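
/*
 * Usage sketch (illustrative): probing whether an MSR write faults,
 * e.g. setting a reserved bit in EFER:
 *
 *	if (wrmsr_checking(MSR_EFER, rdmsr(MSR_EFER) | (1ull << 63)) ==
 *	    GP_VECTOR)
 *		report_pass(...);
 */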

static inline uint64_t rdpmc(uint32_t index)
{
    uint32_t a, d;
    asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
    return a | ((uint64_t)d << 32);
}

static inline void write_cr0(ulong val)
{
    asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
}

static inline ulong read_cr0(void)
{
    ulong val;
    asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr2(ulong val)
{
    asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
    ulong val;
    asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr3(ulong val)
{
    asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
}

static inline ulong read_cr3(void)
{
    ulong val;
    asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void update_cr3(void *cr3)
{
    write_cr3((ulong)cr3);
}

static inline void write_cr4(ulong val)
{
    asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
}

static inline ulong read_cr4(void)
{
    ulong val;
    asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr8(ulong val)
{
    asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
    ulong val;
    asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(unsigned val)
{
    asm volatile ("lldt %0" : : "rm"(val));
}

static inline u16 sldt(void)
{
    u16 val;
    asm volatile ("sldt %0" : "=rm"(val));
    return val;
}

static inline void ltr(u16 val)
{
    asm volatile ("ltr %0" : : "rm"(val));
}

static inline u16 str(void)
{
    u16 val;
    asm volatile ("str %0" : "=rm"(val));
    return val;
}

static inline void write_dr6(ulong val)
{
    asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
    ulong val;
    asm volatile ("mov %%dr6, %0" : "=r"(val));
    return val;
}

static inline void write_dr7(ulong val)
{
    asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
    ulong val;
    asm volatile ("mov %%dr7, %0" : "=r"(val));
    return val;
}

static inline void pause(void)
{
    asm volatile ("pause");
}

static inline void cli(void)
{
    asm volatile ("cli");
}

static inline void sti(void)
{
    asm volatile ("sti");
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}
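
/*
 * Usage sketch (illustrative; do_something() stands in for the code
 * under test): bracketing a region with fenced_rdtsc() yields a cycle
 * count that is not skewed by out-of-order execution:
 *
 *	u64 t0 = fenced_rdtsc();
 *	do_something();
 *	u64 cycles = fenced_rdtsc() - t0;
 */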

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	unsigned a = tsc, d = tsc >> 32;

	/* 0x10 is MSR_IA32_TSC */
	asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(0x10));
}

static inline void irq_disable(void)
{
    asm volatile("cli");
}

/*
 * Note that irq_enable() does not ensure an interrupt shadow due to the
 * vagaries of compiler optimizations.  If you need the shadow, use a
 * single asm with "sti" and the instruction after it, as safe_halt()
 * does below.
 */
static inline void irq_enable(void)
{
    asm volatile("sti");
}

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

/* "sti; hlt" in one asm so that hlt executes in the sti interrupt shadow. */
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}

static inline u32 read_pkru(void)
{
    unsigned int eax, edx;
    unsigned int ecx = 0;
    unsigned int pkru;

    /* 0f 01 ee = rdpkru; encoded as bytes for old assemblers */
    asm volatile(".byte 0x0f,0x01,0xee\n\t"
                 : "=a" (eax), "=d" (edx)
                 : "c" (ecx));
    pkru = eax;
    return pkru;
}

static inline void write_pkru(u32 pkru)
{
    unsigned int eax = pkru;
    unsigned int ecx = 0;
    unsigned int edx = 0;

    /* 0f 01 ef = wrpkru */
    asm volatile(".byte 0x0f,0x01,0xef\n\t"
        : : "a" (eax), "c" (ecx), "d" (edx));
}
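
/*
 * Usage sketch (illustrative): PKRU holds an access-disable and a
 * write-disable bit per protection key, so denying all access to
 * key 1 looks like:
 *
 *	write_pkru(read_pkru() | (3u << (2 * 1)));
 */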

/* Checks canonical form assuming 48-bit virtual addresses (no LA57). */
static inline bool is_canonical(u64 addr)
{
	return (s64)(addr << 16) >> 16 == addr;
}
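
/*
 * Worked example: NONCANONICAL (0xaaaaaaaaaaaaaaaa) fails the check,
 * since sign-extending from bit 47 yields 0xffffaaaaaaaaaaaa; a
 * kernel-style address such as 0xffff800000000000 passes.
 */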

/* bts/btr interpret "bit" as an offset from *addr, so it may exceed 7. */
static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

/* Toggling CR4.PGE flushes the whole TLB, including global translations. */
static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline int has_spec_ctrl(void)
{
    return !!(this_cpu_has(X86_FEATURE_SPEC_CTRL));
}

static inline int cpu_has_efer_nx(void)
{
	return !!(this_cpu_has(X86_FEATURE_NX));
}

static inline bool cpuid_osxsave(void)
{
	return cpuid(1).c & (1 << (X86_FEATURE_OSXSAVE & 0xff));
}

#endif /* LIBCFLAT_PROCESSOR_H */