#ifndef _X86_PROCESSOR_H_
#define _X86_PROCESSOR_H_

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <stdint.h>

#define NONCANONICAL            0xaaaaaaaaaaaaaaaaull

#ifdef __x86_64__
#  define R "r"		/* register name prefix: %rax vs. %eax */
#  define W "q"		/* instruction width suffix: quad vs. long */
#  define S "8"		/* native word size in bytes */
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif
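
/*
 * Illustrative use of R/W/S (a sketch, not taken from this file): splice
 * them into asm templates to build width-independent code, e.g.
 *
 *	asm volatile("mov %%" R "sp, %0" : "=r"(sp));
 */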

#define DB_VECTOR 1
#define BP_VECTOR 3
#define UD_VECTOR 6
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define AC_VECTOR 17
#define CP_VECTOR 21

#define X86_CR0_PE	0x00000001
#define X86_CR0_MP	0x00000002
#define X86_CR0_EM	0x00000004
#define X86_CR0_TS	0x00000008
#define X86_CR0_WP	0x00010000
#define X86_CR0_AM	0x00040000
#define X86_CR0_NW	0x20000000
#define X86_CR0_CD	0x40000000
#define X86_CR0_PG	0x80000000
#define X86_CR3_PCID_MASK 0x00000fff
#define X86_CR4_TSD	0x00000004
#define X86_CR4_DE	0x00000008
#define X86_CR4_PSE	0x00000010
#define X86_CR4_PAE	0x00000020
#define X86_CR4_MCE	0x00000040
#define X86_CR4_PGE	0x00000080
#define X86_CR4_PCE	0x00000100
#define X86_CR4_UMIP	0x00000800
#define X86_CR4_LA57	0x00001000
#define X86_CR4_VMXE	0x00002000
#define X86_CR4_PCIDE	0x00020000
#define X86_CR4_OSXSAVE	0x00040000
#define X86_CR4_SMEP	0x00100000
#define X86_CR4_SMAP	0x00200000
#define X86_CR4_PKE	0x00400000
#define X86_CR4_CET	0x00800000
#define X86_CR4_PKS	0x01000000

#define X86_EFLAGS_CF    0x00000001
#define X86_EFLAGS_FIXED 0x00000002
#define X86_EFLAGS_PF    0x00000004
#define X86_EFLAGS_AF    0x00000010
#define X86_EFLAGS_ZF    0x00000040
#define X86_EFLAGS_SF    0x00000080
#define X86_EFLAGS_TF    0x00000100
#define X86_EFLAGS_IF    0x00000200
#define X86_EFLAGS_DF    0x00000400
#define X86_EFLAGS_OF    0x00000800
#define X86_EFLAGS_IOPL  0x00003000
#define X86_EFLAGS_NT    0x00004000
#define X86_EFLAGS_RF    0x00010000
#define X86_EFLAGS_VM    0x00020000
#define X86_EFLAGS_AC    0x00040000

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
    struct cpuid r;
    asm volatile ("cpuid"
                  : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
                  : "0"(function), "2"(index));
    return r;
}

static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
    /* Treat leaves above the reported maximum for the range as all zeros. */
    u32 level = raw_cpuid(function & 0xf0000000, 0).a;
    if (level < function)
        return (struct cpuid) { 0, 0, 0, 0 };
    return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
    return cpuid_indexed(function, 0);
}

static inline u8 cpuid_maxphyaddr(void)
{
    /* Fall back to the legacy default of 36 bits if leaf 0x80000008 is absent. */
    if (raw_cpuid(0x80000000, 0).a < 0x80000008)
        return 36;
    return raw_cpuid(0x80000008, 0).a & 0xff;
}

static inline bool is_intel(void)
{
	struct cpuid c = cpuid(0);
	/* The fourth element stays zero and NUL-terminates the vendor string. */
	u32 name[4] = {c.b, c.d, c.c};

	return strcmp((char *)name, "GenuineIntel") == 0;
}

#define	CPUID(a, b, c, d) ((((unsigned long long) a) << 32) | (b << 16) | \
			  (c << 8) | d)

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 * 	[63:32] :  input value for EAX
 * 	[31:16] :  input value for ECX
 * 	[15:8]  :  output register
 * 	[7:0]   :  bit position in output register
 */
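
/*
 * For example, X86_FEATURE_VMX == CPUID(0x1, 0, ECX, 5) encodes "execute
 * CPUID with EAX=1 and ECX=0, then test bit 5 of the ECX output":
 *
 * 	[63:32] = 0x1   (input EAX)
 * 	[31:16] = 0x0   (input ECX)
 * 	[15:8]  = ECX   (output register, enum value 2)
 * 	[7:0]   = 5     (bit position)
 */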
143 
/*
 * Basic leaves, a.k.a. Intel defined
 */
#define	X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define	X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define	X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define	X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define	X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define	X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define	X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define	X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define	X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define	X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define	X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define	X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
#define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define	X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define	X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define	X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define	X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define	X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define	X86_FEATURE_SHSTK		(CPUID(0x7, 0, ECX, 7))
#define	X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define	X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))
#define	X86_FEATURE_IBT			(CPUID(0x7, 0, EDX, 20))
#define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))

/*
 * Extended leaves, a.k.a. AMD defined
 */
#define	X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define	X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define	X86_FEATURE_GBPAGES		(CPUID(0x80000001, 0, EDX, 26))
#define	X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define	X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
#define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
#define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define	X86_FEATURE_LBRV		(CPUID(0x8000000A, 0, EDX, 1))
#define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
#define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))

static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);
	tmp = (u32 *)&c;

	/*
	 * Index the a/b/c/d output registers by the encoded register number;
	 * use an unsigned shift so that bit 31 is well defined.
	 */
	return *(tmp + output_reg) & (1u << bit);
}
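
/*
 * Typical use (an illustrative sketch): gate feature-specific test cases
 * on CPUID enumeration, e.g.
 *
 *	if (!this_cpu_has(X86_FEATURE_PCID))
 *		return;
 */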

struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
    u16 limit;
    ulong base;
} __attribute__((packed));

static inline void clac(void)
{
    /* "clac" encoded as raw bytes for assemblers that lack the mnemonic */
    asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
    /* "stac" encoded as raw bytes for assemblers that lack the mnemonic */
    asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
    unsigned val;

    asm volatile ("mov %%cs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ds(void)
{
    unsigned val;

    asm volatile ("mov %%ds, %0" : "=mr"(val));
    return val;
}

static inline u16 read_es(void)
{
    unsigned val;

    asm volatile ("mov %%es, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ss(void)
{
    unsigned val;

    asm volatile ("mov %%ss, %0" : "=mr"(val));
    return val;
}

static inline u16 read_fs(void)
{
    unsigned val;

    asm volatile ("mov %%fs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_gs(void)
{
    unsigned val;

    asm volatile ("mov %%gs, %0" : "=mr"(val));
    return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
    asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
    asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
    asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
    asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
    asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
    asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;

	/* IOPL is the two-bit field at EFLAGS[13:12]; IOPL/3 == 1 << 12. */
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

static inline u64 rdmsr(u32 index)
{
    u32 a, d;
    asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
    return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
    u32 a = val, d = val >> 32;
    asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

/* Returns the vector of the exception raised by RDMSR, or 0 if none. */
static inline int rdmsr_checking(u32 index)
{
	asm volatile (ASM_TRY("1f")
		      "rdmsr\n\t"
		      "1:"
		      : : "c"(index) : "memory", "eax", "edx");
	return exception_vector();
}

/* Returns the vector of the exception raised by WRMSR, or 0 if none. */
static inline int wrmsr_checking(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;

	asm volatile (ASM_TRY("1f")
		      "wrmsr\n\t"
		      "1:"
		      : : "a"(a), "d"(d), "c"(index) : "memory");
	return exception_vector();
}
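
/*
 * Example (an illustrative sketch): a test can probe whether an MSR exists
 * by checking for #GP on access:
 *
 *	if (rdmsr_checking(0xdeadbeef) == GP_VECTOR)
 *		report_skip("MSR not implemented");
 */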

static inline uint64_t rdpmc(uint32_t index)
{
    uint32_t a, d;
    asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
    return a | ((uint64_t)d << 32);
}

static inline void write_cr0(ulong val)
{
    asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
}

static inline ulong read_cr0(void)
{
    ulong val;
    asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr2(ulong val)
{
    asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
    ulong val;
    asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr3(ulong val)
{
    asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
}

static inline ulong read_cr3(void)
{
    ulong val;
    asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void update_cr3(void *cr3)
{
    write_cr3((ulong)cr3);
}

static inline void write_cr4(ulong val)
{
    asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
}

static inline ulong read_cr4(void)
{
    ulong val;
    asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr8(ulong val)
{
    asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
    ulong val;
    asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(u16 val)
{
    asm volatile ("lldt %0" : : "rm"(val));
}

static inline u16 sldt(void)
{
    u16 val;
    asm volatile ("sldt %0" : "=rm"(val));
    return val;
}

static inline void ltr(u16 val)
{
    asm volatile ("ltr %0" : : "rm"(val));
}

static inline u16 str(void)
{
    u16 val;
    asm volatile ("str %0" : "=rm"(val));
    return val;
}

static inline void write_dr0(void *val)
{
    asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
}

static inline void write_dr1(void *val)
{
    asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
}

static inline void write_dr2(void *val)
{
    asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
}

static inline void write_dr3(void *val)
{
    asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
}

static inline void write_dr6(ulong val)
{
    asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
    ulong val;
    asm volatile ("mov %%dr6, %0" : "=r"(val));
    return val;
}

static inline void write_dr7(ulong val)
{
    asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
    ulong val;
    asm volatile ("mov %%dr7, %0" : "=r"(val));
    return val;
}

static inline void pause(void)
{
    asm volatile ("pause");
}

static inline void cli(void)
{
    asm volatile ("cli");
}

static inline void sti(void)
{
    asm volatile ("sti");
}

static inline unsigned long long rdrand(void)
{
	long long r;

	/* RDRAND sets CF on success; return 0 if no random number was ready. */
	asm volatile("rdrand %0\n\t"
		     "jc 1f\n\t"
		     "mov $0, %0\n\t"
		     "1:\n\t" : "=r" (r));
	return r;
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}
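
/*
 * Illustrative sketch (the helper name is hypothetical, not part of this
 * header): time a code region with serialized TSC reads.
 */
static inline unsigned long long cycles_for(void (*fn)(void))
{
	unsigned long long start = fenced_rdtsc();

	fn();
	return fenced_rdtsc() - start;
}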

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	wrmsr(MSR_IA32_TSC, tsc);
}

static inline void irq_disable(void)
{
    asm volatile("cli");
}

/*
 * Note that irq_enable() does not ensure an interrupt shadow due to the
 * vagaries of compiler optimizations.  If you need the shadow, use a single
 * asm with "sti" and the instruction after it.
 */
static inline void irq_enable(void)
{
    asm volatile("sti");
}
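
/*
 * A sketch of the single-asm pattern the comment above calls for; see
 * safe_halt() below, which keeps "hlt" inside the one-instruction
 * interrupt shadow that "sti" creates:
 *
 *	asm volatile("sti; hlt");
 */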

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" : : "r" (va) : "memory");
}

static inline void safe_halt(void)
{
	/*
	 * "hlt" sits in the one-instruction interrupt shadow of "sti", so an
	 * interrupt cannot slip in between; the CPU halts and wakes on the
	 * first interrupt delivered after interrupts are enabled.
	 */
	asm volatile("sti; hlt");
}

static inline u32 read_pkru(void)
{
    unsigned int eax, edx;
    unsigned int ecx = 0;
    unsigned int pkru;

    /* "rdpkru" encoded as raw bytes for assemblers that lack the mnemonic */
    asm volatile(".byte 0x0f,0x01,0xee\n\t"
                 : "=a" (eax), "=d" (edx)
                 : "c" (ecx));
    pkru = eax;
    return pkru;
}

static inline void write_pkru(u32 pkru)
{
    unsigned int eax = pkru;
    unsigned int ecx = 0;
    unsigned int edx = 0;

    /* "wrpkru" encoded as raw bytes for assemblers that lack the mnemonic */
    asm volatile(".byte 0x0f,0x01,0xef\n\t"
        : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline bool is_canonical(u64 addr)
{
	/* CPUID.80000008H:EAX[15:8] reports the virtual address width. */
	int va_width = (raw_cpuid(0x80000008, 0).a & 0xff00) >> 8;
	int shift_amt = 64 - va_width;

	/* Canonical iff sign-extending the low va_width bits is a no-op. */
	return (s64)(addr << shift_amt) >> shift_amt == addr;
}
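
/*
 * Worked example: with a 48-bit virtual address width, shift_amt is 16;
 * 0xffff800000000000 survives the shift round-trip and is canonical,
 * while NONCANONICAL (0xaaaa...) does not.
 */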

static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void flush_tlb(void)
{
	ulong cr4;

	/* Toggling CR4.PGE flushes the entire TLB, including global pages. */
	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline int has_spec_ctrl(void)
{
    return !!(this_cpu_has(X86_FEATURE_SPEC_CTRL));
}

static inline int cpu_has_efer_nx(void)
{
	return !!(this_cpu_has(X86_FEATURE_NX));
}

static inline bool cpuid_osxsave(void)
{
	/* X86_FEATURE_OSXSAVE % 32 recovers the bit number (27) from the encoding. */
	return cpuid(1).c & (1 << (X86_FEATURE_OSXSAVE % 32));
}

#endif /* _X86_PROCESSOR_H_ */