xref: /kvm-unit-tests/lib/x86/processor.h (revision f4a8b68c47291842a6044038b283542b3fc0009c)
#ifndef _X86_PROCESSOR_H_
#define _X86_PROCESSOR_H_

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <stdint.h>

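/*
 * An address that is non-canonical under both 4-level and 5-level paging
 * (the upper bits are not a sign extension of the implemented width), handy
 * for provoking canonical-address faults in tests.
 */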
#define NONCANONICAL            0xaaaaaaaaaaaaaaaaull

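/*
 * Shorthands for composing inline-asm templates: R is the register prefix
 * ("r"/"e"), W the operand-size suffix ("q"/"l"), and S the word size in
 * bytes, selected for 64-bit vs. 32-bit builds.
 */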
#ifdef __x86_64__
#  define R "r"
#  define W "q"
#  define S "8"
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif

#define DB_VECTOR 1
#define BP_VECTOR 3
#define UD_VECTOR 6
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define AC_VECTOR 17
#define CP_VECTOR 21

#define X86_CR0_PE	0x00000001
#define X86_CR0_MP	0x00000002
#define X86_CR0_EM	0x00000004
#define X86_CR0_TS	0x00000008
#define X86_CR0_WP	0x00010000
#define X86_CR0_AM	0x00040000
#define X86_CR0_NW	0x20000000
#define X86_CR0_CD	0x40000000
#define X86_CR0_PG	0x80000000
#define X86_CR3_PCID_MASK 0x00000fff
#define X86_CR4_TSD	0x00000004
#define X86_CR4_DE	0x00000008
#define X86_CR4_PSE	0x00000010
#define X86_CR4_PAE	0x00000020
#define X86_CR4_MCE	0x00000040
#define X86_CR4_PGE	0x00000080
#define X86_CR4_PCE	0x00000100
#define X86_CR4_UMIP	0x00000800
#define X86_CR4_LA57	0x00001000
#define X86_CR4_VMXE	0x00002000
#define X86_CR4_PCIDE	0x00020000
#define X86_CR4_OSXSAVE	0x00040000
#define X86_CR4_SMEP	0x00100000
#define X86_CR4_SMAP	0x00200000
#define X86_CR4_PKE	0x00400000
#define X86_CR4_CET	0x00800000
#define X86_CR4_PKS	0x01000000

#define X86_EFLAGS_CF    0x00000001
#define X86_EFLAGS_FIXED 0x00000002
#define X86_EFLAGS_PF    0x00000004
#define X86_EFLAGS_AF    0x00000010
#define X86_EFLAGS_ZF    0x00000040
#define X86_EFLAGS_SF    0x00000080
#define X86_EFLAGS_TF    0x00000100
#define X86_EFLAGS_IF    0x00000200
#define X86_EFLAGS_DF    0x00000400
#define X86_EFLAGS_OF    0x00000800
#define X86_EFLAGS_IOPL  0x00003000
#define X86_EFLAGS_NT    0x00004000
#define X86_EFLAGS_RF    0x00010000
#define X86_EFLAGS_VM    0x00020000
#define X86_EFLAGS_AC    0x00040000

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)


/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
    struct cpuid r;
    asm volatile ("cpuid"
                  : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
                  : "0"(function), "2"(index));
    return r;
}

static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
    u32 level = raw_cpuid(function & 0xf0000000, 0).a;
    if (level < function)
        return (struct cpuid) { 0, 0, 0, 0 };
    return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
    return cpuid_indexed(function, 0);
}

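/*
 * MAXPHYADDR as reported by CPUID.0x80000008:EAX[7:0]; falls back to the
 * 36-bit value used for older PAE-capable CPUs when that leaf is missing.
 */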
static inline u8 cpuid_maxphyaddr(void)
{
    if (raw_cpuid(0x80000000, 0).a < 0x80000008)
        return 36;
    return raw_cpuid(0x80000008, 0).a & 0xff;
}

static inline bool is_intel(void)
{
	struct cpuid c = cpuid(0);
	u32 name[4] = {c.b, c.d, c.c };

	return strcmp((char *)name, "GenuineIntel") == 0;
}

#define	CPUID(a, b, c, d) ((((unsigned long long) a) << 32) | (b << 16) | \
			  (c << 8) | d)

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 * 	[63:32] :  input value for EAX
 * 	[31:16] :  input value for ECX
 * 	[15:8]  :  output register
 * 	[7:0]   :  bit position in output register
 */
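/*
 * For example, X86_FEATURE_XSAVE below is CPUID(0x1, 0, ECX, 26): execute
 * CPUID with EAX=1 and ECX=0, then test bit 26 of the ECX output.
 */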

/*
 * Basic leaves, a.k.a. Intel defined
 */
#define	X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define	X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define	X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define	X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define	X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define	X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define	X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define	X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define	X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define	X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define	X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define	X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
#define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define	X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define	X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define	X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define	X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define	X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define	X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define	X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define	X86_FEATURE_SHSTK		(CPUID(0x7, 0, ECX, 7))
#define	X86_FEATURE_IBT			(CPUID(0x7, 0, EDX, 20))
#define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))
#define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))

/*
 * Extended leaves, a.k.a. AMD defined
 */
#define	X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define	X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define	X86_FEATURE_GBPAGES		(CPUID(0x80000001, 0, EDX, 26))
#define	X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define	X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
#define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
#define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
#define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))


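/*
 * Decode an X86_FEATURE_* value (layout above) and test the corresponding
 * bit on the current CPU.  The cast relies on struct cpuid laying out
 * a/b/c/d in the same order as enum cpuid_output_regs, so indexing the
 * struct as a u32 array selects the requested output register.
 */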
static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);
	tmp = (u32 *)&c;

	return ((*(tmp + (output_reg % 32))) & (1 << bit));
}

struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
    u16 limit;
    ulong base;
} __attribute__((packed));

static inline void barrier(void)
{
    asm volatile ("" : : : "memory");
}

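/*
 * CLAC and STAC (emitted as raw opcode bytes) clear and set EFLAGS.AC,
 * which gates SMAP enforcement for supervisor accesses to user pages.
 */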
static inline void clac(void)
{
    asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
    asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
    unsigned val;

    asm volatile ("mov %%cs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ds(void)
{
    unsigned val;

    asm volatile ("mov %%ds, %0" : "=mr"(val));
    return val;
}

static inline u16 read_es(void)
{
    unsigned val;

    asm volatile ("mov %%es, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ss(void)
{
    unsigned val;

    asm volatile ("mov %%ss, %0" : "=mr"(val));
    return val;
}

static inline u16 read_fs(void)
{
    unsigned val;

    asm volatile ("mov %%fs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_gs(void)
{
    unsigned val;

    asm volatile ("mov %%gs, %0" : "=mr"(val));
    return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
    asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
    asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
    asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
    asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
    asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
    asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

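/*
 * IOPL is the two-bit field at EFLAGS[13:12]; X86_EFLAGS_IOPL / 3 == 0x1000,
 * so multiplying by iopl (0-3) places the value in that field.
 */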
static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

static inline u64 rdmsr(u32 index)
{
    u32 a, d;
    asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
    return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
    u32 a = val, d = val >> 32;
    asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

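/*
 * The *_checking variants wrap the instruction in ASM_TRY() (see desc.h) so
 * a fault is caught by the exception fixup; they return the vector that was
 * raised, or 0 if the access succeeded.
 */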
static inline int rdmsr_checking(u32 index)
{
	asm volatile (ASM_TRY("1f")
		      "rdmsr\n\t"
		      "1:"
		      : : "c"(index) : "memory", "eax", "edx");
	return exception_vector();
}

static inline int wrmsr_checking(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;

	asm volatile (ASM_TRY("1f")
		      "wrmsr\n\t"
		      "1:"
		      : : "a"(a), "d"(d), "c"(index) : "memory");
	return exception_vector();
}

static inline uint64_t rdpmc(uint32_t index)
{
    uint32_t a, d;
    asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
    return a | ((uint64_t)d << 32);
}

static inline void write_cr0(ulong val)
{
    asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
}

static inline ulong read_cr0(void)
{
    ulong val;
    asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr2(ulong val)
{
    asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
    ulong val;
    asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr3(ulong val)
{
    asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
}

static inline ulong read_cr3(void)
{
    ulong val;
    asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void update_cr3(void *cr3)
{
    write_cr3((ulong)cr3);
}

static inline void write_cr4(ulong val)
{
    asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
}

static inline ulong read_cr4(void)
{
    ulong val;
    asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr8(ulong val)
{
    asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
    ulong val;
    asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(unsigned val)
{
    asm volatile ("lldt %0" : : "rm"(val));
}

static inline u16 sldt(void)
{
    u16 val;
    asm volatile ("sldt %0" : "=rm"(val));
    return val;
}

static inline void ltr(u16 val)
{
    asm volatile ("ltr %0" : : "rm"(val));
}

static inline u16 str(void)
{
    u16 val;
    asm volatile ("str %0" : "=rm"(val));
    return val;
}

static inline void write_dr0(void *val)
{
    asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
}

static inline void write_dr1(void *val)
{
    asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
}

static inline void write_dr2(void *val)
{
    asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
}

static inline void write_dr3(void *val)
{
    asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
}

static inline void write_dr6(ulong val)
{
    asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
    ulong val;
    asm volatile ("mov %%dr6, %0" : "=r"(val));
    return val;
}

static inline void write_dr7(ulong val)
{
    asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
    ulong val;
    asm volatile ("mov %%dr7, %0" : "=r"(val));
    return val;
}

static inline void pause(void)
{
    asm volatile ("pause");
}

static inline void cli(void)
{
    asm volatile ("cli");
}

static inline void sti(void)
{
    asm volatile ("sti");
}

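/*
 * Returns a hardware random number, or 0 if RDRAND reported failure
 * (carry flag clear).
 */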
static inline unsigned long long rdrand(void)
{
	long long r;

	asm volatile("rdrand %0\n\t"
		     "jc 1f\n\t"
		     "mov $0, %0\n\t"
		     "1:\n\t" : "=r" (r));
	return r;
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}
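/*
 * A minimal sketch of the intended use (do_work() is hypothetical): the
 * fences keep the measured region from leaking past either read.
 *
 *	u64 t0 = fenced_rdtsc();
 *	do_work();
 *	u64 cycles = fenced_rdtsc() - t0;
 */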

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

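/* MSR 0x10 is IA32_TIME_STAMP_COUNTER, so this writes the TSC via WRMSR. */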
static inline void wrtsc(u64 tsc)
{
	unsigned a = tsc, d = tsc >> 32;

	asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(0x10));
}

static inline void irq_disable(void)
{
    asm volatile("cli");
}

/*
 * Note that irq_enable() does not ensure an interrupt shadow due to the
 * vagaries of compiler optimizations.  If you need the shadow, use a single
 * asm with "sti" and the instruction after it.
 */
static inline void irq_enable(void)
{
    asm volatile("sti");
}

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

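/*
 * Enabling interrupts in the same asm as "hlt" keeps hlt inside the STI
 * interrupt shadow, so a pending interrupt wakes the CPU from the halt
 * instead of being delivered before it.
 */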
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}

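/*
 * The .byte sequences below are RDPKRU (0f 01 ee) and WRPKRU (0f 01 ef);
 * both require ECX = 0, and WRPKRU also requires EDX = 0.
 */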
static inline u32 read_pkru(void)
{
    unsigned int eax, edx;
    unsigned int ecx = 0;
    unsigned int pkru;

    asm volatile(".byte 0x0f,0x01,0xee\n\t"
                 : "=a" (eax), "=d" (edx)
                 : "c" (ecx));
    pkru = eax;
    return pkru;
}

static inline void write_pkru(u32 pkru)
{
    unsigned int eax = pkru;
    unsigned int ecx = 0;
    unsigned int edx = 0;

    asm volatile(".byte 0x0f,0x01,0xef\n\t"
        : : "a" (eax), "c" (ecx), "d" (edx));
}

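/*
 * A linear address is canonical when its upper bits are a sign extension of
 * the CPU's virtual-address width, reported in CPUID.0x80000008:EAX[15:8];
 * shifting up and back down and comparing checks exactly that.
 */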
static inline bool is_canonical(u64 addr)
{
	int va_width = (raw_cpuid(0x80000008, 0).a & 0xff00) >> 8;
	int shift_amt = 64 - va_width;

	return (s64)(addr << shift_amt) >> shift_amt == addr;
}

static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

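/*
 * Toggling CR4.PGE flushes the entire TLB, including global translations.
 */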
static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline int has_spec_ctrl(void)
{
    return !!(this_cpu_has(X86_FEATURE_SPEC_CTRL));
}

static inline int cpu_has_efer_nx(void)
{
	return !!(this_cpu_has(X86_FEATURE_NX));
}

static inline bool cpuid_osxsave(void)
{
	return cpuid(1).c & (1 << (X86_FEATURE_OSXSAVE % 32));
}

#endif