#ifndef _X86_PROCESSOR_H_
#define _X86_PROCESSOR_H_

#include "libcflat.h"
#include "desc.h"
#include "msr.h"
#include <stdint.h>

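/*
 * Non-canonical under both 48-bit and 57-bit virtual addressing: the top
 * bits alternate, so the address cannot be a sign extension of any width.
 */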
#define NONCANONICAL            0xaaaaaaaaaaaaaaaaull

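/*
 * Register-name prefix (R), instruction operand-size suffix (W), and
 * pointer size in bytes (S) for inline asm that must build for both
 * 32-bit and 64-bit targets.
 */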
#ifdef __x86_64__
#  define R "r"
#  define W "q"
#  define S "8"
#else
#  define R "e"
#  define W "l"
#  define S "4"
#endif

#define DB_VECTOR 1
#define BP_VECTOR 3
#define UD_VECTOR 6
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define AC_VECTOR 17
#define CP_VECTOR 21

#define X86_CR0_PE	0x00000001
#define X86_CR0_MP	0x00000002
#define X86_CR0_EM	0x00000004
#define X86_CR0_TS	0x00000008
#define X86_CR0_WP	0x00010000
#define X86_CR0_AM	0x00040000
#define X86_CR0_NW	0x20000000
#define X86_CR0_CD	0x40000000
#define X86_CR0_PG	0x80000000
#define X86_CR3_PCID_MASK 0x00000fff
#define X86_CR4_TSD	0x00000004
#define X86_CR4_DE	0x00000008
#define X86_CR4_PSE	0x00000010
#define X86_CR4_PAE	0x00000020
#define X86_CR4_MCE	0x00000040
#define X86_CR4_PGE	0x00000080
#define X86_CR4_PCE	0x00000100
#define X86_CR4_UMIP	0x00000800
#define X86_CR4_LA57	0x00001000
#define X86_CR4_VMXE	0x00002000
#define X86_CR4_PCIDE	0x00020000
#define X86_CR4_OSXSAVE	0x00040000
#define X86_CR4_SMEP	0x00100000
#define X86_CR4_SMAP	0x00200000
#define X86_CR4_PKE	0x00400000
#define X86_CR4_CET	0x00800000
#define X86_CR4_PKS	0x01000000

#define X86_EFLAGS_CF    0x00000001
#define X86_EFLAGS_FIXED 0x00000002
#define X86_EFLAGS_PF    0x00000004
#define X86_EFLAGS_AF    0x00000010
#define X86_EFLAGS_ZF    0x00000040
#define X86_EFLAGS_SF    0x00000080
#define X86_EFLAGS_TF    0x00000100
#define X86_EFLAGS_IF    0x00000200
#define X86_EFLAGS_DF    0x00000400
#define X86_EFLAGS_OF    0x00000800
#define X86_EFLAGS_IOPL  0x00003000
#define X86_EFLAGS_NT    0x00004000
#define X86_EFLAGS_RF    0x00010000
#define X86_EFLAGS_VM    0x00020000
#define X86_EFLAGS_AC    0x00040000

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

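/*
 * Execute CPUID unconditionally; the "0" and "2" matching constraints
 * load the leaf and subleaf into EAX and ECX.
 */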
static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
    struct cpuid r;
    asm volatile ("cpuid"
                  : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
                  : "0"(function), "2"(index));
    return r;
}

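/*
 * Like raw_cpuid(), but returns all zeroes if the leaf is above the
 * maximum reported for its range (basic or extended).
 */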
static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
    u32 level = raw_cpuid(function & 0xf0000000, 0).a;
    if (level < function)
        return (struct cpuid) { 0, 0, 0, 0 };
    return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
    return cpuid_indexed(function, 0);
}

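/*
 * MAXPHYADDR from CPUID.80000008H:EAX[7:0]; when the leaf is absent,
 * the architectural default of 36 bits applies.
 */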
static inline u8 cpuid_maxphyaddr(void)
{
    if (raw_cpuid(0x80000000, 0).a < 0x80000008)
        return 36;
    return raw_cpuid(0x80000008, 0).a & 0xff;
}

static inline bool is_intel(void)
{
	struct cpuid c = cpuid(0);
	u32 name[4] = { c.b, c.d, c.c };	/* name[3] stays 0 to NUL-terminate */

	return strcmp((char *)name, "GenuineIntel") == 0;
}

#define	CPUID(a, b, c, d) ((((unsigned long long) (a)) << 32) | ((b) << 16) | \
			  ((c) << 8) | (d))

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 * 	[63:32] :  input value for EAX
 * 	[31:16] :  input value for ECX
 * 	[15:8]  :  output register
 * 	[7:0]   :  bit position in output register
 */
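
/*
 * For example, X86_FEATURE_XSAVE below is CPUID(0x1, 0, ECX, 26):
 * execute CPUID with EAX=1 and ECX=0, then test bit 26 of the ECX
 * output, i.e.:
 *
 *	if (this_cpu_has(X86_FEATURE_XSAVE))
 *		...
 */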

/*
 * Basic Leaves, a.k.a. Intel defined
 */
#define	X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define	X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define	X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define	X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define	X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define	X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define	X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define	X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define	X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define	X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define	X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define	X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define	X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define	X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define	X86_FEATURE_HLE			(CPUID(0x7, 0, EBX, 4))
#define	X86_FEATURE_SMEP		(CPUID(0x7, 0, EBX, 7))
#define	X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define	X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define	X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define	X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define	X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define	X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define	X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define	X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define	X86_FEATURE_SHSTK		(CPUID(0x7, 0, ECX, 7))
#define	X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define	X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))
#define	X86_FEATURE_IBT			(CPUID(0x7, 0, EDX, 20))
#define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))

/*
 * Extended Leaves, a.k.a. AMD defined
 */
#define	X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define	X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define	X86_FEATURE_GBPAGES		(CPUID(0x80000001, 0, EDX, 26))
#define	X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define	X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
#define	X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
#define	X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define	X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define	X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))
#define	X86_FEATURE_VGIF		(CPUID(0x8000000A, 0, EDX, 16))

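/*
 * Decode the CPUID meta-data encoded in an X86_FEATURE_* value and test
 * the corresponding bit of the corresponding output register.
 */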
static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);
	tmp = (u32 *)&c;

	/* Use an unsigned shift: bit can be 31 (e.g. X86_FEATURE_PKS). */
	return tmp[output_reg] & (1u << bit);
}


struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
    u16 limit;
    ulong base;
} __attribute__((packed));

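/*
 * "0f 01 ca" and "0f 01 cb" encode CLAC and STAC; they are spelled as
 * raw bytes for assemblers that lack the mnemonics.
 */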
static inline void clac(void)
{
    asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
    asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
    unsigned val;

    asm volatile ("mov %%cs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ds(void)
{
    unsigned val;

    asm volatile ("mov %%ds, %0" : "=mr"(val));
    return val;
}

static inline u16 read_es(void)
{
    unsigned val;

    asm volatile ("mov %%es, %0" : "=mr"(val));
    return val;
}

static inline u16 read_ss(void)
{
    unsigned val;

    asm volatile ("mov %%ss, %0" : "=mr"(val));
    return val;
}

static inline u16 read_fs(void)
{
    unsigned val;

    asm volatile ("mov %%fs, %0" : "=mr"(val));
    return val;
}

static inline u16 read_gs(void)
{
    unsigned val;

    asm volatile ("mov %%gs, %0" : "=mr"(val));
    return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
    asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
    asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
    asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
    asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
    asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
    asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

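/*
 * EFLAGS.IOPL occupies bits 13:12, so X86_EFLAGS_IOPL / 3 == 0x1000 is
 * the weight of the field's low bit and multiplying by iopl (0-3)
 * places both bits.
 */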
static inline void set_iopl(int iopl)
{
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

static inline u64 rdmsr(u32 index)
{
    u32 a, d;
    asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
    return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
    u32 a = val, d = val >> 32;
    asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}

static inline int rdmsr_checking(u32 index)
{
	asm volatile (ASM_TRY("1f")
		      "rdmsr\n\t"
		      "1:"
		      : : "c"(index) : "memory", "eax", "edx");
	return exception_vector();
}

static inline int wrmsr_checking(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;

	asm volatile (ASM_TRY("1f")
		      "wrmsr\n\t"
		      "1:"
		      : : "a"(a), "d"(d), "c"(index) : "memory");
	return exception_vector();
}

static inline uint64_t rdpmc(uint32_t index)
{
    uint32_t a, d;
    asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
    return a | ((uint64_t)d << 32);
}

static inline void write_cr0(ulong val)
{
    asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
}

static inline ulong read_cr0(void)
{
    ulong val;
    asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr2(ulong val)
{
    asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
    ulong val;
    asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr3(ulong val)
{
    asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
}

static inline ulong read_cr3(void)
{
    ulong val;
    asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void update_cr3(void *cr3)
{
    write_cr3((ulong)cr3);
}

static inline void write_cr4(ulong val)
{
    asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
}

static inline ulong read_cr4(void)
{
    ulong val;
    asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void write_cr8(ulong val)
{
    asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
    ulong val;
    asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
    return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
    asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
    asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(u16 val)
{
    asm volatile ("lldt %0" : : "rm"(val));
}

static inline u16 sldt(void)
{
    u16 val;
    asm volatile ("sldt %0" : "=rm"(val));
    return val;
}

static inline void ltr(u16 val)
{
    asm volatile ("ltr %0" : : "rm"(val));
}

static inline u16 str(void)
{
    u16 val;
    asm volatile ("str %0" : "=rm"(val));
    return val;
}

static inline void write_dr0(void *val)
{
    asm volatile ("mov %0, %%dr0" : : "r"(val) : "memory");
}

static inline void write_dr1(void *val)
{
    asm volatile ("mov %0, %%dr1" : : "r"(val) : "memory");
}

static inline void write_dr2(void *val)
{
    asm volatile ("mov %0, %%dr2" : : "r"(val) : "memory");
}

static inline void write_dr3(void *val)
{
    asm volatile ("mov %0, %%dr3" : : "r"(val) : "memory");
}

static inline void write_dr6(ulong val)
{
    asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
    ulong val;
    asm volatile ("mov %%dr6, %0" : "=r"(val));
    return val;
}

static inline void write_dr7(ulong val)
{
    asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
    ulong val;
    asm volatile ("mov %%dr7, %0" : "=r"(val));
    return val;
}

static inline void pause(void)
{
    asm volatile ("pause");
}

static inline void cli(void)
{
    asm volatile ("cli");
}

static inline void sti(void)
{
    asm volatile ("sti");
}

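/* Returns a random value on success, or 0 when RDRAND fails (CF clear). */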
static inline unsigned long long rdrand(void)
{
	long long r;

	asm volatile("rdrand %0\n\t"
		     "jc 1f\n\t"
		     "mov $0, %0\n\t"
		     "1:\n\t" : "=r" (r));
	return r;
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

/*
 * Per the advice in the SDM, volume 2, the sequence "mfence; lfence"
 * executed immediately before rdtsc ensures that rdtsc will be
 * executed only after all previous instructions have executed and all
 * previous loads and stores are globally visible. In addition, the
 * lfence immediately after rdtsc ensures that rdtsc will be executed
 * prior to the execution of any subsequent instruction.
 */
static inline unsigned long long fenced_rdtsc(void)
{
	unsigned long long tsc;

#ifdef __x86_64__
	unsigned int eax, edx;

	asm volatile ("mfence; lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc = eax | ((unsigned long long)edx << 32);
#else
	asm volatile ("mfence; lfence; rdtsc; lfence" : "=A"(tsc));
#endif
	return tsc;
}

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	wrmsr(MSR_IA32_TSC, tsc);
}

static inline void irq_disable(void)
{
    asm volatile("cli");
}

/*
 * Note that irq_enable() does not ensure an interrupt shadow due to the
 * vagaries of compiler optimizations.  If you need the shadow, use a
 * single asm with "sti" and the instruction after it.
 */
static inline void irq_enable(void)
{
    asm volatile("sti");
}
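
/*
 * A minimal sketch of that single-asm pattern: the instruction
 * immediately after "sti" executes in its interrupt shadow, so no
 * interrupt can arrive between the two.  safe_halt() below relies on
 * exactly this:
 *
 *	asm volatile("sti; hlt");
 */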

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}

static inline u32 read_pkru(void)
{
    unsigned int eax, edx;
    unsigned int ecx = 0;
    unsigned int pkru;

    /* "0f 01 ee" encodes RDPKRU, spelled as raw bytes for old assemblers. */
    asm volatile(".byte 0x0f,0x01,0xee\n\t"
                 : "=a" (eax), "=d" (edx)
                 : "c" (ecx));
    pkru = eax;
    return pkru;
}

static inline void write_pkru(u32 pkru)
{
    unsigned int eax = pkru;
    unsigned int ecx = 0;
    unsigned int edx = 0;

    /* "0f 01 ef" encodes WRPKRU. */
    asm volatile(".byte 0x0f,0x01,0xef\n\t"
        : : "a" (eax), "c" (ecx), "d" (edx));
}

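/*
 * Linear-address width from CPUID.80000008H:EAX[15:8]; an address is
 * canonical iff it sign-extends from that width.
 */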
static inline bool is_canonical(u64 addr)
{
	int va_width = (raw_cpuid(0x80000008, 0).a & 0xff00) >> 8;
	int shift_amt = 64 - va_width;

	return (s64)(addr << shift_amt) >> shift_amt == addr;
}

static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

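/* Toggling CR4.PGE flushes the entire TLB, including global entries. */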
static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline int has_spec_ctrl(void)
{
    return !!(this_cpu_has(X86_FEATURE_SPEC_CTRL));
}

static inline int cpu_has_efer_nx(void)
{
	return !!(this_cpu_has(X86_FEATURE_NX));
}

static inline bool cpuid_osxsave(void)
{
	/* The low byte of the encoding is the ECX bit position, 27. */
	return cpuid(1).c & (1 << (X86_FEATURE_OSXSAVE % 32));
}

#endif