/*
 * x86/vmexit.c (kvm-unit-tests, revision 7d36db351752e29ad27eaafe3f102de7064e429b)
 *
 * Measure the average cycle cost of operations that cause a vmexit.
 */

#include "libcflat.h"
#include "smp.h"

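/* Read the time-stamp counter; rdtsc returns the 64-bit TSC in edx:eax. */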
static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

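/* Read a 32-bit value from an I/O port. */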
static unsigned int inl(unsigned short port)
{
	unsigned int val;

	asm volatile("inl %w1, %0" : "=a"(val) : "Nd"(port));
	return val;
}

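/*
 * Each test is rerun with a doubled iteration count until one run takes
 * at least GOAL cycles, so even cheap exits get a stable average.
 */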
#define GOAL (1ull << 30)

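/* "R" is the native register prefix: "r" (%rbx) on 64-bit, "e" (%ebx) on 32-bit. */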
#ifdef __x86_64__
#  define R "r"
#else
#  define R "e"
#endif

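/*
 * cpuid clobbers %rbx/%ebx, which the compiler may reserve (e.g. as the
 * PIC register), so save and restore it by hand instead of listing it
 * as a clobber.
 */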
static void cpuid(void)
{
	asm volatile ("push %%"R "bx; cpuid; pop %%"R "bx"
		      : : : "eax", "ecx", "edx");
}

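/*
 * vmcall traps to the hypervisor unconditionally; the output constraints
 * cover the registers a hypercall may clobber.
 */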
static void vmcall(void)
{
	unsigned long a = 0, b, c, d;

	asm volatile ("vmcall" : "+a"(a), "=b"(b), "=c"(c), "=d"(d));
}

#define MSR_EFER 0xc0000080
#define EFER_NX_MASK            (1ull << 11)

unsigned long long rdmsr(unsigned index)
{
	unsigned a, d;

	asm volatile("rdmsr" : "=a"(a), "=d"(d) : "c"(index));
	return ((unsigned long long)d << 32) | a;
}

void wrmsr(unsigned index, unsigned long long val)
{
	unsigned a = val, d = val >> 32;

	asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(index));
}

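/* CR8 mirrors the local APIC TPR; accesses to it typically cause vmexits. */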
static void mov_from_cr8(void)
{
	unsigned long cr8;

	asm volatile ("mov %%cr8, %0" : "=r"(cr8));
}

static void mov_to_cr8(void)
{
	unsigned long cr8 = 0;

	asm volatile ("mov %0, %%cr8" : : "r"(cr8));
}

static int is_smp(void)
{
	return cpu_count() > 1;
}

static void nop(void *junk)
{
}

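/* Cost of an IPI: run a no-op on CPU 1 and wait for it to complete. */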
static void ipi(void)
{
	on_cpu(1, nop, 0);
}

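/*
 * Like ipi, but spin for ~2000 cycles afterwards, presumably to give
 * CPU 1 time to go back into halt before the next round.
 */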
static void ipi_halt(void)
{
	unsigned long long t;

	on_cpu(1, nop, 0);
	t = rdtsc() + 2000;
	while (rdtsc() < t)
		;
}

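/* Port 0xb008 is the ACPI PM timer on QEMU's default machine type. */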
static void inl_pmtimer(void)
{
	inl(0xb008);
}

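/*
 * A test is an exit-causing function, an optional predicate saying whether
 * it can run in this configuration, and a flag for running on all CPUs at once.
 */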
static struct test {
	void (*func)(void);
	const char *name;
	int (*valid)(void);
	int parallel;
} tests[] = {
	{ cpuid, "cpuid", .parallel = 1, },
	{ vmcall, "vmcall", .parallel = 1, },
	{ mov_from_cr8, "mov_from_cr8", .parallel = 1, },
	{ mov_to_cr8, "mov_to_cr8", .parallel = 1, },
	{ inl_pmtimer, "inl_from_pmtimer", .parallel = 1, },
	{ ipi, "ipi", is_smp, .parallel = 0, },
	{ ipi_halt, "ipi+halt", is_smp, .parallel = 0, },
};

unsigned iterations;
volatile int nr_cpus_done;

static void run_test(void *_func)
{
	int i;
	void (*func)(void) = _func;

	for (i = 0; i < iterations; ++i)
		func();

	nr_cpus_done++;
}

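/*
 * Time one test: double the iteration count until a whole run takes at
 * least GOAL cycles, then print the average cycles per operation.
 * Parallel tests run on every CPU at once; the others run locally.
 */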
static void do_test(struct test *test)
{
	int i;
	unsigned long long t1, t2;
	void (*func)(void) = test->func;

	iterations = 32;

	if (test->valid && !test->valid()) {
		printf("%s (skipped)\n", test->name);
		return;
	}

	do {
		iterations *= 2;
		t1 = rdtsc();

		if (!test->parallel) {
			for (i = 0; i < iterations; ++i)
				func();
		} else {
			nr_cpus_done = 0;
			for (i = cpu_count(); i > 0; i--)
				on_cpu_async(i-1, run_test, func);
			while (nr_cpus_done < cpu_count())
				;
		}
		t2 = rdtsc();
	} while ((t2 - t1) < GOAL);
	printf("%s %d\n", test->name, (int)((t2 - t1) / iterations));
}

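/* Set EFER.NX; main() runs this on every CPU before measuring. */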
static void enable_nx(void *junk)
{
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX_MASK);
}

int main(void)
{
	int i;

	smp_init();

	for (i = cpu_count(); i > 0; i--)
		on_cpu(i-1, enable_nx, 0);

	for (i = 0; i < ARRAY_SIZE(tests); ++i)
		do_test(&tests[i]);

	return 0;
}