#include <asm/debugreg.h>

#include "ioram.h"
#include "vm.h"
#include "libcflat.h"
#include "desc.h"
#include "processor.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "usermode.h"

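/*
 * I/O port of the test device; writes are assumed to be captured by the
 * harness's test device (e.g. QEMU's testdev), which returns the last
 * byte written when the port is read back.
 */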
#define TESTDEV_IO_PORT 0xe0

static int exceptions;

#ifdef __x86_64__
#include "emulator64.c"
#endif

static char st1[] = "abcdefghijklmnop";

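/*
 * Exercise REP OUTSB in both directions: with DF clear, SI walks forward
 * through st1; with DF set, it walks backward from the last character.
 * Reading the port back should return the final byte written in each case.
 */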
static void test_stringio(void)
{
	unsigned char r = 0;
	asm volatile("cld \n\t"
		     "movw %0, %%dx \n\t"
		     "rep outsb \n\t"
		     : : "i"((short)TESTDEV_IO_PORT),
		       "S"(st1), "c"(sizeof(st1) - 1));
	asm volatile("inb %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
	report(r == st1[sizeof(st1) - 2], "outsb up"); /* last char */

	asm volatile("std \n\t"
		     "movw %0, %%dx \n\t"
		     "rep outsb \n\t"
		     : : "i"((short)TESTDEV_IO_PORT),
		       "S"(st1 + sizeof(st1) - 2), "c"(sizeof(st1) - 1));
	asm volatile("cld \n\t" : : );
	asm volatile("in %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
	report(r == st1[0], "outsb down");
}

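/*
 * m1 and m3 are expected to match for the first 100 bytes and differ at
 * offset 100.  The "(1)" cases compare only matching data, so REPE must
 * run RCX down to zero; the "(2)" cases cross the mismatch, so the loop
 * must stop one element past the first differing byte.
 */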
static void test_cmps_one(unsigned char *m1, unsigned char *m3)
{
	void *rsi, *rdi;
	long rcx, tmp;

	rsi = m1; rdi = m3; rcx = 30;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsb (1)");

	rsi = m1; rdi = m3; rcx = 30;
	asm volatile("or $1, %[tmp]\n\t" // clear ZF
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30,
	       "repe cmpsb (1.zf)");

	rsi = m1; rdi = m3; rcx = 15;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsw"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)");

	rsi = m1; rdi = m3; rcx = 7;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsl"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpsl (1)");

#ifdef __x86_64__
	rsi = m1; rdi = m3; rcx = 4;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsq"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)");
#endif

	rsi = m1; rdi = m3; rcx = 130;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101,
	       "repe cmpsb (2)");

	rsi = m1; rdi = m3; rcx = 65;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsw"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102,
	       "repe cmpsw (2)");

	rsi = m1; rdi = m3; rcx = 32;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsl"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104,
	       "repe cmpsl (2)");

#ifdef __x86_64__
	rsi = m1; rdi = m3; rcx = 16;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsq"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104,
	       "repe cmpsq (2)");
#endif
}

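/*
 * m2 and m3 hold identical data; m1 matches them for the first 100 bytes
 * and diverges from offset 100 onward.
 */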
static void test_cmps(void *mem)
{
	unsigned char *m1 = mem, *m2 = mem + 1024;
	unsigned char m3[1024];

	for (int i = 0; i < 100; ++i)
		m1[i] = m2[i] = m3[i] = i;
	for (int i = 100; i < 200; ++i)
		m1[i] = (m3[i] = m2[i] = i) + 1;
	test_cmps_one(m1, m3);
	test_cmps_one(m1, m2);
}

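/*
 * SCAS compares AL/AX/EAX/RAX against [RDI] and advances RDI by the
 * operand size; only the low bytes of the "a" inputs participate, so the
 * 0xff.. padding in them must be ignored.
 */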
static void test_scas(void *mem)
{
	bool z;
	void *di;

	*(uint64_t *)mem = 0x77665544332211;

	di = mem;
	asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff11));
	report(di == mem + 1 && z, "scasb match");

	di = mem;
	asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff54));
	report(di == mem + 1 && !z, "scasb mismatch");

	di = mem;
	asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff2211));
	report(di == mem + 2 && z, "scasw match");

	di = mem;
	asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xffdd11));
	report(di == mem + 2 && !z, "scasw mismatch");

	di = mem;
	asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"((ulong)0xff44332211ul));
	report(di == mem + 4 && z, "scasd match");

	di = mem;
	asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0x45332211));
	report(di == mem + 4 && !z, "scasd mismatch");

#ifdef __x86_64__
	di = mem;
	asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(0x77665544332211ul));
	report(di == mem + 8 && z, "scasq match");

	di = mem;
	asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(3));
	report(di == mem + 8 && !z, "scasq mismatch");
#endif
}

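/* INC/DEC/NOT/NEG on memory operands, with and without a LOCK prefix. */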
static void test_incdecnotneg(void *mem)
{
	unsigned long *m = mem, v = 1234;
	unsigned char *mb = mem, vb = 66;

	*m = 0;

	asm volatile ("incl %0":"+m"(*m));
	report(*m == 1, "incl");
	asm volatile ("decl %0":"+m"(*m));
	report(*m == 0, "decl");
	asm volatile ("incb %0":"+m"(*m));
	report(*m == 1, "incb");
	asm volatile ("decb %0":"+m"(*m));
	report(*m == 0, "decb");

	asm volatile ("lock incl %0":"+m"(*m));
	report(*m == 1, "lock incl");
	asm volatile ("lock decl %0":"+m"(*m));
	report(*m == 0, "lock decl");
	asm volatile ("lock incb %0":"+m"(*m));
	report(*m == 1, "lock incb");
	asm volatile ("lock decb %0":"+m"(*m));
	report(*m == 0, "lock decb");

	*m = v;

#ifdef __x86_64__
	asm ("lock negq %0" : "+m"(*m)); v = -v;
	report(*m == v, "lock negq");
	asm ("lock notq %0" : "+m"(*m)); v = ~v;
	report(*m == v, "lock notq");
#endif

	*mb = vb;

	asm ("lock negb %0" : "+m"(*mb)); vb = -vb;
	report(*mb == vb, "lock negb");
	asm ("lock notb %0" : "+m"(*mb)); vb = ~vb;
	report(*mb == vb, "lock notb");
}

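/*
 * SMSW stores the low 16 bits of CR0.  The memory form writes exactly two
 * bytes, so the surrounding bytes of the destination (pre-set below) must
 * be left untouched.
 */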
static void test_smsw(unsigned long *h_mem)
{
	char mem[16];
	unsigned short msw, msw_orig, *pmsw;
	int i, zero;

	msw_orig = read_cr0();

	asm("smsw %0" : "=r"(msw));
	report(msw == msw_orig, "smsw (1)");

	memset(mem, 0, 16);
	pmsw = (void *)mem;
	asm("smsw %0" : "=m"(pmsw[4]));
	zero = 1;
	for (i = 0; i < 8; ++i)
		if (i != 4 && pmsw[i])
			zero = 0;
	report(msw == pmsw[4] && zero, "smsw (2)");

	/* Trigger exit on smsw */
	*h_mem = -1ul;
	asm volatile("smsw %0" : "+m"(*h_mem));
	report(msw == (unsigned short)*h_mem &&
	       (*h_mem & ~0xfffful) == (-1ul & ~0xfffful), "smsw (3)");
}

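/*
 * LMSW loads only CR0 bits 0-3 (PE, MP, EM, TS) and, per the SDM, can set
 * but never clear CR0.PE.
 */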
static void test_lmsw(void)
{
	char mem[16];
	unsigned short msw, *pmsw;
	unsigned long cr0;

	cr0 = read_cr0();

	msw = cr0 ^ 8;
	asm("lmsw %0" : : "r"(msw));
	printf("before %lx after %lx\n", cr0, read_cr0());
	report((cr0 ^ read_cr0()) == 8, "lmsw (1)");

	pmsw = (void *)mem;
	*pmsw = cr0;
	asm("lmsw %0" : : "m"(*pmsw));
	printf("before %lx after %lx\n", cr0, read_cr0());
	report(cr0 == read_cr0(), "lmsw (2)");

	/* lmsw can't clear cr0.pe */
	msw = (cr0 & ~1ul) ^ 4;  /* change EM to force trap */
	asm("lmsw %0" : : "r"(msw));
	report((cr0 ^ read_cr0()) == 4 && (cr0 & 1), "lmsw (3)");

	/* back to normal */
	msw = cr0;
	asm("lmsw %0" : : "r"(msw));
}

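/*
 * With a memory operand and a register bit offset, BTC addresses bits
 * beyond the operand size (offset 66 lands in a[2]); an imm8 offset is
 * taken modulo the operand size, so "btcl $32" flips bit 0.
 */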
static void test_btc(void *mem)
{
	unsigned int *a = mem;

	memset(mem, 0, 4 * sizeof(unsigned int));

	asm ("btcl $32, %0" :: "m"(a[0]) : "memory");
	asm ("btcl $1, %0" :: "m"(a[1]) : "memory");
	asm ("btcl %1, %0" :: "m"(a[0]), "r"(66) : "memory");
	report(a[0] == 1 && a[1] == 2 && a[2] == 4, "btcl imm8, r/m");

	asm ("btcl %1, %0" :: "m"(a[3]), "r"(-1) : "memory");
	report(a[0] == 1 && a[1] == 2 && a[2] == 0x80000004, "btcl reg, r/m");

#ifdef __x86_64__
	asm ("btcq %1, %0" : : "m"(a[2]), "r"(-1l) : "memory");
	report(a[0] == 1 && a[1] == 0x80000002 && a[2] == 0x80000004 && a[3] == 0,
	       "btcq reg, r/m");
#endif
}

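/*
 * BSF/BSR find the lowest/highest set bit of the source and set ZF when
 * the source is zero (leaving the destination undefined).
 */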
static void test_bsfbsr(void *mem)
{
	unsigned eax, *meml = mem;
	unsigned short ax, *memw = mem;
#ifdef __x86_64__
	unsigned long rax, *memq = mem;
	unsigned char z;
#endif

	*memw = 0xc000;
	asm("bsfw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
	report(ax == 14, "bsfw r/m, reg");

	*meml = 0xc0000000;
	asm("bsfl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
	report(eax == 30, "bsfl r/m, reg");

#ifdef __x86_64__
	*memq = 0xc00000000000;
	asm("bsfq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
	report(rax == 46, "bsfq r/m, reg");

	*memq = 0;
	asm("bsfq %[mem], %[a]; setz %[z]"
	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
	report(z == 1, "bsfq r/m, reg (zf)");
#endif

	*memw = 0xc000;
	asm("bsrw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
	report(ax == 15, "bsrw r/m, reg");

	*meml = 0xc0000000;
	asm("bsrl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
	report(eax == 31, "bsrl r/m, reg");

#ifdef __x86_64__
	*memq = 0xc00000000000;
	asm("bsrq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
	report(rax == 47, "bsrq r/m, reg");

	*memq = 0;
	asm("bsrq %[mem], %[a]; setz %[z]"
	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
	report(z == 1, "bsrq r/m, reg (zf)");
#endif
}

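/*
 * IMUL in its two- and three-operand forms truncates the product to the
 * operand size; the expected values below keep the upper register bytes
 * intact for the 16-bit variants, which write only AX.
 */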
static void test_imul(uint64_t *mem)
{
	ulong a;

	*mem = 51; a = 0x1234567812345678ULL & -1ul;
	asm ("imulw %1, %%ax" : "+a"(a) : "m"(*mem));
	report(a == (0x12345678123439e8ULL & -1ul), "imul ax, mem");

	*mem = 51; a = 0x1234567812345678ULL & -1ul;
	asm ("imull %1, %%eax" : "+a"(a) : "m"(*mem));
	report(a == 0xa06d39e8, "imul eax, mem");

	*mem = 0x1234567812345678ULL; a = 0x8765432187654321ULL & -1ul;
	asm ("imulw $51, %1, %%ax" : "+a"(a) : "m"(*mem));
	report(a == (0x87654321876539e8ULL & -1ul), "imul ax, mem, imm8");

	*mem = 0x1234567812345678ULL;
	asm ("imull $51, %1, %%eax" : "+a"(a) : "m"(*mem));
	report(a == 0xa06d39e8, "imul eax, mem, imm8");

	*mem = 0x1234567812345678ULL; a = 0x8765432187654321ULL & -1ul;
	asm ("imulw $311, %1, %%ax" : "+a"(a) : "m"(*mem));
	report(a == (0x8765432187650bc8ULL & -1ul), "imul ax, mem, imm");

	*mem = 0x1234567812345678ULL;
	asm ("imull $311, %1, %%eax" : "+a"(a) : "m"(*mem));
	report(a == 0x1d950bc8, "imul eax, mem, imm");

#ifdef __x86_64__
	*mem = 51; a = 0x1234567812345678UL;
	asm ("imulq %1, %%rax" : "+a"(a) : "m"(*mem));
	report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem");

	*mem = 0x1234567812345678UL;
	asm ("imulq $51, %1, %%rax" : "+a"(a) : "m"(*mem));
	report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem, imm8");

	*mem = 0x1234567812345678UL;
	asm ("imulq $311, %1, %%rax" : "+a"(a) : "m"(*mem));
	report(a == 0x1D950BDE1D950BC8L, "imul rax, mem, imm");
#endif
}

typedef unsigned __attribute__((vector_size(16))) sse128;

static bool sseeq(uint32_t *v1, uint32_t *v2)
{
	bool ok = true;
	int i;

	for (i = 0; i < 4; ++i)
		ok &= v1[i] == v2[i];

	return ok;
}

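/*
 * Clearing CR0.EM/TS and setting CR4.OSFXSR (bit 9) is required before
 * SSE instructions can execute without faulting.
 */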
static __attribute__((target("sse2"))) void test_sse(uint32_t *mem)
{
	sse128 vv;
	uint32_t *v = (uint32_t *)&vv;

	write_cr0(read_cr0() & ~6); /* EM, TS */
	write_cr4(read_cr4() | 0x200); /* OSFXSR */
	memset(&vv, 0, sizeof(vv));

#define TEST_RW_SSE(insn) do { \
		v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4; \
		asm(insn " %1, %0" : "=m"(*mem) : "x"(vv) : "memory"); \
		report(sseeq(v, mem), insn " (read)"); \
		mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8; \
		asm(insn " %1, %0" : "=x"(vv) : "m"(*mem) : "memory"); \
		report(sseeq(v, mem), insn " (write)"); \
	} while (0)

	TEST_RW_SSE("movdqu");
	TEST_RW_SSE("movaps");
	TEST_RW_SSE("movapd");
	TEST_RW_SSE("movups");
	TEST_RW_SSE("movupd");
#undef TEST_RW_SSE
}

static void unaligned_movaps_handler(struct ex_regs *regs)
{
	extern char unaligned_movaps_cont;

	++exceptions;
	regs->rip = (ulong)&unaligned_movaps_cont;
}

static void cross_movups_handler(struct ex_regs *regs)
{
	extern char cross_movups_cont;

	++exceptions;
	regs->rip = (ulong)&cross_movups_cont;
}

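/*
 * The MOVUP* forms must tolerate unaligned and page-crossing accesses,
 * while an unaligned MOVAPS must raise #GP and a MOVUPS that crosses into
 * an unmapped page must raise #PF.  The handlers above count the
 * exceptions and skip past the faulting instruction.
 */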
static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem)
{
	sse128 vv;
	uint32_t *v = (uint32_t *)&vv;
	uint32_t *mem;
	uint8_t *bytes = cross_mem; // aligned on PAGE_SIZE*2
	void *page2 = (void *)(&bytes[4096]);
	struct pte_search search;
	pteval_t orig_pte;
	handler old;

	// setup memory for unaligned access
	mem = (uint32_t *)(&bytes[8]);

	// test unaligned access for movups, movupd and movaps
	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
	mem[0] = 5; mem[1] = 6; mem[2] = 8; mem[3] = 9;
	asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
	report(sseeq(v, mem), "movups unaligned");

	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
	mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8;
	asm("movupd %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
	report(sseeq(v, mem), "movupd unaligned");
	exceptions = 0;
	old = handle_exception(GP_VECTOR, unaligned_movaps_handler);
	asm("movaps %1, %0\n\t unaligned_movaps_cont:"
	    : "=m"(*mem) : "x"(vv));
	handle_exception(GP_VECTOR, old);
	report(exceptions == 1, "unaligned movaps exception");

	// setup memory for cross page access
	mem = (uint32_t *)(&bytes[4096-8]);
	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
	mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8;

	asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
	report(sseeq(v, mem), "movups unaligned crosspage");

	// invalidate second page
	search = find_pte_level(current_page_table(), page2, 1);
	orig_pte = *search.pte;
	install_pte(current_page_table(), 1, page2, 0, NULL);
	invlpg(page2);

	exceptions = 0;
	old = handle_exception(PF_VECTOR, cross_movups_handler);
	asm("movups %1, %0\n\t cross_movups_cont:" : "=m"(*mem) : "x"(vv) :
	    "memory");
	handle_exception(PF_VECTOR, old);
	report(exceptions == 1, "movups crosspage exception");

	// restore invalidated page
	install_pte(current_page_table(), 1, page2, orig_pte, NULL);
}

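/*
 * SHLD/SHRD shift the destination while filling the vacated bits from the
 * second operand, e.g. shifting 0x12345678 left by 3 pulls in the top
 * three bits (101b) of 0xaaaaaaaa.
 */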
static void test_shld_shrd(u32 *mem)
{
	*mem = 0x12345678;
	asm("shld %2, %1, %0" : "+m"(*mem) : "r"(0xaaaaaaaaU), "c"((u8)3));
	report(*mem == ((0x12345678 << 3) | 5), "shld (cl)");
	*mem = 0x12345678;
	asm("shrd %2, %1, %0" : "+m"(*mem) : "r"(0x55555555U), "c"((u8)3));
	report(*mem == ((0x12345678 >> 3) | (5u << 29)), "shrd (cl)");
}

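/*
 * Register-form SMSW honors the operand size: the 16-bit form writes only
 * the low word of the destination, while the 32- and 64-bit forms store
 * more of CR0.  KVM_FEP is a forced-emulation prefix that makes KVM
 * emulate the instruction rather than execute it natively.
 */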
static void test_smsw_reg(uint64_t *mem)
{
	unsigned long cr0 = read_cr0();
	unsigned long rax;
	const unsigned long in_rax = 0x1234567890abcdefull & -1ul;

	asm(KVM_FEP "smsww %w0\n\t" : "=a" (rax) : "0" (in_rax));
	report((u16)rax == (u16)cr0 && rax >> 16 == in_rax >> 16,
	       "16-bit smsw reg");

	asm(KVM_FEP "smswl %k0\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == (u32)cr0, "32-bit smsw reg");

#ifdef __x86_64__
	asm(KVM_FEP "smswq %q0\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == cr0, "64-bit smsw reg");
#endif
}

static void test_nop(uint64_t *mem)
{
	unsigned long rax;
	const unsigned long in_rax = 0x12345678ul;
	asm(KVM_FEP "nop\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == in_rax, "nop");
}

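/*
 * Writing 0 to DR6 and reading it back must return the architecturally
 * reserved-1 ("active low") bits; bit 16 is writable only on CPUs with
 * RTM and reads back as 1 everywhere else.
 */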
static void test_mov_dr(uint64_t *mem)
{
	unsigned long rax;

	asm(KVM_FEP "mov %0, %%dr6\n\t"
	    KVM_FEP "mov %%dr6, %0\n\t" : "=a" (rax) : "a" (0));

	if (this_cpu_has(X86_FEATURE_RTM))
		report(rax == (DR6_ACTIVE_LOW & ~DR6_RTM), "mov_dr6");
	else
		report(rax == DR6_ACTIVE_LOW, "mov_dr6");
}

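/*
 * 0x8d 0xc0 encodes LEA with a register source (ModRM.mod == 3), which
 * has no effective address and must raise #UD.
 */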
static void test_illegal_lea(void)
{
	unsigned int vector;

	asm volatile (ASM_TRY_FEP("1f")
		      ".byte 0x8d; .byte 0xc0\n\t"
		      "1:"
		      : : : "memory", "eax");

	vector = exception_vector();
	report(vector == UD_VECTOR,
	       "Wanted #UD on LEA with /reg, got vector = %u", vector);
}

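/*
 * mem maps the same MMIO page twice back to back, so a 16-bit access at
 * offset 4095 straddles the page boundary and must be split correctly by
 * the emulator.
 */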
static void test_crosspage_mmio(volatile uint8_t *mem)
{
	volatile uint16_t w, *pw;

	pw = (volatile uint16_t *)&mem[4095];
	mem[4095] = 0x99;
	mem[4096] = 0x77;
	asm volatile("mov %1, %0" : "=r"(w) : "m"(*pw) : "memory");
	report(w == 0x7799, "cross-page mmio read");
	asm volatile("mov %1, %0" : "=m"(*pw) : "r"((uint16_t)0x88aa));
	report(mem[4095] == 0xaa && mem[4096] == 0x88, "cross-page mmio write");
}

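/*
 * REP INSB into an MMIO buffer placed so the 1024-byte transfer crosses
 * the page boundary; every byte read should be the 0x99 previously
 * written to the test port.
 */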
static void test_string_io_mmio(volatile uint8_t *mem)
{
	/* Cross MMIO pages. */
	volatile uint8_t *mmio = mem + 4032;

	asm volatile("outw %%ax, %%dx \n\t" : : "a"(0x9999), "d"(TESTDEV_IO_PORT));

	asm volatile ("cld; rep insb" : : "d" (TESTDEV_IO_PORT), "D" (mmio), "c" (1024));

	report(mmio[1023] == 0x99, "string_io_mmio");
}

/* kvm doesn't allow lidt/lgdt from mmio, so the test is disabled */
#if 0
static void test_lgdt_lidt(volatile uint8_t *mem)
{
	struct descriptor_table_ptr orig, fresh = {};

	sgdt(&orig);
	*(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
		.limit = 0xf234,
		.base = 0x12345678abcd,
	};
	cli();
	asm volatile("lgdt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
	sgdt(&fresh);
	lgdt(&orig);
	sti();
	report(orig.limit == fresh.limit && orig.base == fresh.base, "lgdt (long address)");

	sidt(&orig);
	*(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
		.limit = 0x432f,
		.base = 0xdbca87654321,
	};
	cli();
	asm volatile("lidt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
	sidt(&fresh);
	lidt(&orig);
	sti();
	report(orig.limit == fresh.limit && orig.base == fresh.base, "lidt (long address)");
}
#endif

/* Broken emulation causes triple fault, which skips the other tests. */
#if 0
static void test_lldt(volatile uint16_t *mem)
{
	u64 gdt[] = { 0, /* null descriptor */
#ifdef __x86_64__
		0, /* ldt descriptor is 16 bytes in long mode */
#endif
		0x000f82000000ffffull /* ldt descriptor */
	};
	struct descriptor_table_ptr gdt_ptr = { .limit = sizeof(gdt) - 1,
						.base = (ulong)&gdt };
	struct descriptor_table_ptr orig_gdt;

	cli();
	sgdt(&orig_gdt);
	lgdt(&gdt_ptr);
	*mem = 0x8;
	asm volatile("lldt %0" : : "m"(*mem));
	lgdt(&orig_gdt);
	sti();
	report(sldt() == *mem, "lldt");
}
#endif

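/*
 * LTR must load the task register and set the busy bit (bit 41) in the
 * referenced GDT descriptor; the bit is cleared first so the load does
 * not fault on an already-busy TSS.
 */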
static void test_ltr(volatile uint16_t *mem)
{
	struct descriptor_table_ptr gdt_ptr;
	uint64_t *gdt, *trp;
	uint16_t tr = str();
	uint64_t busy_mask = (uint64_t)1 << 41;

	sgdt(&gdt_ptr);
	gdt = (uint64_t *)gdt_ptr.base;
	trp = &gdt[tr >> 3];
	*trp &= ~busy_mask;
	*mem = tr;
	asm volatile("ltr %0" : : "m"(*mem) : "memory");
	report(str() == tr && (*trp & busy_mask), "ltr");
}

static void test_mov(void *mem)
{
	unsigned long t1, t2;

	// test mov reg, r/m and mov r/m, reg
	t1 = 0x123456789abcdefull & -1ul;
	asm volatile("mov %[t1], (%[mem]) \n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t2 == (0x123456789abcdefull & -1ul), "mov reg, r/m (1)");
}

static void test_simplealu(u32 *mem)
{
	*mem = 0x1234;
	asm("or %1, %0" : "+m"(*mem) : "r"(0x8001));
	report(*mem == 0x9235, "or");
	asm("add %1, %0" : "+m"(*mem) : "r"(2));
	report(*mem == 0x9237, "add");
	asm("xor %1, %0" : "+m"(*mem) : "r"(0x1111));
	report(*mem == 0x8326, "xor");
	asm("sub %1, %0" : "+m"(*mem) : "r"(0x26));
	report(*mem == 0x8300, "sub");
	asm("clc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
	report(*mem == 0x8400, "adc(0)");
	asm("stc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
	report(*mem == 0x8501, "adc(1)");
	asm("clc; sbb %1, %0" : "+m"(*mem) : "r"(0));
	report(*mem == 0x8501, "sbb(0)");
	asm("stc; sbb %1, %0" : "+m"(*mem) : "r"(0));
	report(*mem == 0x8500, "sbb(1)");
	asm("and %1, %0" : "+m"(*mem) : "r"(0xfe77));
	report(*mem == 0x8400, "and");
	asm("test %1, %0" : "+m"(*mem) : "r"(0xf000));
	report(*mem == 0x8400, "test");
}

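/*
 * 0x0f 0x38 0xf0 0xc0 encodes MOVBE with a register source
 * (ModRM.mod == 3); MOVBE is memory-only, so this must raise #UD.
 */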
static void test_illegal_movbe(void)
{
	unsigned int vector;

	if (!this_cpu_has(X86_FEATURE_MOVBE)) {
		report_skip("MOVBE unsupported by CPU");
		return;
	}

	asm volatile(ASM_TRY("1f")
		     ".byte 0x0f; .byte 0x38; .byte 0xf0; .byte 0xc0;\n\t"
		     "1:"
		     : : : "memory", "rax");

	vector = exception_vector();
	report(vector == UD_VECTOR,
	       "Wanted #UD on MOVBE with /reg, got vector = %u", vector);
}

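/*
 * In 64-bit mode the LEA of a label below must use RIP-relative
 * addressing; 32-bit mode uses an absolute address.
 */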
#ifdef __x86_64__
#define RIP_RELATIVE "(%%rip)"
#else
#define RIP_RELATIVE ""
#endif

static void handle_db(struct ex_regs *regs)
{
	++exceptions;
	regs->rflags |= X86_EFLAGS_RF;
}

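/*
 * Arm DR0 as an execute breakpoint on the instruction following a MOV or
 * POP to SS.  MOV/POP SS blocking delays debug exceptions by one
 * instruction; whether that suppresses the code #DB entirely differs by
 * vendor (see below).
 */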
static void test_mov_pop_ss_code_db(void)
{
	handler old_db_handler = handle_exception(DB_VECTOR, handle_db);
	bool fep_available = is_fep_available();
	/* On Intel, code #DBs are inhibited when MOV/POP SS blocking is active. */
	int nr_expected = is_intel() ? 0 : 1;

	write_dr7(DR7_FIXED_1 |
		  DR7_GLOBAL_ENABLE_DRx(0) |
		  DR7_EXECUTE_DRx(0) |
		  DR7_LEN_1_DRx(0));

#define MOV_POP_SS_DB(desc, fep1, fep2, insn, store_ss, load_ss)	\
({									\
	unsigned long r;						\
									\
	exceptions = 0;							\
	asm volatile("lea 1f " RIP_RELATIVE ", %0\n\t"			\
		     "mov %0, %%dr0\n\t"				\
		     store_ss						\
		     fep1 load_ss					\
		     fep2 "1: xor %0, %0\n\t"				\
		     "2:"						\
		     : "=r" (r)						\
		     :							\
		     : "memory");					\
	report(exceptions == nr_expected && !r,				\
	       desc ": #DB %s after " insn " SS",			\
	       nr_expected ? "occurred" : "suppressed");		\
})

#define MOV_SS_DB(desc, fep1, fep2)					\
	MOV_POP_SS_DB(desc, fep1, fep2, "MOV",				\
		      "mov %%ss, %0\n\t", "mov %0, %%ss\n\t")

	MOV_SS_DB("no fep", "", "");
	if (fep_available) {
		MOV_SS_DB("fep MOV-SS", KVM_FEP, "");
		MOV_SS_DB("fep XOR", "", KVM_FEP);
		MOV_SS_DB("fep MOV-SS/fep XOR", KVM_FEP, KVM_FEP);
	}

	/* PUSH/POP SS are invalid in 64-bit mode. */
#ifndef __x86_64__
#define POP_SS_DB(desc, fep1, fep2)					\
	MOV_POP_SS_DB(desc, fep1, fep2, "POP",				\
		      "push %%ss\n\t", "pop %%ss\n\t")

	POP_SS_DB("no fep", "", "");
	if (fep_available) {
		POP_SS_DB("fep POP-SS", KVM_FEP, "");
		POP_SS_DB("fep XOR", "", KVM_FEP);
		POP_SS_DB("fep POP-SS/fep XOR", KVM_FEP, KVM_FEP);
	}
#endif

	write_dr7(DR7_FIXED_1);

	handle_exception(DB_VECTOR, old_db_handler);
}


int main(void)
{
	void *mem;
	void *cross_mem;

	if (!is_fep_available())
		report_skip("Skipping tests that require forced emulation, "
			    "use kvm.force_emulation_prefix=1 to enable");

	setup_vm();

	mem = alloc_vpages(2);
	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem);
	// install the page twice to test cross-page mmio
	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem + 4096);
	cross_mem = vmap(virt_to_phys(alloc_pages(2)), 2 * PAGE_SIZE);

	test_mov(mem);
	test_simplealu(mem);
	test_cmps(mem);
	test_scas(mem);
	test_smsw(mem);
	test_lmsw();
	test_stringio();
	test_incdecnotneg(mem);
	test_btc(mem);
	test_bsfbsr(mem);
	test_imul(mem);
	test_sse(mem);
	test_sse_exceptions(cross_mem);
	test_shld_shrd(mem);
	//test_lgdt_lidt(mem);
	//test_lldt(mem);
	test_ltr(mem);

	if (is_fep_available()) {
		test_smsw_reg(mem);
		test_nop(mem);
		test_mov_dr(mem);
		test_illegal_lea();
	}

	test_crosspage_mmio(mem);

	test_string_io_mmio(mem);
	test_illegal_movbe();
	test_mov_pop_ss_code_db();

#ifdef __x86_64__
	test_emulator_64(mem);
#endif
	return report_summary();
}