xref: /kvm-unit-tests/x86/emulator.c (revision c604fa931a1cb70c3649ac1b7223178fc79eab6a)
#include <asm/debugreg.h>

#include "ioram.h"
#include "vm.h"
#include "libcflat.h"
#include "desc.h"
#include "types.h"
#include "processor.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "usermode.h"

#define TESTDEV_IO_PORT 0xe0

#define MAGIC_NUM 0xdeadbeefdeadbeefUL
#define GS_BASE 0x400000

static int exceptions;

/* Forced emulation prefix, used to invoke the emulator unconditionally.  */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
#define KVM_FEP_LENGTH 5
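/*
 * The prefix is five bytes: ud2 (0f 0b) followed by the 'k','v','m'
 * signature.  When KVM runs with kvm.force_emulation_prefix=1, its #UD
 * intercept recognizes the signature, skips the prefix, and emulates the
 * instruction that follows; otherwise the ud2 raises a real #UD, which
 * record_no_fep() in main() uses to clear fep_available.
 */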
static int fep_available = 1;

struct regs {
	u64 rax, rbx, rcx, rdx;
	u64 rsi, rdi, rsp, rbp;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
	u64 rip, rflags;
};
struct regs inregs, outregs, save;

struct insn_desc {
	u64 ptr;
	size_t len;
};

static char st1[] = "abcdefghijklmnop";

static void test_stringio(void)
{
	unsigned char r = 0;
	asm volatile("cld \n\t"
		     "movw %0, %%dx \n\t"
		     "rep outsb \n\t"
		     : : "i"((short)TESTDEV_IO_PORT),
		       "S"(st1), "c"(sizeof(st1) - 1));
	asm volatile("inb %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
	report(r == st1[sizeof(st1) - 2], "outsb up"); /* last char */

	asm volatile("std \n\t"
		     "movw %0, %%dx \n\t"
		     "rep outsb \n\t"
		     : : "i"((short)TESTDEV_IO_PORT),
		       "S"(st1 + sizeof(st1) - 2), "c"(sizeof(st1) - 1));
	asm volatile("cld \n\t" : : );
	asm volatile("in %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
	report(r == st1[0], "outsb down");
}

static void test_cmps_one(unsigned char *m1, unsigned char *m3)
{
	void *rsi, *rdi;
	long rcx, tmp;

	rsi = m1; rdi = m3; rcx = 30;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsb (1)");

	rsi = m1; rdi = m3; rcx = 30;
	asm volatile("or $1, %[tmp]\n\t" // clear ZF
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30,
	       "repe cmpsb (1.zf)");

	rsi = m1; rdi = m3; rcx = 15;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsw"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)");

	rsi = m1; rdi = m3; rcx = 7;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsl"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpsl (1)");

	rsi = m1; rdi = m3; rcx = 4;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsq"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)");

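	/*
	 * The buffers are set up to first differ at byte 100 (m1[100] is
	 * 101, the other buffer holds 100), so "repe cmpsb" with rcx = 130
	 * runs 101 iterations before ZF clears: rcx = 130 - 101 = 29 and
	 * rsi/rdi advance by 101 bytes.  The wider variants stop on the
	 * element containing byte 100, hence the element-rounded offsets
	 * checked below.
	 */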
	rsi = m1; rdi = m3; rcx = 130;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101,
	       "repe cmpsb (2)");

	rsi = m1; rdi = m3; rcx = 65;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsw"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102,
	       "repe cmpsw (2)");

	rsi = m1; rdi = m3; rcx = 32;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsl"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104,
	       "repe cmpsl (2)");

	rsi = m1; rdi = m3; rcx = 16;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsq"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104,
	       "repe cmpsq (2)");

}

static void test_cmps(void *mem)
{
	unsigned char *m1 = mem, *m2 = mem + 1024;
	unsigned char m3[1024];

	for (int i = 0; i < 100; ++i)
		m1[i] = m2[i] = m3[i] = i;
	for (int i = 100; i < 200; ++i)
		m1[i] = (m3[i] = m2[i] = i) + 1;
	test_cmps_one(m1, m3);
	test_cmps_one(m1, m2);
}

static void test_scas(void *mem)
{
    bool z;
    void *di;

    *(ulong *)mem = 0x77665544332211;

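    /*
     * scas compares AL/AX/EAX/RAX with the value at %rdi; bits of %rax
     * above the operand size (e.g. the 0xff in 0xff11) must be ignored,
     * which is part of what these cases check.
     */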
    di = mem;
    asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff11));
    report(di == mem + 1 && z, "scasb match");

    di = mem;
    asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff54));
    report(di == mem + 1 && !z, "scasb mismatch");

    di = mem;
    asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff2211));
    report(di == mem + 2 && z, "scasw match");

    di = mem;
    asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xffdd11));
    report(di == mem + 2 && !z, "scasw mismatch");

    di = mem;
    asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff44332211ul));
    report(di == mem + 4 && z, "scasd match");

    di = mem;
    asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0x45332211));
    report(di == mem + 4 && !z, "scasd mismatch");

    di = mem;
    asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(0x77665544332211ul));
    report(di == mem + 8 && z, "scasq match");

    di = mem;
    asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(3));
    report(di == mem + 8 && !z, "scasq mismatch");
}

static void test_cr8(void)
{
	unsigned long src, dst;

	dst = 777;
	src = 3;
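	/*
	 * CR8 aliases the local APIC TPR; only bits 3:0 are defined, so
	 * the value 3 is expected to survive the round trip unchanged.
	 */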
	asm volatile("mov %[src], %%cr8; mov %%cr8, %[dst]"
		     : [dst]"+r"(dst), [src]"+r"(src));
	report(dst == 3 && src == 3, "mov %%cr8");
}

static void test_push(void *mem)
{
	unsigned long tmp;
	unsigned long *stack_top = mem + 4096;
	unsigned long *new_stack_top;
	unsigned long memw = 0x123456789abcdeful;

	memset(mem, 0x55, (void *)stack_top - mem);

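	/*
	 * Every pushq below stores a sign-extended 64-bit value, so the
	 * imm8 (-7) and imm32 (-7070707) encodings must both read back as
	 * full-width negative quadwords.
	 */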
	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "pushq $-7 \n\t"
		     "pushq %[reg] \n\t"
		     "pushq (%[mem]) \n\t"
		     "pushq $-7070707 \n\t"
		     "mov %%rsp, %[new_stack_top] \n\t"
		     "mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp), [new_stack_top]"=r"(new_stack_top)
		     : [stack_top]"r"(stack_top),
		       [reg]"r"(-17l), [mem]"r"(&memw)
		     : "memory");

	report(stack_top[-1] == -7ul, "push $imm8");
	report(stack_top[-2] == -17ul, "push %%reg");
	report(stack_top[-3] == 0x123456789abcdeful, "push mem");
	report(stack_top[-4] == -7070707, "push $imm");
}

static void test_pop(void *mem)
{
	unsigned long tmp, tmp3, rsp, rbp;
	unsigned long *stack_top = mem + 4096;
	unsigned long memw = 0x123456789abcdeful;
	static unsigned long tmp2;

	memset(mem, 0x55, (void *)stack_top - mem);

	asm volatile("pushq %[val] \n\t"
		     "popq (%[mem])"
		     : : [val]"m"(memw), [mem]"r"(mem) : "memory");
	report(*(unsigned long *)mem == memw, "pop mem");

	memw = 7 - memw;
	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "pushq %[val] \n\t"
		     "popq %[tmp2] \n\t"
		     "mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp), [tmp2]"=m"(tmp2)
		     : [val]"r"(memw), [stack_top]"r"(stack_top)
		     : "memory");
	report(tmp2 == memw, "pop mem (2)");

	memw = 129443 - memw;
	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "pushq %[val] \n\t"
		     "popq %[tmp2] \n\t"
		     "mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp), [tmp2]"=r"(tmp2)
		     : [val]"r"(memw), [stack_top]"r"(stack_top)
		     : "memory");
	report(tmp2 == memw, "pop reg");

	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "lea 1f(%%rip), %%rax \n\t"
		     "push %%rax \n\t"
		     "ret \n\t"
		     "2: jmp 2b \n\t"
		     "1: mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp) : [stack_top]"r"(stack_top)
		     : "memory", "rax");
	report_pass("ret");

	stack_top[-1] = 0x778899;
	asm volatile("mov %[stack_top], %%r8 \n\t"
		     "mov %%rsp, %%r9 \n\t"
		     "xchg %%rbp, %%r8 \n\t"
		     "leave \n\t"
		     "xchg %%rsp, %%r9 \n\t"
		     "xchg %%rbp, %%r8 \n\t"
		     "mov %%r9, %[tmp] \n\t"
		     "mov %%r8, %[tmp3]"
		     : [tmp]"=&r"(tmp), [tmp3]"=&r"(tmp3) : [stack_top]"r"(stack_top-1)
		     : "memory", "r8", "r9");
	report(tmp == (ulong)stack_top && tmp3 == 0x778899, "leave");

	rbp = 0xaa55aa55bb66bb66ULL;
	rsp = (unsigned long)stack_top;
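	/*
	 * "enter $0x1238, $0" expands to push %rbp; mov %rsp, %rbp;
	 * sub $0x1238, %rsp, which yields the expected rsp/rbp values and
	 * leaves the caller's fake rbp at stack_top[-1].
	 */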
	asm volatile("mov %[rsp], %%r8 \n\t"
		     "mov %[rbp], %%r9 \n\t"
		     "xchg %%rsp, %%r8 \n\t"
		     "xchg %%rbp, %%r9 \n\t"
		     "enter $0x1238, $0 \n\t"
		     "xchg %%rsp, %%r8 \n\t"
		     "xchg %%rbp, %%r9 \n\t"
		     "xchg %%r8, %[rsp] \n\t"
		     "xchg %%r9, %[rbp]"
		     : [rsp]"+a"(rsp), [rbp]"+b"(rbp) : : "memory", "r8", "r9");
	report(rsp == (unsigned long)stack_top - 8 - 0x1238
	       && rbp == (unsigned long)stack_top - 8
	       && stack_top[-1] == 0xaa55aa55bb66bb66ULL,
	       "enter");
}

static void test_ljmp(void *mem)
{
    unsigned char *m = mem;
    volatile int res = 1;

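    /*
     * Build a 10-byte far pointer at m: an 8-byte offset (&&jmpf)
     * followed by the 2-byte current %cs selector, then take it with a
     * 64-bit far jmp.  If the jump lands on jmpf, the "res = 0" below is
     * skipped and res is still 1.
     */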
    *(unsigned long**)m = &&jmpf;
    asm volatile ("data16 mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
    asm volatile ("rex64 ljmp *%0"::"m"(*m));
    res = 0;
jmpf:
    report(res, "ljmp");
}

static void test_incdecnotneg(void *mem)
{
    unsigned long *m = mem, v = 1234;
    unsigned char *mb = mem, vb = 66;

    *m = 0;

    asm volatile ("incl %0":"+m"(*m));
    report(*m == 1, "incl");
    asm volatile ("decl %0":"+m"(*m));
    report(*m == 0, "decl");
    asm volatile ("incb %0":"+m"(*m));
    report(*m == 1, "incb");
    asm volatile ("decb %0":"+m"(*m));
    report(*m == 0, "decb");

    asm volatile ("lock incl %0":"+m"(*m));
    report(*m == 1, "lock incl");
    asm volatile ("lock decl %0":"+m"(*m));
    report(*m == 0, "lock decl");
    asm volatile ("lock incb %0":"+m"(*m));
    report(*m == 1, "lock incb");
    asm volatile ("lock decb %0":"+m"(*m));
    report(*m == 0, "lock decb");

    *m = v;

    asm ("lock negq %0" : "+m"(*m)); v = -v;
    report(*m == v, "lock negq");
    asm ("lock notq %0" : "+m"(*m)); v = ~v;
    report(*m == v, "lock notq");

    *mb = vb;

    asm ("lock negb %0" : "+m"(*mb)); vb = -vb;
    report(*mb == vb, "lock negb");
    asm ("lock notb %0" : "+m"(*mb)); vb = ~vb;
    report(*mb == vb, "lock notb");
}

static void test_smsw(uint64_t *h_mem)
{
	char mem[16];
	unsigned short msw, msw_orig, *pmsw;
	int i, zero;

	msw_orig = read_cr0();

	asm("smsw %0" : "=r"(msw));
	report(msw == msw_orig, "smsw (1)");

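	/*
	 * smsw to a memory operand stores exactly 16 bits regardless of
	 * operand size, so only pmsw[4] may change while the neighbouring
	 * words stay zero.
	 */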
	memset(mem, 0, 16);
	pmsw = (void *)mem;
	asm("smsw %0" : "=m"(pmsw[4]));
	zero = 1;
	for (i = 0; i < 8; ++i)
		if (i != 4 && pmsw[i])
			zero = 0;
	report(msw == pmsw[4] && zero, "smsw (2)");

	/* Trigger exit on smsw */
	*h_mem = 0x12345678abcdeful;
	asm volatile("smsw %0" : "+m"(*h_mem));
	report(msw == (unsigned short)*h_mem &&
	       (*h_mem & ~0xfffful) == 0x12345678ab0000ul, "smsw (3)");
}

static void test_lmsw(void)
{
	char mem[16];
	unsigned short msw, *pmsw;
	unsigned long cr0;

	cr0 = read_cr0();

	msw = cr0 ^ 8;
	asm("lmsw %0" : : "r"(msw));
	printf("before %lx after %lx\n", cr0, read_cr0());
	report((cr0 ^ read_cr0()) == 8, "lmsw (1)");

	pmsw = (void *)mem;
	*pmsw = cr0;
	asm("lmsw %0" : : "m"(*pmsw));
	printf("before %lx after %lx\n", cr0, read_cr0());
	report(cr0 == read_cr0(), "lmsw (2)");

	/* lmsw can't clear cr0.pe */
	msw = (cr0 & ~1ul) ^ 4;  /* change EM to force trap */
	asm("lmsw %0" : : "r"(msw));
	report((cr0 ^ read_cr0()) == 4 && (cr0 & 1), "lmsw (3)");

	/* back to normal */
	msw = cr0;
	asm("lmsw %0" : : "r"(msw));
}

static void test_xchg(void *mem)
{
	unsigned long *memq = mem;
	unsigned long rax;

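	/*
	 * The 8- and 16-bit swaps must leave the remaining bytes of both
	 * %rax and memory untouched, while the 32-bit swap zero-extends
	 * the loaded value into %rax (hence rax == 0x89abcdef).
	 */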
	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%al, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcd10,
	       "xchg reg, r/m (1)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%ax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba987654cdef && *memq == 0x123456789ab3210,
	       "xchg reg, r/m (2)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%eax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x89abcdef && *memq == 0x123456776543210,
	       "xchg reg, r/m (3)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%rax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x123456789abcdef && *memq == 0xfedcba9876543210,
	       "xchg reg, r/m (4)");
}

static void test_xadd(void *mem)
{
	unsigned long *memq = mem;
	unsigned long rax;

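	/*
	 * xadd leaves reg+mem in the destination and the old memory value
	 * in the register, with the same partial-width rules as the xchg
	 * cases above (the 32-bit form zero-extends %rax).
	 */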
	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%al, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcdff,
	       "xadd reg, r/m (1)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%ax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba987654cdef && *memq == 0x123456789abffff,
	       "xadd reg, r/m (2)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%eax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x89abcdef && *memq == 0x1234567ffffffff,
	       "xadd reg, r/m (3)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%rax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x123456789abcdef && *memq == 0xffffffffffffffff,
	       "xadd reg, r/m (4)");
}

static void test_btc(void *mem)
{
	unsigned int *a = mem;

	memset(mem, 0, 4 * sizeof(unsigned int));

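	/*
	 * Immediate bit offsets wrap modulo the operand size (btcl $32
	 * toggles bit 0 of a[0]), while register offsets may address bits
	 * outside the addressed dword, including negative offsets (an
	 * offset of -1 applied to a[3] toggles the top bit of a[2]).
	 */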
	asm ("btcl $32, %0" :: "m"(a[0]) : "memory");
	asm ("btcl $1, %0" :: "m"(a[1]) : "memory");
	asm ("btcl %1, %0" :: "m"(a[0]), "r"(66) : "memory");
	report(a[0] == 1 && a[1] == 2 && a[2] == 4, "btcl imm8, r/m");

	asm ("btcl %1, %0" :: "m"(a[3]), "r"(-1) : "memory");
	report(a[0] == 1 && a[1] == 2 && a[2] == 0x80000004, "btcl reg, r/m");

	asm ("btcq %1, %0" : : "m"(a[2]), "r"(-1l) : "memory");
	report(a[0] == 1 && a[1] == 0x80000002 && a[2] == 0x80000004 && a[3] == 0,
	       "btcq reg, r/m");
}

static void test_bsfbsr(void *mem)
{
	unsigned long rax, *memq = mem;
	unsigned eax, *meml = mem;
	unsigned short ax, *memw = mem;
	unsigned char z;

	*memw = 0xc000;
	asm("bsfw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
	report(ax == 14, "bsfw r/m, reg");

	*meml = 0xc0000000;
	asm("bsfl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
	report(eax == 30, "bsfl r/m, reg");

	*memq = 0xc00000000000;
	asm("bsfq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
	report(rax == 46, "bsfq r/m, reg");

	*memq = 0;
	asm("bsfq %[mem], %[a]; setz %[z]"
	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
	report(z == 1, "bsfq r/m, reg (ZF)");

	*memw = 0xc000;
	asm("bsrw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
	report(ax == 15, "bsrw r/m, reg");

	*meml = 0xc0000000;
	asm("bsrl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
	report(eax == 31, "bsrl r/m, reg");

	*memq = 0xc00000000000;
	asm("bsrq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
	report(rax == 47, "bsrq r/m, reg");

	*memq = 0;
	asm("bsrq %[mem], %[a]; setz %[z]"
	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
	report(z == 1, "bsrq r/m, reg (ZF)");
}

static void test_imul(ulong *mem)
{
    ulong a;

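    /*
     * Worked example for the 16-bit form: 0x5678 * 51 = 0x1139e8, so
     * only the low word 0x39e8 lands in %ax and the upper bytes of %rax
     * are preserved.
     */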
    *mem = 51; a = 0x1234567812345678UL;
    asm ("imulw %1, %%ax" : "+a"(a) : "m"(*mem));
    report(a == 0x12345678123439e8, "imul ax, mem");

    *mem = 51; a = 0x1234567812345678UL;
    asm ("imull %1, %%eax" : "+a"(a) : "m"(*mem));
    report(a == 0xa06d39e8, "imul eax, mem");

    *mem = 51; a = 0x1234567812345678UL;
    asm ("imulq %1, %%rax" : "+a"(a) : "m"(*mem));
    report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem");

    *mem  = 0x1234567812345678UL; a = 0x8765432187654321L;
    asm ("imulw $51, %1, %%ax" : "+a"(a) : "m"(*mem));
    report(a == 0x87654321876539e8, "imul ax, mem, imm8");

    *mem = 0x1234567812345678UL;
    asm ("imull $51, %1, %%eax" : "+a"(a) : "m"(*mem));
    report(a == 0xa06d39e8, "imul eax, mem, imm8");

    *mem = 0x1234567812345678UL;
    asm ("imulq $51, %1, %%rax" : "+a"(a) : "m"(*mem));
    report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem, imm8");

    *mem  = 0x1234567812345678UL; a = 0x8765432187654321L;
    asm ("imulw $311, %1, %%ax" : "+a"(a) : "m"(*mem));
    report(a == 0x8765432187650bc8, "imul ax, mem, imm");

    *mem = 0x1234567812345678UL;
    asm ("imull $311, %1, %%eax" : "+a"(a) : "m"(*mem));
    report(a == 0x1d950bc8, "imul eax, mem, imm");

    *mem = 0x1234567812345678UL;
    asm ("imulq $311, %1, %%rax" : "+a"(a) : "m"(*mem));
    report(a == 0x1D950BDE1D950BC8L, "imul rax, mem, imm");
}

static void test_muldiv(long *mem)
{
    long a, d, aa, dd;
    u8 ex = 1;

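    /*
     * The first divq divides by *mem == 0 and must raise #DE; ASM_TRY
     * branches past the "movb $0, %2", so ex stays 1 and the inputs are
     * expected to be left unmodified by the faulting instruction.
     */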
    *mem = 0; a = 1; d = 2;
    asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
	 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
    report(a == 1 && d == 2 && ex, "divq (fault)");

    *mem = 987654321098765UL; a = 123456789012345UL; d = 123456789012345UL;
    asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
	 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
    report(a == 0x1ffffffb1b963b33ul && d == 0x273ba4384ede2ul && !ex,
           "divq (1)");
    aa = 0x1111111111111111; dd = 0x2222222222222222;
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mulb %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x1111111111110363 && d == dd, "mulb mem");
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mulw %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x111111111111c963 && d == 0x2222222222220369, "mulw mem");
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mull %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x962fc963 && d == 0x369d036, "mull mem");
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mulq %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x2fc962fc962fc963 && d == 0x369d0369d0369d0, "mulq mem");
}

typedef unsigned __attribute__((vector_size(16))) sse128;

static bool sseeq(uint32_t *v1, uint32_t *v2)
{
    bool ok = true;
    int i;

    for (i = 0; i < 4; ++i) {
	ok &= v1[i] == v2[i];
    }

    return ok;
}

static __attribute__((target("sse2"))) void test_sse(uint32_t *mem)
{
	sse128 vv;
	uint32_t *v = (uint32_t *)&vv;

	write_cr0(read_cr0() & ~6); /* clear CR0.MP and CR0.EM */
	write_cr4(read_cr4() | 0x200); /* OSFXSR */
	memset(&vv, 0, sizeof(vv));

#define TEST_RW_SSE(insn) do { \
		v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4; \
		asm(insn " %1, %0" : "=m"(*mem) : "x"(vv) : "memory"); \
		report(sseeq(v, mem), insn " (read)"); \
		mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8; \
		asm(insn " %1, %0" : "=x"(vv) : "m"(*mem) : "memory"); \
		report(sseeq(v, mem), insn " (write)"); \
} while (0)

	TEST_RW_SSE("movdqu");
	TEST_RW_SSE("movaps");
	TEST_RW_SSE("movapd");
	TEST_RW_SSE("movups");
	TEST_RW_SSE("movupd");
#undef TEST_RW_SSE
}

static void unaligned_movaps_handler(struct ex_regs *regs)
{
	extern char unaligned_movaps_cont;

	++exceptions;
	regs->rip = (ulong)&unaligned_movaps_cont;
}

static void cross_movups_handler(struct ex_regs *regs)
{
	extern char cross_movups_cont;

	++exceptions;
	regs->rip = (ulong)&cross_movups_cont;
}

static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem)
{
	sse128 vv;
	uint32_t *v = (uint32_t *)&vv;
	uint32_t *mem;
	uint8_t *bytes = cross_mem; // aligned on PAGE_SIZE*2
	void *page2 = (void *)(&bytes[4096]);
	struct pte_search search;
	pteval_t orig_pte;

	// setup memory for unaligned access
	mem = (uint32_t *)(&bytes[8]);

	// test unaligned access for movups, movupd and movaps
	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
	mem[0] = 5; mem[1] = 6; mem[2] = 8; mem[3] = 9;
	asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
	report(sseeq(v, mem), "movups unaligned");

	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
	mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8;
	asm("movupd %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
	report(sseeq(v, mem), "movupd unaligned");
	exceptions = 0;
	handle_exception(GP_VECTOR, unaligned_movaps_handler);
	asm("movaps %1, %0\n\t unaligned_movaps_cont:"
			: "=m"(*mem) : "x"(vv));
	handle_exception(GP_VECTOR, 0);
	report(exceptions == 1, "unaligned movaps exception");

	// setup memory for cross page access
	mem = (uint32_t *)(&bytes[4096-8]);
	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
	mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8;

	asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
	report(sseeq(v, mem), "movups unaligned crosspage");

	// invalidate second page
	search = find_pte_level(current_page_table(), page2, 1);
	orig_pte = *search.pte;
	install_pte(current_page_table(), 1, page2, 0, NULL);
	invlpg(page2);

	exceptions = 0;
	handle_exception(PF_VECTOR, cross_movups_handler);
	asm("movups %1, %0\n\t cross_movups_cont:" : "=m"(*mem) : "x"(vv) :
			"memory");
	handle_exception(PF_VECTOR, 0);
	report(exceptions == 1, "movups crosspage exception");

	// restore invalidated page
	install_pte(current_page_table(), 1, page2, orig_pte, NULL);
}

static void test_mmx(uint64_t *mem)
{
    uint64_t v;

    write_cr0(read_cr0() & ~6); /* clear CR0.MP and CR0.EM */
    asm volatile("fninit");
    v = 0x0102030405060708ULL;
    asm("movq %1, %0" : "=m"(*mem) : "y"(v));
    report(v == *mem, "movq (mmx, read)");
    *mem = 0x8070605040302010ull;
    asm("movq %1, %0" : "=y"(v) : "m"(*mem));
    report(v == *mem, "movq (mmx, write)");
}

static void test_rip_relative(unsigned *mem, char *insn_ram)
{
    /* movb $1, mem+2(%rip) */
    insn_ram[0] = 0xc6;
    insn_ram[1] = 0x05;
    *(unsigned *)&insn_ram[2] = 2 + (char *)mem - (insn_ram + 7);
    insn_ram[6] = 0x01;
    /* ret */
    insn_ram[7] = 0xc3;

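    /*
     * The disp32 at bytes 2..5 is relative to the end of the 7-byte movb
     * instruction; writing 0x01 to byte 2 of *mem yields the dword
     * 0x00010000 checked below.
     */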
    *mem = 0;
    asm("callq *%1" : "+m"(*mem) : "r"(insn_ram));
    report(*mem == 0x10000, "movb $imm, 0(%%rip)");
}

static void test_shld_shrd(u32 *mem)
{
    *mem = 0x12345678;
    asm("shld %2, %1, %0" : "+m"(*mem) : "r"(0xaaaaaaaaU), "c"((u8)3));
    report(*mem == ((0x12345678 << 3) | 5), "shld (cl)");
    *mem = 0x12345678;
    asm("shrd %2, %1, %0" : "+m"(*mem) : "r"(0x55555555U), "c"((u8)3));
    report(*mem == ((0x12345678 >> 3) | (5u << 29)), "shrd (cl)");
}

static void test_cmov(u32 *mem)
{
	u64 val;
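	/*
	 * cmpl sets ZF, so the cmovnel condition is false and no load
	 * happens; a 32-bit cmov must nevertheless zero-extend %eax into
	 * %rax, leaving 0x12345678 rather than 0x1234567812345678.
	 */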
	*mem = 0xabcdef12u;
	asm ("movq $0x1234567812345678, %%rax\n\t"
	     "cmpl %%eax, %%eax\n\t"
	     "cmovnel (%[mem]), %%eax\n\t"
	     "movq %%rax, %[val]\n\t"
	     : [val]"=r"(val) : [mem]"r"(mem) : "%rax", "cc");
	report(val == 0x12345678ul, "cmovnel");
}

static unsigned long rip_advance;

static void advance_rip_and_note_exception(struct ex_regs *regs)
{
    ++exceptions;
    regs->rip += rip_advance;
}

static void test_mmx_movq_mf(uint64_t *mem)
{
    /* movq %mm0, (%rax) */
    extern char movq_start, movq_end;

    uint16_t fcw = 0;  /* all exceptions unmasked */
    write_cr0(read_cr0() & ~6);  /* clear CR0.MP and CR0.EM */
    exceptions = 0;
    handle_exception(MF_VECTOR, advance_rip_and_note_exception);
    asm volatile("fninit; fldcw %0" : : "m"(fcw));
    asm volatile("fldz; fldz; fdivp"); /* generate exception */

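    /*
     * 0.0/0.0 leaves an unmasked invalid-operation exception pending;
     * the next MMX instruction must deliver it as #MF before executing.
     * The handler notes the exception and advances %rip so execution
     * can continue, leaving exceptions == 1.
     */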
    rip_advance = &movq_end - &movq_start;
    asm(KVM_FEP "movq_start: movq %mm0, (%rax); movq_end:");
    /* exit MMX mode */
    asm volatile("fnclex; emms");
    report(exceptions == 1, "movq mmx generates #MF");
    handle_exception(MF_VECTOR, 0);
}

static void test_jmp_noncanonical(uint64_t *mem)
{
	extern char nc_jmp_start, nc_jmp_end;

	*mem = 0x1111111111111111ul;

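	/*
	 * 0x1111111111111111 is a non-canonical address, so the indirect
	 * jmp must raise #GP with %rip still pointing at the jmp itself;
	 * the handler skips the whole instruction to resume.
	 */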
	exceptions = 0;
	rip_advance = &nc_jmp_end - &nc_jmp_start;
	handle_exception(GP_VECTOR, advance_rip_and_note_exception);
	asm volatile ("nc_jmp_start: jmp *%0; nc_jmp_end:" : : "m"(*mem));
	report(exceptions == 1, "jump to non-canonical address");
	handle_exception(GP_VECTOR, 0);
}

static void test_movabs(uint64_t *mem)
{
    /* mov $0x9090909090909090, %rcx */
    unsigned long rcx;
    asm(KVM_FEP "mov $0x9090909090909090, %0" : "=c" (rcx) : "0" (0));
    report(rcx == 0x9090909090909090, "64-bit mov imm2");
}

static void test_smsw_reg(uint64_t *mem)
{
	unsigned long cr0 = read_cr0();
	unsigned long rax;
	const unsigned long in_rax = 0x1234567890abcdeful;

	asm(KVM_FEP "smsww %w0\n\t" : "=a" (rax) : "0" (in_rax));
	report((u16)rax == (u16)cr0 && rax >> 16 == in_rax >> 16,
	       "16-bit smsw reg");

	asm(KVM_FEP "smswl %k0\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == (u32)cr0, "32-bit smsw reg");

	asm(KVM_FEP "smswq %q0\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == cr0, "64-bit smsw reg");
}

static void test_nop(uint64_t *mem)
{
	unsigned long rax;
	const unsigned long in_rax = 0x1234567890abcdeful;
	asm(KVM_FEP "nop\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == in_rax, "nop");
}

static void test_mov_dr(uint64_t *mem)
{
	unsigned long rax;

	asm(KVM_FEP "movq %0, %%dr6\n\t"
	    KVM_FEP "movq %%dr6, %0\n\t" : "=a" (rax) : "a" (0));

	if (this_cpu_has(X86_FEATURE_RTM))
		report(rax == (DR6_ACTIVE_LOW & ~DR6_RTM), "mov_dr6");
	else
		report(rax == DR6_ACTIVE_LOW, "mov_dr6");
}

static void test_push16(uint64_t *mem)
{
	uint64_t rsp1, rsp2;
	uint16_t r;

	asm volatile (	"movq %%rsp, %[rsp1]\n\t"
			"pushw %[v]\n\t"
			"popw %[r]\n\t"
			"movq %%rsp, %[rsp2]\n\t"
			"movq %[rsp1], %%rsp\n\t" :
			[rsp1]"=r"(rsp1), [rsp2]"=r"(rsp2), [r]"=r"(r)
			: [v]"m"(*mem) : "memory");
	report(rsp1 == rsp2, "push16");
}

static void test_crosspage_mmio(volatile uint8_t *mem)
{
    volatile uint16_t w, *pw;

    pw = (volatile uint16_t *)&mem[4095];
    mem[4095] = 0x99;
    mem[4096] = 0x77;
    asm volatile("mov %1, %0" : "=r"(w) : "m"(*pw) : "memory");
    report(w == 0x7799, "cross-page mmio read");
    asm volatile("mov %1, %0" : "=m"(*pw) : "r"((uint16_t)0x88aa));
    report(mem[4095] == 0xaa && mem[4096] == 0x88, "cross-page mmio write");
}

static void test_string_io_mmio(volatile uint8_t *mem)
{
	/* The 1024-byte insb starting at offset 4032 crosses the MMIO page
	 * boundary at offset 4096. */
	volatile uint8_t *mmio = mem + 4032;

	asm volatile("outw %%ax, %%dx  \n\t" : : "a"(0x9999), "d"(TESTDEV_IO_PORT));

	asm volatile ("cld; rep insb" : : "d" (TESTDEV_IO_PORT), "D" (mmio), "c" (1024));

	report(mmio[1023] == 0x99, "string_io_mmio");
}

/* kvm doesn't allow lidt/lgdt from mmio, so the test is disabled */
#if 0
static void test_lgdt_lidt(volatile uint8_t *mem)
{
    struct descriptor_table_ptr orig, fresh = {};

    sgdt(&orig);
    *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
	.limit = 0xf234,
	.base = 0x12345678abcd,
    };
    cli();
    asm volatile("lgdt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
    sgdt(&fresh);
    lgdt(&orig);
    sti();
    report(orig.limit == fresh.limit && orig.base == fresh.base,
           "lgdt (long address)");

    sidt(&orig);
    *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
	.limit = 0x432f,
	.base = 0xdbca87654321,
    };
    cli();
    asm volatile("lidt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
    sidt(&fresh);
    lidt(&orig);
    sti();
    report(orig.limit == fresh.limit && orig.base == fresh.base,
           "lidt (long address)");
}
#endif

static void ss_bad_rpl(struct ex_regs *regs)
{
    extern char ss_bad_rpl_cont;

    ++exceptions;
    regs->rip = (ulong)&ss_bad_rpl_cont;
}

static void test_sreg(volatile uint16_t *mem)
{
    u16 ss = read_ss();

    // check for null segment load
    *mem = 0;
    asm volatile("mov %0, %%ss" : : "m"(*mem));
    report(read_ss() == 0, "mov null, %%ss");

    // check for exception when ss.rpl != cpl on null segment load
    exceptions = 0;
    handle_exception(GP_VECTOR, ss_bad_rpl);
    *mem = 3;
    asm volatile("mov %0, %%ss; ss_bad_rpl_cont:" : : "m"(*mem));
    report(exceptions == 1 && read_ss() == 0,
           "mov null, %%ss (with ss.rpl != cpl)");
    handle_exception(GP_VECTOR, 0);
    write_ss(ss);
}

static uint64_t usr_gs_mov(void)
{
    static uint64_t dummy = MAGIC_NUM;
    uint64_t dummy_ptr = (uint64_t)&dummy;
    uint64_t ret;

    dummy_ptr -= GS_BASE;
    asm volatile("mov %%gs:(%%rcx), %%rax" : "=a"(ret): "c"(dummy_ptr) :);

    return ret;
}

static void test_iret(void)
{
    uint64_t val;
    bool raised_vector;

    /* Update GS base to 4MiB */
    wrmsr(MSR_GS_BASE, GS_BASE);

    /*
     * Per the SDM, when iret returns to an outer privilege level, any
     * of the data segment registers (ES, FS, GS, and DS) whose DPL is
     * lower than the new CPL is loaded with a null selector.
     *
     * In our test case, GS becomes null; the access through %gs in
     * usr_gs_mov() must still succeed, since 64-bit mode does not fault
     * on null data selectors and the GS base comes from MSR_GS_BASE.
     */
    val = run_in_user((usermode_func)usr_gs_mov, GP_VECTOR,
                      0, 0, 0, 0, &raised_vector);

    report(val == MAGIC_NUM, "Test ret/iret with a nullified segment");
}

/* Broken emulation causes triple fault, which skips the other tests. */
#if 0
static void test_lldt(volatile uint16_t *mem)
{
    u64 gdt[] = { 0, /* null descriptor */
#ifdef __x86_64__
		  0, /* ldt descriptor is 16 bytes in long mode */
#endif
		  0x0000f82000000ffffull /* ldt descriptor */ };
    struct descriptor_table_ptr gdt_ptr = { .limit = sizeof(gdt) - 1,
					    .base = (ulong)&gdt };
    struct descriptor_table_ptr orig_gdt;

    cli();
    sgdt(&orig_gdt);
    lgdt(&gdt_ptr);
    *mem = 0x8;
    asm volatile("lldt %0" : : "m"(*mem));
    lgdt(&orig_gdt);
    sti();
    report(sldt() == *mem, "lldt");
}
#endif

static void test_ltr(volatile uint16_t *mem)
{
    struct descriptor_table_ptr gdt_ptr;
    uint64_t *gdt, *trp;
    uint16_t tr = str();
    uint64_t busy_mask = (uint64_t)1 << 41;

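    /*
     * Bit 41 of the TSS descriptor is the busy bit of the type field;
     * ltr faults on a busy TSS, so clear it first and verify that
     * reloading TR sets it again.
     */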
    sgdt(&gdt_ptr);
    gdt = (uint64_t *)gdt_ptr.base;
    trp = &gdt[tr >> 3];
    *trp &= ~busy_mask;
    *mem = tr;
    asm volatile("ltr %0" : : "m"(*mem) : "memory");
    report(str() == tr && (*trp & busy_mask), "ltr");
}

static void test_simplealu(u32 *mem)
{
    *mem = 0x1234;
    asm("or %1, %0" : "+m"(*mem) : "r"(0x8001));
    report(*mem == 0x9235, "or");
    asm("add %1, %0" : "+m"(*mem) : "r"(2));
    report(*mem == 0x9237, "add");
    asm("xor %1, %0" : "+m"(*mem) : "r"(0x1111));
    report(*mem == 0x8326, "xor");
    asm("sub %1, %0" : "+m"(*mem) : "r"(0x26));
    report(*mem == 0x8300, "sub");
    asm("clc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
    report(*mem == 0x8400, "adc(0)");
    asm("stc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
    report(*mem == 0x8501, "adc(1)");
    asm("clc; sbb %1, %0" : "+m"(*mem) : "r"(0));
    report(*mem == 0x8501, "sbb(0)");
    asm("stc; sbb %1, %0" : "+m"(*mem) : "r"(0));
    report(*mem == 0x8500, "sbb(1)");
    asm("and %1, %0" : "+m"(*mem) : "r"(0xfe77));
    report(*mem == 0x8400, "and");
    asm("test %1, %0" : "+m"(*mem) : "r"(0xf000));
    report(*mem == 0x8400, "test");
}

static void illegal_movbe_handler(struct ex_regs *regs)
{
	extern char bad_movbe_cont;

	++exceptions;
	regs->rip = (ulong)&bad_movbe_cont;
}

static void test_illegal_movbe(void)
{
	if (!this_cpu_has(X86_FEATURE_MOVBE)) {
		report_skip("illegal movbe");
		return;
	}

	exceptions = 0;
	handle_exception(UD_VECTOR, illegal_movbe_handler);
	asm volatile(".byte 0x0f; .byte 0x38; .byte 0xf0; .byte 0xc0;\n\t"
		     " bad_movbe_cont:" : : : "rax");
	report(exceptions == 1, "illegal movbe");
	handle_exception(UD_VECTOR, 0);
}

static void record_no_fep(struct ex_regs *regs)
{
	fep_available = 0;
	regs->rip += KVM_FEP_LENGTH;
}

int main(void)
{
	void *mem;
	void *insn_page;
	void *insn_ram;
	void *cross_mem;
	unsigned long t1, t2;

	setup_vm();
	handle_exception(UD_VECTOR, record_no_fep);
	asm(KVM_FEP "nop");
	handle_exception(UD_VECTOR, 0);
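	/*
	 * If the probe above raised a real #UD, record_no_fep() cleared
	 * fep_available and the forced-emulation-only tests are skipped
	 * further down.
	 */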

	mem = alloc_vpages(2);
	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem);
	// install the page twice to test cross-page mmio
	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem + 4096);
	insn_page = alloc_page();
	insn_ram = vmap(virt_to_phys(insn_page), 4096);
	cross_mem = vmap(virt_to_phys(alloc_pages(2)), 2 * PAGE_SIZE);

	// test mov reg, r/m and mov r/m, reg
	t1 = 0x123456789abcdef;
	asm volatile("mov %[t1], (%[mem]) \n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t2 == 0x123456789abcdef, "mov reg, r/m (1)");

	test_simplealu(mem);
	test_cmps(mem);
	test_scas(mem);

	test_push(mem);
	test_pop(mem);

	test_xchg(mem);
	test_xadd(mem);

	test_cr8();

	test_smsw(mem);
	test_lmsw();
	test_ljmp(mem);
	test_stringio();
	test_incdecnotneg(mem);
	test_btc(mem);
	test_bsfbsr(mem);
	test_imul(mem);
	test_muldiv(mem);
	test_sse(mem);
	test_sse_exceptions(cross_mem);
	test_mmx(mem);
	test_rip_relative(mem, insn_ram);
	test_shld_shrd(mem);
	//test_lgdt_lidt(mem);
	test_sreg(mem);
	test_iret();
	//test_lldt(mem);
	test_ltr(mem);
	test_cmov(mem);

	if (fep_available) {
		test_mmx_movq_mf(mem);
		test_movabs(mem);
		test_smsw_reg(mem);
		test_nop(mem);
		test_mov_dr(mem);
	} else {
		report_skip("skipping register-only tests, "
			    "use kvm.force_emulation_prefix=1 to enable");
	}

	test_push16(mem);
	test_crosspage_mmio(mem);

	test_string_io_mmio(mem);

	test_jmp_noncanonical(mem);
	test_illegal_movbe();

	return report_summary();
}