1 #include "ioram.h"
2 #include "vm.h"
3 #include "libcflat.h"
4 #include "desc.h"
5 #include "types.h"
6 #include "processor.h"
7 #include "vmalloc.h"
8 #include "alloc_page.h"
9 #include "usermode.h"
10 
11 #define TESTDEV_IO_PORT 0xe0
12 
13 #define MAGIC_NUM 0xdeadbeefdeadbeefUL
14 #define GS_BASE 0x400000
15 
16 static int exceptions;
17 
18 /* Forced emulation prefix, used to invoke the emulator unconditionally.  */
19 #define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
20 #define KVM_FEP_LENGTH 5
21 static int fep_available = 1;
22 
23 struct regs {
24 	u64 rax, rbx, rcx, rdx;
25 	u64 rsi, rdi, rsp, rbp;
26 	u64 r8, r9, r10, r11;
27 	u64 r12, r13, r14, r15;
28 	u64 rip, rflags;
29 };
30 struct regs inregs, outregs, save;
31 
32 struct insn_desc {
33 	u64 ptr;
34 	size_t len;
35 };
36 
37 static char st1[] = "abcdefghijklmnop";
38 
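/*
 * Echo st1 through TESTDEV_IO_PORT with "rep outsb", first forward (cld)
 * and then backward (std), reading the port back after each run.  This
 * assumes the emulated test device latches the last byte written, so the
 * read-back tells us which byte went out last.
 */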
39 static void test_stringio(void)
40 {
41 	unsigned char r = 0;
42 	asm volatile("cld \n\t"
43 		     "movw %0, %%dx \n\t"
44 		     "rep outsb \n\t"
45 		     : : "i"((short)TESTDEV_IO_PORT),
46 		       "S"(st1), "c"(sizeof(st1) - 1));
47 	asm volatile("inb %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
48 	report(r == st1[sizeof(st1) - 2], "outsb up"); /* last char */
49 
50 	asm volatile("std \n\t"
51 		     "movw %0, %%dx \n\t"
52 		     "rep outsb \n\t"
53 		     : : "i"((short)TESTDEV_IO_PORT),
54 		       "S"(st1 + sizeof(st1) - 2), "c"(sizeof(st1) - 1));
55 	asm volatile("cld \n\t" : : );
56 	asm volatile("in %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
57 	report(r == st1[0], "outsb down");
58 }
59 
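/*
 * Run repe cmps{b,w,l,q} over two buffers.  The caller (test_cmps)
 * guarantees that the buffers match for the first 100 bytes and first
 * differ at offset 100, so the expected RCX/RSI/RDI values below follow
 * directly from where each comparison stops.
 */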
60 static void test_cmps_one(unsigned char *m1, unsigned char *m3)
61 {
62 	void *rsi, *rdi;
63 	long rcx, tmp;
64 
65 	rsi = m1; rdi = m3; rcx = 30;
66 	asm volatile("xor %[tmp], %[tmp] \n\t"
67 		     "repe cmpsb"
68 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
69 		     : : "cc");
70 	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe/cmpsb (1)");
71 
72 	rsi = m1; rdi = m3; rcx = 30;
73 	asm volatile("or $1, %[tmp]\n\t" // clear ZF
74 		     "repe cmpsb"
75 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
76 		     : : "cc");
77 	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30,
78 	       "repe cmpsb (1.zf)");
79 
80 	rsi = m1; rdi = m3; rcx = 15;
81 	asm volatile("xor %[tmp], %[tmp] \n\t"
82 		     "repe cmpsw"
83 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
84 		     : : "cc");
85 	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)");
86 
87 	rsi = m1; rdi = m3; rcx = 7;
88 	asm volatile("xor %[tmp], %[tmp] \n\t"
89 		     "repe cmpsl"
90 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
91 		     : : "cc");
92 	report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpsl (1)");
93 
94 	rsi = m1; rdi = m3; rcx = 4;
95 	asm volatile("xor %[tmp], %[tmp] \n\t"
96 		     "repe cmpsq"
97 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
98 		     : : "cc");
99 	report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)");
100 
101 	rsi = m1; rdi = m3; rcx = 130;
102 	asm volatile("xor %[tmp], %[tmp] \n\t"
103 		     "repe cmpsb"
104 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
105 		     : : "cc");
106 	report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101,
107 	       "repe cmpsb (2)");
108 
109 	rsi = m1; rdi = m3; rcx = 65;
110 	asm volatile("xor %[tmp], %[tmp] \n\t"
111 		     "repe cmpsw"
112 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
113 		     : : "cc");
114 	report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102,
115 	       "repe cmpsw (2)");
116 
117 	rsi = m1; rdi = m3; rcx = 32;
118 	asm volatile("xor %[tmp], %[tmp] \n\t"
119 		     "repe cmpsl"
120 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
121 		     : : "cc");
122 	report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104,
123 	       "repe cmpsl (2)");
124 
125 	rsi = m1; rdi = m3; rcx = 16;
126 	asm volatile("xor %[tmp], %[tmp] \n\t"
127 		     "repe cmpsq"
128 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
129 		     : : "cc");
130 	report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104,
131 	       "repe cmpsq (2)");
132 
133 }
134 
135 static void test_cmps(void *mem)
136 {
137 	unsigned char *m1 = mem, *m2 = mem + 1024;
138 	unsigned char m3[1024];
139 
140 	for (int i = 0; i < 100; ++i)
141 		m1[i] = m2[i] = m3[i] = i;
142 	for (int i = 100; i < 200; ++i)
143 		m1[i] = (m3[i] = m2[i] = i) + 1;
144 	test_cmps_one(m1, m3);
145 	test_cmps_one(m1, m2);
146 }
147 
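/*
 * scas{b,w,l,q} against a known 8-byte pattern: check both the ZF result
 * and that RDI advances by the operand size.  Bits of RAX above the
 * operand size (e.g. the 0xff in 0xff11) must be ignored by the compare.
 */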
148 static void test_scas(void *mem)
149 {
150     bool z;
151     void *di;
152 
153     *(ulong *)mem = 0x77665544332211;
154 
155     di = mem;
156     asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff11));
157     report(di == mem + 1 && z, "scasb match");
158 
159     di = mem;
160     asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff54));
161     report(di == mem + 1 && !z, "scasb mismatch");
162 
163     di = mem;
164     asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff2211));
165     report(di == mem + 2 && z, "scasw match");
166 
167     di = mem;
168     asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xffdd11));
169     report(di == mem + 2 && !z, "scasw mismatch");
170 
171     di = mem;
172     asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff44332211ul));
173     report(di == mem + 4 && z, "scasd match");
174 
175     di = mem;
176     asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0x45332211));
177     report(di == mem + 4 && !z, "scasd mismatch");
178 
179     di = mem;
180     asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(0x77665544332211ul));
181     report(di == mem + 8 && z, "scasq match");
182 
183     di = mem;
184     asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(3));
185     report(di == mem + 8 && !z, "scasq mismatch");
186 }
187 
188 static void test_cr8(void)
189 {
190 	unsigned long src, dst;
191 
192 	dst = 777;
193 	src = 3;
194 	asm volatile("mov %[src], %%cr8; mov %%cr8, %[dst]"
195 		     : [dst]"+r"(dst), [src]"+r"(src));
196 	report(dst == 3 && src == 3, "mov %%cr8");
197 }
198 
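/*
 * Switch to a scratch stack and push an imm8, a register, a memory
 * operand and an imm32; then verify each 64-bit slot left on the stack.
 */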
199 static void test_push(void *mem)
200 {
201 	unsigned long tmp;
202 	unsigned long *stack_top = mem + 4096;
203 	unsigned long *new_stack_top;
204 	unsigned long memw = 0x123456789abcdeful;
205 
206 	memset(mem, 0x55, (void *)stack_top - mem);
207 
208 	asm volatile("mov %%rsp, %[tmp] \n\t"
209 		     "mov %[stack_top], %%rsp \n\t"
210 		     "pushq $-7 \n\t"
211 		     "pushq %[reg] \n\t"
212 		     "pushq (%[mem]) \n\t"
213 		     "pushq $-7070707 \n\t"
214 		     "mov %%rsp, %[new_stack_top] \n\t"
215 		     "mov %[tmp], %%rsp"
216 		     : [tmp]"=&r"(tmp), [new_stack_top]"=r"(new_stack_top)
217 		     : [stack_top]"r"(stack_top),
218 		       [reg]"r"(-17l), [mem]"r"(&memw)
219 		     : "memory");
220 
221 	report(stack_top[-1] == -7ul, "push $imm8");
222 	report(stack_top[-2] == -17ul, "push %%reg");
223 	report(stack_top[-3] == 0x123456789abcdeful, "push mem");
224 	report(stack_top[-4] == -7070707, "push $imm");
225 }
226 
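/*
 * Exercise popq to memory and to a register, ret through a pushed return
 * address, and leave/enter frame handling; the sequences that need a
 * known stack run on the scratch stack so RSP is restored afterwards.
 */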
227 static void test_pop(void *mem)
228 {
229 	unsigned long tmp, tmp3, rsp, rbp;
230 	unsigned long *stack_top = mem + 4096;
231 	unsigned long memw = 0x123456789abcdeful;
232 	static unsigned long tmp2;
233 
234 	memset(mem, 0x55, (void *)stack_top - mem);
235 
236 	asm volatile("pushq %[val] \n\t"
237 		     "popq (%[mem])"
238 		     : : [val]"m"(memw), [mem]"r"(mem) : "memory");
239 	report(*(unsigned long *)mem == memw, "pop mem");
240 
241 	memw = 7 - memw;
242 	asm volatile("mov %%rsp, %[tmp] \n\t"
243 		     "mov %[stack_top], %%rsp \n\t"
244 		     "pushq %[val] \n\t"
245 		     "popq %[tmp2] \n\t"
246 		     "mov %[tmp], %%rsp"
247 		     : [tmp]"=&r"(tmp), [tmp2]"=m"(tmp2)
248 		     : [val]"r"(memw), [stack_top]"r"(stack_top)
249 		     : "memory");
250 	report(tmp2 == memw, "pop mem (2)");
251 
252 	memw = 129443 - memw;
253 	asm volatile("mov %%rsp, %[tmp] \n\t"
254 		     "mov %[stack_top], %%rsp \n\t"
255 		     "pushq %[val] \n\t"
256 		     "popq %[tmp2] \n\t"
257 		     "mov %[tmp], %%rsp"
258 		     : [tmp]"=&r"(tmp), [tmp2]"=r"(tmp2)
259 		     : [val]"r"(memw), [stack_top]"r"(stack_top)
260 		     : "memory");
261 	report(tmp2 == memw, "pop reg");
262 
263 	asm volatile("mov %%rsp, %[tmp] \n\t"
264 		     "mov %[stack_top], %%rsp \n\t"
265 		     "lea 1f(%%rip), %%rax \n\t"
266 		     "push %%rax \n\t"
267 		     "ret \n\t"
268 		     "2: jmp 2b \n\t"
269 		     "1: mov %[tmp], %%rsp"
270 		     : [tmp]"=&r"(tmp) : [stack_top]"r"(stack_top)
271 		     : "memory", "rax");
272 	report_pass("ret");
273 
274 	stack_top[-1] = 0x778899;
275 	asm volatile("mov %[stack_top], %%r8 \n\t"
276 		     "mov %%rsp, %%r9 \n\t"
277 		     "xchg %%rbp, %%r8 \n\t"
278 		     "leave \n\t"
279 		     "xchg %%rsp, %%r9 \n\t"
280 		     "xchg %%rbp, %%r8 \n\t"
281 		     "mov %%r9, %[tmp] \n\t"
282 		     "mov %%r8, %[tmp3]"
283 		     : [tmp]"=&r"(tmp), [tmp3]"=&r"(tmp3) : [stack_top]"r"(stack_top-1)
284 		     : "memory", "r8", "r9");
285 	report(tmp == (ulong)stack_top && tmp3 == 0x778899, "leave");
286 
287 	rbp = 0xaa55aa55bb66bb66ULL;
288 	rsp = (unsigned long)stack_top;
289 	asm volatile("mov %[rsp], %%r8 \n\t"
290 		     "mov %[rbp], %%r9 \n\t"
291 		     "xchg %%rsp, %%r8 \n\t"
292 		     "xchg %%rbp, %%r9 \n\t"
293 		     "enter $0x1238, $0 \n\t"
294 		     "xchg %%rsp, %%r8 \n\t"
295 		     "xchg %%rbp, %%r9 \n\t"
296 		     "xchg %%r8, %[rsp] \n\t"
297 		     "xchg %%r9, %[rbp]"
298 		     : [rsp]"+a"(rsp), [rbp]"+b"(rbp) : : "memory", "r8", "r9");
299 	report(rsp == (unsigned long)stack_top - 8 - 0x1238
300 	       && rbp == (unsigned long)stack_top - 8
301 	       && stack_top[-1] == 0xaa55aa55bb66bb66ULL,
302 	       "enter");
303 }
304 
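/*
 * Build a far pointer {64-bit offset, CS selector} in memory and take an
 * indirect far jump (rex64 ljmp) through it; reaching jmpf before
 * "res = 0" executes proves the jump was taken.
 */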
305 static void test_ljmp(void *mem)
306 {
307     unsigned char *m = mem;
308     volatile int res = 1;
309 
310     *(unsigned long**)m = &&jmpf;
311     asm volatile ("data16 mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
312     asm volatile ("rex64 ljmp *%0"::"m"(*m));
313     res = 0;
314 jmpf:
315     report(res, "ljmp");
316 }
317 
318 static void test_incdecnotneg(void *mem)
319 {
320     unsigned long *m = mem, v = 1234;
321     unsigned char *mb = mem, vb = 66;
322 
323     *m = 0;
324 
325     asm volatile ("incl %0":"+m"(*m));
326     report(*m == 1, "incl");
327     asm volatile ("decl %0":"+m"(*m));
328     report(*m == 0, "decl");
329     asm volatile ("incb %0":"+m"(*m));
330     report(*m == 1, "incb");
331     asm volatile ("decb %0":"+m"(*m));
332     report(*m == 0, "decb");
333 
334     asm volatile ("lock incl %0":"+m"(*m));
335     report(*m == 1, "lock incl");
336     asm volatile ("lock decl %0":"+m"(*m));
337     report(*m == 0, "lock decl");
338     asm volatile ("lock incb %0":"+m"(*m));
339     report(*m == 1, "lock incb");
340     asm volatile ("lock decb %0":"+m"(*m));
341     report(*m == 0, "lock decb");
342 
343     *m = v;
344 
345     asm ("lock negq %0" : "+m"(*m)); v = -v;
346     report(*m == v, "lock negq");
347     asm ("lock notq %0" : "+m"(*m)); v = ~v;
348     report(*m == v, "lock notq");
349 
350     *mb = vb;
351 
352     asm ("lock negb %0" : "+m"(*mb)); vb = -vb;
353     report(*mb == vb, "lock negb");
354     asm ("lock notb %0" : "+m"(*mb)); vb = ~vb;
355     report(*mb == vb, "lock notb");
356 }
357 
358 static void test_smsw(uint64_t *h_mem)
359 {
360 	char mem[16];
361 	unsigned short msw, msw_orig, *pmsw;
362 	int i, zero;
363 
364 	msw_orig = read_cr0();
365 
366 	asm("smsw %0" : "=r"(msw));
367 	report(msw == msw_orig, "smsw (1)");
368 
369 	memset(mem, 0, 16);
370 	pmsw = (void *)mem;
371 	asm("smsw %0" : "=m"(pmsw[4]));
372 	zero = 1;
373 	for (i = 0; i < 8; ++i)
374 		if (i != 4 && pmsw[i])
375 			zero = 0;
376 	report(msw == pmsw[4] && zero, "smsw (2)");
377 
378 	/* Trigger exit on smsw */
379 	*h_mem = 0x12345678abcdeful;
380 	asm volatile("smsw %0" : "+m"(*h_mem));
381 	report(msw == (unsigned short)*h_mem &&
382 	       (*h_mem & ~0xfffful) == 0x12345678ab0000ul, "smsw (3)");
383 }
384 
385 static void test_lmsw(void)
386 {
387 	char mem[16];
388 	unsigned short msw, *pmsw;
389 	unsigned long cr0;
390 
391 	cr0 = read_cr0();
392 
393 	msw = cr0 ^ 8;
394 	asm("lmsw %0" : : "r"(msw));
395 	printf("before %lx after %lx\n", cr0, read_cr0());
396 	report((cr0 ^ read_cr0()) == 8, "lmsw (1)");
397 
398 	pmsw = (void *)mem;
399 	*pmsw = cr0;
400 	asm("lmsw %0" : : "m"(*pmsw));
401 	printf("before %lx after %lx\n", cr0, read_cr0());
402 	report(cr0 == read_cr0(), "lmsw (2)");
403 
404 	/* lmsw can't clear cr0.pe */
405 	msw = (cr0 & ~1ul) ^ 4;  /* change EM to force trap */
406 	asm("lmsw %0" : : "r"(msw));
407 	report((cr0 ^ read_cr0()) == 4 && (cr0 & 1), "lmsw (3)");
408 
409 	/* back to normal */
410 	msw = cr0;
411 	asm("lmsw %0" : : "r"(msw));
412 }
413 
414 static void test_xchg(void *mem)
415 {
416 	unsigned long *memq = mem;
417 	unsigned long rax;
418 
419 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
420 		     "mov %%rax, (%[memq])\n\t"
421 		     "mov $0xfedcba9876543210, %%rax\n\t"
422 		     "xchg %%al, (%[memq])\n\t"
423 		     "mov %%rax, %[rax]\n\t"
424 		     : [rax]"=r"(rax)
425 		     : [memq]"r"(memq)
426 		     : "memory", "rax");
427 	report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcd10,
428 	       "xchg reg, r/m (1)");
429 
430 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
431 		     "mov %%rax, (%[memq])\n\t"
432 		     "mov $0xfedcba9876543210, %%rax\n\t"
433 		     "xchg %%ax, (%[memq])\n\t"
434 		     "mov %%rax, %[rax]\n\t"
435 		     : [rax]"=r"(rax)
436 		     : [memq]"r"(memq)
437 		     : "memory", "rax");
438 	report(rax == 0xfedcba987654cdef && *memq == 0x123456789ab3210,
439 	       "xchg reg, r/m (2)");
440 
441 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
442 		     "mov %%rax, (%[memq])\n\t"
443 		     "mov $0xfedcba9876543210, %%rax\n\t"
444 		     "xchg %%eax, (%[memq])\n\t"
445 		     "mov %%rax, %[rax]\n\t"
446 		     : [rax]"=r"(rax)
447 		     : [memq]"r"(memq)
448 		     : "memory", "rax");
449 	report(rax == 0x89abcdef && *memq == 0x123456776543210,
450 	       "xchg reg, r/m (3)");
451 
452 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
453 		     "mov %%rax, (%[memq])\n\t"
454 		     "mov $0xfedcba9876543210, %%rax\n\t"
455 		     "xchg %%rax, (%[memq])\n\t"
456 		     "mov %%rax, %[rax]\n\t"
457 		     : [rax]"=r"(rax)
458 		     : [memq]"r"(memq)
459 		     : "memory", "rax");
460 	report(rax == 0x123456789abcdef && *memq == 0xfedcba9876543210,
461 	       "xchg reg, r/m (4)");
462 }
463 
464 static void test_xadd(void *mem)
465 {
466 	unsigned long *memq = mem;
467 	unsigned long rax;
468 
469 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
470 		     "mov %%rax, (%[memq])\n\t"
471 		     "mov $0xfedcba9876543210, %%rax\n\t"
472 		     "xadd %%al, (%[memq])\n\t"
473 		     "mov %%rax, %[rax]\n\t"
474 		     : [rax]"=r"(rax)
475 		     : [memq]"r"(memq)
476 		     : "memory", "rax");
477 	report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcdff,
478 	       "xadd reg, r/m (1)");
479 
480 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
481 		     "mov %%rax, (%[memq])\n\t"
482 		     "mov $0xfedcba9876543210, %%rax\n\t"
483 		     "xadd %%ax, (%[memq])\n\t"
484 		     "mov %%rax, %[rax]\n\t"
485 		     : [rax]"=r"(rax)
486 		     : [memq]"r"(memq)
487 		     : "memory", "rax");
488 	report(rax == 0xfedcba987654cdef && *memq == 0x123456789abffff,
489 	       "xadd reg, r/m (2)");
490 
491 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
492 		     "mov %%rax, (%[memq])\n\t"
493 		     "mov $0xfedcba9876543210, %%rax\n\t"
494 		     "xadd %%eax, (%[memq])\n\t"
495 		     "mov %%rax, %[rax]\n\t"
496 		     : [rax]"=r"(rax)
497 		     : [memq]"r"(memq)
498 		     : "memory", "rax");
499 	report(rax == 0x89abcdef && *memq == 0x1234567ffffffff,
500 	       "xadd reg, r/m (3)");
501 
502 	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
503 		     "mov %%rax, (%[memq])\n\t"
504 		     "mov $0xfedcba9876543210, %%rax\n\t"
505 		     "xadd %%rax, (%[memq])\n\t"
506 		     "mov %%rax, %[rax]\n\t"
507 		     : [rax]"=r"(rax)
508 		     : [memq]"r"(memq)
509 		     : "memory", "rax");
510 	report(rax == 0x123456789abcdef && *memq == 0xffffffffffffffff,
511 	       "xadd reg, r/m (4)");
512 }
513 
514 static void test_btc(void *mem)
515 {
516 	unsigned int *a = mem;
517 
518 	memset(mem, 0, 4 * sizeof(unsigned int));
519 
520 	asm ("btcl $32, %0" :: "m"(a[0]) : "memory");
521 	asm ("btcl $1, %0" :: "m"(a[1]) : "memory");
522 	asm ("btcl %1, %0" :: "m"(a[0]), "r"(66) : "memory");
523 	report(a[0] == 1 && a[1] == 2 && a[2] == 4, "btcl imm8, r/m");
524 
525 	asm ("btcl %1, %0" :: "m"(a[3]), "r"(-1) : "memory");
526 	report(a[0] == 1 && a[1] == 2 && a[2] == 0x80000004, "btcl reg, r/m");
527 
528 	asm ("btcq %1, %0" : : "m"(a[2]), "r"(-1l) : "memory");
529 	report(a[0] == 1 && a[1] == 0x80000002 && a[2] == 0x80000004 && a[3] == 0,
530 	       "btcq reg, r/m");
531 }
532 
533 static void test_bsfbsr(void *mem)
534 {
535 	unsigned long rax, *memq = mem;
536 	unsigned eax, *meml = mem;
537 	unsigned short ax, *memw = mem;
538 	unsigned char z;
539 
540 	*memw = 0xc000;
541 	asm("bsfw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
542 	report(ax == 14, "bsfw r/m, reg");
543 
544 	*meml = 0xc0000000;
545 	asm("bsfl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
546 	report(eax == 30, "bsfl r/m, reg");
547 
548 	*memq = 0xc00000000000;
549 	asm("bsfq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
550 	report(rax == 46, "bsfq r/m, reg");
551 
552 	*memq = 0;
553 	asm("bsfq %[mem], %[a]; setz %[z]"
554 	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
555 	report(z == 1, "bsfq r/m, reg (zero source)");
556 
557 	*memw = 0xc000;
558 	asm("bsrw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
559 	report(ax == 15, "bsrw r/m, reg");
560 
561 	*meml = 0xc0000000;
562 	asm("bsrl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
563 	report(eax == 31, "bsrl r/m, reg");
564 
565 	*memq = 0xc00000000000;
566 	asm("bsrq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
567 	report(rax == 47, "bsrq r/m, reg");
568 
569 	*memq = 0;
570 	asm("bsrq %[mem], %[a]; setz %[z]"
571 	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
572 	report(z == 1, "bsrq r/m, reg (zero source)");
573 }
574 
575 static void test_imul(ulong *mem)
576 {
577     ulong a;
578 
579     *mem = 51; a = 0x1234567812345678UL;
580     asm ("imulw %1, %%ax" : "+a"(a) : "m"(*mem));
581     report(a == 0x12345678123439e8, "imul ax, mem");
582 
583     *mem = 51; a = 0x1234567812345678UL;
584     asm ("imull %1, %%eax" : "+a"(a) : "m"(*mem));
585     report(a == 0xa06d39e8, "imul eax, mem");
586 
587     *mem = 51; a = 0x1234567812345678UL;
588     asm ("imulq %1, %%rax" : "+a"(a) : "m"(*mem));
589     report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem");
590 
591     *mem  = 0x1234567812345678UL; a = 0x8765432187654321L;
592     asm ("imulw $51, %1, %%ax" : "+a"(a) : "m"(*mem));
593     report(a == 0x87654321876539e8, "imul ax, mem, imm8");
594 
595     *mem = 0x1234567812345678UL;
596     asm ("imull $51, %1, %%eax" : "+a"(a) : "m"(*mem));
597     report(a == 0xa06d39e8, "imul eax, mem, imm8");
598 
599     *mem = 0x1234567812345678UL;
600     asm ("imulq $51, %1, %%rax" : "+a"(a) : "m"(*mem));
601     report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem, imm8");
602 
603     *mem  = 0x1234567812345678UL; a = 0x8765432187654321L;
604     asm ("imulw $311, %1, %%ax" : "+a"(a) : "m"(*mem));
605     report(a == 0x8765432187650bc8, "imul ax, mem, imm");
606 
607     *mem = 0x1234567812345678UL;
608     asm ("imull $311, %1, %%eax" : "+a"(a) : "m"(*mem));
609     report(a == 0x1d950bc8, "imul eax, mem, imm");
610 
611     *mem = 0x1234567812345678UL;
612     asm ("imulq $311, %1, %%rax" : "+a"(a) : "m"(*mem));
613     report(a == 0x1D950BDE1D950BC8L, "imul rax, mem, imm");
614 }
615 
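/*
 * divq with a zero divisor must fault (#DE); ASM_TRY catches the fault,
 * so RAX/RDX stay unchanged and "ex" stays set.  The remaining cases
 * check widening multiply/divide results against precomputed values.
 */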
616 static void test_muldiv(long *mem)
617 {
618     long a, d, aa, dd;
619     u8 ex = 1;
620 
621     *mem = 0; a = 1; d = 2;
622     asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
623 	 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
624     report(a == 1 && d == 2 && ex, "divq (fault)");
625 
626     *mem = 987654321098765UL; a = 123456789012345UL; d = 123456789012345UL;
627     asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
628 	 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
629     report(a == 0x1ffffffb1b963b33ul && d == 0x273ba4384ede2ul && !ex,
630            "divq (1)");
631     aa = 0x1111111111111111; dd = 0x2222222222222222;
632     *mem = 0x3333333333333333; a = aa; d = dd;
633     asm("mulb %2" : "+a"(a), "+d"(d) : "m"(*mem));
634     report(a == 0x1111111111110363 && d == dd, "mulb mem");
635     *mem = 0x3333333333333333; a = aa; d = dd;
636     asm("mulw %2" : "+a"(a), "+d"(d) : "m"(*mem));
637     report(a == 0x111111111111c963 && d == 0x2222222222220369, "mulw mem");
638     *mem = 0x3333333333333333; a = aa; d = dd;
639     asm("mull %2" : "+a"(a), "+d"(d) : "m"(*mem));
640     report(a == 0x962fc963 && d == 0x369d036, "mull mem");
641     *mem = 0x3333333333333333; a = aa; d = dd;
642     asm("mulq %2" : "+a"(a), "+d"(d) : "m"(*mem));
643     report(a == 0x2fc962fc962fc963 && d == 0x369d0369d0369d0, "mulq mem");
644 }
645 
646 typedef unsigned __attribute__((vector_size(16))) sse128;
647 
648 static bool sseeq(uint32_t *v1, uint32_t *v2)
649 {
650     bool ok = true;
651     int i;
652 
653     for (i = 0; i < 4; ++i) {
654 	ok &= v1[i] == v2[i];
655     }
656 
657     return ok;
658 }
659 
660 static __attribute__((target("sse2"))) void test_sse(uint32_t *mem)
661 {
662 	sse128 vv;
663 	uint32_t *v = (uint32_t *)&vv;
664 
665 	write_cr0(read_cr0() & ~6); /* clear EM, MP */
666 	write_cr4(read_cr4() | 0x200); /* OSFXSR */
667 	memset(&vv, 0, sizeof(vv));
668 
669 #define TEST_RW_SSE(insn) do { \
670 		v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4; \
671 		asm(insn " %1, %0" : "=m"(*mem) : "x"(vv) : "memory"); \
672 		report(sseeq(v, mem), insn " (read)"); \
673 		mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8; \
674 		asm(insn " %1, %0" : "=x"(vv) : "m"(*mem) : "memory"); \
675 		report(sseeq(v, mem), insn " (write)"); \
676 } while (0)
677 
678 	TEST_RW_SSE("movdqu");
679 	TEST_RW_SSE("movaps");
680 	TEST_RW_SSE("movapd");
681 	TEST_RW_SSE("movups");
682 	TEST_RW_SSE("movupd");
683 #undef TEST_RW_SSE
684 }
685 
686 static void unaligned_movaps_handler(struct ex_regs *regs)
687 {
688 	extern char unaligned_movaps_cont;
689 
690 	++exceptions;
691 	regs->rip = (ulong)&unaligned_movaps_cont;
692 }
693 
694 static void cross_movups_handler(struct ex_regs *regs)
695 {
696 	extern char cross_movups_cont;
697 
698 	++exceptions;
699 	regs->rip = (ulong)&cross_movups_cont;
700 }
701 
702 static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem)
703 {
704 	sse128 vv;
705 	uint32_t *v = (uint32_t *)&vv;
706 	uint32_t *mem;
707 	uint8_t *bytes = cross_mem; // aligned on PAGE_SIZE*2
708 	void *page2 = (void *)(&bytes[4096]);
709 	struct pte_search search;
710 	pteval_t orig_pte;
711 
712 	// setup memory for unaligned access
713 	mem = (uint32_t *)(&bytes[8]);
714 
715 	// test unaligned access for movups, movupd and movaps
716 	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
717 	mem[0] = 5; mem[1] = 6; mem[2] = 8; mem[3] = 9;
718 	asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
719 	report(sseeq(v, mem), "movups unaligned");
720 
721 	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
722 	mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8;
723 	asm("movupd %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
724 	report(sseeq(v, mem), "movupd unaligned");
725 	exceptions = 0;
726 	handle_exception(GP_VECTOR, unaligned_movaps_handler);
727 	asm("movaps %1, %0\n\t unaligned_movaps_cont:"
728 			: "=m"(*mem) : "x"(vv));
729 	handle_exception(GP_VECTOR, 0);
730 	report(exceptions == 1, "unaligned movaps exception");
731 
732 	// setup memory for cross page access
733 	mem = (uint32_t *)(&bytes[4096-8]);
734 	v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4;
735 	mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8;
736 
737 	asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory");
738 	report(sseeq(v, mem), "movups unaligned crosspage");
739 
740 	// invalidate second page
741 	search = find_pte_level(current_page_table(), page2, 1);
742 	orig_pte = *search.pte;
743 	install_pte(current_page_table(), 1, page2, 0, NULL);
744 	invlpg(page2);
745 
746 	exceptions = 0;
747 	handle_exception(PF_VECTOR, cross_movups_handler);
748 	asm("movups %1, %0\n\t cross_movups_cont:" : "=m"(*mem) : "x"(vv) :
749 			"memory");
750 	handle_exception(PF_VECTOR, 0);
751 	report(exceptions == 1, "movups crosspage exception");
752 
753 	// restore invalidated page
754 	install_pte(current_page_table(), 1, page2, orig_pte, NULL);
755 }
756 
757 static void test_mmx(uint64_t *mem)
758 {
759     uint64_t v;
760 
761     write_cr0(read_cr0() & ~6); /* clear EM, MP */
762     asm volatile("fninit");
763     v = 0x0102030405060708ULL;
764     asm("movq %1, %0" : "=m"(*mem) : "y"(v));
765     report(v == *mem, "movq (mmx, read)");
766     *mem = 0x8070605040302010ull;
767     asm("movq %1, %0" : "=y"(v) : "m"(*mem));
768     report(v == *mem, "movq (mmx, write)");
769 }
770 
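/*
 * Hand-assemble "movb $1, disp32(%rip)" (c6 05 <disp32> 01) followed by
 * a ret (c3) in insn_ram.  The displacement is relative to the end of
 * the 7-byte movb, so it is chosen to hit byte 2 of *mem, which must
 * read back as 0x10000.
 */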
771 static void test_rip_relative(unsigned *mem, char *insn_ram)
772 {
773     /* movb $1, mem+2(%rip) */
774     insn_ram[0] = 0xc6;
775     insn_ram[1] = 0x05;
776     *(unsigned *)&insn_ram[2] = 2 + (char *)mem - (insn_ram + 7);
777     insn_ram[6] = 0x01;
778     /* ret */
779     insn_ram[7] = 0xc3;
780 
781     *mem = 0;
782     asm("callq *%1" : "+m"(*mem) : "r"(insn_ram));
783     report(*mem == 0x10000, "movb $imm, 0(%%rip)");
784 }
785 
786 static void test_shld_shrd(u32 *mem)
787 {
788     *mem = 0x12345678;
789     asm("shld %2, %1, %0" : "+m"(*mem) : "r"(0xaaaaaaaaU), "c"((u8)3));
790     report(*mem == ((0x12345678 << 3) | 5), "shld (cl)");
791     *mem = 0x12345678;
792     asm("shrd %2, %1, %0" : "+m"(*mem) : "r"(0x55555555U), "c"((u8)3));
793     report(*mem == ((0x12345678 >> 3) | (5u << 29)), "shrd (cl)");
794 }
795 
796 static void test_cmov(u32 *mem)
797 {
798 	u64 val;
799 	*mem = 0xabcdef12u;
800 	asm ("movq $0x1234567812345678, %%rax\n\t"
801 	     "cmpl %%eax, %%eax\n\t"
802 	     "cmovnel (%[mem]), %%eax\n\t"
803 	     "movq %%rax, %[val]\n\t"
804 	     : [val]"=r"(val) : [mem]"r"(mem) : "%rax", "cc");
805 	report(val == 0x12345678ul, "cmovnel");
806 }
807 
808 static unsigned long rip_advance;
809 
810 static void advance_rip_and_note_exception(struct ex_regs *regs)
811 {
812     ++exceptions;
813     regs->rip += rip_advance;
814 }
815 
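/*
 * Unmask all x87 exceptions, queue a divide-by-zero with fdivp, then run
 * an emulated (KVM_FEP) MMX movq: the pending #MF must be delivered on
 * that instruction, and the handler skips it via rip_advance.
 */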
816 static void test_mmx_movq_mf(uint64_t *mem)
817 {
818     /* movq %mm0, (%rax) */
819     extern char movq_start, movq_end;
820 
821     uint16_t fcw = 0;  /* all exceptions unmasked */
822     write_cr0(read_cr0() & ~6);  /* clear EM, MP */
823     exceptions = 0;
824     handle_exception(MF_VECTOR, advance_rip_and_note_exception);
825     asm volatile("fninit; fldcw %0" : : "m"(fcw));
826     asm volatile("fldz; fldz; fdivp"); /* generate exception */
827 
828     rip_advance = &movq_end - &movq_start;
829     asm(KVM_FEP "movq_start: movq %mm0, (%rax); movq_end:");
830     /* exit MMX mode */
831     asm volatile("fnclex; emms");
832     report(exceptions == 1, "movq mmx generates #MF");
833     handle_exception(MF_VECTOR, 0);
834 }
835 
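/*
 * An indirect jmp through *mem, which holds a non-canonical address,
 * must raise #GP on the jump itself; the handler advances RIP past it.
 */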
836 static void test_jmp_noncanonical(uint64_t *mem)
837 {
838 	extern char nc_jmp_start, nc_jmp_end;
839 
840 	*mem = 0x1111111111111111ul;
841 
842 	exceptions = 0;
843 	rip_advance = &nc_jmp_end - &nc_jmp_start;
844 	handle_exception(GP_VECTOR, advance_rip_and_note_exception);
845 	asm volatile ("nc_jmp_start: jmp *%0; nc_jmp_end:" : : "m"(*mem));
846 	report(exceptions == 1, "jump to non-canonical address");
847 	handle_exception(GP_VECTOR, 0);
848 }
849 
850 static void test_movabs(uint64_t *mem)
851 {
852     /* mov $0x9090909090909090, %rcx */
853     unsigned long rcx;
854     asm(KVM_FEP "mov $0x9090909090909090, %0" : "=c" (rcx) : "0" (0));
855     report(rcx == 0x9090909090909090, "64-bit mov imm2");
856 }
857 
858 static void test_smsw_reg(uint64_t *mem)
859 {
860 	unsigned long cr0 = read_cr0();
861 	unsigned long rax;
862 	const unsigned long in_rax = 0x1234567890abcdeful;
863 
864 	asm(KVM_FEP "smsww %w0\n\t" : "=a" (rax) : "0" (in_rax));
865 	report((u16)rax == (u16)cr0 && rax >> 16 == in_rax >> 16,
866 	       "16-bit smsw reg");
867 
868 	asm(KVM_FEP "smswl %k0\n\t" : "=a" (rax) : "0" (in_rax));
869 	report(rax == (u32)cr0, "32-bit smsw reg");
870 
871 	asm(KVM_FEP "smswq %q0\n\t" : "=a" (rax) : "0" (in_rax));
872 	report(rax == cr0, "64-bit smsw reg");
873 }
874 
875 static void test_nop(uint64_t *mem)
876 {
877 	unsigned long rax;
878 	const unsigned long in_rax = 0x1234567890abcdeful;
879 	asm(KVM_FEP "nop\n\t" : "=a" (rax) : "0" (in_rax));
880 	report(rax == in_rax, "nop");
881 }
882 
883 static void test_mov_dr(uint64_t *mem)
884 {
885 	unsigned long rax;
886 	const unsigned long in_rax = 0;
887 	bool rtm_support = this_cpu_has(X86_FEATURE_RTM);
888 	unsigned long dr6_fixed_1 = rtm_support ? 0xfffe0ff0ul : 0xffff0ff0ul;
889 	asm(KVM_FEP "movq %0, %%dr6\n\t"
890 	    KVM_FEP "movq %%dr6, %0\n\t" : "=a" (rax) : "a" (in_rax));
891 	report(rax == dr6_fixed_1, "mov_dr6");
892 }
893 
894 static void test_push16(uint64_t *mem)
895 {
896 	uint64_t rsp1, rsp2;
897 	uint16_t r;
898 
899 	asm volatile (	"movq %%rsp, %[rsp1]\n\t"
900 			"pushw %[v]\n\t"
901 			"popw %[r]\n\t"
902 			"movq %%rsp, %[rsp2]\n\t"
903 			"movq %[rsp1], %%rsp\n\t" :
904 			[rsp1]"=r"(rsp1), [rsp2]"=r"(rsp2), [r]"=r"(r)
905 			: [v]"m"(*mem) : "memory");
906 	report(rsp1 == rsp2, "push16");
907 }
908 
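/*
 * main() maps the same MMIO page at mem and mem + 4096, so a 16-bit
 * access at offset 4095 straddles the page boundary while still hitting
 * MMIO on both sides.
 */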
909 static void test_crosspage_mmio(volatile uint8_t *mem)
910 {
911     volatile uint16_t w, *pw;
912 
913     pw = (volatile uint16_t *)&mem[4095];
914     mem[4095] = 0x99;
915     mem[4096] = 0x77;
916     asm volatile("mov %1, %0" : "=r"(w) : "m"(*pw) : "memory");
917     report(w == 0x7799, "cross-page mmio read");
918     asm volatile("mov %1, %0" : "=m"(*pw) : "r"((uint16_t)0x88aa));
919     report(mem[4095] == 0xaa && mem[4096] == 0x88, "cross-page mmio write");
920 }
921 
922 static void test_string_io_mmio(volatile uint8_t *mem)
923 {
924 	/* Cross MMIO pages. */
925 	volatile uint8_t *mmio = mem + 4032;
926 
927 	asm volatile("outw %%ax, %%dx  \n\t" : : "a"(0x9999), "d"(TESTDEV_IO_PORT));
928 
929 	asm volatile ("cld; rep insb" : : "d" (TESTDEV_IO_PORT), "D" (mmio), "c" (1024));
930 
931 	report(mmio[1023] == 0x99, "string_io_mmio");
932 }
933 
934 /* kvm doesn't allow lidt/lgdt from mmio, so the test is disabled */
935 #if 0
936 static void test_lgdt_lidt(volatile uint8_t *mem)
937 {
938     struct descriptor_table_ptr orig, fresh = {};
939 
940     sgdt(&orig);
941     *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
942 	.limit = 0xf234,
943 	.base = 0x12345678abcd,
944     };
945     cli();
946     asm volatile("lgdt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
947     sgdt(&fresh);
948     lgdt(&orig);
949     sti();
950     report(orig.limit == fresh.limit && orig.base == fresh.base,
951            "lgdt (long address)");
952 
953     sidt(&orig);
954     *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
955 	.limit = 0x432f,
956 	.base = 0xdbca87654321,
957     };
958     cli();
959     asm volatile("lidt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
960     sidt(&fresh);
961     lidt(&orig);
962     sti();
963     report(orig.limit == fresh.limit && orig.base == fresh.base,
964            "lidt (long address)");
965 }
966 #endif
967 
968 static void ss_bad_rpl(struct ex_regs *regs)
969 {
970     extern char ss_bad_rpl_cont;
971 
972     ++exceptions;
973     regs->rip = (ulong)&ss_bad_rpl_cont;
974 }
975 
976 static void test_sreg(volatile uint16_t *mem)
977 {
978     u16 ss = read_ss();
979 
980     // check for null segment load
981     *mem = 0;
982     asm volatile("mov %0, %%ss" : : "m"(*mem));
983     report(read_ss() == 0, "mov null, %%ss");
984 
985     // check for exception when ss.rpl != cpl on null segment load
986     exceptions = 0;
987     handle_exception(GP_VECTOR, ss_bad_rpl);
988     *mem = 3;
989     asm volatile("mov %0, %%ss; ss_bad_rpl_cont:" : : "m"(*mem));
990     report(exceptions == 1 && read_ss() == 0,
991            "mov null, %%ss (with ss.rpl != cpl)");
992     handle_exception(GP_VECTOR, 0);
993     write_ss(ss);
994 }
995 
996 static uint64_t usr_gs_mov(void)
997 {
998     static uint64_t dummy = MAGIC_NUM;
999     uint64_t dummy_ptr = (uint64_t)&dummy;
1000     uint64_t ret;
1001 
1002     dummy_ptr -= GS_BASE;
1003     asm volatile("mov %%gs:(%%rcx), %%rax" : "=a"(ret): "c"(dummy_ptr) :);
1004 
1005     return ret;
1006 }
1007 
1008 static void test_iret(void)
1009 {
1010     uint64_t val;
1011     bool raised_vector;
1012 
1013     /* Update GS base to 4MiB */
1014     wrmsr(MSR_GS_BASE, GS_BASE);
1015 
1016     /*
1017      * Per the SDM, when `iret` returns to an outer privilege level, any
1018      * data segment register (ES, FS, GS, or DS) whose descriptor fails
1019      * the privilege check is loaded with a null selector.
1020      *
1021      * In our test case, GS becomes null.
1022      */
1023     val = run_in_user((usermode_func)usr_gs_mov, GP_VECTOR,
1024                       0, 0, 0, 0, &raised_vector);
1025 
1026     report(val == MAGIC_NUM, "Test ret/iret with a nullified segment");
1027 }
1028 
1029 /* Broken emulation causes triple fault, which skips the other tests. */
1030 #if 0
1031 static void test_lldt(volatile uint16_t *mem)
1032 {
1033     u64 gdt[] = { 0, /* null descriptor */
1034 #ifdef __X86_64__
1035 		  0, /* ldt descriptor is 16 bytes in long mode */
1036 #endif
1037 		  0x0000f82000000ffffull /* ldt descriptor */ };
1038     struct descriptor_table_ptr gdt_ptr = { .limit = sizeof(gdt) - 1,
1039 					    .base = (ulong)&gdt };
1040     struct descriptor_table_ptr orig_gdt;
1041 
1042     cli();
1043     sgdt(&orig_gdt);
1044     lgdt(&gdt_ptr);
1045     *mem = 0x8;
1046     asm volatile("lldt %0" : : "m"(*mem));
1047     lgdt(&orig_gdt);
1048     sti();
1049     report(sldt() == *mem, "lldt");
1050 }
1051 #endif
1052 
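/*
 * Clear the busy bit of the current TSS descriptor in the GDT, reload TR
 * from a memory operand with ltr, and check that the descriptor is
 * marked busy again.
 */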
1053 static void test_ltr(volatile uint16_t *mem)
1054 {
1055     struct descriptor_table_ptr gdt_ptr;
1056     uint64_t *gdt, *trp;
1057     uint16_t tr = str();
1058     uint64_t busy_mask = (uint64_t)1 << 41;
1059 
1060     sgdt(&gdt_ptr);
1061     gdt = (uint64_t *)gdt_ptr.base;
1062     trp = &gdt[tr >> 3];
1063     *trp &= ~busy_mask;
1064     *mem = tr;
1065     asm volatile("ltr %0" : : "m"(*mem) : "memory");
1066     report(str() == tr && (*trp & busy_mask), "ltr");
1067 }
1068 
1069 static void test_simplealu(u32 *mem)
1070 {
1071     *mem = 0x1234;
1072     asm("or %1, %0" : "+m"(*mem) : "r"(0x8001));
1073     report(*mem == 0x9235, "or");
1074     asm("add %1, %0" : "+m"(*mem) : "r"(2));
1075     report(*mem == 0x9237, "add");
1076     asm("xor %1, %0" : "+m"(*mem) : "r"(0x1111));
1077     report(*mem == 0x8326, "xor");
1078     asm("sub %1, %0" : "+m"(*mem) : "r"(0x26));
1079     report(*mem == 0x8300, "sub");
1080     asm("clc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
1081     report(*mem == 0x8400, "adc(0)");
1082     asm("stc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
1083     report(*mem == 0x8501, "adc(1)");
1084     asm("clc; sbb %1, %0" : "+m"(*mem) : "r"(0));
1085     report(*mem == 0x8501, "sbb(0)");
1086     asm("stc; sbb %1, %0" : "+m"(*mem) : "r"(0));
1087     report(*mem == 0x8500, "sbb(1)");
1088     asm("and %1, %0" : "+m"(*mem) : "r"(0xfe77));
1089     report(*mem == 0x8400, "and");
1090     asm("test %1, %0" : "+m"(*mem) : "r"(0xf000));
1091     report(*mem == 0x8400, "test");
1092 }
1093 
1094 static void illegal_movbe_handler(struct ex_regs *regs)
1095 {
1096 	extern char bad_movbe_cont;
1097 
1098 	++exceptions;
1099 	regs->rip = (ulong)&bad_movbe_cont;
1100 }
1101 
1102 static void test_illegal_movbe(void)
1103 {
1104 	if (!this_cpu_has(X86_FEATURE_MOVBE)) {
1105 		report_skip("illegal movbe");
1106 		return;
1107 	}
1108 
1109 	exceptions = 0;
1110 	handle_exception(UD_VECTOR, illegal_movbe_handler);
1111 	asm volatile(".byte 0x0f; .byte 0x38; .byte 0xf0; .byte 0xc0;\n\t"
1112 		     " bad_movbe_cont:" : : : "rax");
1113 	report(exceptions == 1, "illegal movbe");
1114 	handle_exception(UD_VECTOR, 0);
1115 }
1116 
1117 static void record_no_fep(struct ex_regs *regs)
1118 {
1119 	fep_available = 0;
1120 	regs->rip += KVM_FEP_LENGTH;
1121 }
1122 
1123 int main(void)
1124 {
1125 	void *mem;
1126 	void *insn_page;
1127 	void *insn_ram;
1128 	void *cross_mem;
1129 	unsigned long t1, t2;
1130 
1131 	setup_vm();
1132 	handle_exception(UD_VECTOR, record_no_fep);
1133 	asm(KVM_FEP "nop");
1134 	handle_exception(UD_VECTOR, 0);
1135 
1136 	mem = alloc_vpages(2);
1137 	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem);
1138 	// install the page twice to test cross-page mmio
1139 	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem + 4096);
1140 	insn_page = alloc_page();
1141 	insn_ram = vmap(virt_to_phys(insn_page), 4096);
1142 	cross_mem = vmap(virt_to_phys(alloc_pages(2)), 2 * PAGE_SIZE);
1143 
1144 	// test mov reg, r/m and mov r/m, reg
1145 	t1 = 0x123456789abcdef;
1146 	asm volatile("mov %[t1], (%[mem]) \n\t"
1147 		     "mov (%[mem]), %[t2]"
1148 		     : [t2]"=r"(t2)
1149 		     : [t1]"r"(t1), [mem]"r"(mem)
1150 		     : "memory");
1151 	report(t2 == 0x123456789abcdef, "mov reg, r/m (1)");
1152 
1153 	test_simplealu(mem);
1154 	test_cmps(mem);
1155 	test_scas(mem);
1156 
1157 	test_push(mem);
1158 	test_pop(mem);
1159 
1160 	test_xchg(mem);
1161 	test_xadd(mem);
1162 
1163 	test_cr8();
1164 
1165 	test_smsw(mem);
1166 	test_lmsw();
1167 	test_ljmp(mem);
1168 	test_stringio();
1169 	test_incdecnotneg(mem);
1170 	test_btc(mem);
1171 	test_bsfbsr(mem);
1172 	test_imul(mem);
1173 	test_muldiv(mem);
1174 	test_sse(mem);
1175 	test_sse_exceptions(cross_mem);
1176 	test_mmx(mem);
1177 	test_rip_relative(mem, insn_ram);
1178 	test_shld_shrd(mem);
1179 	//test_lgdt_lidt(mem);
1180 	test_sreg(mem);
1181 	test_iret();
1182 	//test_lldt(mem);
1183 	test_ltr(mem);
1184 	test_cmov(mem);
1185 
1186 	if (fep_available) {
1187 		test_mmx_movq_mf(mem);
1188 		test_movabs(mem);
1189 		test_smsw_reg(mem);
1190 		test_nop(mem);
1191 		test_mov_dr(mem);
1192 	} else {
1193 		report_skip("skipping register-only tests, "
1194 			    "use kvm.force_emulation_prefix=1 to enable");
1195 	}
1196 
1197 	test_push16(mem);
1198 	test_crosspage_mmio(mem);
1199 
1200 	test_string_io_mmio(mem);
1201 
1202 	test_jmp_noncanonical(mem);
1203 	test_illegal_movbe();
1204 
1205 	return report_summary();
1206 }
1207