/* kvm-unit-tests x86/emulator.c (revision 3ee1b91bcc0ef45078946730d8625a269bdb8e6c) */
#include "ioram.h"
#include "vm.h"
#include "libcflat.h"
#include "desc.h"
#include "types.h"
#include "processor.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "usermode.h"

#define memset __builtin_memset
#define TESTDEV_IO_PORT 0xe0

#define MAGIC_NUM 0xdeadbeefdeadbeefUL
#define GS_BASE 0x400000

static int exceptions;

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
#define KVM_FEP_LENGTH 5
static int fep_available = 1;

struct regs {
	u64 rax, rbx, rcx, rdx;
	u64 rsi, rdi, rsp, rbp;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
	u64 rip, rflags;
};
struct regs inregs, outregs, save;

struct insn_desc {
	u64 ptr;
	size_t len;
};

static char st1[] = "abcdefghijklmnop";

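/*
 * The testdev I/O port is assumed to latch the last byte written to it,
 * so the result of a rep outsb can be verified by reading the port back
 * with a single inb.
 */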
static void test_stringio(void)
{
	unsigned char r = 0;
	asm volatile("cld \n\t"
		     "movw %0, %%dx \n\t"
		     "rep outsb \n\t"
		     : : "i"((short)TESTDEV_IO_PORT),
		       "S"(st1), "c"(sizeof(st1) - 1));
	asm volatile("inb %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
	report(r == st1[sizeof(st1) - 2], "outsb up"); /* last char */

	asm volatile("std \n\t"
		     "movw %0, %%dx \n\t"
		     "rep outsb \n\t"
		     : : "i"((short)TESTDEV_IO_PORT),
		       "S"(st1 + sizeof(st1) - 2), "c"(sizeof(st1) - 1));
	asm volatile("cld \n\t" : : );
	asm volatile("in %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
	report(r == st1[0], "outsb down");
}

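/*
 * Both buffer pairs passed in by test_cmps() match for the first 100
 * bytes and differ from byte 100 on, so the short "repe cmps" runs are
 * expected to complete (rcx == 0) while the long runs stop at the first
 * mismatching element.
 */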
static void test_cmps_one(unsigned char *m1, unsigned char *m3)
{
	void *rsi, *rdi;
	long rcx, tmp;

	rsi = m1; rdi = m3; rcx = 30;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsb (1)");

	rsi = m1; rdi = m3; rcx = 30;
	asm volatile("or $1, %[tmp]\n\t" // clear ZF
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30,
	       "repe cmpsb (1.zf)");

	rsi = m1; rdi = m3; rcx = 15;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsw"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)");

	rsi = m1; rdi = m3; rcx = 7;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsl"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpsl (1)");

	rsi = m1; rdi = m3; rcx = 4;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsq"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)");

	rsi = m1; rdi = m3; rcx = 130;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsb"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101,
	       "repe cmpsb (2)");

	rsi = m1; rdi = m3; rcx = 65;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsw"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102,
	       "repe cmpsw (2)");

	rsi = m1; rdi = m3; rcx = 32;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsl"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104,
	       "repe cmpsl (2)");

	rsi = m1; rdi = m3; rcx = 16;
	asm volatile("xor %[tmp], %[tmp] \n\t"
		     "repe cmpsq"
		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
		     : : "cc");
	report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104,
	       "repe cmpsq (2)");
}

static void test_cmps(void *mem)
{
	unsigned char *m1 = mem, *m2 = mem + 1024;
	unsigned char m3[1024];

	for (int i = 0; i < 100; ++i)
		m1[i] = m2[i] = m3[i] = i;
	for (int i = 100; i < 200; ++i)
		m1[i] = (m3[i] = m2[i] = i) + 1;
	test_cmps_one(m1, m3);
	test_cmps_one(m1, m2);
}

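/*
 * scas compares al/ax/eax/rax against memory at %rdi; the sub-width
 * forms only use the low byte/word/dword of rax, which is why e.g.
 * "a"(0xff11) still matches the first memory byte 0x11.
 */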
static void test_scas(void *mem)
{
    bool z;
    void *di;

    *(ulong *)mem = 0x77665544332211;

    di = mem;
    asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff11));
    report(di == mem + 1 && z, "scasb match");

    di = mem;
    asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff54));
    report(di == mem + 1 && !z, "scasb mismatch");

    di = mem;
    asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff2211));
    report(di == mem + 2 && z, "scasw match");

    di = mem;
    asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xffdd11));
    report(di == mem + 2 && !z, "scasw mismatch");

    di = mem;
    asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff44332211ul));
    report(di == mem + 4 && z, "scasd match");

    di = mem;
    asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0x45332211));
    report(di == mem + 4 && !z, "scasd mismatch");

    di = mem;
    asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(0x77665544332211ul));
    report(di == mem + 8 && z, "scasq match");

    di = mem;
    asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(3));
    report(di == mem + 8 && !z, "scasq mismatch");
}

static void test_cr8(void)
{
	unsigned long src, dst;

	dst = 777;
	src = 3;
	asm volatile("mov %[src], %%cr8; mov %%cr8, %[dst]"
		     : [dst]"+r"(dst), [src]"+r"(src));
	report(dst == 3 && src == 3, "mov %%cr8");
}

static void test_push(void *mem)
{
	unsigned long tmp;
	unsigned long *stack_top = mem + 4096;
	unsigned long *new_stack_top;
	unsigned long memw = 0x123456789abcdeful;

	memset(mem, 0x55, (void *)stack_top - mem);

	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "pushq $-7 \n\t"
		     "pushq %[reg] \n\t"
		     "pushq (%[mem]) \n\t"
		     "pushq $-7070707 \n\t"
		     "mov %%rsp, %[new_stack_top] \n\t"
		     "mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp), [new_stack_top]"=r"(new_stack_top)
		     : [stack_top]"r"(stack_top),
		       [reg]"r"(-17l), [mem]"r"(&memw)
		     : "memory");

	report(stack_top[-1] == -7ul, "push $imm8");
	report(stack_top[-2] == -17ul, "push %%reg");
	report(stack_top[-3] == 0x123456789abcdeful, "push mem");
	report(stack_top[-4] == -7070707, "push $imm");
}

static void test_pop(void *mem)
{
	unsigned long tmp, tmp3, rsp, rbp;
	unsigned long *stack_top = mem + 4096;
	unsigned long memw = 0x123456789abcdeful;
	static unsigned long tmp2;

	memset(mem, 0x55, (void *)stack_top - mem);

	asm volatile("pushq %[val] \n\t"
		     "popq (%[mem])"
		     : : [val]"m"(memw), [mem]"r"(mem) : "memory");
	report(*(unsigned long *)mem == memw, "pop mem");

	memw = 7 - memw;
	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "pushq %[val] \n\t"
		     "popq %[tmp2] \n\t"
		     "mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp), [tmp2]"=m"(tmp2)
		     : [val]"r"(memw), [stack_top]"r"(stack_top)
		     : "memory");
	report(tmp2 == memw, "pop mem (2)");

	memw = 129443 - memw;
	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "pushq %[val] \n\t"
		     "popq %[tmp2] \n\t"
		     "mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp), [tmp2]"=r"(tmp2)
		     : [val]"r"(memw), [stack_top]"r"(stack_top)
		     : "memory");
	report(tmp2 == memw, "pop reg");

	asm volatile("mov %%rsp, %[tmp] \n\t"
		     "mov %[stack_top], %%rsp \n\t"
		     "push $1f \n\t"
		     "ret \n\t"
		     "2: jmp 2b \n\t"
		     "1: mov %[tmp], %%rsp"
		     : [tmp]"=&r"(tmp) : [stack_top]"r"(stack_top)
		     : "memory");
	report(1, "ret");

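	/*
	 * leave is equivalent to "mov %rbp, %rsp; pop %rbp".  The guest's
	 * real rbp and rsp are parked in r8/r9 around the instruction so
	 * it operates on the scratch stack instead.
	 */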
	stack_top[-1] = 0x778899;
	asm volatile("mov %[stack_top], %%r8 \n\t"
		     "mov %%rsp, %%r9 \n\t"
		     "xchg %%rbp, %%r8 \n\t"
		     "leave \n\t"
		     "xchg %%rsp, %%r9 \n\t"
		     "xchg %%rbp, %%r8 \n\t"
		     "mov %%r9, %[tmp] \n\t"
		     "mov %%r8, %[tmp3]"
		     : [tmp]"=&r"(tmp), [tmp3]"=&r"(tmp3) : [stack_top]"r"(stack_top-1)
		     : "memory", "r8", "r9");
	report(tmp == (ulong)stack_top && tmp3 == 0x778899, "leave");

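	/*
	 * enter $0x1238, $0 is "push %rbp; mov %rsp, %rbp; sub $0x1238,
	 * %rsp", so the saved rbp must land at stack_top[-1].
	 */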
	rbp = 0xaa55aa55bb66bb66ULL;
	rsp = (unsigned long)stack_top;
	asm volatile("mov %[rsp], %%r8 \n\t"
		     "mov %[rbp], %%r9 \n\t"
		     "xchg %%rsp, %%r8 \n\t"
		     "xchg %%rbp, %%r9 \n\t"
		     "enter $0x1238, $0 \n\t"
		     "xchg %%rsp, %%r8 \n\t"
		     "xchg %%rbp, %%r9 \n\t"
		     "xchg %%r8, %[rsp] \n\t"
		     "xchg %%r9, %[rbp]"
		     : [rsp]"+a"(rsp), [rbp]"+b"(rbp) : : "memory", "r8", "r9");
	report(rsp == (unsigned long)stack_top - 8 - 0x1238
	       && rbp == (unsigned long)stack_top - 8
	       && stack_top[-1] == 0xaa55aa55bb66bb66ULL,
	       "enter");
}

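/*
 * Builds a 10-byte far pointer in memory (a 64-bit offset followed by a
 * 16-bit code segment selector) and jumps through it with "rex64 ljmp";
 * on success the jump lands on the jmpf label and skips "res = 0".
 */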
static void test_ljmp(void *mem)
{
    unsigned char *m = mem;
    volatile int res = 1;

    *(unsigned long**)m = &&jmpf;
    asm volatile ("data16 mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
    asm volatile ("rex64 ljmp *%0"::"m"(*m));
    res = 0;
jmpf:
    report(res, "ljmp");
}

static void test_incdecnotneg(void *mem)
{
    unsigned long *m = mem, v = 1234;
    unsigned char *mb = mem, vb = 66;

    *m = 0;

    asm volatile ("incl %0":"+m"(*m));
    report(*m == 1, "incl");
    asm volatile ("decl %0":"+m"(*m));
    report(*m == 0, "decl");
    asm volatile ("incb %0":"+m"(*m));
    report(*m == 1, "incb");
    asm volatile ("decb %0":"+m"(*m));
    report(*m == 0, "decb");

    asm volatile ("lock incl %0":"+m"(*m));
    report(*m == 1, "lock incl");
    asm volatile ("lock decl %0":"+m"(*m));
    report(*m == 0, "lock decl");
    asm volatile ("lock incb %0":"+m"(*m));
    report(*m == 1, "lock incb");
    asm volatile ("lock decb %0":"+m"(*m));
    report(*m == 0, "lock decb");

    *m = v;

    asm ("lock negq %0" : "+m"(*m)); v = -v;
    report(*m == v, "lock negq");
    asm ("lock notq %0" : "+m"(*m)); v = ~v;
    report(*m == v, "lock notq");

    *mb = vb;

    asm ("lock negb %0" : "+m"(*mb)); vb = -vb;
    report(*mb == vb, "lock negb");
    asm ("lock notb %0" : "+m"(*mb)); vb = ~vb;
    report(*mb == vb, "lock notb");
}

static void test_smsw(uint64_t *h_mem)
{
	char mem[16];
	unsigned short msw, msw_orig, *pmsw;
	int i, zero;

	msw_orig = read_cr0();

	asm("smsw %0" : "=r"(msw));
	report(msw == msw_orig, "smsw (1)");

	memset(mem, 0, 16);
	pmsw = (void *)mem;
	asm("smsw %0" : "=m"(pmsw[4]));
	zero = 1;
	for (i = 0; i < 8; ++i)
		if (i != 4 && pmsw[i])
			zero = 0;
	report(msw == pmsw[4] && zero, "smsw (2)");

	/* Trigger exit on smsw */
	*h_mem = 0x12345678abcdeful;
	asm volatile("smsw %0" : "+m"(*h_mem));
	report(msw == (unsigned short)*h_mem &&
	       (*h_mem & ~0xfffful) == 0x12345678ab0000ul, "smsw (3)");
}

static void test_lmsw(void)
{
	char mem[16];
	unsigned short msw, *pmsw;
	unsigned long cr0;

	cr0 = read_cr0();

	msw = cr0 ^ 8;
	asm("lmsw %0" : : "r"(msw));
	printf("before %lx after %lx\n", cr0, read_cr0());
	report((cr0 ^ read_cr0()) == 8, "lmsw (1)");

	pmsw = (void *)mem;
	*pmsw = cr0;
	asm("lmsw %0" : : "m"(*pmsw));
	printf("before %lx after %lx\n", cr0, read_cr0());
	report(cr0 == read_cr0(), "lmsw (2)");

	/* lmsw can't clear cr0.pe */
	msw = (cr0 & ~1ul) ^ 4;  /* change EM to force trap */
	asm("lmsw %0" : : "r"(msw));
	report((cr0 ^ read_cr0()) == 4 && (cr0 & 1), "lmsw (3)");

	/* back to normal */
	msw = cr0;
	asm("lmsw %0" : : "r"(msw));
}

static void test_xchg(void *mem)
{
	unsigned long *memq = mem;
	unsigned long rax;

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%al, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcd10,
	       "xchg reg, r/m (1)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%ax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba987654cdef && *memq == 0x123456789ab3210,
	       "xchg reg, r/m (2)");

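	/*
	 * The 32-bit form below zero-extends %eax into %rax, as any
	 * 32-bit destination write does in long mode; hence the expected
	 * rax of plain 0x89abcdef.
	 */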
	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%eax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x89abcdef && *memq == 0x123456776543210,
	       "xchg reg, r/m (3)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xchg %%rax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x123456789abcdef && *memq == 0xfedcba9876543210,
	       "xchg reg, r/m (4)");
}

static void test_xadd(void *mem)
{
	unsigned long *memq = mem;
	unsigned long rax;

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%al, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcdff,
	       "xadd reg, r/m (1)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%ax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0xfedcba987654cdef && *memq == 0x123456789abffff,
	       "xadd reg, r/m (2)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%eax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x89abcdef && *memq == 0x1234567ffffffff,
	       "xadd reg, r/m (3)");

	asm volatile("mov $0x123456789abcdef, %%rax\n\t"
		     "mov %%rax, (%[memq])\n\t"
		     "mov $0xfedcba9876543210, %%rax\n\t"
		     "xadd %%rax, (%[memq])\n\t"
		     "mov %%rax, %[rax]\n\t"
		     : [rax]"=r"(rax)
		     : [memq]"r"(memq)
		     : "memory", "rax");
	report(rax == 0x123456789abcdef && *memq == 0xffffffffffffffff,
	       "xadd reg, r/m (4)");
}

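/*
 * With a memory operand, a bit offset in a register is not truncated to
 * the operand size: it addresses bits before or after the referenced
 * dword/qword (e.g. bit 66 of a[0] is bit 2 of a[2], and bit -1 of a[3]
 * is bit 31 of a[2]).  An imm8 offset, by contrast, is taken modulo the
 * operand size, so "btcl $32" toggles bit 0.
 */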
static void test_btc(void *mem)
{
	unsigned int *a = mem;

	memset(mem, 0, 4 * sizeof(unsigned int));

	asm ("btcl $32, %0" :: "m"(a[0]) : "memory");
	asm ("btcl $1, %0" :: "m"(a[1]) : "memory");
	asm ("btcl %1, %0" :: "m"(a[0]), "r"(66) : "memory");
	report(a[0] == 1 && a[1] == 2 && a[2] == 4, "btcl imm8, r/m");

	asm ("btcl %1, %0" :: "m"(a[3]), "r"(-1) : "memory");
	report(a[0] == 1 && a[1] == 2 && a[2] == 0x80000004, "btcl reg, r/m");

	asm ("btcq %1, %0" : : "m"(a[2]), "r"(-1l) : "memory");
	report(a[0] == 1 && a[1] == 0x80000002 && a[2] == 0x80000004 && a[3] == 0,
	       "btcq reg, r/m");
}

static void test_bsfbsr(void *mem)
{
	unsigned long rax, *memq = mem;
	unsigned eax, *meml = mem;
	unsigned short ax, *memw = mem;
	unsigned char z;

	*memw = 0xc000;
	asm("bsfw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
	report(ax == 14, "bsfw r/m, reg");

	*meml = 0xc0000000;
	asm("bsfl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
	report(eax == 30, "bsfl r/m, reg");

	*memq = 0xc00000000000;
	asm("bsfq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
	report(rax == 46, "bsfq r/m, reg");

	*memq = 0;
	asm("bsfq %[mem], %[a]; setz %[z]"
	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
	report(z == 1, "bsfq r/m, reg (zf)");

	*memw = 0xc000;
	asm("bsrw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
	report(ax == 15, "bsrw r/m, reg");

	*meml = 0xc0000000;
	asm("bsrl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
	report(eax == 31, "bsrl r/m, reg");

	*memq = 0xc00000000000;
	asm("bsrq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
	report(rax == 47, "bsrq r/m, reg");

	*memq = 0;
	asm("bsrq %[mem], %[a]; setz %[z]"
	    : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
	report(z == 1, "bsrq r/m, reg (zf)");
}

static void test_imul(ulong *mem)
{
    ulong a;

    *mem = 51; a = 0x1234567812345678UL;
    asm ("imulw %1, %%ax" : "+a"(a) : "m"(*mem));
    report(a == 0x12345678123439e8, "imul ax, mem");

    *mem = 51; a = 0x1234567812345678UL;
    asm ("imull %1, %%eax" : "+a"(a) : "m"(*mem));
    report(a == 0xa06d39e8, "imul eax, mem");

    *mem = 51; a = 0x1234567812345678UL;
    asm ("imulq %1, %%rax" : "+a"(a) : "m"(*mem));
    report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem");

    *mem = 0x1234567812345678UL; a = 0x8765432187654321L;
    asm ("imulw $51, %1, %%ax" : "+a"(a) : "m"(*mem));
    report(a == 0x87654321876539e8, "imul ax, mem, imm8");

    *mem = 0x1234567812345678UL;
    asm ("imull $51, %1, %%eax" : "+a"(a) : "m"(*mem));
    report(a == 0xa06d39e8, "imul eax, mem, imm8");

    *mem = 0x1234567812345678UL;
    asm ("imulq $51, %1, %%rax" : "+a"(a) : "m"(*mem));
    report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem, imm8");

    *mem = 0x1234567812345678UL; a = 0x8765432187654321L;
    asm ("imulw $311, %1, %%ax" : "+a"(a) : "m"(*mem));
    report(a == 0x8765432187650bc8, "imul ax, mem, imm");

    *mem = 0x1234567812345678UL;
    asm ("imull $311, %1, %%eax" : "+a"(a) : "m"(*mem));
    report(a == 0x1d950bc8, "imul eax, mem, imm");

    *mem = 0x1234567812345678UL;
    asm ("imulq $311, %1, %%rax" : "+a"(a) : "m"(*mem));
    report(a == 0x1D950BDE1D950BC8L, "imul rax, mem, imm");
}

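/*
 * ASM_TRY installs an exception fixup: if the instruction faults,
 * control transfers to the given label, so the "movb $0, %2" that
 * clears ex is skipped and ex != 0 signals that a fault occurred.
 */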
static void test_muldiv(long *mem)
{
    long a, d, aa, dd;
    u8 ex = 1;

    *mem = 0; a = 1; d = 2;
    asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
	 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
    report(a == 1 && d == 2 && ex, "divq (fault)");

    *mem = 987654321098765UL; a = 123456789012345UL; d = 123456789012345UL;
    asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
	 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
    report(a == 0x1ffffffb1b963b33ul && d == 0x273ba4384ede2ul && !ex,
           "divq (1)");

    aa = 0x1111111111111111; dd = 0x2222222222222222;
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mulb %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x1111111111110363 && d == dd, "mulb mem");
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mulw %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x111111111111c963 && d == 0x2222222222220369, "mulw mem");
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mull %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x962fc963 && d == 0x369d036, "mull mem");
    *mem = 0x3333333333333333; a = aa; d = dd;
    asm("mulq %2" : "+a"(a), "+d"(d) : "m"(*mem));
    report(a == 0x2fc962fc962fc963 && d == 0x369d0369d0369d0, "mulq mem");
}

typedef unsigned __attribute__((vector_size(16))) sse128;

typedef union {
    sse128 sse;
    unsigned u[4];
} sse_union;

static bool sseeq(sse_union *v1, sse_union *v2)
{
    bool ok = true;
    int i;

    for (i = 0; i < 4; ++i) {
	ok &= v1->u[i] == v2->u[i];
    }

    return ok;
}

static __attribute__((target("sse2"))) void test_sse(sse_union *mem)
{
    sse_union v;

    write_cr0(read_cr0() & ~6); /* EM, TS */
    write_cr4(read_cr4() | 0x200); /* OSFXSR */
    v.u[0] = 1; v.u[1] = 2; v.u[2] = 3; v.u[3] = 4;
    asm("movdqu %1, %0" : "=m"(*mem) : "x"(v.sse));
    report(sseeq(&v, mem), "movdqu (read)");
    mem->u[0] = 5; mem->u[1] = 6; mem->u[2] = 7; mem->u[3] = 8;
    asm("movdqu %1, %0" : "=x"(v.sse) : "m"(*mem));
    report(sseeq(mem, &v), "movdqu (write)");

    v.u[0] = 1; v.u[1] = 2; v.u[2] = 3; v.u[3] = 4;
    asm("movaps %1, %0" : "=m"(*mem) : "x"(v.sse));
    report(sseeq(mem, &v), "movaps (read)");
    mem->u[0] = 5; mem->u[1] = 6; mem->u[2] = 7; mem->u[3] = 8;
    asm("movaps %1, %0" : "=x"(v.sse) : "m"(*mem));
    report(sseeq(&v, mem), "movaps (write)");

    v.u[0] = 1; v.u[1] = 2; v.u[2] = 3; v.u[3] = 4;
    asm("movapd %1, %0" : "=m"(*mem) : "x"(v.sse));
    report(sseeq(mem, &v), "movapd (read)");
    mem->u[0] = 5; mem->u[1] = 6; mem->u[2] = 7; mem->u[3] = 8;
    asm("movapd %1, %0" : "=x"(v.sse) : "m"(*mem));
    report(sseeq(&v, mem), "movapd (write)");
}

static void test_mmx(uint64_t *mem)
{
    uint64_t v;

    write_cr0(read_cr0() & ~6); /* EM, TS */
    asm volatile("fninit");
    v = 0x0102030405060708ULL;
    asm("movq %1, %0" : "=m"(*mem) : "y"(v));
    report(v == *mem, "movq (mmx, read)");
    *mem = 0x8070605040302010ull;
    asm("movq %1, %0" : "=y"(v) : "m"(*mem));
    report(v == *mem, "movq (mmx, write)");
}

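/*
 * Hand-assembled "movb $1, disp32(%rip)": opcode 0xc6, ModRM 0x05
 * (rip-relative), a 32-bit displacement, then the imm8.  The
 * displacement is relative to the end of the 7-byte instruction.
 */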
static void test_rip_relative(unsigned *mem, char *insn_ram)
{
    /* movb $1, mem+2(%rip) */
    insn_ram[0] = 0xc6;
    insn_ram[1] = 0x05;
    *(unsigned *)&insn_ram[2] = 2 + (char *)mem - (insn_ram + 7);
    insn_ram[6] = 0x01;
    /* ret */
    insn_ram[7] = 0xc3;

    *mem = 0;
    asm("callq *%1" : "+m"(*mem) : "r"(insn_ram));
    report(*mem == 0x10000, "movb $imm, 0(%%rip)");
}

static void test_shld_shrd(u32 *mem)
{
    *mem = 0x12345678;
    asm("shld %2, %1, %0" : "+m"(*mem) : "r"(0xaaaaaaaaU), "c"((u8)3));
    report(*mem == ((0x12345678 << 3) | 5), "shld (cl)");
    *mem = 0x12345678;
    asm("shrd %2, %1, %0" : "+m"(*mem) : "r"(0x55555555U), "c"((u8)3));
    report(*mem == ((0x12345678 >> 3) | (5u << 29)), "shrd (cl)");
}

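/*
 * A false cmov condition leaves %eax unchanged, yet the 32-bit
 * destination write still zero-extends the upper half of %rax, hence
 * the expected value of plain 0x12345678.
 */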
static void test_cmov(u32 *mem)
{
	u64 val;
	*mem = 0xabcdef12u;
	asm ("movq $0x1234567812345678, %%rax\n\t"
	     "cmpl %%eax, %%eax\n\t"
	     "cmovnel (%[mem]), %%eax\n\t"
	     "movq %%rax, %[val]\n\t"
	     : [val]"=r"(val) : [mem]"r"(mem) : "%rax", "cc");
	report(val == 0x12345678ul, "cmovnel");
}

static unsigned long rip_advance;

static void advance_rip_and_note_exception(struct ex_regs *regs)
{
    ++exceptions;
    regs->rip += rip_advance;
}

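/*
 * With all x87 exceptions unmasked, 0/0 leaves a pending fault that is
 * delivered as #MF when the next x87/MMX instruction executes, i.e. on
 * the forced-emulation movq below.
 */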
static void test_mmx_movq_mf(uint64_t *mem)
{
    /* movq %mm0, (%rax) */
    extern char movq_start, movq_end;

    uint16_t fcw = 0;  /* all exceptions unmasked */
    write_cr0(read_cr0() & ~6);  /* TS, EM */
    exceptions = 0;
    handle_exception(MF_VECTOR, advance_rip_and_note_exception);
    asm volatile("fninit; fldcw %0" : : "m"(fcw));
    asm volatile("fldz; fldz; fdivp"); /* generate exception */

    rip_advance = &movq_end - &movq_start;
    asm(KVM_FEP "movq_start: movq %mm0, (%rax); movq_end:");
    /* exit MMX mode */
    asm volatile("fnclex; emms");
    report(exceptions == 1, "movq mmx generates #MF");
    handle_exception(MF_VECTOR, 0);
}

static void test_jmp_noncanonical(uint64_t *mem)
{
	extern char nc_jmp_start, nc_jmp_end;

	*mem = 0x1111111111111111ul;

	exceptions = 0;
	rip_advance = &nc_jmp_end - &nc_jmp_start;
	handle_exception(GP_VECTOR, advance_rip_and_note_exception);
	asm volatile ("nc_jmp_start: jmp *%0; nc_jmp_end:" : : "m"(*mem));
	report(exceptions == 1, "jump to non-canonical address");
	handle_exception(GP_VECTOR, 0);
}

static void test_movabs(uint64_t *mem)
{
    /* mov $0x9090909090909090, %rcx */
    unsigned long rcx;
    asm(KVM_FEP "mov $0x9090909090909090, %0" : "=c" (rcx) : "0" (0));
    report(rcx == 0x9090909090909090, "64-bit mov imm2");
}

static void test_smsw_reg(uint64_t *mem)
{
	unsigned long cr0 = read_cr0();
	unsigned long rax;
	const unsigned long in_rax = 0x1234567890abcdeful;

	asm(KVM_FEP "smsww %w0\n\t" : "=a" (rax) : "0" (in_rax));
	report((u16)rax == (u16)cr0 && rax >> 16 == in_rax >> 16,
	       "16-bit smsw reg");

	asm(KVM_FEP "smswl %k0\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == (u32)cr0, "32-bit smsw reg");

	asm(KVM_FEP "smswq %q0\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == cr0, "64-bit smsw reg");
}

static void test_nop(uint64_t *mem)
{
	unsigned long rax;
	const unsigned long in_rax = 0x1234567890abcdeful;
	asm(KVM_FEP "nop\n\t" : "=a" (rax) : "0" (in_rax));
	report(rax == in_rax, "nop");
}

static void test_mov_dr(uint64_t *mem)
{
	unsigned long rax;
	const unsigned long in_rax = 0;
	bool rtm_support = this_cpu_has(X86_FEATURE_RTM);
	unsigned long dr6_fixed_1 = rtm_support ? 0xfffe0ff0ul : 0xffff0ff0ul;
	asm(KVM_FEP "movq %0, %%dr6\n\t"
	    KVM_FEP "movq %%dr6, %0\n\t" : "=a" (rax) : "a" (in_rax));
	report(rax == dr6_fixed_1, "mov_dr6");
}

static void test_push16(uint64_t *mem)
{
	uint64_t rsp1, rsp2;
	uint16_t r;

	asm volatile (	"movq %%rsp, %[rsp1]\n\t"
			"pushw %[v]\n\t"
			"popw %[r]\n\t"
			"movq %%rsp, %[rsp2]\n\t"
			"movq %[rsp1], %%rsp\n\t" :
			[rsp1]"=r"(rsp1), [rsp2]"=r"(rsp2), [r]"=r"(r)
			: [v]"m"(*mem) : "memory");
	report(rsp1 == rsp2, "push16");
}

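/*
 * main() maps the same ioram physical page at mem and mem + 4096 (see
 * the two install_page() calls below), so a 16-bit access at offset
 * 4095 spans two virtual pages while hitting a single MMIO page.
 */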
static void test_crosspage_mmio(volatile uint8_t *mem)
{
    volatile uint16_t w, *pw;

    pw = (volatile uint16_t *)&mem[4095];
    mem[4095] = 0x99;
    mem[4096] = 0x77;
    asm volatile("mov %1, %0" : "=r"(w) : "m"(*pw) : "memory");
    report(w == 0x7799, "cross-page mmio read");
    asm volatile("mov %1, %0" : "=m"(*pw) : "r"((uint16_t)0x88aa));
    report(mem[4095] == 0xaa && mem[4096] == 0x88, "cross-page mmio write");
}

static void test_string_io_mmio(volatile uint8_t *mem)
{
	/* Cross MMIO pages. */
	volatile uint8_t *mmio = mem + 4032;

	asm volatile("outw %%ax, %%dx  \n\t" : : "a"(0x9999), "d"(TESTDEV_IO_PORT));

	asm volatile ("cld; rep insb" : : "d" (TESTDEV_IO_PORT), "D" (mmio), "c" (1024));

	report(mmio[1023] == 0x99, "string_io_mmio");
}

/* kvm doesn't allow lidt/lgdt from mmio, so the test is disabled */
#if 0
static void test_lgdt_lidt(volatile uint8_t *mem)
{
    struct descriptor_table_ptr orig, fresh = {};

    sgdt(&orig);
    *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
	.limit = 0xf234,
	.base = 0x12345678abcd,
    };
    cli();
    asm volatile("lgdt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
    sgdt(&fresh);
    lgdt(&orig);
    sti();
    report(orig.limit == fresh.limit && orig.base == fresh.base,
           "lgdt (long address)");

    sidt(&orig);
    *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
	.limit = 0x432f,
	.base = 0xdbca87654321,
    };
    cli();
    asm volatile("lidt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
    sidt(&fresh);
    lidt(&orig);
    sti();
    report(orig.limit == fresh.limit && orig.base == fresh.base,
           "lidt (long address)");
}
#endif

static void ss_bad_rpl(struct ex_regs *regs)
{
    extern char ss_bad_rpl_cont;

    ++exceptions;
    regs->rip = (ulong)&ss_bad_rpl_cont;
}

static void test_sreg(volatile uint16_t *mem)
{
    u16 ss = read_ss();

    // check for null segment load
    *mem = 0;
    asm volatile("mov %0, %%ss" : : "m"(*mem));
    report(read_ss() == 0, "mov null, %%ss");

    // check for exception when ss.rpl != cpl on null segment load
    exceptions = 0;
    handle_exception(GP_VECTOR, ss_bad_rpl);
    *mem = 3;
    asm volatile("mov %0, %%ss; ss_bad_rpl_cont:" : : "m"(*mem));
    report(exceptions == 1 && read_ss() == 0,
           "mov null, %%ss (with ss.rpl != cpl)");
    handle_exception(GP_VECTOR, 0);
    write_ss(ss);
}

static uint64_t usr_gs_mov(void)
{
    static uint64_t dummy = MAGIC_NUM;
    uint64_t dummy_ptr = (uint64_t)&dummy;
    uint64_t ret;

    dummy_ptr -= GS_BASE;
    asm volatile("mov %%gs:(%%rcx), %%rax" : "=a"(ret): "c"(dummy_ptr) :);

    return ret;
}

static void test_iret(void)
{
    uint64_t val;
    bool raised_vector;

    /* Update GS base to 4MiB */
    wrmsr(MSR_GS_BASE, GS_BASE);

    /*
     * Per the SDM, when iret returns to an outer privilege level, each
     * data segment register (ES, FS, GS, and DS) whose contents fail
     * the privilege check is loaded with a null selector.
     *
     * In this test case, GS becomes null.
     */
    val = run_in_user((usermode_func)usr_gs_mov, GP_VECTOR,
                      0, 0, 0, 0, &raised_vector);

    report(val == MAGIC_NUM, "Test ret/iret with a nullified segment");
}

/* Broken emulation causes triple fault, which skips the other tests. */
#if 0
static void test_lldt(volatile uint16_t *mem)
{
    u64 gdt[] = { 0, /* null descriptor */
#ifdef __x86_64__
		  0, /* ldt descriptor is 16 bytes in long mode */
#endif
		  0x0000f82000000ffffull /* ldt descriptor */ };
    struct descriptor_table_ptr gdt_ptr = { .limit = sizeof(gdt) - 1,
					    .base = (ulong)&gdt };
    struct descriptor_table_ptr orig_gdt;

    cli();
    sgdt(&orig_gdt);
    lgdt(&gdt_ptr);
    *mem = 0x8;
    asm volatile("lldt %0" : : "m"(*mem));
    lgdt(&orig_gdt);
    sti();
    report(sldt() == *mem, "lldt");
}
#endif

static void test_ltr(volatile uint16_t *mem)
{
    struct descriptor_table_ptr gdt_ptr;
    uint64_t *gdt, *trp;
    uint16_t tr = str();
    uint64_t busy_mask = (uint64_t)1 << 41;

    sgdt(&gdt_ptr);
    gdt = (uint64_t *)gdt_ptr.base;
    trp = &gdt[tr >> 3];
    *trp &= ~busy_mask;
    *mem = tr;
    asm volatile("ltr %0" : : "m"(*mem) : "memory");
    report(str() == tr && (*trp & busy_mask), "ltr");
}

static void test_simplealu(u32 *mem)
{
    *mem = 0x1234;
    asm("or %1, %0" : "+m"(*mem) : "r"(0x8001));
    report(*mem == 0x9235, "or");
    asm("add %1, %0" : "+m"(*mem) : "r"(2));
    report(*mem == 0x9237, "add");
    asm("xor %1, %0" : "+m"(*mem) : "r"(0x1111));
    report(*mem == 0x8326, "xor");
    asm("sub %1, %0" : "+m"(*mem) : "r"(0x26));
    report(*mem == 0x8300, "sub");
    asm("clc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
    report(*mem == 0x8400, "adc(0)");
    asm("stc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
    report(*mem == 0x8501, "adc(1)");
    asm("clc; sbb %1, %0" : "+m"(*mem) : "r"(0));
    report(*mem == 0x8501, "sbb(0)");
    asm("stc; sbb %1, %0" : "+m"(*mem) : "r"(0));
    report(*mem == 0x8500, "sbb(1)");
    asm("and %1, %0" : "+m"(*mem) : "r"(0xfe77));
    report(*mem == 0x8400, "and");
    asm("test %1, %0" : "+m"(*mem) : "r"(0xf000));
    report(*mem == 0x8400, "test");
}

static void illegal_movbe_handler(struct ex_regs *regs)
{
	extern char bad_movbe_cont;

	++exceptions;
	regs->rip = (ulong)&bad_movbe_cont;
}

static void test_illegal_movbe(void)
{
	if (!this_cpu_has(X86_FEATURE_MOVBE)) {
		report_skip("illegal movbe");
		return;
	}

	exceptions = 0;
	handle_exception(UD_VECTOR, illegal_movbe_handler);
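	/*
	 * 0f 38 f0 /r is movbe r32, m32; ModRM 0xc0 encodes a register
	 * source, which is illegal for movbe and must raise #UD.
	 */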
	asm volatile(".byte 0x0f; .byte 0x38; .byte 0xf0; .byte 0xc0;\n\t"
		     " bad_movbe_cont:" : : : "rax");
	report(exceptions == 1, "illegal movbe");
	handle_exception(UD_VECTOR, 0);
}

static void record_no_fep(struct ex_regs *regs)
{
	fep_available = 0;
	regs->rip += KVM_FEP_LENGTH;
}

int main(void)
{
	void *mem;
	void *insn_page;
	void *insn_ram;
	unsigned long t1, t2;

	setup_vm();
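	/*
	 * Probe for the forced emulation prefix: if KVM does not
	 * recognize it, the ud2 raises #UD and record_no_fep() clears
	 * fep_available.
	 */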
	handle_exception(UD_VECTOR, record_no_fep);
	asm(KVM_FEP "nop");
	handle_exception(UD_VECTOR, 0);

	mem = alloc_vpages(2);
	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem);
	// install the page twice to test cross-page mmio
	install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem + 4096);
	insn_page = alloc_page();
	insn_ram = vmap(virt_to_phys(insn_page), 4096);

	// test mov reg, r/m and mov r/m, reg
	t1 = 0x123456789abcdef;
	asm volatile("mov %[t1], (%[mem]) \n\t"
		     "mov (%[mem]), %[t2]"
		     : [t2]"=r"(t2)
		     : [t1]"r"(t1), [mem]"r"(mem)
		     : "memory");
	report(t2 == 0x123456789abcdef, "mov reg, r/m (1)");

	test_simplealu(mem);
	test_cmps(mem);
	test_scas(mem);

	test_push(mem);
	test_pop(mem);

	test_xchg(mem);
	test_xadd(mem);

	test_cr8();

	test_smsw(mem);
	test_lmsw();
	test_ljmp(mem);
	test_stringio();
	test_incdecnotneg(mem);
	test_btc(mem);
	test_bsfbsr(mem);
	test_imul(mem);
	test_muldiv(mem);
	test_sse(mem);
	test_mmx(mem);
	test_rip_relative(mem, insn_ram);
	test_shld_shrd(mem);
	//test_lgdt_lidt(mem);
	test_sreg(mem);
	test_iret();
	//test_lldt(mem);
	test_ltr(mem);
	test_cmov(mem);

	if (fep_available) {
		test_mmx_movq_mf(mem);
		test_movabs(mem);
		test_smsw_reg(mem);
		test_nop(mem);
		test_mov_dr(mem);
	} else {
		report_skip("skipping register-only tests, "
			    "use kvm.force_emulation_prefix=1 to enable");
	}

	test_push16(mem);
	test_crosspage_mmio(mem);

	test_string_io_mmio(mem);

	test_jmp_noncanonical(mem);
	test_illegal_movbe();

	return report_summary();
}