1 #include "ioram.h" 2 #include "vm.h" 3 #include "libcflat.h" 4 #include "desc.h" 5 #include "types.h" 6 #include "processor.h" 7 #include "vmalloc.h" 8 #include "alloc_page.h" 9 #include "usermode.h" 10 11 #define TESTDEV_IO_PORT 0xe0 12 13 #define MAGIC_NUM 0xdeadbeefdeadbeefUL 14 #define GS_BASE 0x400000 15 16 static int exceptions; 17 18 /* Forced emulation prefix, used to invoke the emulator unconditionally. */ 19 #define KVM_FEP "ud2; .byte 'k', 'v', 'm';" 20 #define KVM_FEP_LENGTH 5 21 static int fep_available = 1; 22 23 struct regs { 24 u64 rax, rbx, rcx, rdx; 25 u64 rsi, rdi, rsp, rbp; 26 u64 r8, r9, r10, r11; 27 u64 r12, r13, r14, r15; 28 u64 rip, rflags; 29 }; 30 struct regs inregs, outregs, save; 31 32 struct insn_desc { 33 u64 ptr; 34 size_t len; 35 }; 36 37 static char st1[] = "abcdefghijklmnop"; 38 39 static void test_stringio(void) 40 { 41 unsigned char r = 0; 42 asm volatile("cld \n\t" 43 "movw %0, %%dx \n\t" 44 "rep outsb \n\t" 45 : : "i"((short)TESTDEV_IO_PORT), 46 "S"(st1), "c"(sizeof(st1) - 1)); 47 asm volatile("inb %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT)); 48 report(r == st1[sizeof(st1) - 2], "outsb up"); /* last char */ 49 50 asm volatile("std \n\t" 51 "movw %0, %%dx \n\t" 52 "rep outsb \n\t" 53 : : "i"((short)TESTDEV_IO_PORT), 54 "S"(st1 + sizeof(st1) - 2), "c"(sizeof(st1) - 1)); 55 asm volatile("cld \n\t" : : ); 56 asm volatile("in %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT)); 57 report(r == st1[0], "outsb down"); 58 } 59 60 static void test_cmps_one(unsigned char *m1, unsigned char *m3) 61 { 62 void *rsi, *rdi; 63 long rcx, tmp; 64 65 rsi = m1; rdi = m3; rcx = 30; 66 asm volatile("xor %[tmp], %[tmp] \n\t" 67 "repe cmpsb" 68 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 69 : : "cc"); 70 report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe/cmpsb (1)"); 71 72 rsi = m1; rdi = m3; rcx = 30; 73 asm volatile("or $1, %[tmp]\n\t" // clear ZF 74 "repe cmpsb" 75 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 76 : : "cc"); 77 report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, 78 "repe cmpsb (1.zf)"); 79 80 rsi = m1; rdi = m3; rcx = 15; 81 asm volatile("xor %[tmp], %[tmp] \n\t" 82 "repe cmpsw" 83 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 84 : : "cc"); 85 report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)"); 86 87 rsi = m1; rdi = m3; rcx = 7; 88 asm volatile("xor %[tmp], %[tmp] \n\t" 89 "repe cmpsl" 90 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 91 : : "cc"); 92 report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpll (1)"); 93 94 rsi = m1; rdi = m3; rcx = 4; 95 asm volatile("xor %[tmp], %[tmp] \n\t" 96 "repe cmpsq" 97 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 98 : : "cc"); 99 report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)"); 100 101 rsi = m1; rdi = m3; rcx = 130; 102 asm volatile("xor %[tmp], %[tmp] \n\t" 103 "repe cmpsb" 104 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 105 : : "cc"); 106 report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101, 107 "repe cmpsb (2)"); 108 109 rsi = m1; rdi = m3; rcx = 65; 110 asm volatile("xor %[tmp], %[tmp] \n\t" 111 "repe cmpsw" 112 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 113 : : "cc"); 114 report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102, 115 "repe cmpsw (2)"); 116 117 rsi = m1; rdi = m3; rcx = 32; 118 asm volatile("xor %[tmp], %[tmp] \n\t" 119 "repe cmpsl" 120 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 121 : : "cc"); 122 report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104, 123 "repe cmpll (2)"); 124 125 rsi = m1; rdi = m3; rcx = 16; 126 asm volatile("xor %[tmp], %[tmp] \n\t" 127 "repe cmpsq" 128 : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp) 129 : : "cc"); 130 report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104, 131 "repe cmpsq (2)"); 132 133 } 134 135 static void test_cmps(void *mem) 136 { 137 unsigned char *m1 = mem, *m2 = mem + 1024; 138 unsigned char m3[1024]; 139 140 for (int i = 0; i < 100; ++i) 141 m1[i] = m2[i] = m3[i] = i; 142 for (int i = 100; i < 200; ++i) 143 m1[i] = (m3[i] = m2[i] = i) + 1; 144 test_cmps_one(m1, m3); 145 test_cmps_one(m1, m2); 146 } 147 148 static void test_scas(void *mem) 149 { 150 bool z; 151 void *di; 152 153 *(ulong *)mem = 0x77665544332211; 154 155 di = mem; 156 asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff11)); 157 report(di == mem + 1 && z, "scasb match"); 158 159 di = mem; 160 asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff54)); 161 report(di == mem + 1 && !z, "scasb mismatch"); 162 163 di = mem; 164 asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff2211)); 165 report(di == mem + 2 && z, "scasw match"); 166 167 di = mem; 168 asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xffdd11)); 169 report(di == mem + 2 && !z, "scasw mismatch"); 170 171 di = mem; 172 asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff44332211ul)); 173 report(di == mem + 4 && z, "scasd match"); 174 175 di = mem; 176 asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0x45332211)); 177 report(di == mem + 4 && !z, "scasd mismatch"); 178 179 di = mem; 180 asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(0x77665544332211ul)); 181 report(di == mem + 8 && z, "scasq match"); 182 183 di = mem; 184 asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(3)); 185 report(di == mem + 8 && !z, "scasq mismatch"); 186 } 187 188 static void test_cr8(void) 189 { 190 unsigned long src, dst; 191 192 dst = 777; 193 src = 3; 194 asm volatile("mov %[src], %%cr8; mov %%cr8, %[dst]" 195 : [dst]"+r"(dst), [src]"+r"(src)); 196 report(dst == 3 && src == 3, "mov %%cr8"); 197 } 198 199 static void test_push(void *mem) 200 { 201 unsigned long tmp; 202 unsigned long *stack_top = mem + 4096; 203 unsigned long *new_stack_top; 204 unsigned long memw = 0x123456789abcdeful; 205 206 memset(mem, 0x55, (void *)stack_top - mem); 207 208 asm volatile("mov %%rsp, %[tmp] \n\t" 209 "mov %[stack_top], %%rsp \n\t" 210 "pushq $-7 \n\t" 211 "pushq %[reg] \n\t" 212 "pushq (%[mem]) \n\t" 213 "pushq $-7070707 \n\t" 214 "mov %%rsp, %[new_stack_top] \n\t" 215 "mov %[tmp], %%rsp" 216 : [tmp]"=&r"(tmp), [new_stack_top]"=r"(new_stack_top) 217 : [stack_top]"r"(stack_top), 218 [reg]"r"(-17l), [mem]"r"(&memw) 219 : "memory"); 220 221 report(stack_top[-1] == -7ul, "push $imm8"); 222 report(stack_top[-2] == -17ul, "push %%reg"); 223 report(stack_top[-3] == 0x123456789abcdeful, "push mem"); 224 report(stack_top[-4] == -7070707, "push $imm"); 225 } 226 227 static void test_pop(void *mem) 228 { 229 unsigned long tmp, tmp3, rsp, rbp; 230 unsigned long *stack_top = mem + 4096; 231 unsigned long memw = 0x123456789abcdeful; 232 static unsigned long tmp2; 233 234 memset(mem, 0x55, (void *)stack_top - mem); 235 236 asm volatile("pushq %[val] \n\t" 237 "popq (%[mem])" 238 : : [val]"m"(memw), [mem]"r"(mem) : "memory"); 239 report(*(unsigned long *)mem == memw, "pop mem"); 240 241 memw = 7 - memw; 242 asm volatile("mov %%rsp, %[tmp] \n\t" 243 "mov %[stack_top], %%rsp \n\t" 244 "pushq %[val] \n\t" 245 "popq %[tmp2] \n\t" 246 "mov %[tmp], %%rsp" 247 : [tmp]"=&r"(tmp), [tmp2]"=m"(tmp2) 248 : [val]"r"(memw), [stack_top]"r"(stack_top) 249 : "memory"); 250 report(tmp2 == memw, "pop mem (2)"); 251 252 memw = 129443 - memw; 253 asm volatile("mov %%rsp, %[tmp] \n\t" 254 "mov %[stack_top], %%rsp \n\t" 255 "pushq %[val] \n\t" 256 "popq %[tmp2] \n\t" 257 "mov %[tmp], %%rsp" 258 : [tmp]"=&r"(tmp), [tmp2]"=r"(tmp2) 259 : [val]"r"(memw), [stack_top]"r"(stack_top) 260 : "memory"); 261 report(tmp2 == memw, "pop reg"); 262 263 asm volatile("mov %%rsp, %[tmp] \n\t" 264 "mov %[stack_top], %%rsp \n\t" 265 "push $1f \n\t" 266 "ret \n\t" 267 "2: jmp 2b \n\t" 268 "1: mov %[tmp], %%rsp" 269 : [tmp]"=&r"(tmp) : [stack_top]"r"(stack_top) 270 : "memory"); 271 report_pass("ret"); 272 273 stack_top[-1] = 0x778899; 274 asm volatile("mov %[stack_top], %%r8 \n\t" 275 "mov %%rsp, %%r9 \n\t" 276 "xchg %%rbp, %%r8 \n\t" 277 "leave \n\t" 278 "xchg %%rsp, %%r9 \n\t" 279 "xchg %%rbp, %%r8 \n\t" 280 "mov %%r9, %[tmp] \n\t" 281 "mov %%r8, %[tmp3]" 282 : [tmp]"=&r"(tmp), [tmp3]"=&r"(tmp3) : [stack_top]"r"(stack_top-1) 283 : "memory", "r8", "r9"); 284 report(tmp == (ulong)stack_top && tmp3 == 0x778899, "leave"); 285 286 rbp = 0xaa55aa55bb66bb66ULL; 287 rsp = (unsigned long)stack_top; 288 asm volatile("mov %[rsp], %%r8 \n\t" 289 "mov %[rbp], %%r9 \n\t" 290 "xchg %%rsp, %%r8 \n\t" 291 "xchg %%rbp, %%r9 \n\t" 292 "enter $0x1238, $0 \n\t" 293 "xchg %%rsp, %%r8 \n\t" 294 "xchg %%rbp, %%r9 \n\t" 295 "xchg %%r8, %[rsp] \n\t" 296 "xchg %%r9, %[rbp]" 297 : [rsp]"+a"(rsp), [rbp]"+b"(rbp) : : "memory", "r8", "r9"); 298 report(rsp == (unsigned long)stack_top - 8 - 0x1238 299 && rbp == (unsigned long)stack_top - 8 300 && stack_top[-1] == 0xaa55aa55bb66bb66ULL, 301 "enter"); 302 } 303 304 static void test_ljmp(void *mem) 305 { 306 unsigned char *m = mem; 307 volatile int res = 1; 308 309 *(unsigned long**)m = &&jmpf; 310 asm volatile ("data16 mov %%cs, %0":"=m"(*(m + sizeof(unsigned long)))); 311 asm volatile ("rex64 ljmp *%0"::"m"(*m)); 312 res = 0; 313 jmpf: 314 report(res, "ljmp"); 315 } 316 317 static void test_incdecnotneg(void *mem) 318 { 319 unsigned long *m = mem, v = 1234; 320 unsigned char *mb = mem, vb = 66; 321 322 *m = 0; 323 324 asm volatile ("incl %0":"+m"(*m)); 325 report(*m == 1, "incl"); 326 asm volatile ("decl %0":"+m"(*m)); 327 report(*m == 0, "decl"); 328 asm volatile ("incb %0":"+m"(*m)); 329 report(*m == 1, "incb"); 330 asm volatile ("decb %0":"+m"(*m)); 331 report(*m == 0, "decb"); 332 333 asm volatile ("lock incl %0":"+m"(*m)); 334 report(*m == 1, "lock incl"); 335 asm volatile ("lock decl %0":"+m"(*m)); 336 report(*m == 0, "lock decl"); 337 asm volatile ("lock incb %0":"+m"(*m)); 338 report(*m == 1, "lock incb"); 339 asm volatile ("lock decb %0":"+m"(*m)); 340 report(*m == 0, "lock decb"); 341 342 *m = v; 343 344 asm ("lock negq %0" : "+m"(*m)); v = -v; 345 report(*m == v, "lock negl"); 346 asm ("lock notq %0" : "+m"(*m)); v = ~v; 347 report(*m == v, "lock notl"); 348 349 *mb = vb; 350 351 asm ("lock negb %0" : "+m"(*mb)); vb = -vb; 352 report(*mb == vb, "lock negb"); 353 asm ("lock notb %0" : "+m"(*mb)); vb = ~vb; 354 report(*mb == vb, "lock notb"); 355 } 356 357 static void test_smsw(uint64_t *h_mem) 358 { 359 char mem[16]; 360 unsigned short msw, msw_orig, *pmsw; 361 int i, zero; 362 363 msw_orig = read_cr0(); 364 365 asm("smsw %0" : "=r"(msw)); 366 report(msw == msw_orig, "smsw (1)"); 367 368 memset(mem, 0, 16); 369 pmsw = (void *)mem; 370 asm("smsw %0" : "=m"(pmsw[4])); 371 zero = 1; 372 for (i = 0; i < 8; ++i) 373 if (i != 4 && pmsw[i]) 374 zero = 0; 375 report(msw == pmsw[4] && zero, "smsw (2)"); 376 377 /* Trigger exit on smsw */ 378 *h_mem = 0x12345678abcdeful; 379 asm volatile("smsw %0" : "+m"(*h_mem)); 380 report(msw == (unsigned short)*h_mem && 381 (*h_mem & ~0xfffful) == 0x12345678ab0000ul, "smsw (3)"); 382 } 383 384 static void test_lmsw(void) 385 { 386 char mem[16]; 387 unsigned short msw, *pmsw; 388 unsigned long cr0; 389 390 cr0 = read_cr0(); 391 392 msw = cr0 ^ 8; 393 asm("lmsw %0" : : "r"(msw)); 394 printf("before %lx after %lx\n", cr0, read_cr0()); 395 report((cr0 ^ read_cr0()) == 8, "lmsw (1)"); 396 397 pmsw = (void *)mem; 398 *pmsw = cr0; 399 asm("lmsw %0" : : "m"(*pmsw)); 400 printf("before %lx after %lx\n", cr0, read_cr0()); 401 report(cr0 == read_cr0(), "lmsw (2)"); 402 403 /* lmsw can't clear cr0.pe */ 404 msw = (cr0 & ~1ul) ^ 4; /* change EM to force trap */ 405 asm("lmsw %0" : : "r"(msw)); 406 report((cr0 ^ read_cr0()) == 4 && (cr0 & 1), "lmsw (3)"); 407 408 /* back to normal */ 409 msw = cr0; 410 asm("lmsw %0" : : "r"(msw)); 411 } 412 413 static void test_xchg(void *mem) 414 { 415 unsigned long *memq = mem; 416 unsigned long rax; 417 418 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 419 "mov %%rax, (%[memq])\n\t" 420 "mov $0xfedcba9876543210, %%rax\n\t" 421 "xchg %%al, (%[memq])\n\t" 422 "mov %%rax, %[rax]\n\t" 423 : [rax]"=r"(rax) 424 : [memq]"r"(memq) 425 : "memory", "rax"); 426 report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcd10, 427 "xchg reg, r/m (1)"); 428 429 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 430 "mov %%rax, (%[memq])\n\t" 431 "mov $0xfedcba9876543210, %%rax\n\t" 432 "xchg %%ax, (%[memq])\n\t" 433 "mov %%rax, %[rax]\n\t" 434 : [rax]"=r"(rax) 435 : [memq]"r"(memq) 436 : "memory", "rax"); 437 report(rax == 0xfedcba987654cdef && *memq == 0x123456789ab3210, 438 "xchg reg, r/m (2)"); 439 440 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 441 "mov %%rax, (%[memq])\n\t" 442 "mov $0xfedcba9876543210, %%rax\n\t" 443 "xchg %%eax, (%[memq])\n\t" 444 "mov %%rax, %[rax]\n\t" 445 : [rax]"=r"(rax) 446 : [memq]"r"(memq) 447 : "memory", "rax"); 448 report(rax == 0x89abcdef && *memq == 0x123456776543210, 449 "xchg reg, r/m (3)"); 450 451 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 452 "mov %%rax, (%[memq])\n\t" 453 "mov $0xfedcba9876543210, %%rax\n\t" 454 "xchg %%rax, (%[memq])\n\t" 455 "mov %%rax, %[rax]\n\t" 456 : [rax]"=r"(rax) 457 : [memq]"r"(memq) 458 : "memory", "rax"); 459 report(rax == 0x123456789abcdef && *memq == 0xfedcba9876543210, 460 "xchg reg, r/m (4)"); 461 } 462 463 static void test_xadd(void *mem) 464 { 465 unsigned long *memq = mem; 466 unsigned long rax; 467 468 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 469 "mov %%rax, (%[memq])\n\t" 470 "mov $0xfedcba9876543210, %%rax\n\t" 471 "xadd %%al, (%[memq])\n\t" 472 "mov %%rax, %[rax]\n\t" 473 : [rax]"=r"(rax) 474 : [memq]"r"(memq) 475 : "memory", "rax"); 476 report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcdff, 477 "xadd reg, r/m (1)"); 478 479 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 480 "mov %%rax, (%[memq])\n\t" 481 "mov $0xfedcba9876543210, %%rax\n\t" 482 "xadd %%ax, (%[memq])\n\t" 483 "mov %%rax, %[rax]\n\t" 484 : [rax]"=r"(rax) 485 : [memq]"r"(memq) 486 : "memory", "rax"); 487 report(rax == 0xfedcba987654cdef && *memq == 0x123456789abffff, 488 "xadd reg, r/m (2)"); 489 490 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 491 "mov %%rax, (%[memq])\n\t" 492 "mov $0xfedcba9876543210, %%rax\n\t" 493 "xadd %%eax, (%[memq])\n\t" 494 "mov %%rax, %[rax]\n\t" 495 : [rax]"=r"(rax) 496 : [memq]"r"(memq) 497 : "memory", "rax"); 498 report(rax == 0x89abcdef && *memq == 0x1234567ffffffff, 499 "xadd reg, r/m (3)"); 500 501 asm volatile("mov $0x123456789abcdef, %%rax\n\t" 502 "mov %%rax, (%[memq])\n\t" 503 "mov $0xfedcba9876543210, %%rax\n\t" 504 "xadd %%rax, (%[memq])\n\t" 505 "mov %%rax, %[rax]\n\t" 506 : [rax]"=r"(rax) 507 : [memq]"r"(memq) 508 : "memory", "rax"); 509 report(rax == 0x123456789abcdef && *memq == 0xffffffffffffffff, 510 "xadd reg, r/m (4)"); 511 } 512 513 static void test_btc(void *mem) 514 { 515 unsigned int *a = mem; 516 517 memset(mem, 0, 4 * sizeof(unsigned int)); 518 519 asm ("btcl $32, %0" :: "m"(a[0]) : "memory"); 520 asm ("btcl $1, %0" :: "m"(a[1]) : "memory"); 521 asm ("btcl %1, %0" :: "m"(a[0]), "r"(66) : "memory"); 522 report(a[0] == 1 && a[1] == 2 && a[2] == 4, "btcl imm8, r/m"); 523 524 asm ("btcl %1, %0" :: "m"(a[3]), "r"(-1) : "memory"); 525 report(a[0] == 1 && a[1] == 2 && a[2] == 0x80000004, "btcl reg, r/m"); 526 527 asm ("btcq %1, %0" : : "m"(a[2]), "r"(-1l) : "memory"); 528 report(a[0] == 1 && a[1] == 0x80000002 && a[2] == 0x80000004 && a[3] == 0, 529 "btcq reg, r/m"); 530 } 531 532 static void test_bsfbsr(void *mem) 533 { 534 unsigned long rax, *memq = mem; 535 unsigned eax, *meml = mem; 536 unsigned short ax, *memw = mem; 537 unsigned char z; 538 539 *memw = 0xc000; 540 asm("bsfw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw)); 541 report(ax == 14, "bsfw r/m, reg"); 542 543 *meml = 0xc0000000; 544 asm("bsfl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml)); 545 report(eax == 30, "bsfl r/m, reg"); 546 547 *memq = 0xc00000000000; 548 asm("bsfq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq)); 549 report(rax == 46, "bsfq r/m, reg"); 550 551 *memq = 0; 552 asm("bsfq %[mem], %[a]; setz %[z]" 553 : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq)); 554 report(z == 1, "bsfq r/m, reg"); 555 556 *memw = 0xc000; 557 asm("bsrw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw)); 558 report(ax == 15, "bsrw r/m, reg"); 559 560 *meml = 0xc0000000; 561 asm("bsrl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml)); 562 report(eax == 31, "bsrl r/m, reg"); 563 564 *memq = 0xc00000000000; 565 asm("bsrq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq)); 566 report(rax == 47, "bsrq r/m, reg"); 567 568 *memq = 0; 569 asm("bsrq %[mem], %[a]; setz %[z]" 570 : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq)); 571 report(z == 1, "bsrq r/m, reg"); 572 } 573 574 static void test_imul(ulong *mem) 575 { 576 ulong a; 577 578 *mem = 51; a = 0x1234567812345678UL; 579 asm ("imulw %1, %%ax" : "+a"(a) : "m"(*mem)); 580 report(a == 0x12345678123439e8, "imul ax, mem"); 581 582 *mem = 51; a = 0x1234567812345678UL; 583 asm ("imull %1, %%eax" : "+a"(a) : "m"(*mem)); 584 report(a == 0xa06d39e8, "imul eax, mem"); 585 586 *mem = 51; a = 0x1234567812345678UL; 587 asm ("imulq %1, %%rax" : "+a"(a) : "m"(*mem)); 588 report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem"); 589 590 *mem = 0x1234567812345678UL; a = 0x8765432187654321L; 591 asm ("imulw $51, %1, %%ax" : "+a"(a) : "m"(*mem)); 592 report(a == 0x87654321876539e8, "imul ax, mem, imm8"); 593 594 *mem = 0x1234567812345678UL; 595 asm ("imull $51, %1, %%eax" : "+a"(a) : "m"(*mem)); 596 report(a == 0xa06d39e8, "imul eax, mem, imm8"); 597 598 *mem = 0x1234567812345678UL; 599 asm ("imulq $51, %1, %%rax" : "+a"(a) : "m"(*mem)); 600 report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem, imm8"); 601 602 *mem = 0x1234567812345678UL; a = 0x8765432187654321L; 603 asm ("imulw $311, %1, %%ax" : "+a"(a) : "m"(*mem)); 604 report(a == 0x8765432187650bc8, "imul ax, mem, imm"); 605 606 *mem = 0x1234567812345678UL; 607 asm ("imull $311, %1, %%eax" : "+a"(a) : "m"(*mem)); 608 report(a == 0x1d950bc8, "imul eax, mem, imm"); 609 610 *mem = 0x1234567812345678UL; 611 asm ("imulq $311, %1, %%rax" : "+a"(a) : "m"(*mem)); 612 report(a == 0x1D950BDE1D950BC8L, "imul rax, mem, imm"); 613 } 614 615 static void test_muldiv(long *mem) 616 { 617 long a, d, aa, dd; 618 u8 ex = 1; 619 620 *mem = 0; a = 1; d = 2; 621 asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:" 622 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem)); 623 report(a == 1 && d == 2 && ex, "divq (fault)"); 624 625 *mem = 987654321098765UL; a = 123456789012345UL; d = 123456789012345UL; 626 asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:" 627 : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem)); 628 report(a == 0x1ffffffb1b963b33ul && d == 0x273ba4384ede2ul && !ex, 629 "divq (1)"); 630 aa = 0x1111111111111111; dd = 0x2222222222222222; 631 *mem = 0x3333333333333333; a = aa; d = dd; 632 asm("mulb %2" : "+a"(a), "+d"(d) : "m"(*mem)); 633 report(a == 0x1111111111110363 && d == dd, "mulb mem"); 634 *mem = 0x3333333333333333; a = aa; d = dd; 635 asm("mulw %2" : "+a"(a), "+d"(d) : "m"(*mem)); 636 report(a == 0x111111111111c963 && d == 0x2222222222220369, "mulw mem"); 637 *mem = 0x3333333333333333; a = aa; d = dd; 638 asm("mull %2" : "+a"(a), "+d"(d) : "m"(*mem)); 639 report(a == 0x962fc963 && d == 0x369d036, "mull mem"); 640 *mem = 0x3333333333333333; a = aa; d = dd; 641 asm("mulq %2" : "+a"(a), "+d"(d) : "m"(*mem)); 642 report(a == 0x2fc962fc962fc963 && d == 0x369d0369d0369d0, "mulq mem"); 643 } 644 645 typedef unsigned __attribute__((vector_size(16))) sse128; 646 647 static bool sseeq(uint32_t *v1, uint32_t *v2) 648 { 649 bool ok = true; 650 int i; 651 652 for (i = 0; i < 4; ++i) { 653 ok &= v1[i] == v2[i]; 654 } 655 656 return ok; 657 } 658 659 static __attribute__((target("sse2"))) void test_sse(uint32_t *mem) 660 { 661 sse128 vv; 662 uint32_t *v = (uint32_t *)&vv; 663 664 write_cr0(read_cr0() & ~6); /* EM, TS */ 665 write_cr4(read_cr4() | 0x200); /* OSFXSR */ 666 memset(&vv, 0, sizeof(vv)); 667 668 #define TEST_RW_SSE(insn) do { \ 669 v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4; \ 670 asm(insn " %1, %0" : "=m"(*mem) : "x"(vv) : "memory"); \ 671 report(sseeq(v, mem), insn " (read)"); \ 672 mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8; \ 673 asm(insn " %1, %0" : "=x"(vv) : "m"(*mem) : "memory"); \ 674 report(sseeq(v, mem), insn " (write)"); \ 675 } while (0) 676 677 TEST_RW_SSE("movdqu"); 678 TEST_RW_SSE("movaps"); 679 TEST_RW_SSE("movapd"); 680 TEST_RW_SSE("movups"); 681 TEST_RW_SSE("movupd"); 682 #undef TEST_RW_SSE 683 } 684 685 static void unaligned_movaps_handler(struct ex_regs *regs) 686 { 687 extern char unaligned_movaps_cont; 688 689 ++exceptions; 690 regs->rip = (ulong)&unaligned_movaps_cont; 691 } 692 693 static void cross_movups_handler(struct ex_regs *regs) 694 { 695 extern char cross_movups_cont; 696 697 ++exceptions; 698 regs->rip = (ulong)&cross_movups_cont; 699 } 700 701 static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem) 702 { 703 sse128 vv; 704 uint32_t *v = (uint32_t *)&vv; 705 uint32_t *mem; 706 uint8_t *bytes = cross_mem; // aligned on PAGE_SIZE*2 707 void *page2 = (void *)(&bytes[4096]); 708 struct pte_search search; 709 pteval_t orig_pte; 710 711 // setup memory for unaligned access 712 mem = (uint32_t *)(&bytes[8]); 713 714 // test unaligned access for movups, movupd and movaps 715 v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4; 716 mem[0] = 5; mem[1] = 6; mem[2] = 8; mem[3] = 9; 717 asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory"); 718 report(sseeq(v, mem), "movups unaligned"); 719 720 v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4; 721 mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8; 722 asm("movupd %1, %0" : "=m"(*mem) : "x"(vv) : "memory"); 723 report(sseeq(v, mem), "movupd unaligned"); 724 exceptions = 0; 725 handle_exception(GP_VECTOR, unaligned_movaps_handler); 726 asm("movaps %1, %0\n\t unaligned_movaps_cont:" 727 : "=m"(*mem) : "x"(vv)); 728 handle_exception(GP_VECTOR, 0); 729 report(exceptions == 1, "unaligned movaps exception"); 730 731 // setup memory for cross page access 732 mem = (uint32_t *)(&bytes[4096-8]); 733 v[0] = 1; v[1] = 2; v[2] = 3; v[3] = 4; 734 mem[0] = 5; mem[1] = 6; mem[2] = 7; mem[3] = 8; 735 736 asm("movups %1, %0" : "=m"(*mem) : "x"(vv) : "memory"); 737 report(sseeq(v, mem), "movups unaligned crosspage"); 738 739 // invalidate second page 740 search = find_pte_level(current_page_table(), page2, 1); 741 orig_pte = *search.pte; 742 install_pte(current_page_table(), 1, page2, 0, NULL); 743 invlpg(page2); 744 745 exceptions = 0; 746 handle_exception(PF_VECTOR, cross_movups_handler); 747 asm("movups %1, %0\n\t cross_movups_cont:" : "=m"(*mem) : "x"(vv) : 748 "memory"); 749 handle_exception(PF_VECTOR, 0); 750 report(exceptions == 1, "movups crosspage exception"); 751 752 // restore invalidated page 753 install_pte(current_page_table(), 1, page2, orig_pte, NULL); 754 } 755 756 static void test_mmx(uint64_t *mem) 757 { 758 uint64_t v; 759 760 write_cr0(read_cr0() & ~6); /* EM, TS */ 761 asm volatile("fninit"); 762 v = 0x0102030405060708ULL; 763 asm("movq %1, %0" : "=m"(*mem) : "y"(v)); 764 report(v == *mem, "movq (mmx, read)"); 765 *mem = 0x8070605040302010ull; 766 asm("movq %1, %0" : "=y"(v) : "m"(*mem)); 767 report(v == *mem, "movq (mmx, write)"); 768 } 769 770 static void test_rip_relative(unsigned *mem, char *insn_ram) 771 { 772 /* movb $1, mem+2(%rip) */ 773 insn_ram[0] = 0xc6; 774 insn_ram[1] = 0x05; 775 *(unsigned *)&insn_ram[2] = 2 + (char *)mem - (insn_ram + 7); 776 insn_ram[6] = 0x01; 777 /* ret */ 778 insn_ram[7] = 0xc3; 779 780 *mem = 0; 781 asm("callq *%1" : "+m"(*mem) : "r"(insn_ram)); 782 report(*mem == 0x10000, "movb $imm, 0(%%rip)"); 783 } 784 785 static void test_shld_shrd(u32 *mem) 786 { 787 *mem = 0x12345678; 788 asm("shld %2, %1, %0" : "+m"(*mem) : "r"(0xaaaaaaaaU), "c"((u8)3)); 789 report(*mem == ((0x12345678 << 3) | 5), "shld (cl)"); 790 *mem = 0x12345678; 791 asm("shrd %2, %1, %0" : "+m"(*mem) : "r"(0x55555555U), "c"((u8)3)); 792 report(*mem == ((0x12345678 >> 3) | (5u << 29)), "shrd (cl)"); 793 } 794 795 static void test_cmov(u32 *mem) 796 { 797 u64 val; 798 *mem = 0xabcdef12u; 799 asm ("movq $0x1234567812345678, %%rax\n\t" 800 "cmpl %%eax, %%eax\n\t" 801 "cmovnel (%[mem]), %%eax\n\t" 802 "movq %%rax, %[val]\n\t" 803 : [val]"=r"(val) : [mem]"r"(mem) : "%rax", "cc"); 804 report(val == 0x12345678ul, "cmovnel"); 805 } 806 807 static unsigned long rip_advance; 808 809 static void advance_rip_and_note_exception(struct ex_regs *regs) 810 { 811 ++exceptions; 812 regs->rip += rip_advance; 813 } 814 815 static void test_mmx_movq_mf(uint64_t *mem) 816 { 817 /* movq %mm0, (%rax) */ 818 extern char movq_start, movq_end; 819 820 uint16_t fcw = 0; /* all exceptions unmasked */ 821 write_cr0(read_cr0() & ~6); /* TS, EM */ 822 exceptions = 0; 823 handle_exception(MF_VECTOR, advance_rip_and_note_exception); 824 asm volatile("fninit; fldcw %0" : : "m"(fcw)); 825 asm volatile("fldz; fldz; fdivp"); /* generate exception */ 826 827 rip_advance = &movq_end - &movq_start; 828 asm(KVM_FEP "movq_start: movq %mm0, (%rax); movq_end:"); 829 /* exit MMX mode */ 830 asm volatile("fnclex; emms"); 831 report(exceptions == 1, "movq mmx generates #MF"); 832 handle_exception(MF_VECTOR, 0); 833 } 834 835 static void test_jmp_noncanonical(uint64_t *mem) 836 { 837 extern char nc_jmp_start, nc_jmp_end; 838 839 *mem = 0x1111111111111111ul; 840 841 exceptions = 0; 842 rip_advance = &nc_jmp_end - &nc_jmp_start; 843 handle_exception(GP_VECTOR, advance_rip_and_note_exception); 844 asm volatile ("nc_jmp_start: jmp *%0; nc_jmp_end:" : : "m"(*mem)); 845 report(exceptions == 1, "jump to non-canonical address"); 846 handle_exception(GP_VECTOR, 0); 847 } 848 849 static void test_movabs(uint64_t *mem) 850 { 851 /* mov $0x9090909090909090, %rcx */ 852 unsigned long rcx; 853 asm(KVM_FEP "mov $0x9090909090909090, %0" : "=c" (rcx) : "0" (0)); 854 report(rcx == 0x9090909090909090, "64-bit mov imm2"); 855 } 856 857 static void test_smsw_reg(uint64_t *mem) 858 { 859 unsigned long cr0 = read_cr0(); 860 unsigned long rax; 861 const unsigned long in_rax = 0x1234567890abcdeful; 862 863 asm(KVM_FEP "smsww %w0\n\t" : "=a" (rax) : "0" (in_rax)); 864 report((u16)rax == (u16)cr0 && rax >> 16 == in_rax >> 16, 865 "16-bit smsw reg"); 866 867 asm(KVM_FEP "smswl %k0\n\t" : "=a" (rax) : "0" (in_rax)); 868 report(rax == (u32)cr0, "32-bit smsw reg"); 869 870 asm(KVM_FEP "smswq %q0\n\t" : "=a" (rax) : "0" (in_rax)); 871 report(rax == cr0, "64-bit smsw reg"); 872 } 873 874 static void test_nop(uint64_t *mem) 875 { 876 unsigned long rax; 877 const unsigned long in_rax = 0x1234567890abcdeful; 878 asm(KVM_FEP "nop\n\t" : "=a" (rax) : "0" (in_rax)); 879 report(rax == in_rax, "nop"); 880 } 881 882 static void test_mov_dr(uint64_t *mem) 883 { 884 unsigned long rax; 885 const unsigned long in_rax = 0; 886 bool rtm_support = this_cpu_has(X86_FEATURE_RTM); 887 unsigned long dr6_fixed_1 = rtm_support ? 0xfffe0ff0ul : 0xffff0ff0ul; 888 asm(KVM_FEP "movq %0, %%dr6\n\t" 889 KVM_FEP "movq %%dr6, %0\n\t" : "=a" (rax) : "a" (in_rax)); 890 report(rax == dr6_fixed_1, "mov_dr6"); 891 } 892 893 static void test_push16(uint64_t *mem) 894 { 895 uint64_t rsp1, rsp2; 896 uint16_t r; 897 898 asm volatile ( "movq %%rsp, %[rsp1]\n\t" 899 "pushw %[v]\n\t" 900 "popw %[r]\n\t" 901 "movq %%rsp, %[rsp2]\n\t" 902 "movq %[rsp1], %%rsp\n\t" : 903 [rsp1]"=r"(rsp1), [rsp2]"=r"(rsp2), [r]"=r"(r) 904 : [v]"m"(*mem) : "memory"); 905 report(rsp1 == rsp2, "push16"); 906 } 907 908 static void test_crosspage_mmio(volatile uint8_t *mem) 909 { 910 volatile uint16_t w, *pw; 911 912 pw = (volatile uint16_t *)&mem[4095]; 913 mem[4095] = 0x99; 914 mem[4096] = 0x77; 915 asm volatile("mov %1, %0" : "=r"(w) : "m"(*pw) : "memory"); 916 report(w == 0x7799, "cross-page mmio read"); 917 asm volatile("mov %1, %0" : "=m"(*pw) : "r"((uint16_t)0x88aa)); 918 report(mem[4095] == 0xaa && mem[4096] == 0x88, "cross-page mmio write"); 919 } 920 921 static void test_string_io_mmio(volatile uint8_t *mem) 922 { 923 /* Cross MMIO pages.*/ 924 volatile uint8_t *mmio = mem + 4032; 925 926 asm volatile("outw %%ax, %%dx \n\t" : : "a"(0x9999), "d"(TESTDEV_IO_PORT)); 927 928 asm volatile ("cld; rep insb" : : "d" (TESTDEV_IO_PORT), "D" (mmio), "c" (1024)); 929 930 report(mmio[1023] == 0x99, "string_io_mmio"); 931 } 932 933 /* kvm doesn't allow lidt/lgdt from mmio, so the test is disabled */ 934 #if 0 935 static void test_lgdt_lidt(volatile uint8_t *mem) 936 { 937 struct descriptor_table_ptr orig, fresh = {}; 938 939 sgdt(&orig); 940 *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) { 941 .limit = 0xf234, 942 .base = 0x12345678abcd, 943 }; 944 cli(); 945 asm volatile("lgdt %0" : : "m"(*(struct descriptor_table_ptr *)mem)); 946 sgdt(&fresh); 947 lgdt(&orig); 948 sti(); 949 report(orig.limit == fresh.limit && orig.base == fresh.base, 950 "lgdt (long address)"); 951 952 sidt(&orig); 953 *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) { 954 .limit = 0x432f, 955 .base = 0xdbca87654321, 956 }; 957 cli(); 958 asm volatile("lidt %0" : : "m"(*(struct descriptor_table_ptr *)mem)); 959 sidt(&fresh); 960 lidt(&orig); 961 sti(); 962 report(orig.limit == fresh.limit && orig.base == fresh.base, 963 "lidt (long address)"); 964 } 965 #endif 966 967 static void ss_bad_rpl(struct ex_regs *regs) 968 { 969 extern char ss_bad_rpl_cont; 970 971 ++exceptions; 972 regs->rip = (ulong)&ss_bad_rpl_cont; 973 } 974 975 static void test_sreg(volatile uint16_t *mem) 976 { 977 u16 ss = read_ss(); 978 979 // check for null segment load 980 *mem = 0; 981 asm volatile("mov %0, %%ss" : : "m"(*mem)); 982 report(read_ss() == 0, "mov null, %%ss"); 983 984 // check for exception when ss.rpl != cpl on null segment load 985 exceptions = 0; 986 handle_exception(GP_VECTOR, ss_bad_rpl); 987 *mem = 3; 988 asm volatile("mov %0, %%ss; ss_bad_rpl_cont:" : : "m"(*mem)); 989 report(exceptions == 1 && read_ss() == 0, 990 "mov null, %%ss (with ss.rpl != cpl)"); 991 handle_exception(GP_VECTOR, 0); 992 write_ss(ss); 993 } 994 995 static uint64_t usr_gs_mov(void) 996 { 997 static uint64_t dummy = MAGIC_NUM; 998 uint64_t dummy_ptr = (uint64_t)&dummy; 999 uint64_t ret; 1000 1001 dummy_ptr -= GS_BASE; 1002 asm volatile("mov %%gs:(%%rcx), %%rax" : "=a"(ret): "c"(dummy_ptr) :); 1003 1004 return ret; 1005 } 1006 1007 static void test_iret(void) 1008 { 1009 uint64_t val; 1010 bool raised_vector; 1011 1012 /* Update GS base to 4MiB */ 1013 wrmsr(MSR_GS_BASE, GS_BASE); 1014 1015 /* 1016 * Per the SDM, jumping to user mode via `iret`, which is returning to 1017 * outer privilege level, for segment registers (ES, FS, GS, and DS) 1018 * if the check fails, the segment selector becomes null. 1019 * 1020 * In our test case, GS becomes null. 1021 */ 1022 val = run_in_user((usermode_func)usr_gs_mov, GP_VECTOR, 1023 0, 0, 0, 0, &raised_vector); 1024 1025 report(val == MAGIC_NUM, "Test ret/iret with a nullified segment"); 1026 } 1027 1028 /* Broken emulation causes triple fault, which skips the other tests. */ 1029 #if 0 1030 static void test_lldt(volatile uint16_t *mem) 1031 { 1032 u64 gdt[] = { 0, /* null descriptor */ 1033 #ifdef __X86_64__ 1034 0, /* ldt descriptor is 16 bytes in long mode */ 1035 #endif 1036 0x0000f82000000ffffull /* ldt descriptor */ }; 1037 struct descriptor_table_ptr gdt_ptr = { .limit = sizeof(gdt) - 1, 1038 .base = (ulong)&gdt }; 1039 struct descriptor_table_ptr orig_gdt; 1040 1041 cli(); 1042 sgdt(&orig_gdt); 1043 lgdt(&gdt_ptr); 1044 *mem = 0x8; 1045 asm volatile("lldt %0" : : "m"(*mem)); 1046 lgdt(&orig_gdt); 1047 sti(); 1048 report(sldt() == *mem, "lldt"); 1049 } 1050 #endif 1051 1052 static void test_ltr(volatile uint16_t *mem) 1053 { 1054 struct descriptor_table_ptr gdt_ptr; 1055 uint64_t *gdt, *trp; 1056 uint16_t tr = str(); 1057 uint64_t busy_mask = (uint64_t)1 << 41; 1058 1059 sgdt(&gdt_ptr); 1060 gdt = (uint64_t *)gdt_ptr.base; 1061 trp = &gdt[tr >> 3]; 1062 *trp &= ~busy_mask; 1063 *mem = tr; 1064 asm volatile("ltr %0" : : "m"(*mem) : "memory"); 1065 report(str() == tr && (*trp & busy_mask), "ltr"); 1066 } 1067 1068 static void test_simplealu(u32 *mem) 1069 { 1070 *mem = 0x1234; 1071 asm("or %1, %0" : "+m"(*mem) : "r"(0x8001)); 1072 report(*mem == 0x9235, "or"); 1073 asm("add %1, %0" : "+m"(*mem) : "r"(2)); 1074 report(*mem == 0x9237, "add"); 1075 asm("xor %1, %0" : "+m"(*mem) : "r"(0x1111)); 1076 report(*mem == 0x8326, "xor"); 1077 asm("sub %1, %0" : "+m"(*mem) : "r"(0x26)); 1078 report(*mem == 0x8300, "sub"); 1079 asm("clc; adc %1, %0" : "+m"(*mem) : "r"(0x100)); 1080 report(*mem == 0x8400, "adc(0)"); 1081 asm("stc; adc %1, %0" : "+m"(*mem) : "r"(0x100)); 1082 report(*mem == 0x8501, "adc(0)"); 1083 asm("clc; sbb %1, %0" : "+m"(*mem) : "r"(0)); 1084 report(*mem == 0x8501, "sbb(0)"); 1085 asm("stc; sbb %1, %0" : "+m"(*mem) : "r"(0)); 1086 report(*mem == 0x8500, "sbb(1)"); 1087 asm("and %1, %0" : "+m"(*mem) : "r"(0xfe77)); 1088 report(*mem == 0x8400, "and"); 1089 asm("test %1, %0" : "+m"(*mem) : "r"(0xf000)); 1090 report(*mem == 0x8400, "test"); 1091 } 1092 1093 static void illegal_movbe_handler(struct ex_regs *regs) 1094 { 1095 extern char bad_movbe_cont; 1096 1097 ++exceptions; 1098 regs->rip = (ulong)&bad_movbe_cont; 1099 } 1100 1101 static void test_illegal_movbe(void) 1102 { 1103 if (!this_cpu_has(X86_FEATURE_MOVBE)) { 1104 report_skip("illegal movbe"); 1105 return; 1106 } 1107 1108 exceptions = 0; 1109 handle_exception(UD_VECTOR, illegal_movbe_handler); 1110 asm volatile(".byte 0x0f; .byte 0x38; .byte 0xf0; .byte 0xc0;\n\t" 1111 " bad_movbe_cont:" : : : "rax"); 1112 report(exceptions == 1, "illegal movbe"); 1113 handle_exception(UD_VECTOR, 0); 1114 } 1115 1116 static void record_no_fep(struct ex_regs *regs) 1117 { 1118 fep_available = 0; 1119 regs->rip += KVM_FEP_LENGTH; 1120 } 1121 1122 int main(void) 1123 { 1124 void *mem; 1125 void *insn_page; 1126 void *insn_ram; 1127 void *cross_mem; 1128 unsigned long t1, t2; 1129 1130 setup_vm(); 1131 handle_exception(UD_VECTOR, record_no_fep); 1132 asm(KVM_FEP "nop"); 1133 handle_exception(UD_VECTOR, 0); 1134 1135 mem = alloc_vpages(2); 1136 install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem); 1137 // install the page twice to test cross-page mmio 1138 install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem + 4096); 1139 insn_page = alloc_page(); 1140 insn_ram = vmap(virt_to_phys(insn_page), 4096); 1141 cross_mem = vmap(virt_to_phys(alloc_pages(2)), 2 * PAGE_SIZE); 1142 1143 // test mov reg, r/m and mov r/m, reg 1144 t1 = 0x123456789abcdef; 1145 asm volatile("mov %[t1], (%[mem]) \n\t" 1146 "mov (%[mem]), %[t2]" 1147 : [t2]"=r"(t2) 1148 : [t1]"r"(t1), [mem]"r"(mem) 1149 : "memory"); 1150 report(t2 == 0x123456789abcdef, "mov reg, r/m (1)"); 1151 1152 test_simplealu(mem); 1153 test_cmps(mem); 1154 test_scas(mem); 1155 1156 test_push(mem); 1157 test_pop(mem); 1158 1159 test_xchg(mem); 1160 test_xadd(mem); 1161 1162 test_cr8(); 1163 1164 test_smsw(mem); 1165 test_lmsw(); 1166 test_ljmp(mem); 1167 test_stringio(); 1168 test_incdecnotneg(mem); 1169 test_btc(mem); 1170 test_bsfbsr(mem); 1171 test_imul(mem); 1172 test_muldiv(mem); 1173 test_sse(mem); 1174 test_sse_exceptions(cross_mem); 1175 test_mmx(mem); 1176 test_rip_relative(mem, insn_ram); 1177 test_shld_shrd(mem); 1178 //test_lgdt_lidt(mem); 1179 test_sreg(mem); 1180 test_iret(); 1181 //test_lldt(mem); 1182 test_ltr(mem); 1183 test_cmov(mem); 1184 1185 if (fep_available) { 1186 test_mmx_movq_mf(mem); 1187 test_movabs(mem); 1188 test_smsw_reg(mem); 1189 test_nop(mem); 1190 test_mov_dr(mem); 1191 } else { 1192 report_skip("skipping register-only tests, " 1193 "use kvm.force_emulation_prefix=1 to enable"); 1194 } 1195 1196 test_push16(mem); 1197 test_crosspage_mmio(mem); 1198 1199 test_string_io_mmio(mem); 1200 1201 test_jmp_noncanonical(mem); 1202 test_illegal_movbe(); 1203 1204 return report_summary(); 1205 } 1206