#include "ioram.h"
#include "vm.h"
#include "libcflat.h"
#include "desc.h"
#include "types.h"
#include "processor.h"
#include "vmalloc.h"
#include "alloc_page.h"
#include "usermode.h"

#define memset __builtin_memset
#define TESTDEV_IO_PORT 0xe0

#define MAGIC_NUM 0xdeadbeefdeadbeefUL
#define GS_BASE 0x400000

static int exceptions;

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
#define KVM_FEP_LENGTH 5
static int fep_available = 1;

struct regs {
        u64 rax, rbx, rcx, rdx;
        u64 rsi, rdi, rsp, rbp;
        u64 r8, r9, r10, r11;
        u64 r12, r13, r14, r15;
        u64 rip, rflags;
};
struct regs inregs, outregs, save;

struct insn_desc {
        u64 ptr;
        size_t len;
};

static char st1[] = "abcdefghijklmnop";

static void test_stringio(void)
{
        unsigned char r = 0;
        asm volatile("cld \n\t"
                     "movw %0, %%dx \n\t"
                     "rep outsb \n\t"
                     : : "i"((short)TESTDEV_IO_PORT),
                       "S"(st1), "c"(sizeof(st1) - 1));
        asm volatile("inb %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
        report(r == st1[sizeof(st1) - 2], "outsb up"); /* last char */

        asm volatile("std \n\t"
                     "movw %0, %%dx \n\t"
                     "rep outsb \n\t"
                     : : "i"((short)TESTDEV_IO_PORT),
                       "S"(st1 + sizeof(st1) - 2), "c"(sizeof(st1) - 1));
        asm volatile("cld \n\t" : : );
        asm volatile("in %1, %0\n\t" : "=a"(r) : "i"((short)TESTDEV_IO_PORT));
        report(r == st1[0], "outsb down");
}

static void test_cmps_one(unsigned char *m1, unsigned char *m3)
{
        void *rsi, *rdi;
        long rcx, tmp;

        rsi = m1; rdi = m3; rcx = 30;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsb"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsb (1)");

        rsi = m1; rdi = m3; rcx = 30;
        asm volatile("or $1, %[tmp]\n\t" // clear ZF
                     "repe cmpsb"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30,
               "repe cmpsb (1.zf)");

        rsi = m1; rdi = m3; rcx = 15;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsw"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)");

        rsi = m1; rdi = m3; rcx = 7;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsl"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpsl (1)");

        rsi = m1; rdi = m3; rcx = 4;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsq"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)");

        rsi = m1; rdi = m3; rcx = 130;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsb"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101,
               "repe cmpsb (2)");

        rsi = m1; rdi = m3; rcx = 65;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsw"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102,
               "repe cmpsw (2)");

        rsi = m1; rdi = m3; rcx = 32;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsl"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104,
               "repe cmpsl (2)");

        rsi = m1; rdi = m3; rcx = 16;
        asm volatile("xor %[tmp], %[tmp] \n\t"
                     "repe cmpsq"
                     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
                     : : "cc");
        report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104,
               "repe cmpsq (2)");
}
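/*
 * Build three buffers that match for the first 100 bytes and differ from
 * byte 100 onward, so the "(1)" cases in test_cmps_one run rcx down to
 * zero while the "(2)" cases stop at the first mismatch.
 */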
static void test_cmps(void *mem)
{
        unsigned char *m1 = mem, *m2 = mem + 1024;
        unsigned char m3[1024];

        for (int i = 0; i < 100; ++i)
                m1[i] = m2[i] = m3[i] = i;
        for (int i = 100; i < 200; ++i)
                m1[i] = (m3[i] = m2[i] = i) + 1;
        test_cmps_one(m1, m3);
        test_cmps_one(m1, m2);
}

static void test_scas(void *mem)
{
        bool z;
        void *di;

        *(ulong *)mem = 0x77665544332211;

        di = mem;
        asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff11));
        report(di == mem + 1 && z, "scasb match");

        di = mem;
        asm ("scasb; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff54));
        report(di == mem + 1 && !z, "scasb mismatch");

        di = mem;
        asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff2211));
        report(di == mem + 2 && z, "scasw match");

        di = mem;
        asm ("scasw; setz %0" : "=rm"(z), "+D"(di) : "a"(0xffdd11));
        report(di == mem + 2 && !z, "scasw mismatch");

        di = mem;
        asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0xff44332211ul));
        report(di == mem + 4 && z, "scasd match");

        di = mem;
        asm ("scasl; setz %0" : "=rm"(z), "+D"(di) : "a"(0x45332211));
        report(di == mem + 4 && !z, "scasd mismatch");

        di = mem;
        asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(0x77665544332211ul));
        report(di == mem + 8 && z, "scasq match");

        di = mem;
        asm ("scasq; setz %0" : "=rm"(z), "+D"(di) : "a"(3));
        report(di == mem + 8 && !z, "scasq mismatch");
}

static void test_cr8(void)
{
        unsigned long src, dst;

        dst = 777;
        src = 3;
        asm volatile("mov %[src], %%cr8; mov %%cr8, %[dst]"
                     : [dst]"+r"(dst), [src]"+r"(src));
        report(dst == 3 && src == 3, "mov %%cr8");
}

static void test_push(void *mem)
{
        unsigned long tmp;
        unsigned long *stack_top = mem + 4096;
        unsigned long *new_stack_top;
        unsigned long memw = 0x123456789abcdeful;

        memset(mem, 0x55, (void *)stack_top - mem);

        asm volatile("mov %%rsp, %[tmp] \n\t"
                     "mov %[stack_top], %%rsp \n\t"
                     "pushq $-7 \n\t"
                     "pushq %[reg] \n\t"
                     "pushq (%[mem]) \n\t"
                     "pushq $-7070707 \n\t"
                     "mov %%rsp, %[new_stack_top] \n\t"
                     "mov %[tmp], %%rsp"
                     : [tmp]"=&r"(tmp), [new_stack_top]"=r"(new_stack_top)
                     : [stack_top]"r"(stack_top),
                       [reg]"r"(-17l), [mem]"r"(&memw)
                     : "memory");

        report(stack_top[-1] == -7ul, "push $imm8");
        report(stack_top[-2] == -17ul, "push %%reg");
        report(stack_top[-3] == 0x123456789abcdeful, "push mem");
        report(stack_top[-4] == -7070707, "push $imm");
}
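/*
 * Exercise pop to memory and to a register, plus ret, leave, and enter.
 * Each sequence saves the real stack pointer and runs on a scratch stack
 * at the top of the test page, so a miscounted push or pop shows up as a
 * failing report rather than stack corruption.
 */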
static void test_pop(void *mem)
{
        unsigned long tmp, tmp3, rsp, rbp;
        unsigned long *stack_top = mem + 4096;
        unsigned long memw = 0x123456789abcdeful;
        static unsigned long tmp2;

        memset(mem, 0x55, (void *)stack_top - mem);

        asm volatile("pushq %[val] \n\t"
                     "popq (%[mem])"
                     : : [val]"m"(memw), [mem]"r"(mem) : "memory");
        report(*(unsigned long *)mem == memw, "pop mem");

        memw = 7 - memw;
        asm volatile("mov %%rsp, %[tmp] \n\t"
                     "mov %[stack_top], %%rsp \n\t"
                     "pushq %[val] \n\t"
                     "popq %[tmp2] \n\t"
                     "mov %[tmp], %%rsp"
                     : [tmp]"=&r"(tmp), [tmp2]"=m"(tmp2)
                     : [val]"r"(memw), [stack_top]"r"(stack_top)
                     : "memory");
        report(tmp2 == memw, "pop mem (2)");

        memw = 129443 - memw;
        asm volatile("mov %%rsp, %[tmp] \n\t"
                     "mov %[stack_top], %%rsp \n\t"
                     "pushq %[val] \n\t"
                     "popq %[tmp2] \n\t"
                     "mov %[tmp], %%rsp"
                     : [tmp]"=&r"(tmp), [tmp2]"=r"(tmp2)
                     : [val]"r"(memw), [stack_top]"r"(stack_top)
                     : "memory");
        report(tmp2 == memw, "pop reg");

        asm volatile("mov %%rsp, %[tmp] \n\t"
                     "mov %[stack_top], %%rsp \n\t"
                     "push $1f \n\t"
                     "ret \n\t"
                     "2: jmp 2b \n\t"
                     "1: mov %[tmp], %%rsp"
                     : [tmp]"=&r"(tmp) : [stack_top]"r"(stack_top)
                     : "memory");
        report(1, "ret");

        stack_top[-1] = 0x778899;
        asm volatile("mov %[stack_top], %%r8 \n\t"
                     "mov %%rsp, %%r9 \n\t"
                     "xchg %%rbp, %%r8 \n\t"
                     "leave \n\t"
                     "xchg %%rsp, %%r9 \n\t"
                     "xchg %%rbp, %%r8 \n\t"
                     "mov %%r9, %[tmp] \n\t"
                     "mov %%r8, %[tmp3]"
                     : [tmp]"=&r"(tmp), [tmp3]"=&r"(tmp3)
                     : [stack_top]"r"(stack_top - 1)
                     : "memory", "r8", "r9");
        report(tmp == (ulong)stack_top && tmp3 == 0x778899, "leave");

        rbp = 0xaa55aa55bb66bb66ULL;
        rsp = (unsigned long)stack_top;
        asm volatile("mov %[rsp], %%r8 \n\t"
                     "mov %[rbp], %%r9 \n\t"
                     "xchg %%rsp, %%r8 \n\t"
                     "xchg %%rbp, %%r9 \n\t"
                     "enter $0x1238, $0 \n\t"
                     "xchg %%rsp, %%r8 \n\t"
                     "xchg %%rbp, %%r9 \n\t"
                     "xchg %%r8, %[rsp] \n\t"
                     "xchg %%r9, %[rbp]"
                     : [rsp]"+a"(rsp), [rbp]"+b"(rbp) : : "memory", "r8", "r9");
        report(rsp == (unsigned long)stack_top - 8 - 0x1238
               && rbp == (unsigned long)stack_top - 8
               && stack_top[-1] == 0xaa55aa55bb66bb66ULL,
               "enter");
}

static void test_ljmp(void *mem)
{
        unsigned char *m = mem;
        volatile int res = 1;

        *(unsigned long **)m = &&jmpf;
        asm volatile ("data16 mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
        asm volatile ("rex64 ljmp *%0"::"m"(*m));
        res = 0;
jmpf:
        report(res, "ljmp");
}

static void test_incdecnotneg(void *mem)
{
        unsigned long *m = mem, v = 1234;
        unsigned char *mb = mem, vb = 66;

        *m = 0;

        asm volatile ("incl %0":"+m"(*m));
        report(*m == 1, "incl");
        asm volatile ("decl %0":"+m"(*m));
        report(*m == 0, "decl");
        asm volatile ("incb %0":"+m"(*m));
        report(*m == 1, "incb");
        asm volatile ("decb %0":"+m"(*m));
        report(*m == 0, "decb");

        asm volatile ("lock incl %0":"+m"(*m));
        report(*m == 1, "lock incl");
        asm volatile ("lock decl %0":"+m"(*m));
        report(*m == 0, "lock decl");
        asm volatile ("lock incb %0":"+m"(*m));
        report(*m == 1, "lock incb");
        asm volatile ("lock decb %0":"+m"(*m));
        report(*m == 0, "lock decb");

        *m = v;

        asm ("lock negq %0" : "+m"(*m)); v = -v;
        report(*m == v, "lock negq");
        asm ("lock notq %0" : "+m"(*m)); v = ~v;
        report(*m == v, "lock notq");

        *mb = vb;

        asm ("lock negb %0" : "+m"(*mb)); vb = -vb;
        report(*mb == vb, "lock negb");
        asm ("lock notb %0" : "+m"(*mb)); vb = ~vb;
        report(*mb == vb, "lock notb");
}
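/*
 * smsw stores only the low 16 bits of CR0.  The memory form below checks
 * that the bytes around the 16-bit destination stay untouched; h_mem is
 * backed by the MMIO test page, so the final smsw also forces a VM exit
 * and exercises the emulator's write path.
 */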
static void test_smsw(uint64_t *h_mem)
{
        char mem[16];
        unsigned short msw, msw_orig, *pmsw;
        int i, zero;

        msw_orig = read_cr0();

        asm("smsw %0" : "=r"(msw));
        report(msw == msw_orig, "smsw (1)");

        memset(mem, 0, 16);
        pmsw = (void *)mem;
        asm("smsw %0" : "=m"(pmsw[4]));
        zero = 1;
        for (i = 0; i < 8; ++i)
                if (i != 4 && pmsw[i])
                        zero = 0;
        report(msw == pmsw[4] && zero, "smsw (2)");

        /* Trigger exit on smsw */
        *h_mem = 0x12345678abcdeful;
        asm volatile("smsw %0" : "+m"(*h_mem));
        report(msw == (unsigned short)*h_mem &&
               (*h_mem & ~0xfffful) == 0x12345678ab0000ul, "smsw (3)");
}

static void test_lmsw(void)
{
        char mem[16];
        unsigned short msw, *pmsw;
        unsigned long cr0;

        cr0 = read_cr0();

        msw = cr0 ^ 8;
        asm("lmsw %0" : : "r"(msw));
        printf("before %lx after %lx\n", cr0, read_cr0());
        report((cr0 ^ read_cr0()) == 8, "lmsw (1)");

        pmsw = (void *)mem;
        *pmsw = cr0;
        asm("lmsw %0" : : "m"(*pmsw));
        printf("before %lx after %lx\n", cr0, read_cr0());
        report(cr0 == read_cr0(), "lmsw (2)");

        /* lmsw can't clear cr0.pe */
        msw = (cr0 & ~1ul) ^ 4;  /* change EM to force trap */
        asm("lmsw %0" : : "r"(msw));
        report((cr0 ^ read_cr0()) == 4 && (cr0 & 1), "lmsw (3)");

        /* back to normal */
        msw = cr0;
        asm("lmsw %0" : : "r"(msw));
}

static void test_xchg(void *mem)
{
        unsigned long *memq = mem;
        unsigned long rax;

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xchg %%al, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcd10,
               "xchg reg, r/m (1)");

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xchg %%ax, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0xfedcba987654cdef && *memq == 0x123456789ab3210,
               "xchg reg, r/m (2)");

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xchg %%eax, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0x89abcdef && *memq == 0x123456776543210,
               "xchg reg, r/m (3)");

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xchg %%rax, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0x123456789abcdef && *memq == 0xfedcba9876543210,
               "xchg reg, r/m (4)");
}
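/*
 * Like test_xchg, but xadd leaves reg+mem in memory and the old memory
 * value in the register.  The 8/16/32-bit cases also cover the usual
 * x86-64 partial-register rules: 32-bit writes zero-extend into rax,
 * narrower writes leave the upper bits alone.
 */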
static void test_xadd(void *mem)
{
        unsigned long *memq = mem;
        unsigned long rax;

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xadd %%al, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0xfedcba98765432ef && *memq == 0x123456789abcdff,
               "xadd reg, r/m (1)");

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xadd %%ax, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0xfedcba987654cdef && *memq == 0x123456789abffff,
               "xadd reg, r/m (2)");

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xadd %%eax, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0x89abcdef && *memq == 0x1234567ffffffff,
               "xadd reg, r/m (3)");

        asm volatile("mov $0x123456789abcdef, %%rax\n\t"
                     "mov %%rax, (%[memq])\n\t"
                     "mov $0xfedcba9876543210, %%rax\n\t"
                     "xadd %%rax, (%[memq])\n\t"
                     "mov %%rax, %[rax]\n\t"
                     : [rax]"=r"(rax)
                     : [memq]"r"(memq)
                     : "memory", "rax");
        report(rax == 0x123456789abcdef && *memq == 0xffffffffffffffff,
               "xadd reg, r/m (4)");
}

static void test_btc(void *mem)
{
        unsigned int *a = mem;

        memset(mem, 0, 4 * sizeof(unsigned int));

        asm ("btcl $32, %0" :: "m"(a[0]) : "memory");
        asm ("btcl $1, %0" :: "m"(a[1]) : "memory");
        asm ("btcl %1, %0" :: "m"(a[0]), "r"(66) : "memory");
        report(a[0] == 1 && a[1] == 2 && a[2] == 4, "btcl imm8, r/m");

        asm ("btcl %1, %0" :: "m"(a[3]), "r"(-1) : "memory");
        report(a[0] == 1 && a[1] == 2 && a[2] == 0x80000004, "btcl reg, r/m");

        asm ("btcq %1, %0" : : "m"(a[2]), "r"(-1l) : "memory");
        report(a[0] == 1 && a[1] == 0x80000002 && a[2] == 0x80000004 && a[3] == 0,
               "btcq reg, r/m");
}

static void test_bsfbsr(void *mem)
{
        unsigned long rax, *memq = mem;
        unsigned eax, *meml = mem;
        unsigned short ax, *memw = mem;
        unsigned char z;

        *memw = 0xc000;
        asm("bsfw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
        report(ax == 14, "bsfw r/m, reg");

        *meml = 0xc0000000;
        asm("bsfl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
        report(eax == 30, "bsfl r/m, reg");

        *memq = 0xc00000000000;
        asm("bsfq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
        report(rax == 46, "bsfq r/m, reg");

        *memq = 0;
        asm("bsfq %[mem], %[a]; setz %[z]"
            : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
        report(z == 1, "bsfq r/m, reg (ZF)");

        *memw = 0xc000;
        asm("bsrw %[mem], %[a]" : [a]"=a"(ax) : [mem]"m"(*memw));
        report(ax == 15, "bsrw r/m, reg");

        *meml = 0xc0000000;
        asm("bsrl %[mem], %[a]" : [a]"=a"(eax) : [mem]"m"(*meml));
        report(eax == 31, "bsrl r/m, reg");

        *memq = 0xc00000000000;
        asm("bsrq %[mem], %[a]" : [a]"=a"(rax) : [mem]"m"(*memq));
        report(rax == 47, "bsrq r/m, reg");

        *memq = 0;
        asm("bsrq %[mem], %[a]; setz %[z]"
            : [a]"=a"(rax), [z]"=rm"(z) : [mem]"m"(*memq));
        report(z == 1, "bsrq r/m, reg (ZF)");
}
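/*
 * Two- and three-operand imul with 16/32/64-bit operands.  The expected
 * values are the low bits of the signed product; the 32-bit forms also
 * show that a 32-bit result zero-extends into rax, while the 16-bit
 * forms leave the upper bits of rax unchanged.
 */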
static void test_imul(ulong *mem)
{
        ulong a;

        *mem = 51; a = 0x1234567812345678UL;
        asm ("imulw %1, %%ax" : "+a"(a) : "m"(*mem));
        report(a == 0x12345678123439e8, "imul ax, mem");

        *mem = 51; a = 0x1234567812345678UL;
        asm ("imull %1, %%eax" : "+a"(a) : "m"(*mem));
        report(a == 0xa06d39e8, "imul eax, mem");

        *mem = 51; a = 0x1234567812345678UL;
        asm ("imulq %1, %%rax" : "+a"(a) : "m"(*mem));
        report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem");

        *mem = 0x1234567812345678UL; a = 0x8765432187654321L;
        asm ("imulw $51, %1, %%ax" : "+a"(a) : "m"(*mem));
        report(a == 0x87654321876539e8, "imul ax, mem, imm8");

        *mem = 0x1234567812345678UL;
        asm ("imull $51, %1, %%eax" : "+a"(a) : "m"(*mem));
        report(a == 0xa06d39e8, "imul eax, mem, imm8");

        *mem = 0x1234567812345678UL;
        asm ("imulq $51, %1, %%rax" : "+a"(a) : "m"(*mem));
        report(a == 0xA06D39EBA06D39E8UL, "imul rax, mem, imm8");

        *mem = 0x1234567812345678UL; a = 0x8765432187654321L;
        asm ("imulw $311, %1, %%ax" : "+a"(a) : "m"(*mem));
        report(a == 0x8765432187650bc8, "imul ax, mem, imm");

        *mem = 0x1234567812345678UL;
        asm ("imull $311, %1, %%eax" : "+a"(a) : "m"(*mem));
        report(a == 0x1d950bc8, "imul eax, mem, imm");

        *mem = 0x1234567812345678UL;
        asm ("imulq $311, %1, %%rax" : "+a"(a) : "m"(*mem));
        report(a == 0x1D950BDE1D950BC8L, "imul rax, mem, imm");
}

static void test_muldiv(long *mem)
{
        long a, d, aa, dd;
        u8 ex = 1;

        *mem = 0; a = 1; d = 2;
        asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
             : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
        report(a == 1 && d == 2 && ex, "divq (fault)");

        *mem = 987654321098765UL; a = 123456789012345UL; d = 123456789012345UL;
        asm (ASM_TRY("1f") "divq %3; movb $0, %2; 1:"
             : "+a"(a), "+d"(d), "+q"(ex) : "m"(*mem));
        report(a == 0x1ffffffb1b963b33ul && d == 0x273ba4384ede2ul && !ex,
               "divq (1)");

        aa = 0x1111111111111111; dd = 0x2222222222222222;
        *mem = 0x3333333333333333; a = aa; d = dd;
        asm("mulb %2" : "+a"(a), "+d"(d) : "m"(*mem));
        report(a == 0x1111111111110363 && d == dd, "mulb mem");
        *mem = 0x3333333333333333; a = aa; d = dd;
        asm("mulw %2" : "+a"(a), "+d"(d) : "m"(*mem));
        report(a == 0x111111111111c963 && d == 0x2222222222220369, "mulw mem");
        *mem = 0x3333333333333333; a = aa; d = dd;
        asm("mull %2" : "+a"(a), "+d"(d) : "m"(*mem));
        report(a == 0x962fc963 && d == 0x369d036, "mull mem");
        *mem = 0x3333333333333333; a = aa; d = dd;
        asm("mulq %2" : "+a"(a), "+d"(d) : "m"(*mem));
        report(a == 0x2fc962fc962fc963 && d == 0x369d0369d0369d0, "mulq mem");
}

typedef unsigned __attribute__((vector_size(16))) sse128;

typedef union {
        sse128 sse;
        unsigned u[4];
} sse_union;

static bool sseeq(sse_union *v1, sse_union *v2)
{
        bool ok = true;
        int i;

        for (i = 0; i < 4; ++i) {
                ok &= v1->u[i] == v2->u[i];
        }

        return ok;
}

static __attribute__((target("sse2"))) void test_sse(sse_union *mem)
{
        sse_union v;

        write_cr0(read_cr0() & ~6); /* EM, TS */
        write_cr4(read_cr4() | 0x200); /* OSFXSR */

#define TEST_RW_SSE(insn) do { \
                v.u[0] = 1; v.u[1] = 2; v.u[2] = 3; v.u[3] = 4; \
                asm(insn " %1, %0" : "=m"(*mem) : "x"(v.sse)); \
                report(sseeq(&v, mem), insn " (read)"); \
                mem->u[0] = 5; mem->u[1] = 6; mem->u[2] = 7; mem->u[3] = 8; \
                asm(insn " %1, %0" : "=x"(v.sse) : "m"(*mem)); \
                report(sseeq(&v, mem), insn " (write)"); \
        } while (0)

        TEST_RW_SSE("movdqu");
        TEST_RW_SSE("movaps");
        TEST_RW_SSE("movapd");
        TEST_RW_SSE("movups");
        TEST_RW_SSE("movupd");
#undef TEST_RW_SSE
}

static void unaligned_movaps_handler(struct ex_regs *regs)
{
        extern char unaligned_movaps_cont;

        ++exceptions;
        regs->rip = (ulong)&unaligned_movaps_cont;
}

static void cross_movups_handler(struct ex_regs *regs)
{
        extern char cross_movups_cont;

        ++exceptions;
        regs->rip = (ulong)&cross_movups_cont;
}
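/*
 * cross_mem is two contiguous virtual pages.  The tests below check that
 * unaligned movups/movupd succeed, that an unaligned movaps raises #GP,
 * and that a movups straddling into an unmapped page raises #PF; the
 * handlers above count the fault and skip past the faulting instruction.
 */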
static __attribute__((target("sse2"))) void test_sse_exceptions(void *cross_mem)
{
        sse_union v;
        sse_union *mem;
        uint8_t *bytes = cross_mem; // aligned on PAGE_SIZE*2
        void *page2 = (void *)(&bytes[4096]);
        struct pte_search search;
        pteval_t orig_pte;

        // setup memory for unaligned access
        mem = (sse_union *)(&bytes[8]);

        // test unaligned access for movups, movupd and movaps
        v.u[0] = 1; v.u[1] = 2; v.u[2] = 3; v.u[3] = 4;
        mem->u[0] = 5; mem->u[1] = 6; mem->u[2] = 7; mem->u[3] = 8;
        asm("movups %1, %0" : "=m"(*mem) : "x"(v.sse));
        report(sseeq(&v, mem), "movups unaligned");

        v.u[0] = 1; v.u[1] = 2; v.u[2] = 3; v.u[3] = 4;
        mem->u[0] = 5; mem->u[1] = 6; mem->u[2] = 7; mem->u[3] = 8;
        asm("movupd %1, %0" : "=m"(*mem) : "x"(v.sse));
        report(sseeq(&v, mem), "movupd unaligned");

        exceptions = 0;
        handle_exception(GP_VECTOR, unaligned_movaps_handler);
        asm("movaps %1, %0\n\t unaligned_movaps_cont:"
            : "=m"(*mem) : "x"(v.sse));
        handle_exception(GP_VECTOR, 0);
        report(exceptions == 1, "unaligned movaps exception");

        // setup memory for cross page access
        mem = (sse_union *)(&bytes[4096 - 8]);
        v.u[0] = 1; v.u[1] = 2; v.u[2] = 3; v.u[3] = 4;
        mem->u[0] = 5; mem->u[1] = 6; mem->u[2] = 7; mem->u[3] = 8;

        asm("movups %1, %0" : "=m"(*mem) : "x"(v.sse));
        report(sseeq(&v, mem), "movups unaligned crosspage");

        // invalidate second page
        search = find_pte_level(current_page_table(), page2, 1);
        orig_pte = *search.pte;
        install_pte(current_page_table(), 1, page2, 0, NULL);
        invlpg(page2);

        exceptions = 0;
        handle_exception(PF_VECTOR, cross_movups_handler);
        asm("movups %1, %0\n\t cross_movups_cont:" : "=m"(*mem) : "x"(v.sse));
        handle_exception(PF_VECTOR, 0);
        report(exceptions == 1, "movups crosspage exception");

        // restore invalidated page
        install_pte(current_page_table(), 1, page2, orig_pte, NULL);
}

static void test_mmx(uint64_t *mem)
{
        uint64_t v;

        write_cr0(read_cr0() & ~6); /* EM, TS */
        asm volatile("fninit");
        v = 0x0102030405060708ULL;
        asm("movq %1, %0" : "=m"(*mem) : "y"(v));
        report(v == *mem, "movq (mmx, read)");
        *mem = 0x8070605040302010ull;
        asm("movq %1, %0" : "=y"(v) : "m"(*mem));
        report(v == *mem, "movq (mmx, write)");
}

static void test_rip_relative(unsigned *mem, char *insn_ram)
{
        /* movb $1, mem+2(%rip) */
        insn_ram[0] = 0xc6;
        insn_ram[1] = 0x05;
        *(unsigned *)&insn_ram[2] = 2 + (char *)mem - (insn_ram + 7);
        insn_ram[6] = 0x01;
        /* ret */
        insn_ram[7] = 0xc3;

        *mem = 0;
        asm("callq *%1" : "+m"(*mem) : "r"(insn_ram));
        report(*mem == 0x10000, "movb $imm, 0(%%rip)");
}

static void test_shld_shrd(u32 *mem)
{
        *mem = 0x12345678;
        asm("shld %2, %1, %0" : "+m"(*mem) : "r"(0xaaaaaaaaU), "c"((u8)3));
        report(*mem == ((0x12345678 << 3) | 5), "shld (cl)");
        *mem = 0x12345678;
        asm("shrd %2, %1, %0" : "+m"(*mem) : "r"(0x55555555U), "c"((u8)3));
        report(*mem == ((0x12345678 >> 3) | (5u << 29)), "shrd (cl)");
}

static void test_cmov(u32 *mem)
{
        u64 val;

        *mem = 0xabcdef12u;
        asm ("movq $0x1234567812345678, %%rax\n\t"
             "cmpl %%eax, %%eax\n\t"
             "cmovnel (%[mem]), %%eax\n\t"
             "movq %%rax, %[val]\n\t"
             : [val]"=r"(val) : [mem]"r"(mem) : "%rax", "cc");
        report(val == 0x12345678ul, "cmovnel");
}

static unsigned long rip_advance;

static void advance_rip_and_note_exception(struct ex_regs *regs)
{
        ++exceptions;
        regs->rip += rip_advance;
}
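/*
 * With all x87 exceptions unmasked, the fdivp below leaves a pending #MF
 * that is delivered by the next FP/MMX instruction: the forced-emulation
 * (KVM_FEP) movq.  advance_rip_and_note_exception then skips the movq so
 * execution continues past the fault.
 */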
static void test_mmx_movq_mf(uint64_t *mem)
{
        /* movq %mm0, (%rax) */
        extern char movq_start, movq_end;
        uint16_t fcw = 0; /* all exceptions unmasked */

        write_cr0(read_cr0() & ~6); /* TS, EM */
        exceptions = 0;
        handle_exception(MF_VECTOR, advance_rip_and_note_exception);
        asm volatile("fninit; fldcw %0" : : "m"(fcw));
        asm volatile("fldz; fldz; fdivp"); /* generate exception */

        rip_advance = &movq_end - &movq_start;
        asm(KVM_FEP "movq_start: movq %mm0, (%rax); movq_end:");
        /* exit MMX mode */
        asm volatile("fnclex; emms");
        report(exceptions == 1, "movq mmx generates #MF");
        handle_exception(MF_VECTOR, 0);
}

static void test_jmp_noncanonical(uint64_t *mem)
{
        extern char nc_jmp_start, nc_jmp_end;

        *mem = 0x1111111111111111ul;

        exceptions = 0;
        rip_advance = &nc_jmp_end - &nc_jmp_start;
        handle_exception(GP_VECTOR, advance_rip_and_note_exception);
        asm volatile ("nc_jmp_start: jmp *%0; nc_jmp_end:" : : "m"(*mem));
        report(exceptions == 1, "jump to non-canonical address");
        handle_exception(GP_VECTOR, 0);
}

static void test_movabs(uint64_t *mem)
{
        /* mov $0x9090909090909090, %rcx */
        unsigned long rcx;

        asm(KVM_FEP "mov $0x9090909090909090, %0" : "=c" (rcx) : "0" (0));
        report(rcx == 0x9090909090909090, "64-bit mov imm2");
}

static void test_smsw_reg(uint64_t *mem)
{
        unsigned long cr0 = read_cr0();
        unsigned long rax;
        const unsigned long in_rax = 0x1234567890abcdeful;

        asm(KVM_FEP "smsww %w0\n\t" : "=a" (rax) : "0" (in_rax));
        report((u16)rax == (u16)cr0 && rax >> 16 == in_rax >> 16,
               "16-bit smsw reg");

        asm(KVM_FEP "smswl %k0\n\t" : "=a" (rax) : "0" (in_rax));
        report(rax == (u32)cr0, "32-bit smsw reg");

        asm(KVM_FEP "smswq %q0\n\t" : "=a" (rax) : "0" (in_rax));
        report(rax == cr0, "64-bit smsw reg");
}

static void test_nop(uint64_t *mem)
{
        unsigned long rax;
        const unsigned long in_rax = 0x1234567890abcdeful;

        asm(KVM_FEP "nop\n\t" : "=a" (rax) : "0" (in_rax));
        report(rax == in_rax, "nop");
}
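/*
 * DR6 reads back with its fixed-1 bits set regardless of what was
 * written.  On CPUs with RTM, bit 16 is defined and reads as 0, hence
 * the two expected masks below.
 */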
static void test_mov_dr(uint64_t *mem)
{
        unsigned long rax;
        const unsigned long in_rax = 0;
        bool rtm_support = this_cpu_has(X86_FEATURE_RTM);
        unsigned long dr6_fixed_1 = rtm_support ? 0xfffe0ff0ul : 0xffff0ff0ul;

        asm(KVM_FEP "movq %0, %%dr6\n\t"
            KVM_FEP "movq %%dr6, %0\n\t" : "=a" (rax) : "a" (in_rax));
        report(rax == dr6_fixed_1, "mov_dr6");
}

static void test_push16(uint64_t *mem)
{
        uint64_t rsp1, rsp2;
        uint16_t r;

        asm volatile ("movq %%rsp, %[rsp1]\n\t"
                      "pushw %[v]\n\t"
                      "popw %[r]\n\t"
                      "movq %%rsp, %[rsp2]\n\t"
                      "movq %[rsp1], %%rsp\n\t"
                      : [rsp1]"=r"(rsp1), [rsp2]"=r"(rsp2), [r]"=r"(r)
                      : [v]"m"(*mem) : "memory");
        report(rsp1 == rsp2, "push16");
}

static void test_crosspage_mmio(volatile uint8_t *mem)
{
        volatile uint16_t w, *pw;

        pw = (volatile uint16_t *)&mem[4095];
        mem[4095] = 0x99;
        mem[4096] = 0x77;
        asm volatile("mov %1, %0" : "=r"(w) : "m"(*pw) : "memory");
        report(w == 0x7799, "cross-page mmio read");
        asm volatile("mov %1, %0" : "=m"(*pw) : "r"((uint16_t)0x88aa));
        report(mem[4095] == 0xaa && mem[4096] == 0x88, "cross-page mmio write");
}

static void test_string_io_mmio(volatile uint8_t *mem)
{
        /* Cross MMIO pages. */
        volatile uint8_t *mmio = mem + 4032;

        asm volatile("outw %%ax, %%dx \n\t" : : "a"(0x9999), "d"(TESTDEV_IO_PORT));

        asm volatile ("cld; rep insb" : : "d" (TESTDEV_IO_PORT), "D" (mmio), "c" (1024));

        report(mmio[1023] == 0x99, "string_io_mmio");
}

/* kvm doesn't allow lidt/lgdt from mmio, so the test is disabled */
#if 0
static void test_lgdt_lidt(volatile uint8_t *mem)
{
        struct descriptor_table_ptr orig, fresh = {};

        sgdt(&orig);
        *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
                .limit = 0xf234,
                .base = 0x12345678abcd,
        };
        cli();
        asm volatile("lgdt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
        sgdt(&fresh);
        lgdt(&orig);
        sti();
        report(orig.limit == fresh.limit && orig.base == fresh.base,
               "lgdt (long address)");

        sidt(&orig);
        *(struct descriptor_table_ptr *)mem = (struct descriptor_table_ptr) {
                .limit = 0x432f,
                .base = 0xdbca87654321,
        };
        cli();
        asm volatile("lidt %0" : : "m"(*(struct descriptor_table_ptr *)mem));
        sidt(&fresh);
        lidt(&orig);
        sti();
        report(orig.limit == fresh.limit && orig.base == fresh.base,
               "lidt (long address)");
}
#endif

static void ss_bad_rpl(struct ex_regs *regs)
{
        extern char ss_bad_rpl_cont;

        ++exceptions;
        regs->rip = (ulong)&ss_bad_rpl_cont;
}

static void test_sreg(volatile uint16_t *mem)
{
        u16 ss = read_ss();

        // check for null segment load
        *mem = 0;
        asm volatile("mov %0, %%ss" : : "m"(*mem));
        report(read_ss() == 0, "mov null, %%ss");

        // check for exception when ss.rpl != cpl on null segment load
        exceptions = 0;
        handle_exception(GP_VECTOR, ss_bad_rpl);
        *mem = 3;
        asm volatile("mov %0, %%ss; ss_bad_rpl_cont:" : : "m"(*mem));
        report(exceptions == 1 && read_ss() == 0,
               "mov null, %%ss (with ss.rpl != cpl)");
        handle_exception(GP_VECTOR, 0);
        write_ss(ss);
}
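/*
 * User-mode helper for test_iret: reads `dummy` through a gs-relative
 * address that resolves to &dummy only while the GS base is GS_BASE.
 * In 64-bit mode a null GS selector does not change the cached base, so
 * the load is still expected to return MAGIC_NUM.
 */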
static uint64_t usr_gs_mov(void)
{
        static uint64_t dummy = MAGIC_NUM;
        uint64_t dummy_ptr = (uint64_t)&dummy;
        uint64_t ret;

        dummy_ptr -= GS_BASE;
        asm volatile("mov %%gs:(%%rcx), %%rax" : "=a"(ret): "c"(dummy_ptr) :);

        return ret;
}

static void test_iret(void)
{
        uint64_t val;
        bool raised_vector;

        /* Update GS base to 4MiB */
        wrmsr(MSR_GS_BASE, GS_BASE);

        /*
         * Per the SDM, when iret returns to an outer privilege level, a
         * segment register (ES, FS, GS, or DS) whose contents are not
         * valid at the new privilege level is loaded with a null
         * selector.  In our test case, GS becomes null.
         */
        val = run_in_user((usermode_func)usr_gs_mov, GP_VECTOR,
                          0, 0, 0, 0, &raised_vector);

        report(val == MAGIC_NUM, "Test ret/iret with a nullified segment");
}

/* Broken emulation causes triple fault, which skips the other tests. */
#if 0
static void test_lldt(volatile uint16_t *mem)
{
        u64 gdt[] = { 0, /* null descriptor */
#ifdef __X86_64__
                      0, /* ldt descriptor is 16 bytes in long mode */
#endif
                      0x0000f82000000ffffull /* ldt descriptor */ };
        struct descriptor_table_ptr gdt_ptr = { .limit = sizeof(gdt) - 1,
                                                .base = (ulong)&gdt };
        struct descriptor_table_ptr orig_gdt;

        cli();
        sgdt(&orig_gdt);
        lgdt(&gdt_ptr);
        *mem = 0x8;
        asm volatile("lldt %0" : : "m"(*mem));
        lgdt(&orig_gdt);
        sti();
        report(sldt() == *mem, "lldt");
}
#endif

static void test_ltr(volatile uint16_t *mem)
{
        struct descriptor_table_ptr gdt_ptr;
        uint64_t *gdt, *trp;
        uint16_t tr = str();
        uint64_t busy_mask = (uint64_t)1 << 41;

        sgdt(&gdt_ptr);
        gdt = (uint64_t *)gdt_ptr.base;
        trp = &gdt[tr >> 3];
        *trp &= ~busy_mask;
        *mem = tr;
        asm volatile("ltr %0" : : "m"(*mem) : "memory");
        report(str() == tr && (*trp & busy_mask), "ltr");
}

static void test_simplealu(u32 *mem)
{
        *mem = 0x1234;
        asm("or %1, %0" : "+m"(*mem) : "r"(0x8001));
        report(*mem == 0x9235, "or");
        asm("add %1, %0" : "+m"(*mem) : "r"(2));
        report(*mem == 0x9237, "add");
        asm("xor %1, %0" : "+m"(*mem) : "r"(0x1111));
        report(*mem == 0x8326, "xor");
        asm("sub %1, %0" : "+m"(*mem) : "r"(0x26));
        report(*mem == 0x8300, "sub");
        asm("clc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
        report(*mem == 0x8400, "adc(0)");
        asm("stc; adc %1, %0" : "+m"(*mem) : "r"(0x100));
        report(*mem == 0x8501, "adc(1)");
        asm("clc; sbb %1, %0" : "+m"(*mem) : "r"(0));
        report(*mem == 0x8501, "sbb(0)");
        asm("stc; sbb %1, %0" : "+m"(*mem) : "r"(0));
        report(*mem == 0x8500, "sbb(1)");
        asm("and %1, %0" : "+m"(*mem) : "r"(0xfe77));
        report(*mem == 0x8400, "and");
        asm("test %1, %0" : "+m"(*mem) : "r"(0xf000));
        report(*mem == 0x8400, "test");
}

static void illegal_movbe_handler(struct ex_regs *regs)
{
        extern char bad_movbe_cont;

        ++exceptions;
        regs->rip = (ulong)&bad_movbe_cont;
}

static void test_illegal_movbe(void)
{
        if (!this_cpu_has(X86_FEATURE_MOVBE)) {
                report_skip("illegal movbe");
                return;
        }

        exceptions = 0;
        handle_exception(UD_VECTOR, illegal_movbe_handler);
        asm volatile(".byte 0x0f; .byte 0x38; .byte 0xf0; .byte 0xc0;\n\t"
                     " bad_movbe_cont:" : : : "rax");
        report(exceptions == 1, "illegal movbe");
        handle_exception(UD_VECTOR, 0);
}

static void record_no_fep(struct ex_regs *regs)
{
        fep_available = 0;
        regs->rip += KVM_FEP_LENGTH;
}
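/*
 * Probe for forced-emulation support: execute KVM_FEP "nop" with a #UD
 * handler installed.  If the host does not recognize the prefix, the ud2
 * faults, record_no_fep clears fep_available, and the FEP-only tests in
 * main are skipped.
 */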
int main(void)
{
        void *mem;
        void *insn_page;
        void *insn_ram;
        void *cross_mem;
        unsigned long t1, t2;

        setup_vm();
        handle_exception(UD_VECTOR, record_no_fep);
        asm(KVM_FEP "nop");
        handle_exception(UD_VECTOR, 0);

        mem = alloc_vpages(2);
        install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem);
        // install the page twice to test cross-page mmio
        install_page((void *)read_cr3(), IORAM_BASE_PHYS, mem + 4096);
        insn_page = alloc_page();
        insn_ram = vmap(virt_to_phys(insn_page), 4096);
        cross_mem = vmap(virt_to_phys(alloc_pages(2)), 2 * PAGE_SIZE);

        // test mov reg, r/m and mov r/m, reg
        t1 = 0x123456789abcdef;
        asm volatile("mov %[t1], (%[mem]) \n\t"
                     "mov (%[mem]), %[t2]"
                     : [t2]"=r"(t2)
                     : [t1]"r"(t1), [mem]"r"(mem)
                     : "memory");
        report(t2 == 0x123456789abcdef, "mov reg, r/m (1)");

        test_simplealu(mem);
        test_cmps(mem);
        test_scas(mem);

        test_push(mem);
        test_pop(mem);

        test_xchg(mem);
        test_xadd(mem);

        test_cr8();

        test_smsw(mem);
        test_lmsw();
        test_ljmp(mem);
        test_stringio();
        test_incdecnotneg(mem);
        test_btc(mem);
        test_bsfbsr(mem);
        test_imul(mem);
        test_muldiv(mem);
        test_sse(mem);
        test_sse_exceptions(cross_mem);
        test_mmx(mem);
        test_rip_relative(mem, insn_ram);
        test_shld_shrd(mem);
        //test_lgdt_lidt(mem);
        test_sreg(mem);
        test_iret();
        //test_lldt(mem);
        test_ltr(mem);
        test_cmov(mem);

        if (fep_available) {
                test_mmx_movq_mf(mem);
                test_movabs(mem);
                test_smsw_reg(mem);
                test_nop(mem);
                test_mov_dr(mem);
        } else {
                report_skip("skipping register-only tests, "
                            "use kvm.force_emulation_prefix=1 to enable");
        }

        test_push16(mem);
        test_crosspage_mmio(mem);

        test_string_io_mmio(mem);

        test_jmp_noncanonical(mem);
        test_illegal_movbe();

        return report_summary();
}