/*
 *  vm86 linux syscall support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include "qemu.h"

//#define DEBUG_VM86

#ifdef DEBUG_VM86
#  define LOG_VM86(...) qemu_log(__VA_ARGS__);
#else
#  define LOG_VM86(...) do { } while (0)
#endif


#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK   (0xDD5)
#define RETURN_MASK (0xDFF)

static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
{
    return (((uint8_t *)bitmap)[nr >> 3] >> (nr & 7)) & 1;
}

static inline void vm_putw(uint32_t segptr, unsigned int reg16, unsigned int val)
{
    stw(segptr + (reg16 & 0xffff), val);
}

static inline void vm_putl(uint32_t segptr, unsigned int reg16, unsigned int val)
{
    stl(segptr + (reg16 & 0xffff), val);
}

static inline unsigned int vm_getb(uint32_t segptr, unsigned int reg16)
{
    return ldub(segptr + (reg16 & 0xffff));
}

static inline unsigned int vm_getw(uint32_t segptr, unsigned int reg16)
{
    return lduw(segptr + (reg16 & 0xffff));
}

static inline unsigned int vm_getl(uint32_t segptr, unsigned int reg16)
{
    return ldl(segptr + (reg16 & 0xffff));
}

void save_v86_state(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    struct target_vm86plus_struct * target_v86;

    if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
        /* FIXME - should return an error */
        return;
    /* put the VM86 registers in the userspace register structure */
    target_v86->regs.eax = tswap32(env->regs[R_EAX]);
    target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
    target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
    target_v86->regs.edx = tswap32(env->regs[R_EDX]);
    target_v86->regs.esi = tswap32(env->regs[R_ESI]);
    target_v86->regs.edi = tswap32(env->regs[R_EDI]);
    target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
    target_v86->regs.esp = tswap32(env->regs[R_ESP]);
    target_v86->regs.eip = tswap32(env->eip);
    target_v86->regs.cs = tswap16(env->segs[R_CS].selector);
    target_v86->regs.ss = tswap16(env->segs[R_SS].selector);
    target_v86->regs.ds = tswap16(env->segs[R_DS].selector);
    target_v86->regs.es = tswap16(env->segs[R_ES].selector);
    target_v86->regs.fs = tswap16(env->segs[R_FS].selector);
    target_v86->regs.gs = tswap16(env->segs[R_GS].selector);
    set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
    target_v86->regs.eflags = tswap32(env->eflags);
    unlock_user_struct(target_v86, ts->target_v86, 1);
    LOG_VM86("save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
             env->eflags, env->segs[R_CS].selector, env->eip);

    /* restore 32 bit registers */
    env->regs[R_EAX] = ts->vm86_saved_regs.eax;
    env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
    env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
    env->regs[R_EDX] = ts->vm86_saved_regs.edx;
    env->regs[R_ESI] = ts->vm86_saved_regs.esi;
    env->regs[R_EDI] = ts->vm86_saved_regs.edi;
    env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
    env->regs[R_ESP] = ts->vm86_saved_regs.esp;
    env->eflags = ts->vm86_saved_regs.eflags;
    env->eip = ts->vm86_saved_regs.eip;

    cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
    cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
    cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
    cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
    cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
    cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
}

/* return from vm86 mode to 32 bit. The vm86() syscall will return
   'retval' */
static inline void return_to_32bit(CPUX86State *env, int retval)
{
    LOG_VM86("return_to_32bit: ret=0x%x\n", retval);
    save_v86_state(env);
    env->regs[R_EAX] = retval;
}

static inline int set_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags |= VIF_MASK;
    if (ts->v86flags & VIP_MASK) {
        return_to_32bit(env, TARGET_VM86_STI);
        return 1;
    }
    return 0;
}

static inline void clear_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags &= ~VIF_MASK;
}

static inline void clear_TF(CPUX86State *env)
{
    env->eflags &= ~TF_MASK;
}

static inline void clear_AC(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, eflags, ts->v86mask);
    set_flags(env->eflags, eflags, SAFE_MASK);
    if (eflags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
    set_flags(env->eflags, flags, SAFE_MASK);
    if (flags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

static inline unsigned int get_vflags(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    unsigned int flags;

    flags = env->eflags & RETURN_MASK;
    if (ts->v86flags & VIF_MASK)
        flags |= IF_MASK;
    flags |= IOPL_MASK;
    return flags | (ts->v86flags & ts->v86mask);
}

#define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)

/* handle VM86 interrupt (NOTE: the CPU core currently does not
   support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
    TaskState *ts = env->opaque;
    uint32_t int_addr, segoffs, ssp;
    unsigned int sp;

    if (env->segs[R_CS].selector == TARGET_BIOSSEG)
        goto cannot_handle;
    if (is_revectored(intno, &ts->vm86plus.int_revectored))
        goto cannot_handle;
    if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
                                       &ts->vm86plus.int21_revectored))
        goto cannot_handle;
    int_addr = (intno << 2);
    segoffs = ldl(int_addr);
    if ((segoffs >> 16) == TARGET_BIOSSEG)
        goto cannot_handle;
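    /* Emulate the real-mode INT sequence: push FLAGS, CS and IP on the
       vm86 stack, then jump through the vector fetched from the IVT. */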
    LOG_VM86("VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
             intno, segoffs >> 16, segoffs & 0xffff);
    /* save old state */
    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;
    vm_putw(ssp, sp - 2, get_vflags(env));
    vm_putw(ssp, sp - 4, env->segs[R_CS].selector);
    vm_putw(ssp, sp - 6, env->eip);
    ADD16(env->regs[R_ESP], -6);
    /* goto interrupt handler */
    env->eip = segoffs & 0xffff;
    cpu_x86_load_seg(env, R_CS, segoffs >> 16);
    clear_TF(env);
    clear_IF(env);
    clear_AC(env);
    return;
 cannot_handle:
    LOG_VM86("VM86: return to 32 bits int 0x%x\n", intno);
    return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
}

void handle_vm86_trap(CPUX86State *env, int trapno)
{
    if (trapno == 1 || trapno == 3) {
        return_to_32bit(env, TARGET_VM86_TRAP + (trapno << 8));
    } else {
        do_int(env, trapno);
    }
}

#define CHECK_IF_IN_TRAP() \
      if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
          (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
                newflags |= TF_MASK

#define VM86_FAULT_RETURN \
        if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
            (ts->v86flags & (IF_MASK | VIF_MASK))) \
            return_to_32bit(env, TARGET_VM86_PICRETURN); \
        return

void handle_vm86_fault(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    uint32_t csp, ssp;
    unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
    int data32, pref_done;

    csp = env->segs[R_CS].selector << 4;
    ip = env->eip & 0xffff;

    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;

    LOG_VM86("VM86 exception %04x:%08x\n",
             env->segs[R_CS].selector, env->eip);

    data32 = 0;
    pref_done = 0;
    do {
        opcode = vm_getb(csp, ip);
        ADD16(ip, 1);
        switch (opcode) {
        case 0x66:      /* 32-bit data */     data32 = 1; break;
        case 0x67:      /* 32-bit address */  break;
        case 0x2e:      /* CS */              break;
        case 0x3e:      /* DS */              break;
        case 0x26:      /* ES */              break;
        case 0x36:      /* SS */              break;
        case 0x65:      /* GS */              break;
        case 0x64:      /* FS */              break;
        case 0xf2:      /* repnz */           break;
        case 0xf3:      /* rep */             break;
        default: pref_done = 1;
        }
    } while (!pref_done);

    /* VM86 mode */
    switch (opcode) {
    case 0x9c: /* pushf */
        if (data32) {
            vm_putl(ssp, sp - 4, get_vflags(env));
            ADD16(env->regs[R_ESP], -4);
        } else {
            vm_putw(ssp, sp - 2, get_vflags(env));
            ADD16(env->regs[R_ESP], -2);
        }
        env->eip = ip;
        VM86_FAULT_RETURN;

    case 0x9d: /* popf */
        if (data32) {
            newflags = vm_getl(ssp, sp);
            ADD16(env->regs[R_ESP], 4);
        } else {
            newflags = vm_getw(ssp, sp);
            ADD16(env->regs[R_ESP], 2);
        }
        env->eip = ip;
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xcd: /* int */
        intno = vm_getb(csp, ip);
        ADD16(ip, 1);
        env->eip = ip;
        if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
            if ((ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
                 (intno & 7)) & 1) {
                return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
                return;
            }
        }
        do_int(env, intno);
        break;

    case 0xcf: /* iret */
        if (data32) {
            newip = vm_getl(ssp, sp) & 0xffff;
            newcs = vm_getl(ssp, sp + 4) & 0xffff;
            newflags = vm_getl(ssp, sp + 8);
            ADD16(env->regs[R_ESP], 12);
        } else {
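            /* 16-bit frame: IP, CS and FLAGS are popped as three words */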
            newip = vm_getw(ssp, sp);
            newcs = vm_getw(ssp, sp + 2);
            newflags = vm_getw(ssp, sp + 4);
            ADD16(env->regs[R_ESP], 6);
        }
        env->eip = newip;
        cpu_x86_load_seg(env, R_CS, newcs);
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xfa: /* cli */
        env->eip = ip;
        clear_IF(env);
        VM86_FAULT_RETURN;

    case 0xfb: /* sti */
        env->eip = ip;
        if (set_IF(env))
            return;
        VM86_FAULT_RETURN;

    default:
        /* real VM86 GPF exception */
        return_to_32bit(env, TARGET_VM86_UNKNOWN);
        break;
    }
}

int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
    TaskState *ts = env->opaque;
    struct target_vm86plus_struct * target_v86;
    int ret;

    switch (subfunction) {
    case TARGET_VM86_REQUEST_IRQ:
    case TARGET_VM86_FREE_IRQ:
    case TARGET_VM86_GET_IRQ_BITS:
    case TARGET_VM86_GET_AND_RESET_IRQ:
        gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
        ret = -TARGET_EINVAL;
        goto out;
    case TARGET_VM86_PLUS_INSTALL_CHECK:
        /* NOTE: on old vm86 stuff this will return the error
           from verify_area(), because the subfunction is
           interpreted as (invalid) address to vm86_struct.
           So the installation check works.
         */
        ret = 0;
        goto out;
    }

    /* save current CPU regs */
    ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
    ts->vm86_saved_regs.ebx = env->regs[R_EBX];
    ts->vm86_saved_regs.ecx = env->regs[R_ECX];
    ts->vm86_saved_regs.edx = env->regs[R_EDX];
    ts->vm86_saved_regs.esi = env->regs[R_ESI];
    ts->vm86_saved_regs.edi = env->regs[R_EDI];
    ts->vm86_saved_regs.ebp = env->regs[R_EBP];
    ts->vm86_saved_regs.esp = env->regs[R_ESP];
    ts->vm86_saved_regs.eflags = env->eflags;
    ts->vm86_saved_regs.eip = env->eip;
    ts->vm86_saved_regs.cs = env->segs[R_CS].selector;
    ts->vm86_saved_regs.ss = env->segs[R_SS].selector;
    ts->vm86_saved_regs.ds = env->segs[R_DS].selector;
    ts->vm86_saved_regs.es = env->segs[R_ES].selector;
    ts->vm86_saved_regs.fs = env->segs[R_FS].selector;
    ts->vm86_saved_regs.gs = env->segs[R_GS].selector;

    ts->target_v86 = vm86_addr;
    if (!lock_user_struct(VERIFY_READ, target_v86, vm86_addr, 1))
        return -TARGET_EFAULT;
    /* build vm86 CPU state */
    ts->v86flags = tswap32(target_v86->regs.eflags);
    env->eflags = (env->eflags & ~SAFE_MASK) |
        (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;

    ts->vm86plus.cpu_type = tswapl(target_v86->cpu_type);
    switch (ts->vm86plus.cpu_type) {
    case TARGET_CPU_286:
        ts->v86mask = 0;
        break;
    case TARGET_CPU_386:
        ts->v86mask = NT_MASK | IOPL_MASK;
        break;
    case TARGET_CPU_486:
        ts->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
        break;
    default:
        ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
        break;
    }

    env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
    env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
    env->regs[R_EDX] = tswap32(target_v86->regs.edx);
    env->regs[R_ESI] = tswap32(target_v86->regs.esi);
    env->regs[R_EDI] = tswap32(target_v86->regs.edi);
    env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
    env->regs[R_ESP] = tswap32(target_v86->regs.esp);
    env->eip = tswap32(target_v86->regs.eip);
    cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
    cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
    cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
    cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
    cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
    cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
    ret = tswap32(target_v86->regs.eax); /* eax will be restored at
                                            the end of the syscall */
    memcpy(&ts->vm86plus.int_revectored,
           &target_v86->int_revectored, 32);
    memcpy(&ts->vm86plus.int21_revectored,
           &target_v86->int21_revectored, 32);
    ts->vm86plus.vm86plus.flags = tswapl(target_v86->vm86plus.flags);
    memcpy(&ts->vm86plus.vm86plus.vm86dbg_intxxtab,
           target_v86->vm86plus.vm86dbg_intxxtab, 32);
    unlock_user_struct(target_v86, vm86_addr, 0);

    LOG_VM86("do_vm86: cs:ip=%04x:%04x\n",
             env->segs[R_CS].selector, env->eip);
    /* now the virtual CPU is ready for vm86 execution ! */
 out:
    return ret;
}