1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <grp.h> 32 #include <sys/types.h> 33 #include <sys/ipc.h> 34 #include <sys/msg.h> 35 #include <sys/wait.h> 36 #include <sys/time.h> 37 #include <sys/stat.h> 38 #include <sys/mount.h> 39 #include <sys/file.h> 40 #include <sys/fsuid.h> 41 #include <sys/personality.h> 42 #include <sys/prctl.h> 43 #include <sys/resource.h> 44 #include <sys/mman.h> 45 #include <sys/swap.h> 46 #include <signal.h> 47 #include <sched.h> 48 #ifdef __ia64__ 49 int __clone2(int (*fn)(void *), void *child_stack_base, 50 size_t stack_size, int flags, void *arg, ...); 51 #endif 52 #include <sys/socket.h> 53 #include <sys/un.h> 54 #include <sys/uio.h> 55 #include <sys/poll.h> 56 #include <sys/times.h> 57 #include <sys/shm.h> 58 #include <sys/sem.h> 59 #include <sys/statfs.h> 60 #include <utime.h> 61 #include <sys/sysinfo.h> 62 #include <sys/utsname.h> 63 //#include <sys/user.h> 64 #include <netinet/ip.h> 65 #include <netinet/tcp.h> 66 #include <linux/wireless.h> 67 #include <linux/icmp.h> 68 
#include "qemu-common.h" 69 #ifdef TARGET_GPROF 70 #include <sys/gmon.h> 71 #endif 72 #ifdef CONFIG_EVENTFD 73 #include <sys/eventfd.h> 74 #endif 75 #ifdef CONFIG_EPOLL 76 #include <sys/epoll.h> 77 #endif 78 #ifdef CONFIG_ATTR 79 #include "qemu/xattr.h" 80 #endif 81 #ifdef CONFIG_SENDFILE 82 #include <sys/sendfile.h> 83 #endif 84 85 #define termios host_termios 86 #define winsize host_winsize 87 #define termio host_termio 88 #define sgttyb host_sgttyb /* same as target */ 89 #define tchars host_tchars /* same as target */ 90 #define ltchars host_ltchars /* same as target */ 91 92 #include <linux/termios.h> 93 #include <linux/unistd.h> 94 #include <linux/utsname.h> 95 #include <linux/cdrom.h> 96 #include <linux/hdreg.h> 97 #include <linux/soundcard.h> 98 #include <linux/kd.h> 99 #include <linux/mtio.h> 100 #include <linux/fs.h> 101 #if defined(CONFIG_FIEMAP) 102 #include <linux/fiemap.h> 103 #endif 104 #include <linux/fb.h> 105 #include <linux/vt.h> 106 #include <linux/dm-ioctl.h> 107 #include <linux/reboot.h> 108 #include <linux/route.h> 109 #include <linux/filter.h> 110 #include "linux_loop.h" 111 #include "cpu-uname.h" 112 113 #include "qemu.h" 114 115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 117 118 //#define DEBUG 119 120 //#include <linux/msdos_fs.h> 121 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 122 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 123 124 125 #undef _syscall0 126 #undef _syscall1 127 #undef _syscall2 128 #undef _syscall3 129 #undef _syscall4 130 #undef _syscall5 131 #undef _syscall6 132 133 #define _syscall0(type,name) \ 134 static type name (void) \ 135 { \ 136 return syscall(__NR_##name); \ 137 } 138 139 #define _syscall1(type,name,type1,arg1) \ 140 static type name (type1 arg1) \ 141 { \ 142 return syscall(__NR_##name, arg1); \ 143 } 144 145 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 146 static type name (type1 
arg1,type2 arg2) \ 147 { \ 148 return syscall(__NR_##name, arg1, arg2); \ 149 } 150 151 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 152 static type name (type1 arg1,type2 arg2,type3 arg3) \ 153 { \ 154 return syscall(__NR_##name, arg1, arg2, arg3); \ 155 } 156 157 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 159 { \ 160 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 161 } 162 163 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 164 type5,arg5) \ 165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 166 { \ 167 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 168 } 169 170 171 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 172 type5,arg5,type6,arg6) \ 173 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 174 type6 arg6) \ 175 { \ 176 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 177 } 178 179 180 #define __NR_sys_uname __NR_uname 181 #define __NR_sys_getcwd1 __NR_getcwd 182 #define __NR_sys_getdents __NR_getdents 183 #define __NR_sys_getdents64 __NR_getdents64 184 #define __NR_sys_getpriority __NR_getpriority 185 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 186 #define __NR_sys_syslog __NR_syslog 187 #define __NR_sys_tgkill __NR_tgkill 188 #define __NR_sys_tkill __NR_tkill 189 #define __NR_sys_futex __NR_futex 190 #define __NR_sys_inotify_init __NR_inotify_init 191 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 192 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 193 194 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \ 195 defined(__s390x__) 196 #define __NR__llseek __NR_lseek 197 #endif 198 199 #ifdef __NR_gettid 200 _syscall0(int, gettid) 201 #else 202 /* This is a replacement for the host gettid() and must return a host 203 errno. 
*/ 204 static int gettid(void) { 205 return -ENOSYS; 206 } 207 #endif 208 #ifdef __NR_getdents 209 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 210 #endif 211 #if !defined(__NR_getdents) || \ 212 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 213 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 214 #endif 215 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 216 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 217 loff_t *, res, uint, wh); 218 #endif 219 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo) 220 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 221 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 222 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig) 223 #endif 224 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 225 _syscall2(int,sys_tkill,int,tid,int,sig) 226 #endif 227 #ifdef __NR_exit_group 228 _syscall1(int,exit_group,int,error_code) 229 #endif 230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 231 _syscall1(int,set_tid_address,int *,tidptr) 232 #endif 233 #if defined(TARGET_NR_futex) && defined(__NR_futex) 234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 235 const struct timespec *,timeout,int *,uaddr2,int,val3) 236 #endif 237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 239 unsigned long *, user_mask_ptr); 240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 242 unsigned long *, user_mask_ptr); 243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 244 void *, arg); 245 246 static bitmask_transtbl fcntl_flags_tbl[] = { 247 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 248 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 249 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 250 { 
TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

/* Copy a utsname field, guaranteeing NUL termination (strncpy alone does
 * not terminate when the source fills the buffer). */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* uname(2) replacement: fill a kernel-style new_utsname from the host's
 * libc utsname.  Returns 0 on success, -1 with host errno on failure. */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* domainname is a GNU extension of struct utsname. */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}

/* getcwd(2)-style helper: on success returns the string length including
 * the terminating NUL (as the kernel syscall does); on failure returns -1
 * with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}

#ifdef TARGET_NR_openat
/* openat(2) wrapper that only forwards 'mode' when O_CREAT is present. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif

#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Prefer the libc utimensat()/futimens() pair when available; a NULL
 * pathname means "operate on dirfd itself", matching kernel semantics. */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc support: call the raw host syscall directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum
of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers (marking it in-use with the
 * sentinel value 1) and return its index, or -1 when all slots are taken. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};

/* Map a host errno to the target's encoding; values without a table entry
 * are assumed to be numerically identical on both sides. */
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

/* Inverse of host_to_target_errno(), using the table built by
 * syscall_init(). */
static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

/* Convert a host syscall result: -1 becomes the negated target errno,
 * anything else is passed through unchanged. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int
is_error(abi_long ret)
{
    /* Kernel convention: values in [-4095, -1] encode errnos. */
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

/* strerror() for target errno values; returns NULL when err is outside the
 * translation table's range. */
char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

/* Guest heap state: current break, the initial break, and the first page
 * past the highest page reserved so far for the guest heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (called at load time). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
    	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

/* Copy a guest fd_set (packed abi_ulong words) into a host fd_set.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* Like copy_from_user_fdset() but treats a NULL guest pointer as "no set",
 * yielding *fds_ptr == NULL for the eventual select() call. */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Copy a host fd_set back out to the guest's packed representation.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t from the host tick rate to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return
((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Byte-swap a host struct rusage into guest memory at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

/* Convert a guest rlimit value to the host's rlim_t; the guest infinity
 * sentinel, or any value that does not survive the round-trip, maps to
 * RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

/* Inverse of target_to_host_rlim(): host values that don't fit the guest
 * word become the guest's infinity sentinel. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

/* Map a guest RLIMIT_* code to the host constant; unknown codes are
 * passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a guest struct timeval into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write a host struct timeval back to guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr back to guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    /* NULL guest pointers become NULL fd_set pointers. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        /* Copy the surviving sets (and the possibly-updated timeout)
         * back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

/* pipe2(2) wrapper; returns host -ENOSYS when the host lacks pipe2(). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

/* Shared implementation for the guest pipe and pipe2 syscalls; returns
 * target values and target errnos. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.
 */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* These targets return the second fd in a CPU register instead
         * of writing both fds through the pipedes pointer. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic convention: store both fds at the guest address. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Convert a guest struct ip_mreq / ip_mreqn (len bytes at target_addr)
 * into the host *mreqn.  imr_ifindex exists only in the longer
 * ip_mreqn layout and is converted only when that layout was passed.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* IP addresses stay in network byte order: no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Convert a guest sockaddr (len bytes at target_addr) into host *addr.
 * For AF_UNIX the length may be grown by one byte; see the comment in
 * the body.  Callers must therefore size 'addr' with one spare byte.
 */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops.
The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* If the last byte is non-zero and the next byte is '\0',
             * grow len by one to include the terminator. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* overwrite with swapped family */
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Copy host *addr (len bytes) out to a guest sockaddr at target_addr,
 * converting sa_family into target byte order.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert the control-message (cmsg) chain of a guest msghdr into the
 * host msgh.  Only SCM_RIGHTS payloads get real conversion; any other
 * ancillary data is copied verbatim with a warning.
 * Sets msgh->msg_controllen to the host space actually used.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total cmsg length minus the (aligned) header. */
        int len =
tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        /* Stop before overrunning the host control buffer. */
        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            /* SCM_RIGHTS: payload is an array of ints (fds) to swap. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* Convert the host cmsg chain in msgh back into guest format inside
 * target_msgh.  SCM_RIGHTS and SO_TIMESTAMP payloads are converted;
 * other ancillary data is copied verbatim with a warning.
 * Stores the guest space used into target_msgh->msg_controllen.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total host cmsg length minus aligned header. */
        int len = cmsg->cmsg_len -
CMSG_ALIGN(sizeof (struct cmsghdr));

        /* Stop before overrunning the guest control buffer. */
        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if ((cmsg->cmsg_level == SOL_SOCKET) &&
                                (cmsg->cmsg_type == SCM_RIGHTS)) {
            /* SCM_RIGHTS: swap each passed file descriptor. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        } else if ((cmsg->cmsg_level == SOL_SOCKET) &&
                   (cmsg->cmsg_type == SO_TIMESTAMP) &&
                   (len == sizeof(struct timeval))) {
            /* copy struct timeval to target */
            struct timeval *tv = (struct timeval *)data;
            struct target_timeval *target_tv =
                (struct target_timeval *)target_data;

            target_tv->tv_sec = tswapal(tv->tv_sec);
            target_tv->tv_usec = tswapal(tv->tv_usec);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* The guest may pass either a full int or a single byte. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept both the short ip_mreq and the long ip_mreqn form. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* Addresses are kept in network byte order, so the guest
             * bytes can be handed to the host call unmodified. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

                /* Shared tail for both SO_RCVTIMEO and SO_SNDTIMEO. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* BPF socket filter: convert each target_sock_filter
                 * instruction into host byte order. */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = malloc(fprog.len * sizeof(*filter));
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
                break; /* NOTE(review): unreachable duplicate break — dead code */
        default:
            goto unimplemented;
        }
        /* Common tail for all 'int'-valued SOL_SOCKET options above. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Truncate to whatever length the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.
 */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            /* Pass unknown option numbers straight through to the host. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.
 */
        /* Shared path for all options returning a single int: fetch the
         * value from the host and store int-or-byte depending on the
         * length the guest supplied. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Guest asked for less than an int: return one byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Build a host iovec array from a guest iovec array at target_addr,
 * locking each guest buffer into host memory.  On failure returns NULL
 * with errno set; on success the result must be released with
 * unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len,
max_len; 1709 int i; 1710 1711 if (count == 0) { 1712 errno = 0; 1713 return NULL; 1714 } 1715 if (count < 0 || count > IOV_MAX) { 1716 errno = EINVAL; 1717 return NULL; 1718 } 1719 1720 vec = calloc(count, sizeof(struct iovec)); 1721 if (vec == NULL) { 1722 errno = ENOMEM; 1723 return NULL; 1724 } 1725 1726 target_vec = lock_user(VERIFY_READ, target_addr, 1727 count * sizeof(struct target_iovec), 1); 1728 if (target_vec == NULL) { 1729 errno = EFAULT; 1730 goto fail2; 1731 } 1732 1733 /* ??? If host page size > target page size, this will result in a 1734 value larger than what we can actually support. */ 1735 max_len = 0x7fffffff & TARGET_PAGE_MASK; 1736 total_len = 0; 1737 1738 for (i = 0; i < count; i++) { 1739 abi_ulong base = tswapal(target_vec[i].iov_base); 1740 abi_long len = tswapal(target_vec[i].iov_len); 1741 1742 if (len < 0) { 1743 errno = EINVAL; 1744 goto fail; 1745 } else if (len == 0) { 1746 /* Zero length pointer is ignored. */ 1747 vec[i].iov_base = 0; 1748 } else { 1749 vec[i].iov_base = lock_user(type, base, len, copy); 1750 if (!vec[i].iov_base) { 1751 errno = EFAULT; 1752 goto fail; 1753 } 1754 if (len > max_len - total_len) { 1755 len = max_len - total_len; 1756 } 1757 } 1758 vec[i].iov_len = len; 1759 total_len += len; 1760 } 1761 1762 unlock_user(target_vec, target_addr, 0); 1763 return vec; 1764 1765 fail: 1766 free(vec); 1767 fail2: 1768 unlock_user(target_vec, target_addr, 0); 1769 return NULL; 1770 } 1771 1772 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1773 int count, int copy) 1774 { 1775 struct target_iovec *target_vec; 1776 int i; 1777 1778 target_vec = lock_user(VERIFY_READ, target_addr, 1779 count * sizeof(struct target_iovec), 1); 1780 if (target_vec) { 1781 for (i = 0; i < count; i++) { 1782 abi_ulong base = tswapal(target_vec[i].iov_base); 1783 abi_long len = tswapal(target_vec[i].iov_base); 1784 if (len < 0) { 1785 break; 1786 } 1787 unlock_user(vec[i].iov_base, base, copy ? 
vec[i].iov_len : 0); 1788 } 1789 unlock_user(target_vec, target_addr, 0); 1790 } 1791 1792 free(vec); 1793 } 1794 1795 static inline int target_to_host_sock_type(int *type) 1796 { 1797 int host_type = 0; 1798 int target_type = *type; 1799 1800 switch (target_type & TARGET_SOCK_TYPE_MASK) { 1801 case TARGET_SOCK_DGRAM: 1802 host_type = SOCK_DGRAM; 1803 break; 1804 case TARGET_SOCK_STREAM: 1805 host_type = SOCK_STREAM; 1806 break; 1807 default: 1808 host_type = target_type & TARGET_SOCK_TYPE_MASK; 1809 break; 1810 } 1811 if (target_type & TARGET_SOCK_CLOEXEC) { 1812 #if defined(SOCK_CLOEXEC) 1813 host_type |= SOCK_CLOEXEC; 1814 #else 1815 return -TARGET_EINVAL; 1816 #endif 1817 } 1818 if (target_type & TARGET_SOCK_NONBLOCK) { 1819 #if defined(SOCK_NONBLOCK) 1820 host_type |= SOCK_NONBLOCK; 1821 #elif !defined(O_NONBLOCK) 1822 return -TARGET_EINVAL; 1823 #endif 1824 } 1825 *type = host_type; 1826 return 0; 1827 } 1828 1829 /* Try to emulate socket type flags after socket creation. */ 1830 static int sock_flags_fixup(int fd, int target_type) 1831 { 1832 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 1833 if (target_type & TARGET_SOCK_NONBLOCK) { 1834 int flags = fcntl(fd, F_GETFL); 1835 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 1836 close(fd); 1837 return -TARGET_EINVAL; 1838 } 1839 } 1840 #endif 1841 return fd; 1842 } 1843 1844 /* do_socket() Must return target values and target errnos. */ 1845 static abi_long do_socket(int domain, int type, int protocol) 1846 { 1847 int target_type = type; 1848 int ret; 1849 1850 ret = target_to_host_sock_type(&type); 1851 if (ret) { 1852 return ret; 1853 } 1854 1855 if (domain == PF_NETLINK) 1856 return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */ 1857 ret = get_errno(socket(domain, type, protocol)); 1858 if (ret >= 0) { 1859 ret = sock_flags_fixup(ret, target_type); 1860 } 1861 return ret; 1862 } 1863 1864 /* do_bind() Must return target values and target errnos. 
*/ 1865 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1866 socklen_t addrlen) 1867 { 1868 void *addr; 1869 abi_long ret; 1870 1871 if ((int)addrlen < 0) { 1872 return -TARGET_EINVAL; 1873 } 1874 1875 addr = alloca(addrlen+1); 1876 1877 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1878 if (ret) 1879 return ret; 1880 1881 return get_errno(bind(sockfd, addr, addrlen)); 1882 } 1883 1884 /* do_connect() Must return target values and target errnos. */ 1885 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1886 socklen_t addrlen) 1887 { 1888 void *addr; 1889 abi_long ret; 1890 1891 if ((int)addrlen < 0) { 1892 return -TARGET_EINVAL; 1893 } 1894 1895 addr = alloca(addrlen); 1896 1897 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1898 if (ret) 1899 return ret; 1900 1901 return get_errno(connect(sockfd, addr, addrlen)); 1902 } 1903 1904 /* do_sendrecvmsg() Must return target values and target errnos. */ 1905 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 1906 int flags, int send) 1907 { 1908 abi_long ret, len; 1909 struct target_msghdr *msgp; 1910 struct msghdr msg; 1911 int count; 1912 struct iovec *vec; 1913 abi_ulong target_vec; 1914 1915 /* FIXME */ 1916 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 1917 msgp, 1918 target_msg, 1919 send ? 1 : 0)) 1920 return -TARGET_EFAULT; 1921 if (msgp->msg_name) { 1922 msg.msg_namelen = tswap32(msgp->msg_namelen); 1923 msg.msg_name = alloca(msg.msg_namelen); 1924 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 1925 msg.msg_namelen); 1926 if (ret) { 1927 goto out2; 1928 } 1929 } else { 1930 msg.msg_name = NULL; 1931 msg.msg_namelen = 0; 1932 } 1933 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 1934 msg.msg_control = alloca(msg.msg_controllen); 1935 msg.msg_flags = tswap32(msgp->msg_flags); 1936 1937 count = tswapal(msgp->msg_iovlen); 1938 target_vec = tswapal(msgp->msg_iov); 1939 vec = lock_iovec(send ? 
VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        /* Convert guest control messages, then hand off to the host. */
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;   /* preserve the received byte count */
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                /* Copy the peer address (if any) back to the guest. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;   /* success: return the byte count */
            }
        }
    }

out:
    /* Copy buffers back to the guest only on the receive path. */
    unlock_iovec(vec, target_vec, count, !send);
out2:
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif

/* do_accept4() Must return target values and target errnos.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    /* NULL guest address: the caller does not want the peer address. */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* Write the address and its (possibly shortened) length back. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos.
*/ 2056 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2057 abi_ulong target_addrlen_addr) 2058 { 2059 socklen_t addrlen; 2060 void *addr; 2061 abi_long ret; 2062 2063 if (get_user_u32(addrlen, target_addrlen_addr)) 2064 return -TARGET_EFAULT; 2065 2066 if ((int)addrlen < 0) { 2067 return -TARGET_EINVAL; 2068 } 2069 2070 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2071 return -TARGET_EFAULT; 2072 2073 addr = alloca(addrlen); 2074 2075 ret = get_errno(getsockname(fd, addr, &addrlen)); 2076 if (!is_error(ret)) { 2077 host_to_target_sockaddr(target_addr, addr, addrlen); 2078 if (put_user_u32(addrlen, target_addrlen_addr)) 2079 ret = -TARGET_EFAULT; 2080 } 2081 return ret; 2082 } 2083 2084 /* do_socketpair() Must return target values and target errnos. */ 2085 static abi_long do_socketpair(int domain, int type, int protocol, 2086 abi_ulong target_tab_addr) 2087 { 2088 int tab[2]; 2089 abi_long ret; 2090 2091 target_to_host_sock_type(&type); 2092 2093 ret = get_errno(socketpair(domain, type, protocol, tab)); 2094 if (!is_error(ret)) { 2095 if (put_user_s32(tab[0], target_tab_addr) 2096 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2097 ret = -TARGET_EFAULT; 2098 } 2099 return ret; 2100 } 2101 2102 /* do_sendto() Must return target values and target errnos. 
*/ 2103 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2104 abi_ulong target_addr, socklen_t addrlen) 2105 { 2106 void *addr; 2107 void *host_msg; 2108 abi_long ret; 2109 2110 if ((int)addrlen < 0) { 2111 return -TARGET_EINVAL; 2112 } 2113 2114 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2115 if (!host_msg) 2116 return -TARGET_EFAULT; 2117 if (target_addr) { 2118 addr = alloca(addrlen); 2119 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2120 if (ret) { 2121 unlock_user(host_msg, msg, 0); 2122 return ret; 2123 } 2124 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2125 } else { 2126 ret = get_errno(send(fd, host_msg, len, flags)); 2127 } 2128 unlock_user(host_msg, msg, 0); 2129 return ret; 2130 } 2131 2132 /* do_recvfrom() Must return target values and target errnos. */ 2133 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2134 abi_ulong target_addr, 2135 abi_ulong target_addrlen) 2136 { 2137 socklen_t addrlen; 2138 void *addr; 2139 void *host_msg; 2140 abi_long ret; 2141 2142 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2143 if (!host_msg) 2144 return -TARGET_EFAULT; 2145 if (target_addr) { 2146 if (get_user_u32(addrlen, target_addrlen)) { 2147 ret = -TARGET_EFAULT; 2148 goto fail; 2149 } 2150 if ((int)addrlen < 0) { 2151 ret = -TARGET_EINVAL; 2152 goto fail; 2153 } 2154 addr = alloca(addrlen); 2155 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2156 } else { 2157 addr = NULL; /* To keep compiler quiet. 
 */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* Write the sender's address and its length back. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* '1' (len): copy the received data back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);   /* size of one packed argument */

    /* Arguments are packed as an abi_ulong array at guest address vptr. */
    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

if (get_user_ual(sockfd, vptr) 2241 || get_user_ual(target_addr, vptr + n) 2242 || get_user_ual(target_addrlen, vptr + 2 * n)) 2243 return -TARGET_EFAULT; 2244 2245 ret = do_accept4(sockfd, target_addr, target_addrlen, 0); 2246 } 2247 break; 2248 case SOCKOP_getsockname: 2249 { 2250 abi_ulong sockfd; 2251 abi_ulong target_addr, target_addrlen; 2252 2253 if (get_user_ual(sockfd, vptr) 2254 || get_user_ual(target_addr, vptr + n) 2255 || get_user_ual(target_addrlen, vptr + 2 * n)) 2256 return -TARGET_EFAULT; 2257 2258 ret = do_getsockname(sockfd, target_addr, target_addrlen); 2259 } 2260 break; 2261 case SOCKOP_getpeername: 2262 { 2263 abi_ulong sockfd; 2264 abi_ulong target_addr, target_addrlen; 2265 2266 if (get_user_ual(sockfd, vptr) 2267 || get_user_ual(target_addr, vptr + n) 2268 || get_user_ual(target_addrlen, vptr + 2 * n)) 2269 return -TARGET_EFAULT; 2270 2271 ret = do_getpeername(sockfd, target_addr, target_addrlen); 2272 } 2273 break; 2274 case SOCKOP_socketpair: 2275 { 2276 abi_ulong domain, type, protocol; 2277 abi_ulong tab; 2278 2279 if (get_user_ual(domain, vptr) 2280 || get_user_ual(type, vptr + n) 2281 || get_user_ual(protocol, vptr + 2 * n) 2282 || get_user_ual(tab, vptr + 3 * n)) 2283 return -TARGET_EFAULT; 2284 2285 ret = do_socketpair(domain, type, protocol, tab); 2286 } 2287 break; 2288 case SOCKOP_send: 2289 { 2290 abi_ulong sockfd; 2291 abi_ulong msg; 2292 size_t len; 2293 abi_ulong flags; 2294 2295 if (get_user_ual(sockfd, vptr) 2296 || get_user_ual(msg, vptr + n) 2297 || get_user_ual(len, vptr + 2 * n) 2298 || get_user_ual(flags, vptr + 3 * n)) 2299 return -TARGET_EFAULT; 2300 2301 ret = do_sendto(sockfd, msg, len, flags, 0, 0); 2302 } 2303 break; 2304 case SOCKOP_recv: 2305 { 2306 abi_ulong sockfd; 2307 abi_ulong msg; 2308 size_t len; 2309 abi_ulong flags; 2310 2311 if (get_user_ual(sockfd, vptr) 2312 || get_user_ual(msg, vptr + n) 2313 || get_user_ual(len, vptr + 2 * n) 2314 || get_user_ual(flags, vptr + 3 * n)) 2315 return -TARGET_EFAULT; 
2316 2317 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0); 2318 } 2319 break; 2320 case SOCKOP_sendto: 2321 { 2322 abi_ulong sockfd; 2323 abi_ulong msg; 2324 size_t len; 2325 abi_ulong flags; 2326 abi_ulong addr; 2327 socklen_t addrlen; 2328 2329 if (get_user_ual(sockfd, vptr) 2330 || get_user_ual(msg, vptr + n) 2331 || get_user_ual(len, vptr + 2 * n) 2332 || get_user_ual(flags, vptr + 3 * n) 2333 || get_user_ual(addr, vptr + 4 * n) 2334 || get_user_ual(addrlen, vptr + 5 * n)) 2335 return -TARGET_EFAULT; 2336 2337 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen); 2338 } 2339 break; 2340 case SOCKOP_recvfrom: 2341 { 2342 abi_ulong sockfd; 2343 abi_ulong msg; 2344 size_t len; 2345 abi_ulong flags; 2346 abi_ulong addr; 2347 socklen_t addrlen; 2348 2349 if (get_user_ual(sockfd, vptr) 2350 || get_user_ual(msg, vptr + n) 2351 || get_user_ual(len, vptr + 2 * n) 2352 || get_user_ual(flags, vptr + 3 * n) 2353 || get_user_ual(addr, vptr + 4 * n) 2354 || get_user_ual(addrlen, vptr + 5 * n)) 2355 return -TARGET_EFAULT; 2356 2357 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen); 2358 } 2359 break; 2360 case SOCKOP_shutdown: 2361 { 2362 abi_ulong sockfd, how; 2363 2364 if (get_user_ual(sockfd, vptr) 2365 || get_user_ual(how, vptr + n)) 2366 return -TARGET_EFAULT; 2367 2368 ret = get_errno(shutdown(sockfd, how)); 2369 } 2370 break; 2371 case SOCKOP_sendmsg: 2372 case SOCKOP_recvmsg: 2373 { 2374 abi_ulong fd; 2375 abi_ulong target_msg; 2376 abi_ulong flags; 2377 2378 if (get_user_ual(fd, vptr) 2379 || get_user_ual(target_msg, vptr + n) 2380 || get_user_ual(flags, vptr + 2 * n)) 2381 return -TARGET_EFAULT; 2382 2383 ret = do_sendrecvmsg(fd, target_msg, flags, 2384 (num == SOCKOP_sendmsg)); 2385 } 2386 break; 2387 case SOCKOP_setsockopt: 2388 { 2389 abi_ulong sockfd; 2390 abi_ulong level; 2391 abi_ulong optname; 2392 abi_ulong optval; 2393 socklen_t optlen; 2394 2395 if (get_user_ual(sockfd, vptr) 2396 || get_user_ual(level, vptr + n) 2397 || get_user_ual(optname, 
vptr + 2 * n) 2398 || get_user_ual(optval, vptr + 3 * n) 2399 || get_user_ual(optlen, vptr + 4 * n)) 2400 return -TARGET_EFAULT; 2401 2402 ret = do_setsockopt(sockfd, level, optname, optval, optlen); 2403 } 2404 break; 2405 case SOCKOP_getsockopt: 2406 { 2407 abi_ulong sockfd; 2408 abi_ulong level; 2409 abi_ulong optname; 2410 abi_ulong optval; 2411 socklen_t optlen; 2412 2413 if (get_user_ual(sockfd, vptr) 2414 || get_user_ual(level, vptr + n) 2415 || get_user_ual(optname, vptr + 2 * n) 2416 || get_user_ual(optval, vptr + 3 * n) 2417 || get_user_ual(optlen, vptr + 4 * n)) 2418 return -TARGET_EFAULT; 2419 2420 ret = do_getsockopt(sockfd, level, optname, optval, optlen); 2421 } 2422 break; 2423 default: 2424 gemu_log("Unsupported socketcall: %d\n", num); 2425 ret = -TARGET_ENOSYS; 2426 break; 2427 } 2428 return ret; 2429 } 2430 #endif 2431 2432 #define N_SHM_REGIONS 32 2433 2434 static struct shm_region { 2435 abi_ulong start; 2436 abi_ulong size; 2437 } shm_regions[N_SHM_REGIONS]; 2438 2439 struct target_semid_ds 2440 { 2441 struct target_ipc_perm sem_perm; 2442 abi_ulong sem_otime; 2443 abi_ulong __unused1; 2444 abi_ulong sem_ctime; 2445 abi_ulong __unused2; 2446 abi_ulong sem_nsems; 2447 abi_ulong __unused3; 2448 abi_ulong __unused4; 2449 }; 2450 2451 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2452 abi_ulong target_addr) 2453 { 2454 struct target_ipc_perm *target_ip; 2455 struct target_semid_ds *target_sd; 2456 2457 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2458 return -TARGET_EFAULT; 2459 target_ip = &(target_sd->sem_perm); 2460 host_ip->__key = tswap32(target_ip->__key); 2461 host_ip->uid = tswap32(target_ip->uid); 2462 host_ip->gid = tswap32(target_ip->gid); 2463 host_ip->cuid = tswap32(target_ip->cuid); 2464 host_ip->cgid = tswap32(target_ip->cgid); 2465 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2466 host_ip->mode = tswap32(target_ip->mode); 2467 #else 2468 host_ip->mode = 
tswap16(target_ip->mode); 2469 #endif 2470 #if defined(TARGET_PPC) 2471 host_ip->__seq = tswap32(target_ip->__seq); 2472 #else 2473 host_ip->__seq = tswap16(target_ip->__seq); 2474 #endif 2475 unlock_user_struct(target_sd, target_addr, 0); 2476 return 0; 2477 } 2478 2479 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2480 struct ipc_perm *host_ip) 2481 { 2482 struct target_ipc_perm *target_ip; 2483 struct target_semid_ds *target_sd; 2484 2485 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2486 return -TARGET_EFAULT; 2487 target_ip = &(target_sd->sem_perm); 2488 target_ip->__key = tswap32(host_ip->__key); 2489 target_ip->uid = tswap32(host_ip->uid); 2490 target_ip->gid = tswap32(host_ip->gid); 2491 target_ip->cuid = tswap32(host_ip->cuid); 2492 target_ip->cgid = tswap32(host_ip->cgid); 2493 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2494 target_ip->mode = tswap32(host_ip->mode); 2495 #else 2496 target_ip->mode = tswap16(host_ip->mode); 2497 #endif 2498 #if defined(TARGET_PPC) 2499 target_ip->__seq = tswap32(host_ip->__seq); 2500 #else 2501 target_ip->__seq = tswap16(host_ip->__seq); 2502 #endif 2503 unlock_user_struct(target_sd, target_addr, 1); 2504 return 0; 2505 } 2506 2507 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2508 abi_ulong target_addr) 2509 { 2510 struct target_semid_ds *target_sd; 2511 2512 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2513 return -TARGET_EFAULT; 2514 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2515 return -TARGET_EFAULT; 2516 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2517 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2518 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2519 unlock_user_struct(target_sd, target_addr, 0); 2520 return 0; 2521 } 2522 2523 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2524 struct semid_ds *host_sd) 2525 { 2526 struct 
target_semid_ds *target_sd; 2527 2528 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2529 return -TARGET_EFAULT; 2530 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2531 return -TARGET_EFAULT; 2532 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2533 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2534 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2535 unlock_user_struct(target_sd, target_addr, 1); 2536 return 0; 2537 } 2538 2539 struct target_seminfo { 2540 int semmap; 2541 int semmni; 2542 int semmns; 2543 int semmnu; 2544 int semmsl; 2545 int semopm; 2546 int semume; 2547 int semusz; 2548 int semvmx; 2549 int semaem; 2550 }; 2551 2552 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2553 struct seminfo *host_seminfo) 2554 { 2555 struct target_seminfo *target_seminfo; 2556 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2557 return -TARGET_EFAULT; 2558 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2559 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2560 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2561 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2562 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2563 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2564 __put_user(host_seminfo->semume, &target_seminfo->semume); 2565 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2566 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2567 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2568 unlock_user_struct(target_seminfo, target_addr, 1); 2569 return 0; 2570 } 2571 2572 union semun { 2573 int val; 2574 struct semid_ds *buf; 2575 unsigned short *array; 2576 struct seminfo *__buf; 2577 }; 2578 2579 union target_semun { 2580 int val; 2581 abi_ulong buf; 2582 abi_ulong array; 2583 abi_ulong __buf; 2584 }; 2585 2586 static inline abi_long target_to_host_semarray(int semid, unsigned short 
**host_array, 2587 abi_ulong target_addr) 2588 { 2589 int nsems; 2590 unsigned short *array; 2591 union semun semun; 2592 struct semid_ds semid_ds; 2593 int i, ret; 2594 2595 semun.buf = &semid_ds; 2596 2597 ret = semctl(semid, 0, IPC_STAT, semun); 2598 if (ret == -1) 2599 return get_errno(ret); 2600 2601 nsems = semid_ds.sem_nsems; 2602 2603 *host_array = malloc(nsems*sizeof(unsigned short)); 2604 array = lock_user(VERIFY_READ, target_addr, 2605 nsems*sizeof(unsigned short), 1); 2606 if (!array) 2607 return -TARGET_EFAULT; 2608 2609 for(i=0; i<nsems; i++) { 2610 __get_user((*host_array)[i], &array[i]); 2611 } 2612 unlock_user(array, target_addr, 0); 2613 2614 return 0; 2615 } 2616 2617 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2618 unsigned short **host_array) 2619 { 2620 int nsems; 2621 unsigned short *array; 2622 union semun semun; 2623 struct semid_ds semid_ds; 2624 int i, ret; 2625 2626 semun.buf = &semid_ds; 2627 2628 ret = semctl(semid, 0, IPC_STAT, semun); 2629 if (ret == -1) 2630 return get_errno(ret); 2631 2632 nsems = semid_ds.sem_nsems; 2633 2634 array = lock_user(VERIFY_WRITE, target_addr, 2635 nsems*sizeof(unsigned short), 0); 2636 if (!array) 2637 return -TARGET_EFAULT; 2638 2639 for(i=0; i<nsems; i++) { 2640 __put_user((*host_array)[i], &array[i]); 2641 } 2642 free(*host_array); 2643 unlock_user(array, target_addr, 1); 2644 2645 return 0; 2646 } 2647 2648 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2649 union target_semun target_su) 2650 { 2651 union semun arg; 2652 struct semid_ds dsarg; 2653 unsigned short *array = NULL; 2654 struct seminfo seminfo; 2655 abi_long ret = -TARGET_EINVAL; 2656 abi_long err; 2657 cmd &= 0xff; 2658 2659 switch( cmd ) { 2660 case GETVAL: 2661 case SETVAL: 2662 arg.val = tswap32(target_su.val); 2663 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2664 target_su.val = tswap32(arg.val); 2665 break; 2666 case GETALL: 2667 case SETALL: 2668 err = 
target_to_host_semarray(semid, &array, target_su.array); 2669 if (err) 2670 return err; 2671 arg.array = array; 2672 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2673 err = host_to_target_semarray(semid, target_su.array, &array); 2674 if (err) 2675 return err; 2676 break; 2677 case IPC_STAT: 2678 case IPC_SET: 2679 case SEM_STAT: 2680 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2681 if (err) 2682 return err; 2683 arg.buf = &dsarg; 2684 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2685 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2686 if (err) 2687 return err; 2688 break; 2689 case IPC_INFO: 2690 case SEM_INFO: 2691 arg.__buf = &seminfo; 2692 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2693 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2694 if (err) 2695 return err; 2696 break; 2697 case IPC_RMID: 2698 case GETPID: 2699 case GETNCNT: 2700 case GETZCNT: 2701 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2702 break; 2703 } 2704 2705 return ret; 2706 } 2707 2708 struct target_sembuf { 2709 unsigned short sem_num; 2710 short sem_op; 2711 short sem_flg; 2712 }; 2713 2714 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2715 abi_ulong target_addr, 2716 unsigned nsops) 2717 { 2718 struct target_sembuf *target_sembuf; 2719 int i; 2720 2721 target_sembuf = lock_user(VERIFY_READ, target_addr, 2722 nsops*sizeof(struct target_sembuf), 1); 2723 if (!target_sembuf) 2724 return -TARGET_EFAULT; 2725 2726 for(i=0; i<nsops; i++) { 2727 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2728 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2729 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2730 } 2731 2732 unlock_user(target_sembuf, target_addr, 0); 2733 2734 return 0; 2735 } 2736 2737 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2738 { 2739 struct sembuf sops[nsops]; 2740 2741 if (target_to_host_sembuf(sops, ptr, nsops)) 2742 return 
-TARGET_EFAULT; 2743 2744 return get_errno(semop(semid, sops, nsops)); 2745 } 2746 2747 struct target_msqid_ds 2748 { 2749 struct target_ipc_perm msg_perm; 2750 abi_ulong msg_stime; 2751 #if TARGET_ABI_BITS == 32 2752 abi_ulong __unused1; 2753 #endif 2754 abi_ulong msg_rtime; 2755 #if TARGET_ABI_BITS == 32 2756 abi_ulong __unused2; 2757 #endif 2758 abi_ulong msg_ctime; 2759 #if TARGET_ABI_BITS == 32 2760 abi_ulong __unused3; 2761 #endif 2762 abi_ulong __msg_cbytes; 2763 abi_ulong msg_qnum; 2764 abi_ulong msg_qbytes; 2765 abi_ulong msg_lspid; 2766 abi_ulong msg_lrpid; 2767 abi_ulong __unused4; 2768 abi_ulong __unused5; 2769 }; 2770 2771 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2772 abi_ulong target_addr) 2773 { 2774 struct target_msqid_ds *target_md; 2775 2776 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2777 return -TARGET_EFAULT; 2778 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2779 return -TARGET_EFAULT; 2780 host_md->msg_stime = tswapal(target_md->msg_stime); 2781 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2782 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2783 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2784 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2785 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2786 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2787 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2788 unlock_user_struct(target_md, target_addr, 0); 2789 return 0; 2790 } 2791 2792 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2793 struct msqid_ds *host_md) 2794 { 2795 struct target_msqid_ds *target_md; 2796 2797 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2798 return -TARGET_EFAULT; 2799 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2800 return -TARGET_EFAULT; 2801 target_md->msg_stime = tswapal(host_md->msg_stime); 2802 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2803 
target_md->msg_ctime = tswapal(host_md->msg_ctime); 2804 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2805 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2806 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2807 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2808 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2809 unlock_user_struct(target_md, target_addr, 1); 2810 return 0; 2811 } 2812 2813 struct target_msginfo { 2814 int msgpool; 2815 int msgmap; 2816 int msgmax; 2817 int msgmnb; 2818 int msgmni; 2819 int msgssz; 2820 int msgtql; 2821 unsigned short int msgseg; 2822 }; 2823 2824 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2825 struct msginfo *host_msginfo) 2826 { 2827 struct target_msginfo *target_msginfo; 2828 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2829 return -TARGET_EFAULT; 2830 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2831 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2832 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2833 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2834 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2835 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2836 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2837 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2838 unlock_user_struct(target_msginfo, target_addr, 1); 2839 return 0; 2840 } 2841 2842 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2843 { 2844 struct msqid_ds dsarg; 2845 struct msginfo msginfo; 2846 abi_long ret = -TARGET_EINVAL; 2847 2848 cmd &= 0xff; 2849 2850 switch (cmd) { 2851 case IPC_STAT: 2852 case IPC_SET: 2853 case MSG_STAT: 2854 if (target_to_host_msqid_ds(&dsarg,ptr)) 2855 return -TARGET_EFAULT; 2856 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2857 if (host_to_target_msqid_ds(ptr,&dsarg)) 2858 return -TARGET_EFAULT; 2859 break; 2860 case IPC_RMID: 2861 ret = 
get_errno(msgctl(msgid, cmd, NULL)); 2862 break; 2863 case IPC_INFO: 2864 case MSG_INFO: 2865 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2866 if (host_to_target_msginfo(ptr, &msginfo)) 2867 return -TARGET_EFAULT; 2868 break; 2869 } 2870 2871 return ret; 2872 } 2873 2874 struct target_msgbuf { 2875 abi_long mtype; 2876 char mtext[1]; 2877 }; 2878 2879 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2880 unsigned int msgsz, int msgflg) 2881 { 2882 struct target_msgbuf *target_mb; 2883 struct msgbuf *host_mb; 2884 abi_long ret = 0; 2885 2886 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2887 return -TARGET_EFAULT; 2888 host_mb = malloc(msgsz+sizeof(long)); 2889 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2890 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2891 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2892 free(host_mb); 2893 unlock_user_struct(target_mb, msgp, 0); 2894 2895 return ret; 2896 } 2897 2898 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2899 unsigned int msgsz, abi_long msgtyp, 2900 int msgflg) 2901 { 2902 struct target_msgbuf *target_mb; 2903 char *target_mtext; 2904 struct msgbuf *host_mb; 2905 abi_long ret = 0; 2906 2907 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2908 return -TARGET_EFAULT; 2909 2910 host_mb = g_malloc(msgsz+sizeof(long)); 2911 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2912 2913 if (ret > 0) { 2914 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2915 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2916 if (!target_mtext) { 2917 ret = -TARGET_EFAULT; 2918 goto end; 2919 } 2920 memcpy(target_mb->mtext, host_mb->mtext, ret); 2921 unlock_user(target_mtext, target_mtext_addr, ret); 2922 } 2923 2924 target_mb->mtype = tswapal(host_mb->mtype); 2925 2926 end: 2927 if (target_mb) 2928 unlock_user_struct(target_mb, msgp, 1); 2929 g_free(host_mb); 2930 return ret; 2931 } 2932 2933 static inline abi_long 
target_to_host_shmid_ds(struct shmid_ds *host_sd,
                        abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert *host_sd back into the guest shmid_ds at target_addr. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest-layout shminfo, filled in for shmctl IPC_INFO. */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy a host shminfo out to the guest structure at target_addr. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest-layout shm_info, filled in for shmctl SHM_INFO. */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host shm_info out to the guest structure at target_addr. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/* Emulate shmctl(2).  Only the low byte of 'cmd' is significant. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* IPC_INFO returns shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* SHM_INFO returns shm_info through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

/* Emulate shmat(2): attach the segment at the guest-requested address,
 * or find a free guest VMA if shmaddr is 0, then record the mapping in
 * shm_regions[] and mark the guest pages accessible. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the VMA found above is already reserved host
               address space, so allow shmat to replace it. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attachment so do_shmdt() can clear the page flags.
       If the table is full the attach still succeeds but is untracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/* Emulate shmdt(2): drop the recorded region (clearing its page flags)
 * and detach the host mapping. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the single ipc(2) syscall used by some guest ABIs;
 * the high 16 bits of 'call' carry the interface version. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: ptr points to a kludge struct holding
                   the real msgp and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
3186 ret = do_msgrcv(first, ptr, second, fifth, third); 3187 } 3188 break; 3189 3190 case IPCOP_shmat: 3191 switch (version) { 3192 default: 3193 { 3194 abi_ulong raddr; 3195 raddr = do_shmat(first, ptr, second); 3196 if (is_error(raddr)) 3197 return get_errno(raddr); 3198 if (put_user_ual(raddr, third)) 3199 return -TARGET_EFAULT; 3200 break; 3201 } 3202 case 1: 3203 ret = -TARGET_EINVAL; 3204 break; 3205 } 3206 break; 3207 case IPCOP_shmdt: 3208 ret = do_shmdt(ptr); 3209 break; 3210 3211 case IPCOP_shmget: 3212 /* IPC_* flag values are the same on all linux platforms */ 3213 ret = get_errno(shmget(first, second, third)); 3214 break; 3215 3216 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3217 case IPCOP_shmctl: 3218 ret = do_shmctl(first, second, ptr); 3219 break; 3220 default: 3221 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3222 ret = -TARGET_ENOSYS; 3223 break; 3224 } 3225 return ret; 3226 } 3227 #endif 3228 3229 /* kernel structure types definitions */ 3230 3231 #define STRUCT(name, ...) STRUCT_ ## name, 3232 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3233 enum { 3234 #include "syscall_types.h" 3235 }; 3236 #undef STRUCT 3237 #undef STRUCT_SPECIAL 3238 3239 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a special-case ioctl handler, used when generic argument
 * thunking cannot describe the command's payload. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

/* One row of the ioctl translation table (rows come from ioctls.h). */
struct IOCTLEntry {
    unsigned int target_cmd;    /* ioctl number in the target ABI */
    unsigned int host_cmd;      /* corresponding host ioctl number */
    const char *name;
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* optional special-case handler */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the scratch buffer used for ioctl argument thunking. */
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Special-case handler for FS_IOC_FIEMAP: translates the variable-length
 * fiemap request and reply between target and host layouts. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from guest memory into
     * buf_temp. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif

/* Special-case handler for SIOCGIFCONF: the payload is a struct ifconf
 * holding a guest pointer to a variable-length array of struct ifreq,
 * so both the header and the pointed-to array need translation. */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    /* Fetch and convert the guest's struct ifconf header. */
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    /* Remember the guest's buffer pointer; it is replaced below with a
     * host pointer for the ioctl and restored before copy-back. */
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    /* Rescale the buffer length from target-sized to host-sized ifreqs. */
    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
        if (!host_ifconf) {
            return -TARGET_ENOMEM;
        }
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
        free_buf = 1;
    }
    /* The host ifreq array lives directly after the ifconf header. */
    host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        /* NOTE(review): returning here when free_buf is set leaks
         * host_ifconf -- confirm and route through the cleanup below. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        /* copy ifreq[] to target user */

        /* NOTE(review): this lock_user() result is not checked for NULL
         * before the loop dereferences it -- a bad guest ifc_buf pointer
         * would crash here; confirm and add a check. */
        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq ; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        }
        unlock_user(argptr, target_ifc_buf,
                    target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}

/* Special-case handler for device-mapper ioctls: the struct dm_ioctl
 * header carries data_start/data_size describing a variable-length
 * payload whose layout depends on the command, so each command's
 * payload is converted explicitly in both directions. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            abi_long cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header from guest memory. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    /* NOTE(review): data_size is guest-controlled; the unchecked
     * '* 2' allocation size deserves a sanity bound -- confirm. */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        /* NOTE(review): host -EINVAL, not -TARGET_EINVAL -- this is
         * inconsistent with the rest of the function; confirm. */
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Convert the command-specific input payload, if any. */
    /* NOTE(review): this lock_user() result is not NULL-checked before
     * the memcpy()s below read from it -- confirm and add a check. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* The message payload starts with a 64-bit sector number that
         * needs byte-swapping; the rest is a string. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is target_count dm_target_spec records, each followed
         * by a parameter string and located via its 'next' offset. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert the command-specific output payload back to the
         * guest, honouring the in-band 'next' offsets and setting
         * DM_BUFFER_FULL_FLAG when the guest buffer is too small. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user() result is also unchecked
         * before use below -- confirm and add a check. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* Payload: a 32-bit device count (at offset 0) followed by
             * 64-bit dev_t values starting at offset 8. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally write the (possibly flag-updated) header back. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}

/* Special-case handler for SIOCADDRT/SIOCDELRT: struct rtentry contains
 * an embedded rt_dev string pointer that must be translated to a host
 * pointer before the ioctl, so the struct is converted field by field. */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned
                    long)lock_user_string(
                        tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    /* Release the locked rt_dev string, if one was mapped above. */
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

/* The ioctl translation table, generated from ioctls.h and terminated
 * by a zero target_cmd entry. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};

/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos.
 */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the translation table for the target command. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Commands with a dedicated handler bypass generic thunking. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only: run the ioctl, then thunk the result out. */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only: thunk the argument in, then run the ioctl. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write (also the fallback for unknown access modes):
             * thunk in, run the ioctl, thunk the result back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* termios c_iflag (input mode) bit translation table. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

/* termios c_oflag (output mode) bit translation table, including the
 * multi-bit delay-selector fields (NLDLY, CRDLY, ...). */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

/* termios c_cflag (control mode) bit translation table: baud rates,
 * character size and control flags. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

/* termios c_lflag (local mode) bit translation table: echoing, signal
 * generation and canonical-mode flags. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};

/* Convert a guest struct termios into the host layout, translating the
 * four mode bitmasks and remapping the control-character table. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Indices of c_cc entries differ between ABIs, so copy them one by
     * one; unset entries stay zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

/* Inverse of target_to_host_termios(): convert a host struct termios
 * back to the guest layout. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* Thunk descriptor for struct termios: converted by the two functions
 * above rather than a generic field list. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};

/* mmap(2) flag translation between target and host values. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Copy the emulated LDT (at most 'bytecount' bytes) out to guest memory
 * for modify_ldt(func=0).  Returns the number of bytes copied, or a
 * negative target errno. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/* Install or update one LDT entry from a guest user_desc structure
 * (modify_ldt write; 'oldmode' selects the legacy func==1 semantics).
 * Allocates the emulated LDT in guest memory on first use. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the modify_ldt flags word into descriptor attribute bits. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the attributes into the two 32-bit halves of an x86 segment
     * descriptor (mirrors the kernel's fill_ldt()). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
/* Dispatch modify_ldt(2): func 0 reads the LDT, func 1 writes with the
 * legacy semantics, func 0x11 writes with the new semantics. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate set_thread_area(2): install a TLS descriptor in the emulated
 * GDT, picking a free slot when the guest asks for entry_number == -1.
 * Returns 0 or a negative target errno. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* entry_number == -1 asks us to pick a free TLS slot in the GDT and
     * report the chosen index back to the guest structure. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the modify_ldt-style flags word into descriptor bits. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the attributes into the two 32-bit halves of an x86 segment
     * descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* Emulate get_thread_area(2): read a TLS descriptor out of the emulated
 * GDT and unpack it back into the guest's user_desc structure. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor halves back into modify_ldt-style bits
     * (inverse of the packing in do_set_thread_area()). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32 4273 lm = 0; 4274 #else 4275 lm = (entry_2 >> 21) & 1; 4276 #endif 4277 flags = (seg_32bit << 0) | (contents << 1) | 4278 (read_exec_only << 3) | (limit_in_pages << 4) | 4279 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4280 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4281 base_addr = (entry_1 >> 16) | 4282 (entry_2 & 0xff000000) | 4283 ((entry_2 & 0xff) << 16); 4284 target_ldt_info->base_addr = tswapal(base_addr); 4285 target_ldt_info->limit = tswap32(limit); 4286 target_ldt_info->flags = tswap32(flags); 4287 unlock_user_struct(target_ldt_info, ptr, 1); 4288 return 0; 4289 } 4290 #endif /* TARGET_I386 && TARGET_ABI32 */ 4291 4292 #ifndef TARGET_ABI32 4293 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4294 { 4295 abi_long ret = 0; 4296 abi_ulong val; 4297 int idx; 4298 4299 switch(code) { 4300 case TARGET_ARCH_SET_GS: 4301 case TARGET_ARCH_SET_FS: 4302 if (code == TARGET_ARCH_SET_GS) 4303 idx = R_GS; 4304 else 4305 idx = R_FS; 4306 cpu_x86_load_seg(env, idx, 0); 4307 env->segs[idx].base = addr; 4308 break; 4309 case TARGET_ARCH_GET_GS: 4310 case TARGET_ARCH_GET_FS: 4311 if (code == TARGET_ARCH_GET_GS) 4312 idx = R_GS; 4313 else 4314 idx = R_FS; 4315 val = env->segs[idx].base; 4316 if (put_user(val, addr, abi_ulong)) 4317 ret = -TARGET_EFAULT; 4318 break; 4319 default: 4320 ret = -TARGET_EINVAL; 4321 break; 4322 } 4323 return ret; 4324 } 4325 #endif 4326 4327 #endif /* defined(TARGET_I386) */ 4328 4329 #define NEW_STACK_SIZE 0x40000 4330 4331 4332 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 4333 typedef struct { 4334 CPUArchState *env; 4335 pthread_mutex_t mutex; 4336 pthread_cond_t cond; 4337 pthread_t thread; 4338 uint32_t tid; 4339 abi_ulong child_tidptr; 4340 abi_ulong parent_tidptr; 4341 sigset_t sigmask; 4342 } new_thread_info; 4343 4344 static void *clone_func(void *arg) 4345 { 4346 new_thread_info *info = arg; 4347 CPUArchState *env; 4348 CPUState *cpu; 4349 TaskState *ts; 4350 4351 
env = info->env; 4352 cpu = ENV_GET_CPU(env); 4353 thread_cpu = cpu; 4354 ts = (TaskState *)env->opaque; 4355 info->tid = gettid(); 4356 cpu->host_tid = info->tid; 4357 task_settid(ts); 4358 if (info->child_tidptr) 4359 put_user_u32(info->tid, info->child_tidptr); 4360 if (info->parent_tidptr) 4361 put_user_u32(info->tid, info->parent_tidptr); 4362 /* Enable signals. */ 4363 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4364 /* Signal to the parent that we're ready. */ 4365 pthread_mutex_lock(&info->mutex); 4366 pthread_cond_broadcast(&info->cond); 4367 pthread_mutex_unlock(&info->mutex); 4368 /* Wait until the parent has finshed initializing the tls state. */ 4369 pthread_mutex_lock(&clone_lock); 4370 pthread_mutex_unlock(&clone_lock); 4371 cpu_loop(env); 4372 /* never exits */ 4373 return NULL; 4374 } 4375 4376 /* do_fork() Must return host values and target errnos (unlike most 4377 do_*() functions). */ 4378 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4379 abi_ulong parent_tidptr, target_ulong newtls, 4380 abi_ulong child_tidptr) 4381 { 4382 int ret; 4383 TaskState *ts; 4384 CPUArchState *new_env; 4385 unsigned int nptl_flags; 4386 sigset_t sigmask; 4387 4388 /* Emulate vfork() with fork() */ 4389 if (flags & CLONE_VFORK) 4390 flags &= ~(CLONE_VFORK | CLONE_VM); 4391 4392 if (flags & CLONE_VM) { 4393 TaskState *parent_ts = (TaskState *)env->opaque; 4394 new_thread_info info; 4395 pthread_attr_t attr; 4396 4397 ts = g_malloc0(sizeof(TaskState)); 4398 init_task_state(ts); 4399 /* we create a new CPU instance. */ 4400 new_env = cpu_copy(env); 4401 /* Init regs that differ from the parent. 
*/ 4402 cpu_clone_regs(new_env, newsp); 4403 new_env->opaque = ts; 4404 ts->bprm = parent_ts->bprm; 4405 ts->info = parent_ts->info; 4406 nptl_flags = flags; 4407 flags &= ~CLONE_NPTL_FLAGS2; 4408 4409 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4410 ts->child_tidptr = child_tidptr; 4411 } 4412 4413 if (nptl_flags & CLONE_SETTLS) 4414 cpu_set_tls (new_env, newtls); 4415 4416 /* Grab a mutex so that thread setup appears atomic. */ 4417 pthread_mutex_lock(&clone_lock); 4418 4419 memset(&info, 0, sizeof(info)); 4420 pthread_mutex_init(&info.mutex, NULL); 4421 pthread_mutex_lock(&info.mutex); 4422 pthread_cond_init(&info.cond, NULL); 4423 info.env = new_env; 4424 if (nptl_flags & CLONE_CHILD_SETTID) 4425 info.child_tidptr = child_tidptr; 4426 if (nptl_flags & CLONE_PARENT_SETTID) 4427 info.parent_tidptr = parent_tidptr; 4428 4429 ret = pthread_attr_init(&attr); 4430 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4431 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4432 /* It is not safe to deliver signals until the child has finished 4433 initializing, so temporarily block all signals. */ 4434 sigfillset(&sigmask); 4435 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4436 4437 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4438 /* TODO: Free new CPU state if thread creation failed. */ 4439 4440 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4441 pthread_attr_destroy(&attr); 4442 if (ret == 0) { 4443 /* Wait for the child to initialize. 
*/ 4444 pthread_cond_wait(&info.cond, &info.mutex); 4445 ret = info.tid; 4446 if (flags & CLONE_PARENT_SETTID) 4447 put_user_u32(ret, parent_tidptr); 4448 } else { 4449 ret = -1; 4450 } 4451 pthread_mutex_unlock(&info.mutex); 4452 pthread_cond_destroy(&info.cond); 4453 pthread_mutex_destroy(&info.mutex); 4454 pthread_mutex_unlock(&clone_lock); 4455 } else { 4456 /* if no CLONE_VM, we consider it is a fork */ 4457 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) 4458 return -EINVAL; 4459 fork_start(); 4460 ret = fork(); 4461 if (ret == 0) { 4462 /* Child Process. */ 4463 cpu_clone_regs(env, newsp); 4464 fork_end(1); 4465 /* There is a race condition here. The parent process could 4466 theoretically read the TID in the child process before the child 4467 tid is set. This would require using either ptrace 4468 (not implemented) or having *_tidptr to point at a shared memory 4469 mapping. We can't repeat the spinlock hack used above because 4470 the child process gets its own copy of the lock. */ 4471 if (flags & CLONE_CHILD_SETTID) 4472 put_user_u32(gettid(), child_tidptr); 4473 if (flags & CLONE_PARENT_SETTID) 4474 put_user_u32(gettid(), parent_tidptr); 4475 ts = (TaskState *)env->opaque; 4476 if (flags & CLONE_SETTLS) 4477 cpu_set_tls (env, newtls); 4478 if (flags & CLONE_CHILD_CLEARTID) 4479 ts->child_tidptr = child_tidptr; 4480 } else { 4481 fork_end(0); 4482 } 4483 } 4484 return ret; 4485 } 4486 4487 /* warning : doesn't handle linux specific flags... 
 */
/* Translate a guest fcntl command number to the host's.  Pass-through
 * commands return unchanged; unknown commands yield -TARGET_EINVAL. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
    default:
        return -TARGET_EINVAL;
    }
    /* unreachable: every path above returns */
    return -TARGET_EINVAL;
}

/* Translation table for flock l_type values between guest and host. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};

/* Emulate fcntl(2): convert guest struct flock/flock64 and flag bitmasks
 * to host form, perform the host fcntl, and convert results back.
 * Returns the host result or -TARGET_EFAULT/-TARGET_EINVAL. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* Copy the (possibly updated) lock description back. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the ">> 1" applied to the translated l_type in
         * the 64-bit paths (and not in the 32-bit paths above) looks
         * suspicious — verify against the flock_tbl encoding. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the returned O_* flag bits to guest encoding. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument; no conversion needed. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range (overflow maps to 65534). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Clamp a 32-bit gid to the 16-bit ABI range (overflow maps to 65534). */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
#else /* !USE_UID16 */
/* 32-bit uid/gid ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between guest and host order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
#endif /* USE_UID16 */

/* One-time setup: register thunk struct layouts, build the reverse errno
 * table, and patch ioctl size fields whose size depends on the target. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Replace the all-ones placeholder with the computed size. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#if TARGET_ABI_BITS == 32
/* Combine a 64-bit value split across two 32-bit syscall registers,
 * respecting the guest's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already fits in one register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* Emulate truncate64(2), handling ABIs that align 64-bit register pairs
 * (which shifts the argument registers by one). */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2); same register-pair handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

/* Copy a guest struct timespec into a host one (byte-swapping fields).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

/* Copy a host struct timespec out to guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

/* Copy a guest struct itimerspec into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}

/* Copy a host struct itimerspec out to guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}

#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's 64-bit stat layout at
 * target_addr, with a special case for the ARM EABI layout.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so swap it to host order */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
            | (status & 0xff);
    }
    return status;
}

static int relstr_to_int(const char *s)
{
    /* Convert a uname release string like "2.6.18" to an integer
     * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.
5014 */ 5015 int i, n, tmp; 5016 5017 tmp = 0; 5018 for (i = 0; i < 3; i++) { 5019 n = 0; 5020 while (*s >= '0' && *s <= '9') { 5021 n *= 10; 5022 n += *s - '0'; 5023 s++; 5024 } 5025 tmp = (tmp << 8) + n; 5026 if (*s == '.') { 5027 s++; 5028 } 5029 } 5030 return tmp; 5031 } 5032 5033 int get_osversion(void) 5034 { 5035 static int osversion; 5036 struct new_utsname buf; 5037 const char *s; 5038 5039 if (osversion) 5040 return osversion; 5041 if (qemu_uname_release && *qemu_uname_release) { 5042 s = qemu_uname_release; 5043 } else { 5044 if (sys_uname(&buf)) 5045 return 0; 5046 s = buf.release; 5047 } 5048 osversion = relstr_to_int(s); 5049 return osversion; 5050 } 5051 5052 void init_qemu_uname_release(void) 5053 { 5054 /* Initialize qemu_uname_release for later use. 5055 * If the host kernel is too old and the user hasn't asked for 5056 * a specific fake version number, we might want to fake a minimum 5057 * target kernel version. 5058 */ 5059 #ifdef UNAME_MINIMUM_RELEASE 5060 struct new_utsname buf; 5061 5062 if (qemu_uname_release && *qemu_uname_release) { 5063 return; 5064 } 5065 5066 if (sys_uname(&buf)) { 5067 return; 5068 } 5069 5070 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) { 5071 qemu_uname_release = UNAME_MINIMUM_RELEASE; 5072 } 5073 #endif 5074 } 5075 5076 static int open_self_maps(void *cpu_env, int fd) 5077 { 5078 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5079 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5080 #endif 5081 FILE *fp; 5082 char *line = NULL; 5083 size_t len = 0; 5084 ssize_t read; 5085 5086 fp = fopen("/proc/self/maps", "r"); 5087 if (fp == NULL) { 5088 return -EACCES; 5089 } 5090 5091 while ((read = getline(&line, &len, fp)) != -1) { 5092 int fields, dev_maj, dev_min, inode; 5093 uint64_t min, max, offset; 5094 char flag_r, flag_w, flag_x, flag_p; 5095 char path[512] = ""; 5096 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5097 " %512s", &min, 
&max, &flag_r, &flag_w, &flag_x, 5098 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5099 5100 if ((fields < 10) || (fields > 11)) { 5101 continue; 5102 } 5103 if (!strncmp(path, "[stack]", 7)) { 5104 continue; 5105 } 5106 if (h2g_valid(min) && h2g_valid(max)) { 5107 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5108 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 5109 h2g(min), h2g(max), flag_r, flag_w, 5110 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5111 path[0] ? " " : "", path); 5112 } 5113 } 5114 5115 free(line); 5116 fclose(fp); 5117 5118 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32) 5119 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n", 5120 (unsigned long long)ts->info->stack_limit, 5121 (unsigned long long)(ts->info->start_stack + 5122 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK, 5123 (unsigned long long)0); 5124 #endif 5125 5126 return 0; 5127 } 5128 5129 static int open_self_stat(void *cpu_env, int fd) 5130 { 5131 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 5132 abi_ulong start_stack = ts->info->start_stack; 5133 int i; 5134 5135 for (i = 0; i < 44; i++) { 5136 char buf[128]; 5137 int len; 5138 uint64_t val = 0; 5139 5140 if (i == 0) { 5141 /* pid */ 5142 val = getpid(); 5143 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5144 } else if (i == 1) { 5145 /* app name */ 5146 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5147 } else if (i == 27) { 5148 /* stack bottom */ 5149 val = start_stack; 5150 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5151 } else { 5152 /* for the rest, there is MasterCard */ 5153 snprintf(buf, sizeof(buf), "0%c", i == 43 ? 
                     '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}

/* Synthesize /proc/self/auxv for the guest by copying the auxiliary
 * vector saved on the guest stack into fd.  Always returns 0; a failed
 * lock_user or short write silently truncates the output. */
static int open_self_auxv(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}

/* Return 1 if filename is "/proc/self/<entry>" or "/proc/<own-pid>/<entry>",
 * else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake-open table below. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Synthesize /proc/net/route for a cross-endian guest: copy the header
 * line, then byte-swap the address columns of each route entry.
 * Returns 0 or -EACCES if the host file cannot be opened. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -EACCES;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

/* open(2) front end: intercept the /proc entries listed in fakes[] and
 * serve synthesized content from an unlinked temp file; everything else
 * goes to the host open(). */
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
5309 } 5310 5311 return get_errno(open(path(pathname), flags, mode)); 5312 } 5313 5314 /* do_syscall() should always have a single exit point at the end so 5315 that actions, such as logging of syscall results, can be performed. 5316 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5317 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5318 abi_long arg2, abi_long arg3, abi_long arg4, 5319 abi_long arg5, abi_long arg6, abi_long arg7, 5320 abi_long arg8) 5321 { 5322 CPUState *cpu = ENV_GET_CPU(cpu_env); 5323 abi_long ret; 5324 struct stat st; 5325 struct statfs stfs; 5326 void *p; 5327 5328 #ifdef DEBUG 5329 gemu_log("syscall %d", num); 5330 #endif 5331 if(do_strace) 5332 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5333 5334 switch(num) { 5335 case TARGET_NR_exit: 5336 /* In old applications this may be used to implement _exit(2). 5337 However in threaded applictions it is used for thread termination, 5338 and _exit_group is used for application termination. 5339 Do thread termination if we have more then one thread. */ 5340 /* FIXME: This probably breaks if a signal arrives. We should probably 5341 be disabling signals. */ 5342 if (CPU_NEXT(first_cpu)) { 5343 TaskState *ts; 5344 5345 cpu_list_lock(); 5346 /* Remove the CPU from the list. 
*/ 5347 QTAILQ_REMOVE(&cpus, cpu, node); 5348 cpu_list_unlock(); 5349 ts = ((CPUArchState *)cpu_env)->opaque; 5350 if (ts->child_tidptr) { 5351 put_user_u32(0, ts->child_tidptr); 5352 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5353 NULL, NULL, 0); 5354 } 5355 thread_cpu = NULL; 5356 object_unref(OBJECT(ENV_GET_CPU(cpu_env))); 5357 g_free(ts); 5358 pthread_exit(NULL); 5359 } 5360 #ifdef TARGET_GPROF 5361 _mcleanup(); 5362 #endif 5363 gdb_exit(cpu_env, arg1); 5364 _exit(arg1); 5365 ret = 0; /* avoid warning */ 5366 break; 5367 case TARGET_NR_read: 5368 if (arg3 == 0) 5369 ret = 0; 5370 else { 5371 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5372 goto efault; 5373 ret = get_errno(read(arg1, p, arg3)); 5374 unlock_user(p, arg2, ret); 5375 } 5376 break; 5377 case TARGET_NR_write: 5378 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5379 goto efault; 5380 ret = get_errno(write(arg1, p, arg3)); 5381 unlock_user(p, arg2, 0); 5382 break; 5383 case TARGET_NR_open: 5384 if (!(p = lock_user_string(arg1))) 5385 goto efault; 5386 ret = get_errno(do_open(cpu_env, p, 5387 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5388 arg3)); 5389 unlock_user(p, arg1, 0); 5390 break; 5391 #if defined(TARGET_NR_openat) && defined(__NR_openat) 5392 case TARGET_NR_openat: 5393 if (!(p = lock_user_string(arg2))) 5394 goto efault; 5395 ret = get_errno(sys_openat(arg1, 5396 path(p), 5397 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5398 arg4)); 5399 unlock_user(p, arg2, 0); 5400 break; 5401 #endif 5402 case TARGET_NR_close: 5403 ret = get_errno(close(arg1)); 5404 break; 5405 case TARGET_NR_brk: 5406 ret = do_brk(arg1); 5407 break; 5408 case TARGET_NR_fork: 5409 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5410 break; 5411 #ifdef TARGET_NR_waitpid 5412 case TARGET_NR_waitpid: 5413 { 5414 int status; 5415 ret = get_errno(waitpid(arg1, &status, arg3)); 5416 if (!is_error(ret) && arg2 && ret 5417 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5418 goto 
efault; 5419 } 5420 break; 5421 #endif 5422 #ifdef TARGET_NR_waitid 5423 case TARGET_NR_waitid: 5424 { 5425 siginfo_t info; 5426 info.si_pid = 0; 5427 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5428 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5429 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5430 goto efault; 5431 host_to_target_siginfo(p, &info); 5432 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5433 } 5434 } 5435 break; 5436 #endif 5437 #ifdef TARGET_NR_creat /* not on alpha */ 5438 case TARGET_NR_creat: 5439 if (!(p = lock_user_string(arg1))) 5440 goto efault; 5441 ret = get_errno(creat(p, arg2)); 5442 unlock_user(p, arg1, 0); 5443 break; 5444 #endif 5445 case TARGET_NR_link: 5446 { 5447 void * p2; 5448 p = lock_user_string(arg1); 5449 p2 = lock_user_string(arg2); 5450 if (!p || !p2) 5451 ret = -TARGET_EFAULT; 5452 else 5453 ret = get_errno(link(p, p2)); 5454 unlock_user(p2, arg2, 0); 5455 unlock_user(p, arg1, 0); 5456 } 5457 break; 5458 #if defined(TARGET_NR_linkat) 5459 case TARGET_NR_linkat: 5460 { 5461 void * p2 = NULL; 5462 if (!arg2 || !arg4) 5463 goto efault; 5464 p = lock_user_string(arg2); 5465 p2 = lock_user_string(arg4); 5466 if (!p || !p2) 5467 ret = -TARGET_EFAULT; 5468 else 5469 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5470 unlock_user(p, arg2, 0); 5471 unlock_user(p2, arg4, 0); 5472 } 5473 break; 5474 #endif 5475 case TARGET_NR_unlink: 5476 if (!(p = lock_user_string(arg1))) 5477 goto efault; 5478 ret = get_errno(unlink(p)); 5479 unlock_user(p, arg1, 0); 5480 break; 5481 #if defined(TARGET_NR_unlinkat) 5482 case TARGET_NR_unlinkat: 5483 if (!(p = lock_user_string(arg2))) 5484 goto efault; 5485 ret = get_errno(unlinkat(arg1, p, arg3)); 5486 unlock_user(p, arg2, 0); 5487 break; 5488 #endif 5489 case TARGET_NR_execve: 5490 { 5491 char **argp, **envp; 5492 int argc, envc; 5493 abi_ulong gp; 5494 abi_ulong guest_argp; 5495 abi_ulong guest_envp; 5496 abi_ulong addr; 5497 char **q; 5498 int total_size = 
0; 5499 5500 argc = 0; 5501 guest_argp = arg2; 5502 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5503 if (get_user_ual(addr, gp)) 5504 goto efault; 5505 if (!addr) 5506 break; 5507 argc++; 5508 } 5509 envc = 0; 5510 guest_envp = arg3; 5511 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5512 if (get_user_ual(addr, gp)) 5513 goto efault; 5514 if (!addr) 5515 break; 5516 envc++; 5517 } 5518 5519 argp = alloca((argc + 1) * sizeof(void *)); 5520 envp = alloca((envc + 1) * sizeof(void *)); 5521 5522 for (gp = guest_argp, q = argp; gp; 5523 gp += sizeof(abi_ulong), q++) { 5524 if (get_user_ual(addr, gp)) 5525 goto execve_efault; 5526 if (!addr) 5527 break; 5528 if (!(*q = lock_user_string(addr))) 5529 goto execve_efault; 5530 total_size += strlen(*q) + 1; 5531 } 5532 *q = NULL; 5533 5534 for (gp = guest_envp, q = envp; gp; 5535 gp += sizeof(abi_ulong), q++) { 5536 if (get_user_ual(addr, gp)) 5537 goto execve_efault; 5538 if (!addr) 5539 break; 5540 if (!(*q = lock_user_string(addr))) 5541 goto execve_efault; 5542 total_size += strlen(*q) + 1; 5543 } 5544 *q = NULL; 5545 5546 /* This case will not be caught by the host's execve() if its 5547 page size is bigger than the target's. 
*/ 5548 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5549 ret = -TARGET_E2BIG; 5550 goto execve_end; 5551 } 5552 if (!(p = lock_user_string(arg1))) 5553 goto execve_efault; 5554 ret = get_errno(execve(p, argp, envp)); 5555 unlock_user(p, arg1, 0); 5556 5557 goto execve_end; 5558 5559 execve_efault: 5560 ret = -TARGET_EFAULT; 5561 5562 execve_end: 5563 for (gp = guest_argp, q = argp; *q; 5564 gp += sizeof(abi_ulong), q++) { 5565 if (get_user_ual(addr, gp) 5566 || !addr) 5567 break; 5568 unlock_user(*q, addr, 0); 5569 } 5570 for (gp = guest_envp, q = envp; *q; 5571 gp += sizeof(abi_ulong), q++) { 5572 if (get_user_ual(addr, gp) 5573 || !addr) 5574 break; 5575 unlock_user(*q, addr, 0); 5576 } 5577 } 5578 break; 5579 case TARGET_NR_chdir: 5580 if (!(p = lock_user_string(arg1))) 5581 goto efault; 5582 ret = get_errno(chdir(p)); 5583 unlock_user(p, arg1, 0); 5584 break; 5585 #ifdef TARGET_NR_time 5586 case TARGET_NR_time: 5587 { 5588 time_t host_time; 5589 ret = get_errno(time(&host_time)); 5590 if (!is_error(ret) 5591 && arg1 5592 && put_user_sal(host_time, arg1)) 5593 goto efault; 5594 } 5595 break; 5596 #endif 5597 case TARGET_NR_mknod: 5598 if (!(p = lock_user_string(arg1))) 5599 goto efault; 5600 ret = get_errno(mknod(p, arg2, arg3)); 5601 unlock_user(p, arg1, 0); 5602 break; 5603 #if defined(TARGET_NR_mknodat) 5604 case TARGET_NR_mknodat: 5605 if (!(p = lock_user_string(arg2))) 5606 goto efault; 5607 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5608 unlock_user(p, arg2, 0); 5609 break; 5610 #endif 5611 case TARGET_NR_chmod: 5612 if (!(p = lock_user_string(arg1))) 5613 goto efault; 5614 ret = get_errno(chmod(p, arg2)); 5615 unlock_user(p, arg1, 0); 5616 break; 5617 #ifdef TARGET_NR_break 5618 case TARGET_NR_break: 5619 goto unimplemented; 5620 #endif 5621 #ifdef TARGET_NR_oldstat 5622 case TARGET_NR_oldstat: 5623 goto unimplemented; 5624 #endif 5625 case TARGET_NR_lseek: 5626 ret = get_errno(lseek(arg1, arg2, arg3)); 5627 break; 5628 #if 
defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5629 /* Alpha specific */ 5630 case TARGET_NR_getxpid: 5631 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5632 ret = get_errno(getpid()); 5633 break; 5634 #endif 5635 #ifdef TARGET_NR_getpid 5636 case TARGET_NR_getpid: 5637 ret = get_errno(getpid()); 5638 break; 5639 #endif 5640 case TARGET_NR_mount: 5641 { 5642 /* need to look at the data field */ 5643 void *p2, *p3; 5644 p = lock_user_string(arg1); 5645 p2 = lock_user_string(arg2); 5646 p3 = lock_user_string(arg3); 5647 if (!p || !p2 || !p3) 5648 ret = -TARGET_EFAULT; 5649 else { 5650 /* FIXME - arg5 should be locked, but it isn't clear how to 5651 * do that since it's not guaranteed to be a NULL-terminated 5652 * string. 5653 */ 5654 if ( ! arg5 ) 5655 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); 5656 else 5657 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); 5658 } 5659 unlock_user(p, arg1, 0); 5660 unlock_user(p2, arg2, 0); 5661 unlock_user(p3, arg3, 0); 5662 break; 5663 } 5664 #ifdef TARGET_NR_umount 5665 case TARGET_NR_umount: 5666 if (!(p = lock_user_string(arg1))) 5667 goto efault; 5668 ret = get_errno(umount(p)); 5669 unlock_user(p, arg1, 0); 5670 break; 5671 #endif 5672 #ifdef TARGET_NR_stime /* not on alpha */ 5673 case TARGET_NR_stime: 5674 { 5675 time_t host_time; 5676 if (get_user_sal(host_time, arg1)) 5677 goto efault; 5678 ret = get_errno(stime(&host_time)); 5679 } 5680 break; 5681 #endif 5682 case TARGET_NR_ptrace: 5683 goto unimplemented; 5684 #ifdef TARGET_NR_alarm /* not on alpha */ 5685 case TARGET_NR_alarm: 5686 ret = alarm(arg1); 5687 break; 5688 #endif 5689 #ifdef TARGET_NR_oldfstat 5690 case TARGET_NR_oldfstat: 5691 goto unimplemented; 5692 #endif 5693 #ifdef TARGET_NR_pause /* not on alpha */ 5694 case TARGET_NR_pause: 5695 ret = get_errno(pause()); 5696 break; 5697 #endif 5698 #ifdef TARGET_NR_utime 5699 case TARGET_NR_utime: 5700 { 5701 struct utimbuf tbuf, *host_tbuf; 5702 struct target_utimbuf 
*target_tbuf; 5703 if (arg2) { 5704 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5705 goto efault; 5706 tbuf.actime = tswapal(target_tbuf->actime); 5707 tbuf.modtime = tswapal(target_tbuf->modtime); 5708 unlock_user_struct(target_tbuf, arg2, 0); 5709 host_tbuf = &tbuf; 5710 } else { 5711 host_tbuf = NULL; 5712 } 5713 if (!(p = lock_user_string(arg1))) 5714 goto efault; 5715 ret = get_errno(utime(p, host_tbuf)); 5716 unlock_user(p, arg1, 0); 5717 } 5718 break; 5719 #endif 5720 case TARGET_NR_utimes: 5721 { 5722 struct timeval *tvp, tv[2]; 5723 if (arg2) { 5724 if (copy_from_user_timeval(&tv[0], arg2) 5725 || copy_from_user_timeval(&tv[1], 5726 arg2 + sizeof(struct target_timeval))) 5727 goto efault; 5728 tvp = tv; 5729 } else { 5730 tvp = NULL; 5731 } 5732 if (!(p = lock_user_string(arg1))) 5733 goto efault; 5734 ret = get_errno(utimes(p, tvp)); 5735 unlock_user(p, arg1, 0); 5736 } 5737 break; 5738 #if defined(TARGET_NR_futimesat) 5739 case TARGET_NR_futimesat: 5740 { 5741 struct timeval *tvp, tv[2]; 5742 if (arg3) { 5743 if (copy_from_user_timeval(&tv[0], arg3) 5744 || copy_from_user_timeval(&tv[1], 5745 arg3 + sizeof(struct target_timeval))) 5746 goto efault; 5747 tvp = tv; 5748 } else { 5749 tvp = NULL; 5750 } 5751 if (!(p = lock_user_string(arg2))) 5752 goto efault; 5753 ret = get_errno(futimesat(arg1, path(p), tvp)); 5754 unlock_user(p, arg2, 0); 5755 } 5756 break; 5757 #endif 5758 #ifdef TARGET_NR_stty 5759 case TARGET_NR_stty: 5760 goto unimplemented; 5761 #endif 5762 #ifdef TARGET_NR_gtty 5763 case TARGET_NR_gtty: 5764 goto unimplemented; 5765 #endif 5766 case TARGET_NR_access: 5767 if (!(p = lock_user_string(arg1))) 5768 goto efault; 5769 ret = get_errno(access(path(p), arg2)); 5770 unlock_user(p, arg1, 0); 5771 break; 5772 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5773 case TARGET_NR_faccessat: 5774 if (!(p = lock_user_string(arg2))) 5775 goto efault; 5776 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5777 unlock_user(p, 
arg2, 0); 5778 break; 5779 #endif 5780 #ifdef TARGET_NR_nice /* not on alpha */ 5781 case TARGET_NR_nice: 5782 ret = get_errno(nice(arg1)); 5783 break; 5784 #endif 5785 #ifdef TARGET_NR_ftime 5786 case TARGET_NR_ftime: 5787 goto unimplemented; 5788 #endif 5789 case TARGET_NR_sync: 5790 sync(); 5791 ret = 0; 5792 break; 5793 case TARGET_NR_kill: 5794 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 5795 break; 5796 case TARGET_NR_rename: 5797 { 5798 void *p2; 5799 p = lock_user_string(arg1); 5800 p2 = lock_user_string(arg2); 5801 if (!p || !p2) 5802 ret = -TARGET_EFAULT; 5803 else 5804 ret = get_errno(rename(p, p2)); 5805 unlock_user(p2, arg2, 0); 5806 unlock_user(p, arg1, 0); 5807 } 5808 break; 5809 #if defined(TARGET_NR_renameat) 5810 case TARGET_NR_renameat: 5811 { 5812 void *p2; 5813 p = lock_user_string(arg2); 5814 p2 = lock_user_string(arg4); 5815 if (!p || !p2) 5816 ret = -TARGET_EFAULT; 5817 else 5818 ret = get_errno(renameat(arg1, p, arg3, p2)); 5819 unlock_user(p2, arg4, 0); 5820 unlock_user(p, arg2, 0); 5821 } 5822 break; 5823 #endif 5824 case TARGET_NR_mkdir: 5825 if (!(p = lock_user_string(arg1))) 5826 goto efault; 5827 ret = get_errno(mkdir(p, arg2)); 5828 unlock_user(p, arg1, 0); 5829 break; 5830 #if defined(TARGET_NR_mkdirat) 5831 case TARGET_NR_mkdirat: 5832 if (!(p = lock_user_string(arg2))) 5833 goto efault; 5834 ret = get_errno(mkdirat(arg1, p, arg3)); 5835 unlock_user(p, arg2, 0); 5836 break; 5837 #endif 5838 case TARGET_NR_rmdir: 5839 if (!(p = lock_user_string(arg1))) 5840 goto efault; 5841 ret = get_errno(rmdir(p)); 5842 unlock_user(p, arg1, 0); 5843 break; 5844 case TARGET_NR_dup: 5845 ret = get_errno(dup(arg1)); 5846 break; 5847 case TARGET_NR_pipe: 5848 ret = do_pipe(cpu_env, arg1, 0, 0); 5849 break; 5850 #ifdef TARGET_NR_pipe2 5851 case TARGET_NR_pipe2: 5852 ret = do_pipe(cpu_env, arg1, 5853 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 5854 break; 5855 #endif 5856 case TARGET_NR_times: 5857 { 5858 struct target_tms *tmsp; 
5859 struct tms tms; 5860 ret = get_errno(times(&tms)); 5861 if (arg1) { 5862 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 5863 if (!tmsp) 5864 goto efault; 5865 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 5866 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 5867 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 5868 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 5869 } 5870 if (!is_error(ret)) 5871 ret = host_to_target_clock_t(ret); 5872 } 5873 break; 5874 #ifdef TARGET_NR_prof 5875 case TARGET_NR_prof: 5876 goto unimplemented; 5877 #endif 5878 #ifdef TARGET_NR_signal 5879 case TARGET_NR_signal: 5880 goto unimplemented; 5881 #endif 5882 case TARGET_NR_acct: 5883 if (arg1 == 0) { 5884 ret = get_errno(acct(NULL)); 5885 } else { 5886 if (!(p = lock_user_string(arg1))) 5887 goto efault; 5888 ret = get_errno(acct(path(p))); 5889 unlock_user(p, arg1, 0); 5890 } 5891 break; 5892 #ifdef TARGET_NR_umount2 5893 case TARGET_NR_umount2: 5894 if (!(p = lock_user_string(arg1))) 5895 goto efault; 5896 ret = get_errno(umount2(p, arg2)); 5897 unlock_user(p, arg1, 0); 5898 break; 5899 #endif 5900 #ifdef TARGET_NR_lock 5901 case TARGET_NR_lock: 5902 goto unimplemented; 5903 #endif 5904 case TARGET_NR_ioctl: 5905 ret = do_ioctl(arg1, arg2, arg3); 5906 break; 5907 case TARGET_NR_fcntl: 5908 ret = do_fcntl(arg1, arg2, arg3); 5909 break; 5910 #ifdef TARGET_NR_mpx 5911 case TARGET_NR_mpx: 5912 goto unimplemented; 5913 #endif 5914 case TARGET_NR_setpgid: 5915 ret = get_errno(setpgid(arg1, arg2)); 5916 break; 5917 #ifdef TARGET_NR_ulimit 5918 case TARGET_NR_ulimit: 5919 goto unimplemented; 5920 #endif 5921 #ifdef TARGET_NR_oldolduname 5922 case TARGET_NR_oldolduname: 5923 goto unimplemented; 5924 #endif 5925 case TARGET_NR_umask: 5926 ret = get_errno(umask(arg1)); 5927 break; 5928 case TARGET_NR_chroot: 5929 if (!(p = lock_user_string(arg1))) 5930 goto efault; 5931 ret = 
get_errno(chroot(p)); 5932 unlock_user(p, arg1, 0); 5933 break; 5934 case TARGET_NR_ustat: 5935 goto unimplemented; 5936 case TARGET_NR_dup2: 5937 ret = get_errno(dup2(arg1, arg2)); 5938 break; 5939 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 5940 case TARGET_NR_dup3: 5941 ret = get_errno(dup3(arg1, arg2, arg3)); 5942 break; 5943 #endif 5944 #ifdef TARGET_NR_getppid /* not on alpha */ 5945 case TARGET_NR_getppid: 5946 ret = get_errno(getppid()); 5947 break; 5948 #endif 5949 case TARGET_NR_getpgrp: 5950 ret = get_errno(getpgrp()); 5951 break; 5952 case TARGET_NR_setsid: 5953 ret = get_errno(setsid()); 5954 break; 5955 #ifdef TARGET_NR_sigaction 5956 case TARGET_NR_sigaction: 5957 { 5958 #if defined(TARGET_ALPHA) 5959 struct target_sigaction act, oact, *pact = 0; 5960 struct target_old_sigaction *old_act; 5961 if (arg2) { 5962 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5963 goto efault; 5964 act._sa_handler = old_act->_sa_handler; 5965 target_siginitset(&act.sa_mask, old_act->sa_mask); 5966 act.sa_flags = old_act->sa_flags; 5967 act.sa_restorer = 0; 5968 unlock_user_struct(old_act, arg2, 0); 5969 pact = &act; 5970 } 5971 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5972 if (!is_error(ret) && arg3) { 5973 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5974 goto efault; 5975 old_act->_sa_handler = oact._sa_handler; 5976 old_act->sa_mask = oact.sa_mask.sig[0]; 5977 old_act->sa_flags = oact.sa_flags; 5978 unlock_user_struct(old_act, arg3, 1); 5979 } 5980 #elif defined(TARGET_MIPS) 5981 struct target_sigaction act, oact, *pact, *old_act; 5982 5983 if (arg2) { 5984 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 5985 goto efault; 5986 act._sa_handler = old_act->_sa_handler; 5987 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 5988 act.sa_flags = old_act->sa_flags; 5989 unlock_user_struct(old_act, arg2, 0); 5990 pact = &act; 5991 } else { 5992 pact = NULL; 5993 } 5994 5995 ret = get_errno(do_sigaction(arg1, pact, &oact)); 5996 
5997 if (!is_error(ret) && arg3) { 5998 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 5999 goto efault; 6000 old_act->_sa_handler = oact._sa_handler; 6001 old_act->sa_flags = oact.sa_flags; 6002 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 6003 old_act->sa_mask.sig[1] = 0; 6004 old_act->sa_mask.sig[2] = 0; 6005 old_act->sa_mask.sig[3] = 0; 6006 unlock_user_struct(old_act, arg3, 1); 6007 } 6008 #else 6009 struct target_old_sigaction *old_act; 6010 struct target_sigaction act, oact, *pact; 6011 if (arg2) { 6012 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6013 goto efault; 6014 act._sa_handler = old_act->_sa_handler; 6015 target_siginitset(&act.sa_mask, old_act->sa_mask); 6016 act.sa_flags = old_act->sa_flags; 6017 act.sa_restorer = old_act->sa_restorer; 6018 unlock_user_struct(old_act, arg2, 0); 6019 pact = &act; 6020 } else { 6021 pact = NULL; 6022 } 6023 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6024 if (!is_error(ret) && arg3) { 6025 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6026 goto efault; 6027 old_act->_sa_handler = oact._sa_handler; 6028 old_act->sa_mask = oact.sa_mask.sig[0]; 6029 old_act->sa_flags = oact.sa_flags; 6030 old_act->sa_restorer = oact.sa_restorer; 6031 unlock_user_struct(old_act, arg3, 1); 6032 } 6033 #endif 6034 } 6035 break; 6036 #endif 6037 case TARGET_NR_rt_sigaction: 6038 { 6039 #if defined(TARGET_ALPHA) 6040 struct target_sigaction act, oact, *pact = 0; 6041 struct target_rt_sigaction *rt_act; 6042 /* ??? arg4 == sizeof(sigset_t). 
*/ 6043 if (arg2) { 6044 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 6045 goto efault; 6046 act._sa_handler = rt_act->_sa_handler; 6047 act.sa_mask = rt_act->sa_mask; 6048 act.sa_flags = rt_act->sa_flags; 6049 act.sa_restorer = arg5; 6050 unlock_user_struct(rt_act, arg2, 0); 6051 pact = &act; 6052 } 6053 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6054 if (!is_error(ret) && arg3) { 6055 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 6056 goto efault; 6057 rt_act->_sa_handler = oact._sa_handler; 6058 rt_act->sa_mask = oact.sa_mask; 6059 rt_act->sa_flags = oact.sa_flags; 6060 unlock_user_struct(rt_act, arg3, 1); 6061 } 6062 #else 6063 struct target_sigaction *act; 6064 struct target_sigaction *oact; 6065 6066 if (arg2) { 6067 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 6068 goto efault; 6069 } else 6070 act = NULL; 6071 if (arg3) { 6072 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 6073 ret = -TARGET_EFAULT; 6074 goto rt_sigaction_fail; 6075 } 6076 } else 6077 oact = NULL; 6078 ret = get_errno(do_sigaction(arg1, act, oact)); 6079 rt_sigaction_fail: 6080 if (act) 6081 unlock_user_struct(act, arg2, 0); 6082 if (oact) 6083 unlock_user_struct(oact, arg3, 1); 6084 #endif 6085 } 6086 break; 6087 #ifdef TARGET_NR_sgetmask /* not on alpha */ 6088 case TARGET_NR_sgetmask: 6089 { 6090 sigset_t cur_set; 6091 abi_ulong target_set; 6092 sigprocmask(0, NULL, &cur_set); 6093 host_to_target_old_sigset(&target_set, &cur_set); 6094 ret = target_set; 6095 } 6096 break; 6097 #endif 6098 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6099 case TARGET_NR_ssetmask: 6100 { 6101 sigset_t set, oset, cur_set; 6102 abi_ulong target_set = arg1; 6103 sigprocmask(0, NULL, &cur_set); 6104 target_to_host_old_sigset(&set, &target_set); 6105 sigorset(&set, &set, &cur_set); 6106 sigprocmask(SIG_SETMASK, &set, &oset); 6107 host_to_target_old_sigset(&target_set, &oset); 6108 ret = target_set; 6109 } 6110 break; 6111 #endif 6112 #ifdef TARGET_NR_sigprocmask 6113 case 
TARGET_NR_sigprocmask: 6114 { 6115 #if defined(TARGET_ALPHA) 6116 sigset_t set, oldset; 6117 abi_ulong mask; 6118 int how; 6119 6120 switch (arg1) { 6121 case TARGET_SIG_BLOCK: 6122 how = SIG_BLOCK; 6123 break; 6124 case TARGET_SIG_UNBLOCK: 6125 how = SIG_UNBLOCK; 6126 break; 6127 case TARGET_SIG_SETMASK: 6128 how = SIG_SETMASK; 6129 break; 6130 default: 6131 ret = -TARGET_EINVAL; 6132 goto fail; 6133 } 6134 mask = arg2; 6135 target_to_host_old_sigset(&set, &mask); 6136 6137 ret = get_errno(sigprocmask(how, &set, &oldset)); 6138 if (!is_error(ret)) { 6139 host_to_target_old_sigset(&mask, &oldset); 6140 ret = mask; 6141 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6142 } 6143 #else 6144 sigset_t set, oldset, *set_ptr; 6145 int how; 6146 6147 if (arg2) { 6148 switch (arg1) { 6149 case TARGET_SIG_BLOCK: 6150 how = SIG_BLOCK; 6151 break; 6152 case TARGET_SIG_UNBLOCK: 6153 how = SIG_UNBLOCK; 6154 break; 6155 case TARGET_SIG_SETMASK: 6156 how = SIG_SETMASK; 6157 break; 6158 default: 6159 ret = -TARGET_EINVAL; 6160 goto fail; 6161 } 6162 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6163 goto efault; 6164 target_to_host_old_sigset(&set, p); 6165 unlock_user(p, arg2, 0); 6166 set_ptr = &set; 6167 } else { 6168 how = 0; 6169 set_ptr = NULL; 6170 } 6171 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6172 if (!is_error(ret) && arg3) { 6173 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6174 goto efault; 6175 host_to_target_old_sigset(p, &oldset); 6176 unlock_user(p, arg3, sizeof(target_sigset_t)); 6177 } 6178 #endif 6179 } 6180 break; 6181 #endif 6182 case TARGET_NR_rt_sigprocmask: 6183 { 6184 int how = arg1; 6185 sigset_t set, oldset, *set_ptr; 6186 6187 if (arg2) { 6188 switch(how) { 6189 case TARGET_SIG_BLOCK: 6190 how = SIG_BLOCK; 6191 break; 6192 case TARGET_SIG_UNBLOCK: 6193 how = SIG_UNBLOCK; 6194 break; 6195 case TARGET_SIG_SETMASK: 6196 how = SIG_SETMASK; 6197 break; 6198 default: 6199 ret = 
-TARGET_EINVAL; 6200 goto fail; 6201 } 6202 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6203 goto efault; 6204 target_to_host_sigset(&set, p); 6205 unlock_user(p, arg2, 0); 6206 set_ptr = &set; 6207 } else { 6208 how = 0; 6209 set_ptr = NULL; 6210 } 6211 ret = get_errno(sigprocmask(how, set_ptr, &oldset)); 6212 if (!is_error(ret) && arg3) { 6213 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6214 goto efault; 6215 host_to_target_sigset(p, &oldset); 6216 unlock_user(p, arg3, sizeof(target_sigset_t)); 6217 } 6218 } 6219 break; 6220 #ifdef TARGET_NR_sigpending 6221 case TARGET_NR_sigpending: 6222 { 6223 sigset_t set; 6224 ret = get_errno(sigpending(&set)); 6225 if (!is_error(ret)) { 6226 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6227 goto efault; 6228 host_to_target_old_sigset(p, &set); 6229 unlock_user(p, arg1, sizeof(target_sigset_t)); 6230 } 6231 } 6232 break; 6233 #endif 6234 case TARGET_NR_rt_sigpending: 6235 { 6236 sigset_t set; 6237 ret = get_errno(sigpending(&set)); 6238 if (!is_error(ret)) { 6239 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6240 goto efault; 6241 host_to_target_sigset(p, &set); 6242 unlock_user(p, arg1, sizeof(target_sigset_t)); 6243 } 6244 } 6245 break; 6246 #ifdef TARGET_NR_sigsuspend 6247 case TARGET_NR_sigsuspend: 6248 { 6249 sigset_t set; 6250 #if defined(TARGET_ALPHA) 6251 abi_ulong mask = arg1; 6252 target_to_host_old_sigset(&set, &mask); 6253 #else 6254 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6255 goto efault; 6256 target_to_host_old_sigset(&set, p); 6257 unlock_user(p, arg1, 0); 6258 #endif 6259 ret = get_errno(sigsuspend(&set)); 6260 } 6261 break; 6262 #endif 6263 case TARGET_NR_rt_sigsuspend: 6264 { 6265 sigset_t set; 6266 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6267 goto efault; 6268 target_to_host_sigset(&set, p); 6269 unlock_user(p, arg1, 0); 6270 ret = 
get_errno(sigsuspend(&set)); 6271 } 6272 break; 6273 case TARGET_NR_rt_sigtimedwait: 6274 { 6275 sigset_t set; 6276 struct timespec uts, *puts; 6277 siginfo_t uinfo; 6278 6279 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6280 goto efault; 6281 target_to_host_sigset(&set, p); 6282 unlock_user(p, arg1, 0); 6283 if (arg3) { 6284 puts = &uts; 6285 target_to_host_timespec(puts, arg3); 6286 } else { 6287 puts = NULL; 6288 } 6289 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6290 if (!is_error(ret) && arg2) { 6291 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0))) 6292 goto efault; 6293 host_to_target_siginfo(p, &uinfo); 6294 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6295 } 6296 } 6297 break; 6298 case TARGET_NR_rt_sigqueueinfo: 6299 { 6300 siginfo_t uinfo; 6301 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6302 goto efault; 6303 target_to_host_siginfo(&uinfo, p); 6304 unlock_user(p, arg1, 0); 6305 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6306 } 6307 break; 6308 #ifdef TARGET_NR_sigreturn 6309 case TARGET_NR_sigreturn: 6310 /* NOTE: ret is eax, so not transcoding must be done */ 6311 ret = do_sigreturn(cpu_env); 6312 break; 6313 #endif 6314 case TARGET_NR_rt_sigreturn: 6315 /* NOTE: ret is eax, so not transcoding must be done */ 6316 ret = do_rt_sigreturn(cpu_env); 6317 break; 6318 case TARGET_NR_sethostname: 6319 if (!(p = lock_user_string(arg1))) 6320 goto efault; 6321 ret = get_errno(sethostname(p, arg2)); 6322 unlock_user(p, arg1, 0); 6323 break; 6324 case TARGET_NR_setrlimit: 6325 { 6326 int resource = target_to_host_resource(arg1); 6327 struct target_rlimit *target_rlim; 6328 struct rlimit rlim; 6329 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6330 goto efault; 6331 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6332 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6333 unlock_user_struct(target_rlim, arg2, 0); 6334 ret = 
get_errno(setrlimit(resource, &rlim)); 6335 } 6336 break; 6337 case TARGET_NR_getrlimit: 6338 { 6339 int resource = target_to_host_resource(arg1); 6340 struct target_rlimit *target_rlim; 6341 struct rlimit rlim; 6342 6343 ret = get_errno(getrlimit(resource, &rlim)); 6344 if (!is_error(ret)) { 6345 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6346 goto efault; 6347 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6348 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6349 unlock_user_struct(target_rlim, arg2, 1); 6350 } 6351 } 6352 break; 6353 case TARGET_NR_getrusage: 6354 { 6355 struct rusage rusage; 6356 ret = get_errno(getrusage(arg1, &rusage)); 6357 if (!is_error(ret)) { 6358 host_to_target_rusage(arg2, &rusage); 6359 } 6360 } 6361 break; 6362 case TARGET_NR_gettimeofday: 6363 { 6364 struct timeval tv; 6365 ret = get_errno(gettimeofday(&tv, NULL)); 6366 if (!is_error(ret)) { 6367 if (copy_to_user_timeval(arg1, &tv)) 6368 goto efault; 6369 } 6370 } 6371 break; 6372 case TARGET_NR_settimeofday: 6373 { 6374 struct timeval tv; 6375 if (copy_from_user_timeval(&tv, arg1)) 6376 goto efault; 6377 ret = get_errno(settimeofday(&tv, NULL)); 6378 } 6379 break; 6380 #if defined(TARGET_NR_select) 6381 case TARGET_NR_select: 6382 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 6383 ret = do_select(arg1, arg2, arg3, arg4, arg5); 6384 #else 6385 { 6386 struct target_sel_arg_struct *sel; 6387 abi_ulong inp, outp, exp, tvp; 6388 long nsel; 6389 6390 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 6391 goto efault; 6392 nsel = tswapal(sel->n); 6393 inp = tswapal(sel->inp); 6394 outp = tswapal(sel->outp); 6395 exp = tswapal(sel->exp); 6396 tvp = tswapal(sel->tvp); 6397 unlock_user_struct(sel, arg1, 0); 6398 ret = do_select(nsel, inp, outp, exp, tvp); 6399 } 6400 #endif 6401 break; 6402 #endif 6403 #ifdef TARGET_NR_pselect6 6404 case TARGET_NR_pselect6: 6405 { 6406 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 6407 fd_set rfds, wfds, 
efds; 6408 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 6409 struct timespec ts, *ts_ptr; 6410 6411 /* 6412 * The 6th arg is actually two args smashed together, 6413 * so we cannot use the C library. 6414 */ 6415 sigset_t set; 6416 struct { 6417 sigset_t *set; 6418 size_t size; 6419 } sig, *sig_ptr; 6420 6421 abi_ulong arg_sigset, arg_sigsize, *arg7; 6422 target_sigset_t *target_sigset; 6423 6424 n = arg1; 6425 rfd_addr = arg2; 6426 wfd_addr = arg3; 6427 efd_addr = arg4; 6428 ts_addr = arg5; 6429 6430 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6431 if (ret) { 6432 goto fail; 6433 } 6434 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6435 if (ret) { 6436 goto fail; 6437 } 6438 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6439 if (ret) { 6440 goto fail; 6441 } 6442 6443 /* 6444 * This takes a timespec, and not a timeval, so we cannot 6445 * use the do_select() helper ... 6446 */ 6447 if (ts_addr) { 6448 if (target_to_host_timespec(&ts, ts_addr)) { 6449 goto efault; 6450 } 6451 ts_ptr = &ts; 6452 } else { 6453 ts_ptr = NULL; 6454 } 6455 6456 /* Extract the two packed args for the sigset */ 6457 if (arg6) { 6458 sig_ptr = &sig; 6459 sig.size = _NSIG / 8; 6460 6461 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6462 if (!arg7) { 6463 goto efault; 6464 } 6465 arg_sigset = tswapal(arg7[0]); 6466 arg_sigsize = tswapal(arg7[1]); 6467 unlock_user(arg7, arg6, 0); 6468 6469 if (arg_sigset) { 6470 sig.set = &set; 6471 if (arg_sigsize != sizeof(*target_sigset)) { 6472 /* Like the kernel, we enforce correct size sigsets */ 6473 ret = -TARGET_EINVAL; 6474 goto fail; 6475 } 6476 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6477 sizeof(*target_sigset), 1); 6478 if (!target_sigset) { 6479 goto efault; 6480 } 6481 target_to_host_sigset(&set, target_sigset); 6482 unlock_user(target_sigset, arg_sigset, 0); 6483 } else { 6484 sig.set = NULL; 6485 } 6486 } else { 6487 sig_ptr = NULL; 6488 } 6489 6490 ret = 
get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6491 ts_ptr, sig_ptr)); 6492 6493 if (!is_error(ret)) { 6494 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6495 goto efault; 6496 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 6497 goto efault; 6498 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 6499 goto efault; 6500 6501 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 6502 goto efault; 6503 } 6504 } 6505 break; 6506 #endif 6507 case TARGET_NR_symlink: 6508 { 6509 void *p2; 6510 p = lock_user_string(arg1); 6511 p2 = lock_user_string(arg2); 6512 if (!p || !p2) 6513 ret = -TARGET_EFAULT; 6514 else 6515 ret = get_errno(symlink(p, p2)); 6516 unlock_user(p2, arg2, 0); 6517 unlock_user(p, arg1, 0); 6518 } 6519 break; 6520 #if defined(TARGET_NR_symlinkat) 6521 case TARGET_NR_symlinkat: 6522 { 6523 void *p2; 6524 p = lock_user_string(arg1); 6525 p2 = lock_user_string(arg3); 6526 if (!p || !p2) 6527 ret = -TARGET_EFAULT; 6528 else 6529 ret = get_errno(symlinkat(p, arg2, p2)); 6530 unlock_user(p2, arg3, 0); 6531 unlock_user(p, arg1, 0); 6532 } 6533 break; 6534 #endif 6535 #ifdef TARGET_NR_oldlstat 6536 case TARGET_NR_oldlstat: 6537 goto unimplemented; 6538 #endif 6539 case TARGET_NR_readlink: 6540 { 6541 void *p2; 6542 p = lock_user_string(arg1); 6543 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 6544 if (!p || !p2) { 6545 ret = -TARGET_EFAULT; 6546 } else if (is_proc_myself((const char *)p, "exe")) { 6547 char real[PATH_MAX], *temp; 6548 temp = realpath(exec_path, real); 6549 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ;
                /* NOTE(review): ret is the full strlen(real) even when
                 * snprintf truncates to arg3 bytes; readlink(2) is expected
                 * to return the number of bytes actually placed in the
                 * buffer — verify against current upstream behaviour.  */
                snprintf((char *)p2, arg3, "%s", real);
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_readlinkat)
    /* readlinkat(2): same /proc/self/exe special case as readlink.  */
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real) ;
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
#ifdef TARGET_NR_uselib
    case TARGET_NR_uselib:
        goto unimplemented;
#endif
#ifdef TARGET_NR_swapon
    /* swapon(2): path string plus flags passed straight through.  */
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
    /* reboot(2): only RESTART2 carries a string argument.  */
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               goto efault;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        break;
#ifdef TARGET_NR_readdir
    case TARGET_NR_readdir:
        goto unimplemented;
#endif
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* On these targets old-style mmap takes a single pointer to a
             * six-element argument block in guest memory.  */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                goto efault;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        break;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 passes the offset in MMAP_SHIFT-sized units.  */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6 << MMAP_SHIFT));
        break;
#endif
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
        break;
    case TARGET_NR_mprotect:
        {
            TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.
*/
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
        break;
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
        break;
#endif
    /* truncate(2): guest path string plus length.  */
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.
         */
        ret = 20 - ret;
#endif
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
        goto unimplemented;
#endif
    /* statfs(2): call the host, then marshal struct statfs into the
     * target's layout (shared with fstatfs via convert_statfs).  */
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        break;
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    /* statfs64: same conversion, but the target buffer is arg3 and the
     * target struct is the 64-bit layout (shared via convert_statfs64).  */
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
__put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        break;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
#ifdef TARGET_NR_socketcall
    /* Multiplexed socket calls dispatch to the do_* helpers below.  */
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept4(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
#ifdef CONFIG_ACCEPT4
        ret = do_accept4(arg1, arg2, arg3, arg4);
#else
        goto unimplemented;
#endif
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    /* listen(2) needs no argument translation.  */
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
        break;
#endif

    /* syslog(2): arg2 is the guest buffer (as a string), arg1/arg3 are
     * the action type and length.  */
    case TARGET_NR_syslog:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
        break;

    /* setitimer(2): translate the optional new value from target
     * timevals, and the optional old value back.  */
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue,
&ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    goto efault;
            }
        }
        break;
    /* getitimer(2): copy both timevals back to the guest on success.  */
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    goto efault;
            }
        }
        break;
    /* stat/lstat/fstat share the target_stat conversion at do_stat.  */
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
        do_stat:
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    goto efault;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        break;
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
        goto unimplemented;
#endif
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
        goto unimplemented;
#endif
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
        break;
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
        goto unimplemented;
#endif
#ifdef TARGET_NR_syscall
    /* Indirect syscall: re-enter do_syscall with shifted arguments.  */
    case TARGET_NR_syscall:
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                         arg6, arg7, arg8, 0);
        break;
#endif
    /* wait4(2): translate the wait status and optional rusage.  */
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        goto efault;
                }
                if (target_rusage)
                    host_to_target_rusage(target_rusage, &rusage);
            }
        }
        break;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        break;
#endif
    /* sysinfo(2): marshal the host struct sysinfo field by field.  */
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    goto efault;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        break;
#ifdef TARGET_NR_ipc
    /* SysV IPC multiplexer and the individual IPC entry points all
     * dispatch to shared do_* helpers.  */
    case TARGET_NR_ipc:
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = do_semop(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case
TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        break;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release.
                 */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        goto unimplemented;
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR__llseek /* Not on alpha */
    /* _llseek: arg2:arg3 form a 64-bit offset; the result is stored
     * through the guest pointer arg4.  */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* No host _llseek: emulate with a plain 64-bit lseek.  */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_getdents:
#ifdef __NR_getdents
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            /* 64-bit host, 32-bit target: read host dirents into a
             * temporary buffer and repack them into the (smaller)
             * target layout in guest memory.  */
            dirp = malloc(count);
            if (!dirp) {
                ret = -TARGET_ENOMEM;
                goto fail;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
		struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
		int count1, tnamelen;

		count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    goto efault;
		tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
		    count1 += treclen;
                }
		ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            free(dirp);
        }
#else
        {
            /* Same-width host and target: byte-swap the records in place.  */
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                goto efault;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        break;
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    /* getdents64: byte-swap the 64-bit records in place.  */
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        break;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            int timeout = arg3;
            struct pollfd *pfd;
            unsigned int i;

            /* Copy in the guest pollfd array, converting each entry.  */
            target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd)
                goto efault;

            pfd = alloca(sizeof(struct pollfd) * nfds);
            for(i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }

# ifdef TARGET_NR_ppoll
            if (num == TARGET_NR_ppoll) {
                /* ppoll: arg3 is a timespec and arg4 an optional sigset.  */
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));

                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
7464 } 7465 if (arg4) { 7466 unlock_user(target_set, arg4, 0); 7467 } 7468 } else 7469 # endif 7470 ret = get_errno(poll(pfd, nfds, timeout)); 7471 7472 if (!is_error(ret)) { 7473 for(i = 0; i < nfds; i++) { 7474 target_pfd[i].revents = tswap16(pfd[i].revents); 7475 } 7476 } 7477 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7478 } 7479 break; 7480 #endif 7481 case TARGET_NR_flock: 7482 /* NOTE: the flock constant seems to be the same for every 7483 Linux platform */ 7484 ret = get_errno(flock(arg1, arg2)); 7485 break; 7486 case TARGET_NR_readv: 7487 { 7488 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7489 if (vec != NULL) { 7490 ret = get_errno(readv(arg1, vec, arg3)); 7491 unlock_iovec(vec, arg2, arg3, 1); 7492 } else { 7493 ret = -host_to_target_errno(errno); 7494 } 7495 } 7496 break; 7497 case TARGET_NR_writev: 7498 { 7499 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7500 if (vec != NULL) { 7501 ret = get_errno(writev(arg1, vec, arg3)); 7502 unlock_iovec(vec, arg2, arg3, 0); 7503 } else { 7504 ret = -host_to_target_errno(errno); 7505 } 7506 } 7507 break; 7508 case TARGET_NR_getsid: 7509 ret = get_errno(getsid(arg1)); 7510 break; 7511 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7512 case TARGET_NR_fdatasync: 7513 ret = get_errno(fdatasync(arg1)); 7514 break; 7515 #endif 7516 case TARGET_NR__sysctl: 7517 /* We don't implement this, but ENOTDIR is always a safe 7518 return value. */ 7519 ret = -TARGET_ENOTDIR; 7520 break; 7521 case TARGET_NR_sched_getaffinity: 7522 { 7523 unsigned int mask_size; 7524 unsigned long *mask; 7525 7526 /* 7527 * sched_getaffinity needs multiples of ulong, so need to take 7528 * care of mismatches between target ulong and host ulong sizes. 
7529 */ 7530 if (arg2 & (sizeof(abi_ulong) - 1)) { 7531 ret = -TARGET_EINVAL; 7532 break; 7533 } 7534 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7535 7536 mask = alloca(mask_size); 7537 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7538 7539 if (!is_error(ret)) { 7540 if (copy_to_user(arg3, mask, ret)) { 7541 goto efault; 7542 } 7543 } 7544 } 7545 break; 7546 case TARGET_NR_sched_setaffinity: 7547 { 7548 unsigned int mask_size; 7549 unsigned long *mask; 7550 7551 /* 7552 * sched_setaffinity needs multiples of ulong, so need to take 7553 * care of mismatches between target ulong and host ulong sizes. 7554 */ 7555 if (arg2 & (sizeof(abi_ulong) - 1)) { 7556 ret = -TARGET_EINVAL; 7557 break; 7558 } 7559 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7560 7561 mask = alloca(mask_size); 7562 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7563 goto efault; 7564 } 7565 memcpy(mask, p, arg2); 7566 unlock_user_struct(p, arg2, 0); 7567 7568 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7569 } 7570 break; 7571 case TARGET_NR_sched_setparam: 7572 { 7573 struct sched_param *target_schp; 7574 struct sched_param schp; 7575 7576 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7577 goto efault; 7578 schp.sched_priority = tswap32(target_schp->sched_priority); 7579 unlock_user_struct(target_schp, arg2, 0); 7580 ret = get_errno(sched_setparam(arg1, &schp)); 7581 } 7582 break; 7583 case TARGET_NR_sched_getparam: 7584 { 7585 struct sched_param *target_schp; 7586 struct sched_param schp; 7587 ret = get_errno(sched_getparam(arg1, &schp)); 7588 if (!is_error(ret)) { 7589 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7590 goto efault; 7591 target_schp->sched_priority = tswap32(schp.sched_priority); 7592 unlock_user_struct(target_schp, arg2, 1); 7593 } 7594 } 7595 break; 7596 case TARGET_NR_sched_setscheduler: 7597 { 7598 struct sched_param *target_schp; 7599 struct sched_param schp; 7600 if 
(!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    /* nanosleep(2): the remaining time is written back only on error
     * (i.e. when the sleep was interrupted).  */
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    /* prctl(2): only the options with pointer arguments need special
     * handling; everything else passes the args straight through.  */
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                goto efault;
            }
            break;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            /* Thread names are at most 16 bytes including the NUL.  */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            break;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            break;
        }
#endif
        default:
            /* Most prctl options have no pointer arguments */
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
            break;
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
        break;
#else
        goto unimplemented;
#endif
#endif
#ifdef TARGET_NR_pread64
    /* pread64/pwrite64: some ABIs align 64-bit register pairs, shifting
     * the offset halves up one argument slot.  */
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        break;
    case TARGET_NR_capget:
        goto unimplemented;
    case TARGET_NR_capset:
        goto unimplemented;
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
        break;
#else
        goto unimplemented;
#endif

#ifdef CONFIG_SENDFILE
    case
TARGET_NR_sendfile: 7741 { 7742 off_t *offp = NULL; 7743 off_t off; 7744 if (arg3) { 7745 ret = get_user_sal(off, arg3); 7746 if (is_error(ret)) { 7747 break; 7748 } 7749 offp = &off; 7750 } 7751 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7752 if (!is_error(ret) && arg3) { 7753 abi_long ret2 = put_user_sal(off, arg3); 7754 if (is_error(ret2)) { 7755 ret = ret2; 7756 } 7757 } 7758 break; 7759 } 7760 #ifdef TARGET_NR_sendfile64 7761 case TARGET_NR_sendfile64: 7762 { 7763 off_t *offp = NULL; 7764 off_t off; 7765 if (arg3) { 7766 ret = get_user_s64(off, arg3); 7767 if (is_error(ret)) { 7768 break; 7769 } 7770 offp = &off; 7771 } 7772 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 7773 if (!is_error(ret) && arg3) { 7774 abi_long ret2 = put_user_s64(off, arg3); 7775 if (is_error(ret2)) { 7776 ret = ret2; 7777 } 7778 } 7779 break; 7780 } 7781 #endif 7782 #else 7783 case TARGET_NR_sendfile: 7784 #ifdef TARGET_NR_sendfile64 7785 case TARGET_NR_sendfile64: 7786 #endif 7787 goto unimplemented; 7788 #endif 7789 7790 #ifdef TARGET_NR_getpmsg 7791 case TARGET_NR_getpmsg: 7792 goto unimplemented; 7793 #endif 7794 #ifdef TARGET_NR_putpmsg 7795 case TARGET_NR_putpmsg: 7796 goto unimplemented; 7797 #endif 7798 #ifdef TARGET_NR_vfork 7799 case TARGET_NR_vfork: 7800 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 7801 0, 0, 0, 0)); 7802 break; 7803 #endif 7804 #ifdef TARGET_NR_ugetrlimit 7805 case TARGET_NR_ugetrlimit: 7806 { 7807 struct rlimit rlim; 7808 int resource = target_to_host_resource(arg1); 7809 ret = get_errno(getrlimit(resource, &rlim)); 7810 if (!is_error(ret)) { 7811 struct target_rlimit *target_rlim; 7812 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 7813 goto efault; 7814 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 7815 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 7816 unlock_user_struct(target_rlim, arg2, 1); 7817 } 7818 break; 7819 } 7820 #endif 7821 #ifdef TARGET_NR_truncate64 7822 case 
TARGET_NR_truncate64: 7823 if (!(p = lock_user_string(arg1))) 7824 goto efault; 7825 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 7826 unlock_user(p, arg1, 0); 7827 break; 7828 #endif 7829 #ifdef TARGET_NR_ftruncate64 7830 case TARGET_NR_ftruncate64: 7831 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 7832 break; 7833 #endif 7834 #ifdef TARGET_NR_stat64 7835 case TARGET_NR_stat64: 7836 if (!(p = lock_user_string(arg1))) 7837 goto efault; 7838 ret = get_errno(stat(path(p), &st)); 7839 unlock_user(p, arg1, 0); 7840 if (!is_error(ret)) 7841 ret = host_to_target_stat64(cpu_env, arg2, &st); 7842 break; 7843 #endif 7844 #ifdef TARGET_NR_lstat64 7845 case TARGET_NR_lstat64: 7846 if (!(p = lock_user_string(arg1))) 7847 goto efault; 7848 ret = get_errno(lstat(path(p), &st)); 7849 unlock_user(p, arg1, 0); 7850 if (!is_error(ret)) 7851 ret = host_to_target_stat64(cpu_env, arg2, &st); 7852 break; 7853 #endif 7854 #ifdef TARGET_NR_fstat64 7855 case TARGET_NR_fstat64: 7856 ret = get_errno(fstat(arg1, &st)); 7857 if (!is_error(ret)) 7858 ret = host_to_target_stat64(cpu_env, arg2, &st); 7859 break; 7860 #endif 7861 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 7862 #ifdef TARGET_NR_fstatat64 7863 case TARGET_NR_fstatat64: 7864 #endif 7865 #ifdef TARGET_NR_newfstatat 7866 case TARGET_NR_newfstatat: 7867 #endif 7868 if (!(p = lock_user_string(arg2))) 7869 goto efault; 7870 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 7871 if (!is_error(ret)) 7872 ret = host_to_target_stat64(cpu_env, arg3, &st); 7873 break; 7874 #endif 7875 case TARGET_NR_lchown: 7876 if (!(p = lock_user_string(arg1))) 7877 goto efault; 7878 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 7879 unlock_user(p, arg1, 0); 7880 break; 7881 #ifdef TARGET_NR_getuid 7882 case TARGET_NR_getuid: 7883 ret = get_errno(high2lowuid(getuid())); 7884 break; 7885 #endif 7886 #ifdef TARGET_NR_getgid 7887 case TARGET_NR_getgid: 7888 ret = 
get_errno(high2lowgid(getgid())); 7889 break; 7890 #endif 7891 #ifdef TARGET_NR_geteuid 7892 case TARGET_NR_geteuid: 7893 ret = get_errno(high2lowuid(geteuid())); 7894 break; 7895 #endif 7896 #ifdef TARGET_NR_getegid 7897 case TARGET_NR_getegid: 7898 ret = get_errno(high2lowgid(getegid())); 7899 break; 7900 #endif 7901 case TARGET_NR_setreuid: 7902 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 7903 break; 7904 case TARGET_NR_setregid: 7905 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 7906 break; 7907 case TARGET_NR_getgroups: 7908 { 7909 int gidsetsize = arg1; 7910 target_id *target_grouplist; 7911 gid_t *grouplist; 7912 int i; 7913 7914 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7915 ret = get_errno(getgroups(gidsetsize, grouplist)); 7916 if (gidsetsize == 0) 7917 break; 7918 if (!is_error(ret)) { 7919 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 7920 if (!target_grouplist) 7921 goto efault; 7922 for(i = 0;i < ret; i++) 7923 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 7924 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 7925 } 7926 } 7927 break; 7928 case TARGET_NR_setgroups: 7929 { 7930 int gidsetsize = arg1; 7931 target_id *target_grouplist; 7932 gid_t *grouplist = NULL; 7933 int i; 7934 if (gidsetsize) { 7935 grouplist = alloca(gidsetsize * sizeof(gid_t)); 7936 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 7937 if (!target_grouplist) { 7938 ret = -TARGET_EFAULT; 7939 goto fail; 7940 } 7941 for (i = 0; i < gidsetsize; i++) { 7942 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 7943 } 7944 unlock_user(target_grouplist, arg2, 0); 7945 } 7946 ret = get_errno(setgroups(gidsetsize, grouplist)); 7947 } 7948 break; 7949 case TARGET_NR_fchown: 7950 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 7951 break; 7952 #if defined(TARGET_NR_fchownat) 7953 case TARGET_NR_fchownat: 7954 if (!(p 
= lock_user_string(arg2))) 7955 goto efault; 7956 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 7957 low2highgid(arg4), arg5)); 7958 unlock_user(p, arg2, 0); 7959 break; 7960 #endif 7961 #ifdef TARGET_NR_setresuid 7962 case TARGET_NR_setresuid: 7963 ret = get_errno(setresuid(low2highuid(arg1), 7964 low2highuid(arg2), 7965 low2highuid(arg3))); 7966 break; 7967 #endif 7968 #ifdef TARGET_NR_getresuid 7969 case TARGET_NR_getresuid: 7970 { 7971 uid_t ruid, euid, suid; 7972 ret = get_errno(getresuid(&ruid, &euid, &suid)); 7973 if (!is_error(ret)) { 7974 if (put_user_u16(high2lowuid(ruid), arg1) 7975 || put_user_u16(high2lowuid(euid), arg2) 7976 || put_user_u16(high2lowuid(suid), arg3)) 7977 goto efault; 7978 } 7979 } 7980 break; 7981 #endif 7982 #ifdef TARGET_NR_getresgid 7983 case TARGET_NR_setresgid: 7984 ret = get_errno(setresgid(low2highgid(arg1), 7985 low2highgid(arg2), 7986 low2highgid(arg3))); 7987 break; 7988 #endif 7989 #ifdef TARGET_NR_getresgid 7990 case TARGET_NR_getresgid: 7991 { 7992 gid_t rgid, egid, sgid; 7993 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 7994 if (!is_error(ret)) { 7995 if (put_user_u16(high2lowgid(rgid), arg1) 7996 || put_user_u16(high2lowgid(egid), arg2) 7997 || put_user_u16(high2lowgid(sgid), arg3)) 7998 goto efault; 7999 } 8000 } 8001 break; 8002 #endif 8003 case TARGET_NR_chown: 8004 if (!(p = lock_user_string(arg1))) 8005 goto efault; 8006 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 8007 unlock_user(p, arg1, 0); 8008 break; 8009 case TARGET_NR_setuid: 8010 ret = get_errno(setuid(low2highuid(arg1))); 8011 break; 8012 case TARGET_NR_setgid: 8013 ret = get_errno(setgid(low2highgid(arg1))); 8014 break; 8015 case TARGET_NR_setfsuid: 8016 ret = get_errno(setfsuid(arg1)); 8017 break; 8018 case TARGET_NR_setfsgid: 8019 ret = get_errno(setfsgid(arg1)); 8020 break; 8021 8022 #ifdef TARGET_NR_lchown32 8023 case TARGET_NR_lchown32: 8024 if (!(p = lock_user_string(arg1))) 8025 goto efault; 8026 ret = 
get_errno(lchown(p, arg2, arg3)); 8027 unlock_user(p, arg1, 0); 8028 break; 8029 #endif 8030 #ifdef TARGET_NR_getuid32 8031 case TARGET_NR_getuid32: 8032 ret = get_errno(getuid()); 8033 break; 8034 #endif 8035 8036 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 8037 /* Alpha specific */ 8038 case TARGET_NR_getxuid: 8039 { 8040 uid_t euid; 8041 euid=geteuid(); 8042 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 8043 } 8044 ret = get_errno(getuid()); 8045 break; 8046 #endif 8047 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 8048 /* Alpha specific */ 8049 case TARGET_NR_getxgid: 8050 { 8051 uid_t egid; 8052 egid=getegid(); 8053 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 8054 } 8055 ret = get_errno(getgid()); 8056 break; 8057 #endif 8058 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 8059 /* Alpha specific */ 8060 case TARGET_NR_osf_getsysinfo: 8061 ret = -TARGET_EOPNOTSUPP; 8062 switch (arg1) { 8063 case TARGET_GSI_IEEE_FP_CONTROL: 8064 { 8065 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8066 8067 /* Copied from linux ieee_fpcr_to_swcr. */ 8068 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8069 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8070 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8071 | SWCR_TRAP_ENABLE_DZE 8072 | SWCR_TRAP_ENABLE_OVF); 8073 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8074 | SWCR_TRAP_ENABLE_INE); 8075 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8076 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8077 8078 if (put_user_u64 (swcr, arg2)) 8079 goto efault; 8080 ret = 0; 8081 } 8082 break; 8083 8084 /* case GSI_IEEE_STATE_AT_SIGNAL: 8085 -- Not implemented in linux kernel. 8086 case GSI_UACPROC: 8087 -- Retrieves current unaligned access state; not much used. 8088 case GSI_PROC_TYPE: 8089 -- Retrieves implver information; surely not used. 8090 case GSI_GET_HWRPB: 8091 -- Grabs a copy of the HWRPB; surely not used. 
8092 */ 8093 } 8094 break; 8095 #endif 8096 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8097 /* Alpha specific */ 8098 case TARGET_NR_osf_setsysinfo: 8099 ret = -TARGET_EOPNOTSUPP; 8100 switch (arg1) { 8101 case TARGET_SSI_IEEE_FP_CONTROL: 8102 { 8103 uint64_t swcr, fpcr, orig_fpcr; 8104 8105 if (get_user_u64 (swcr, arg2)) { 8106 goto efault; 8107 } 8108 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8109 fpcr = orig_fpcr & FPCR_DYN_MASK; 8110 8111 /* Copied from linux ieee_swcr_to_fpcr. */ 8112 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8113 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8114 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8115 | SWCR_TRAP_ENABLE_DZE 8116 | SWCR_TRAP_ENABLE_OVF)) << 48; 8117 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8118 | SWCR_TRAP_ENABLE_INE)) << 57; 8119 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8120 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8121 8122 cpu_alpha_store_fpcr(cpu_env, fpcr); 8123 ret = 0; 8124 } 8125 break; 8126 8127 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8128 { 8129 uint64_t exc, fpcr, orig_fpcr; 8130 int si_code; 8131 8132 if (get_user_u64(exc, arg2)) { 8133 goto efault; 8134 } 8135 8136 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8137 8138 /* We only add to the exception status here. */ 8139 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8140 8141 cpu_alpha_store_fpcr(cpu_env, fpcr); 8142 ret = 0; 8143 8144 /* Old exceptions are not signaled. */ 8145 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8146 8147 /* If any exceptions set by this call, 8148 and are unmasked, send a signal. 
*/ 8149 si_code = 0; 8150 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8151 si_code = TARGET_FPE_FLTRES; 8152 } 8153 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8154 si_code = TARGET_FPE_FLTUND; 8155 } 8156 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8157 si_code = TARGET_FPE_FLTOVF; 8158 } 8159 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8160 si_code = TARGET_FPE_FLTDIV; 8161 } 8162 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8163 si_code = TARGET_FPE_FLTINV; 8164 } 8165 if (si_code != 0) { 8166 target_siginfo_t info; 8167 info.si_signo = SIGFPE; 8168 info.si_errno = 0; 8169 info.si_code = si_code; 8170 info._sifields._sigfault._addr 8171 = ((CPUArchState *)cpu_env)->pc; 8172 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8173 } 8174 } 8175 break; 8176 8177 /* case SSI_NVPAIRS: 8178 -- Used with SSIN_UACPROC to enable unaligned accesses. 8179 case SSI_IEEE_STATE_AT_SIGNAL: 8180 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8181 -- Not implemented in linux kernel 8182 */ 8183 } 8184 break; 8185 #endif 8186 #ifdef TARGET_NR_osf_sigprocmask 8187 /* Alpha specific. 
*/ 8188 case TARGET_NR_osf_sigprocmask: 8189 { 8190 abi_ulong mask; 8191 int how; 8192 sigset_t set, oldset; 8193 8194 switch(arg1) { 8195 case TARGET_SIG_BLOCK: 8196 how = SIG_BLOCK; 8197 break; 8198 case TARGET_SIG_UNBLOCK: 8199 how = SIG_UNBLOCK; 8200 break; 8201 case TARGET_SIG_SETMASK: 8202 how = SIG_SETMASK; 8203 break; 8204 default: 8205 ret = -TARGET_EINVAL; 8206 goto fail; 8207 } 8208 mask = arg2; 8209 target_to_host_old_sigset(&set, &mask); 8210 sigprocmask(how, &set, &oldset); 8211 host_to_target_old_sigset(&mask, &oldset); 8212 ret = mask; 8213 } 8214 break; 8215 #endif 8216 8217 #ifdef TARGET_NR_getgid32 8218 case TARGET_NR_getgid32: 8219 ret = get_errno(getgid()); 8220 break; 8221 #endif 8222 #ifdef TARGET_NR_geteuid32 8223 case TARGET_NR_geteuid32: 8224 ret = get_errno(geteuid()); 8225 break; 8226 #endif 8227 #ifdef TARGET_NR_getegid32 8228 case TARGET_NR_getegid32: 8229 ret = get_errno(getegid()); 8230 break; 8231 #endif 8232 #ifdef TARGET_NR_setreuid32 8233 case TARGET_NR_setreuid32: 8234 ret = get_errno(setreuid(arg1, arg2)); 8235 break; 8236 #endif 8237 #ifdef TARGET_NR_setregid32 8238 case TARGET_NR_setregid32: 8239 ret = get_errno(setregid(arg1, arg2)); 8240 break; 8241 #endif 8242 #ifdef TARGET_NR_getgroups32 8243 case TARGET_NR_getgroups32: 8244 { 8245 int gidsetsize = arg1; 8246 uint32_t *target_grouplist; 8247 gid_t *grouplist; 8248 int i; 8249 8250 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8251 ret = get_errno(getgroups(gidsetsize, grouplist)); 8252 if (gidsetsize == 0) 8253 break; 8254 if (!is_error(ret)) { 8255 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8256 if (!target_grouplist) { 8257 ret = -TARGET_EFAULT; 8258 goto fail; 8259 } 8260 for(i = 0;i < ret; i++) 8261 target_grouplist[i] = tswap32(grouplist[i]); 8262 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8263 } 8264 } 8265 break; 8266 #endif 8267 #ifdef TARGET_NR_setgroups32 8268 case TARGET_NR_setgroups32: 8269 { 8270 int gidsetsize = arg1; 
8271 uint32_t *target_grouplist; 8272 gid_t *grouplist; 8273 int i; 8274 8275 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8276 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8277 if (!target_grouplist) { 8278 ret = -TARGET_EFAULT; 8279 goto fail; 8280 } 8281 for(i = 0;i < gidsetsize; i++) 8282 grouplist[i] = tswap32(target_grouplist[i]); 8283 unlock_user(target_grouplist, arg2, 0); 8284 ret = get_errno(setgroups(gidsetsize, grouplist)); 8285 } 8286 break; 8287 #endif 8288 #ifdef TARGET_NR_fchown32 8289 case TARGET_NR_fchown32: 8290 ret = get_errno(fchown(arg1, arg2, arg3)); 8291 break; 8292 #endif 8293 #ifdef TARGET_NR_setresuid32 8294 case TARGET_NR_setresuid32: 8295 ret = get_errno(setresuid(arg1, arg2, arg3)); 8296 break; 8297 #endif 8298 #ifdef TARGET_NR_getresuid32 8299 case TARGET_NR_getresuid32: 8300 { 8301 uid_t ruid, euid, suid; 8302 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8303 if (!is_error(ret)) { 8304 if (put_user_u32(ruid, arg1) 8305 || put_user_u32(euid, arg2) 8306 || put_user_u32(suid, arg3)) 8307 goto efault; 8308 } 8309 } 8310 break; 8311 #endif 8312 #ifdef TARGET_NR_setresgid32 8313 case TARGET_NR_setresgid32: 8314 ret = get_errno(setresgid(arg1, arg2, arg3)); 8315 break; 8316 #endif 8317 #ifdef TARGET_NR_getresgid32 8318 case TARGET_NR_getresgid32: 8319 { 8320 gid_t rgid, egid, sgid; 8321 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8322 if (!is_error(ret)) { 8323 if (put_user_u32(rgid, arg1) 8324 || put_user_u32(egid, arg2) 8325 || put_user_u32(sgid, arg3)) 8326 goto efault; 8327 } 8328 } 8329 break; 8330 #endif 8331 #ifdef TARGET_NR_chown32 8332 case TARGET_NR_chown32: 8333 if (!(p = lock_user_string(arg1))) 8334 goto efault; 8335 ret = get_errno(chown(p, arg2, arg3)); 8336 unlock_user(p, arg1, 0); 8337 break; 8338 #endif 8339 #ifdef TARGET_NR_setuid32 8340 case TARGET_NR_setuid32: 8341 ret = get_errno(setuid(arg1)); 8342 break; 8343 #endif 8344 #ifdef TARGET_NR_setgid32 8345 case TARGET_NR_setgid32: 8346 
ret = get_errno(setgid(arg1)); 8347 break; 8348 #endif 8349 #ifdef TARGET_NR_setfsuid32 8350 case TARGET_NR_setfsuid32: 8351 ret = get_errno(setfsuid(arg1)); 8352 break; 8353 #endif 8354 #ifdef TARGET_NR_setfsgid32 8355 case TARGET_NR_setfsgid32: 8356 ret = get_errno(setfsgid(arg1)); 8357 break; 8358 #endif 8359 8360 case TARGET_NR_pivot_root: 8361 goto unimplemented; 8362 #ifdef TARGET_NR_mincore 8363 case TARGET_NR_mincore: 8364 { 8365 void *a; 8366 ret = -TARGET_EFAULT; 8367 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8368 goto efault; 8369 if (!(p = lock_user_string(arg3))) 8370 goto mincore_fail; 8371 ret = get_errno(mincore(a, arg2, p)); 8372 unlock_user(p, arg3, ret); 8373 mincore_fail: 8374 unlock_user(a, arg1, 0); 8375 } 8376 break; 8377 #endif 8378 #ifdef TARGET_NR_arm_fadvise64_64 8379 case TARGET_NR_arm_fadvise64_64: 8380 { 8381 /* 8382 * arm_fadvise64_64 looks like fadvise64_64 but 8383 * with different argument order 8384 */ 8385 abi_long temp; 8386 temp = arg3; 8387 arg3 = arg4; 8388 arg4 = temp; 8389 } 8390 #endif 8391 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8392 #ifdef TARGET_NR_fadvise64_64 8393 case TARGET_NR_fadvise64_64: 8394 #endif 8395 #ifdef TARGET_NR_fadvise64 8396 case TARGET_NR_fadvise64: 8397 #endif 8398 #ifdef TARGET_S390X 8399 switch (arg4) { 8400 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8401 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8402 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8403 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8404 default: break; 8405 } 8406 #endif 8407 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8408 break; 8409 #endif 8410 #ifdef TARGET_NR_madvise 8411 case TARGET_NR_madvise: 8412 /* A straight passthrough may not be safe because qemu sometimes 8413 turns private file-backed mappings into anonymous mappings. 8414 This will break MADV_DONTNEED. 
8415 This is a hint, so ignoring and returning success is ok. */ 8416 ret = get_errno(0); 8417 break; 8418 #endif 8419 #if TARGET_ABI_BITS == 32 8420 case TARGET_NR_fcntl64: 8421 { 8422 int cmd; 8423 struct flock64 fl; 8424 struct target_flock64 *target_fl; 8425 #ifdef TARGET_ARM 8426 struct target_eabi_flock64 *target_efl; 8427 #endif 8428 8429 cmd = target_to_host_fcntl_cmd(arg2); 8430 if (cmd == -TARGET_EINVAL) { 8431 ret = cmd; 8432 break; 8433 } 8434 8435 switch(arg2) { 8436 case TARGET_F_GETLK64: 8437 #ifdef TARGET_ARM 8438 if (((CPUARMState *)cpu_env)->eabi) { 8439 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8440 goto efault; 8441 fl.l_type = tswap16(target_efl->l_type); 8442 fl.l_whence = tswap16(target_efl->l_whence); 8443 fl.l_start = tswap64(target_efl->l_start); 8444 fl.l_len = tswap64(target_efl->l_len); 8445 fl.l_pid = tswap32(target_efl->l_pid); 8446 unlock_user_struct(target_efl, arg3, 0); 8447 } else 8448 #endif 8449 { 8450 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8451 goto efault; 8452 fl.l_type = tswap16(target_fl->l_type); 8453 fl.l_whence = tswap16(target_fl->l_whence); 8454 fl.l_start = tswap64(target_fl->l_start); 8455 fl.l_len = tswap64(target_fl->l_len); 8456 fl.l_pid = tswap32(target_fl->l_pid); 8457 unlock_user_struct(target_fl, arg3, 0); 8458 } 8459 ret = get_errno(fcntl(arg1, cmd, &fl)); 8460 if (ret == 0) { 8461 #ifdef TARGET_ARM 8462 if (((CPUARMState *)cpu_env)->eabi) { 8463 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8464 goto efault; 8465 target_efl->l_type = tswap16(fl.l_type); 8466 target_efl->l_whence = tswap16(fl.l_whence); 8467 target_efl->l_start = tswap64(fl.l_start); 8468 target_efl->l_len = tswap64(fl.l_len); 8469 target_efl->l_pid = tswap32(fl.l_pid); 8470 unlock_user_struct(target_efl, arg3, 1); 8471 } else 8472 #endif 8473 { 8474 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8475 goto efault; 8476 target_fl->l_type = tswap16(fl.l_type); 8477 target_fl->l_whence = 
tswap16(fl.l_whence); 8478 target_fl->l_start = tswap64(fl.l_start); 8479 target_fl->l_len = tswap64(fl.l_len); 8480 target_fl->l_pid = tswap32(fl.l_pid); 8481 unlock_user_struct(target_fl, arg3, 1); 8482 } 8483 } 8484 break; 8485 8486 case TARGET_F_SETLK64: 8487 case TARGET_F_SETLKW64: 8488 #ifdef TARGET_ARM 8489 if (((CPUARMState *)cpu_env)->eabi) { 8490 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8491 goto efault; 8492 fl.l_type = tswap16(target_efl->l_type); 8493 fl.l_whence = tswap16(target_efl->l_whence); 8494 fl.l_start = tswap64(target_efl->l_start); 8495 fl.l_len = tswap64(target_efl->l_len); 8496 fl.l_pid = tswap32(target_efl->l_pid); 8497 unlock_user_struct(target_efl, arg3, 0); 8498 } else 8499 #endif 8500 { 8501 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8502 goto efault; 8503 fl.l_type = tswap16(target_fl->l_type); 8504 fl.l_whence = tswap16(target_fl->l_whence); 8505 fl.l_start = tswap64(target_fl->l_start); 8506 fl.l_len = tswap64(target_fl->l_len); 8507 fl.l_pid = tswap32(target_fl->l_pid); 8508 unlock_user_struct(target_fl, arg3, 0); 8509 } 8510 ret = get_errno(fcntl(arg1, cmd, &fl)); 8511 break; 8512 default: 8513 ret = do_fcntl(arg1, arg2, arg3); 8514 break; 8515 } 8516 break; 8517 } 8518 #endif 8519 #ifdef TARGET_NR_cacheflush 8520 case TARGET_NR_cacheflush: 8521 /* self-modifying code is handled automatically, so nothing needed */ 8522 ret = 0; 8523 break; 8524 #endif 8525 #ifdef TARGET_NR_security 8526 case TARGET_NR_security: 8527 goto unimplemented; 8528 #endif 8529 #ifdef TARGET_NR_getpagesize 8530 case TARGET_NR_getpagesize: 8531 ret = TARGET_PAGE_SIZE; 8532 break; 8533 #endif 8534 case TARGET_NR_gettid: 8535 ret = get_errno(gettid()); 8536 break; 8537 #ifdef TARGET_NR_readahead 8538 case TARGET_NR_readahead: 8539 #if TARGET_ABI_BITS == 32 8540 if (regpairs_aligned(cpu_env)) { 8541 arg2 = arg3; 8542 arg3 = arg4; 8543 arg4 = arg5; 8544 } 8545 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 
8546 #else 8547 ret = get_errno(readahead(arg1, arg2, arg3)); 8548 #endif 8549 break; 8550 #endif 8551 #ifdef CONFIG_ATTR 8552 #ifdef TARGET_NR_setxattr 8553 case TARGET_NR_listxattr: 8554 case TARGET_NR_llistxattr: 8555 { 8556 void *p, *b = 0; 8557 if (arg2) { 8558 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8559 if (!b) { 8560 ret = -TARGET_EFAULT; 8561 break; 8562 } 8563 } 8564 p = lock_user_string(arg1); 8565 if (p) { 8566 if (num == TARGET_NR_listxattr) { 8567 ret = get_errno(listxattr(p, b, arg3)); 8568 } else { 8569 ret = get_errno(llistxattr(p, b, arg3)); 8570 } 8571 } else { 8572 ret = -TARGET_EFAULT; 8573 } 8574 unlock_user(p, arg1, 0); 8575 unlock_user(b, arg2, arg3); 8576 break; 8577 } 8578 case TARGET_NR_flistxattr: 8579 { 8580 void *b = 0; 8581 if (arg2) { 8582 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8583 if (!b) { 8584 ret = -TARGET_EFAULT; 8585 break; 8586 } 8587 } 8588 ret = get_errno(flistxattr(arg1, b, arg3)); 8589 unlock_user(b, arg2, arg3); 8590 break; 8591 } 8592 case TARGET_NR_setxattr: 8593 case TARGET_NR_lsetxattr: 8594 { 8595 void *p, *n, *v = 0; 8596 if (arg3) { 8597 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8598 if (!v) { 8599 ret = -TARGET_EFAULT; 8600 break; 8601 } 8602 } 8603 p = lock_user_string(arg1); 8604 n = lock_user_string(arg2); 8605 if (p && n) { 8606 if (num == TARGET_NR_setxattr) { 8607 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8608 } else { 8609 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8610 } 8611 } else { 8612 ret = -TARGET_EFAULT; 8613 } 8614 unlock_user(p, arg1, 0); 8615 unlock_user(n, arg2, 0); 8616 unlock_user(v, arg3, 0); 8617 } 8618 break; 8619 case TARGET_NR_fsetxattr: 8620 { 8621 void *n, *v = 0; 8622 if (arg3) { 8623 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8624 if (!v) { 8625 ret = -TARGET_EFAULT; 8626 break; 8627 } 8628 } 8629 n = lock_user_string(arg2); 8630 if (n) { 8631 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8632 } else { 8633 ret = -TARGET_EFAULT; 8634 } 8635 unlock_user(n, 
arg2, 0); 8636 unlock_user(v, arg3, 0); 8637 } 8638 break; 8639 case TARGET_NR_getxattr: 8640 case TARGET_NR_lgetxattr: 8641 { 8642 void *p, *n, *v = 0; 8643 if (arg3) { 8644 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8645 if (!v) { 8646 ret = -TARGET_EFAULT; 8647 break; 8648 } 8649 } 8650 p = lock_user_string(arg1); 8651 n = lock_user_string(arg2); 8652 if (p && n) { 8653 if (num == TARGET_NR_getxattr) { 8654 ret = get_errno(getxattr(p, n, v, arg4)); 8655 } else { 8656 ret = get_errno(lgetxattr(p, n, v, arg4)); 8657 } 8658 } else { 8659 ret = -TARGET_EFAULT; 8660 } 8661 unlock_user(p, arg1, 0); 8662 unlock_user(n, arg2, 0); 8663 unlock_user(v, arg3, arg4); 8664 } 8665 break; 8666 case TARGET_NR_fgetxattr: 8667 { 8668 void *n, *v = 0; 8669 if (arg3) { 8670 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8671 if (!v) { 8672 ret = -TARGET_EFAULT; 8673 break; 8674 } 8675 } 8676 n = lock_user_string(arg2); 8677 if (n) { 8678 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 8679 } else { 8680 ret = -TARGET_EFAULT; 8681 } 8682 unlock_user(n, arg2, 0); 8683 unlock_user(v, arg3, arg4); 8684 } 8685 break; 8686 case TARGET_NR_removexattr: 8687 case TARGET_NR_lremovexattr: 8688 { 8689 void *p, *n; 8690 p = lock_user_string(arg1); 8691 n = lock_user_string(arg2); 8692 if (p && n) { 8693 if (num == TARGET_NR_removexattr) { 8694 ret = get_errno(removexattr(p, n)); 8695 } else { 8696 ret = get_errno(lremovexattr(p, n)); 8697 } 8698 } else { 8699 ret = -TARGET_EFAULT; 8700 } 8701 unlock_user(p, arg1, 0); 8702 unlock_user(n, arg2, 0); 8703 } 8704 break; 8705 case TARGET_NR_fremovexattr: 8706 { 8707 void *n; 8708 n = lock_user_string(arg2); 8709 if (n) { 8710 ret = get_errno(fremovexattr(arg1, n)); 8711 } else { 8712 ret = -TARGET_EFAULT; 8713 } 8714 unlock_user(n, arg2, 0); 8715 } 8716 break; 8717 #endif 8718 #endif /* CONFIG_ATTR */ 8719 #ifdef TARGET_NR_set_thread_area 8720 case TARGET_NR_set_thread_area: 8721 #if defined(TARGET_MIPS) 8722 ((CPUMIPSState *) cpu_env)->tls_value = arg1; 
8723 ret = 0; 8724 break; 8725 #elif defined(TARGET_CRIS) 8726 if (arg1 & 0xff) 8727 ret = -TARGET_EINVAL; 8728 else { 8729 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 8730 ret = 0; 8731 } 8732 break; 8733 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 8734 ret = do_set_thread_area(cpu_env, arg1); 8735 break; 8736 #elif defined(TARGET_M68K) 8737 { 8738 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 8739 ts->tp_value = arg1; 8740 ret = 0; 8741 break; 8742 } 8743 #else 8744 goto unimplemented_nowarn; 8745 #endif 8746 #endif 8747 #ifdef TARGET_NR_get_thread_area 8748 case TARGET_NR_get_thread_area: 8749 #if defined(TARGET_I386) && defined(TARGET_ABI32) 8750 ret = do_get_thread_area(cpu_env, arg1); 8751 break; 8752 #elif defined(TARGET_M68K) 8753 { 8754 TaskState *ts = ((CPUArchState *)cpu_env)->opaque; 8755 ret = ts->tp_value; 8756 break; 8757 } 8758 #else 8759 goto unimplemented_nowarn; 8760 #endif 8761 #endif 8762 #ifdef TARGET_NR_getdomainname 8763 case TARGET_NR_getdomainname: 8764 goto unimplemented_nowarn; 8765 #endif 8766 8767 #ifdef TARGET_NR_clock_gettime 8768 case TARGET_NR_clock_gettime: 8769 { 8770 struct timespec ts; 8771 ret = get_errno(clock_gettime(arg1, &ts)); 8772 if (!is_error(ret)) { 8773 host_to_target_timespec(arg2, &ts); 8774 } 8775 break; 8776 } 8777 #endif 8778 #ifdef TARGET_NR_clock_getres 8779 case TARGET_NR_clock_getres: 8780 { 8781 struct timespec ts; 8782 ret = get_errno(clock_getres(arg1, &ts)); 8783 if (!is_error(ret)) { 8784 host_to_target_timespec(arg2, &ts); 8785 } 8786 break; 8787 } 8788 #endif 8789 #ifdef TARGET_NR_clock_nanosleep 8790 case TARGET_NR_clock_nanosleep: 8791 { 8792 struct timespec ts; 8793 target_to_host_timespec(&ts, arg3); 8794 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? 
&ts : NULL)); 8795 if (arg4) 8796 host_to_target_timespec(arg4, &ts); 8797 break; 8798 } 8799 #endif 8800 8801 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 8802 case TARGET_NR_set_tid_address: 8803 ret = get_errno(set_tid_address((int *)g2h(arg1))); 8804 break; 8805 #endif 8806 8807 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 8808 case TARGET_NR_tkill: 8809 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 8810 break; 8811 #endif 8812 8813 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 8814 case TARGET_NR_tgkill: 8815 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 8816 target_to_host_signal(arg3))); 8817 break; 8818 #endif 8819 8820 #ifdef TARGET_NR_set_robust_list 8821 case TARGET_NR_set_robust_list: 8822 case TARGET_NR_get_robust_list: 8823 /* The ABI for supporting robust futexes has userspace pass 8824 * the kernel a pointer to a linked list which is updated by 8825 * userspace after the syscall; the list is walked by the kernel 8826 * when the thread exits. Since the linked list in QEMU guest 8827 * memory isn't a valid linked list for the host and we have 8828 * no way to reliably intercept the thread-death event, we can't 8829 * support these. Silently return ENOSYS so that guest userspace 8830 * falls back to a non-robust futex implementation (which should 8831 * be OK except in the corner case of the guest crashing while 8832 * holding a mutex that is shared with another process via 8833 * shared memory). 
8834 */ 8835 goto unimplemented_nowarn; 8836 #endif 8837 8838 #if defined(TARGET_NR_utimensat) 8839 case TARGET_NR_utimensat: 8840 { 8841 struct timespec *tsp, ts[2]; 8842 if (!arg3) { 8843 tsp = NULL; 8844 } else { 8845 target_to_host_timespec(ts, arg3); 8846 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 8847 tsp = ts; 8848 } 8849 if (!arg2) 8850 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 8851 else { 8852 if (!(p = lock_user_string(arg2))) { 8853 ret = -TARGET_EFAULT; 8854 goto fail; 8855 } 8856 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 8857 unlock_user(p, arg2, 0); 8858 } 8859 } 8860 break; 8861 #endif 8862 case TARGET_NR_futex: 8863 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 8864 break; 8865 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 8866 case TARGET_NR_inotify_init: 8867 ret = get_errno(sys_inotify_init()); 8868 break; 8869 #endif 8870 #ifdef CONFIG_INOTIFY1 8871 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 8872 case TARGET_NR_inotify_init1: 8873 ret = get_errno(sys_inotify_init1(arg1)); 8874 break; 8875 #endif 8876 #endif 8877 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 8878 case TARGET_NR_inotify_add_watch: 8879 p = lock_user_string(arg2); 8880 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 8881 unlock_user(p, arg2, 0); 8882 break; 8883 #endif 8884 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 8885 case TARGET_NR_inotify_rm_watch: 8886 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 8887 break; 8888 #endif 8889 8890 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 8891 case TARGET_NR_mq_open: 8892 { 8893 struct mq_attr posix_mq_attr; 8894 8895 p = lock_user_string(arg1 - 1); 8896 if (arg4 != 0) 8897 copy_from_user_mq_attr (&posix_mq_attr, arg4); 8898 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr)); 8899 unlock_user (p, arg1, 0); 8900 } 8901 break; 8902 8903 case 
TARGET_NR_mq_unlink: 8904 p = lock_user_string(arg1 - 1); 8905 ret = get_errno(mq_unlink(p)); 8906 unlock_user (p, arg1, 0); 8907 break; 8908 8909 case TARGET_NR_mq_timedsend: 8910 { 8911 struct timespec ts; 8912 8913 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8914 if (arg5 != 0) { 8915 target_to_host_timespec(&ts, arg5); 8916 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 8917 host_to_target_timespec(arg5, &ts); 8918 } 8919 else 8920 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 8921 unlock_user (p, arg2, arg3); 8922 } 8923 break; 8924 8925 case TARGET_NR_mq_timedreceive: 8926 { 8927 struct timespec ts; 8928 unsigned int prio; 8929 8930 p = lock_user (VERIFY_READ, arg2, arg3, 1); 8931 if (arg5 != 0) { 8932 target_to_host_timespec(&ts, arg5); 8933 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 8934 host_to_target_timespec(arg5, &ts); 8935 } 8936 else 8937 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 8938 unlock_user (p, arg2, arg3); 8939 if (arg4 != 0) 8940 put_user_u32(prio, arg4); 8941 } 8942 break; 8943 8944 /* Not implemented for now... 
*/ 8945 /* case TARGET_NR_mq_notify: */ 8946 /* break; */ 8947 8948 case TARGET_NR_mq_getsetattr: 8949 { 8950 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 8951 ret = 0; 8952 if (arg3 != 0) { 8953 ret = mq_getattr(arg1, &posix_mq_attr_out); 8954 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 8955 } 8956 if (arg2 != 0) { 8957 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 8958 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 8959 } 8960 8961 } 8962 break; 8963 #endif 8964 8965 #ifdef CONFIG_SPLICE 8966 #ifdef TARGET_NR_tee 8967 case TARGET_NR_tee: 8968 { 8969 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 8970 } 8971 break; 8972 #endif 8973 #ifdef TARGET_NR_splice 8974 case TARGET_NR_splice: 8975 { 8976 loff_t loff_in, loff_out; 8977 loff_t *ploff_in = NULL, *ploff_out = NULL; 8978 if(arg2) { 8979 get_user_u64(loff_in, arg2); 8980 ploff_in = &loff_in; 8981 } 8982 if(arg4) { 8983 get_user_u64(loff_out, arg2); 8984 ploff_out = &loff_out; 8985 } 8986 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 8987 } 8988 break; 8989 #endif 8990 #ifdef TARGET_NR_vmsplice 8991 case TARGET_NR_vmsplice: 8992 { 8993 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 8994 if (vec != NULL) { 8995 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 8996 unlock_iovec(vec, arg2, arg3, 0); 8997 } else { 8998 ret = -host_to_target_errno(errno); 8999 } 9000 } 9001 break; 9002 #endif 9003 #endif /* CONFIG_SPLICE */ 9004 #ifdef CONFIG_EVENTFD 9005 #if defined(TARGET_NR_eventfd) 9006 case TARGET_NR_eventfd: 9007 ret = get_errno(eventfd(arg1, 0)); 9008 break; 9009 #endif 9010 #if defined(TARGET_NR_eventfd2) 9011 case TARGET_NR_eventfd2: 9012 { 9013 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 9014 if (arg2 & TARGET_O_NONBLOCK) { 9015 host_flags |= O_NONBLOCK; 9016 } 9017 if (arg2 & TARGET_O_CLOEXEC) { 9018 host_flags |= O_CLOEXEC; 9019 } 9020 ret = get_errno(eventfd(arg1, host_flags)); 9021 break; 9022 } 9023 #endif 9024 #endif 
/* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs pass each 64-bit offset as a pair of registers;
         * reassemble them before calling the host. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS starts the 64-bit offset pairs one register later (arg3)
         * than other 32-bit targets -- presumably due to register-pair
         * alignment in the o32 ABI; confirm against the MIPS syscall ABI. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
        {
            struct epoll_event ep;
            struct epoll_event *epp = 0;
            /* arg4 (the event struct) is optional: EPOLL_CTL_DEL passes
             * NULL, in which case we hand NULL to the host too. */
            if (arg4) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    goto efault;
                }
                ep.events =
tswap32(target_ep->events);
                /* The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
                epp = &ep;
            }
            ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
            break;
        }
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
        {
            struct target_epoll_event *target_ep;
            struct epoll_event *ep;
            int epfd = arg1;
            int maxevents = arg3;
            int timeout = arg4;

            target_ep = lock_user(VERIFY_WRITE, arg2,
                                  maxevents * sizeof(struct target_epoll_event), 1);
            if (!target_ep) {
                goto efault;
            }

            /* NOTE(review): maxevents comes straight from the guest and is
             * not range-checked before this alloca; a huge or negative value
             * could smash the stack -- consider validating it. */
            ep = alloca(maxevents * sizeof(struct epoll_event));

            /* Shared body: dispatch on the actual syscall number to handle
             * the extra sigmask argument of epoll_pwait. */
            switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
            case TARGET_NR_epoll_pwait:
            {
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg5) {
                    target_set = lock_user(VERIFY_READ, arg5,
                                           sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_ep, arg2, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                    unlock_user(target_set, arg5, 0);
                } else {
                    set = NULL;
                }

                ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
                break;
            }
#endif
#if defined(TARGET_NR_epoll_wait)
            case TARGET_NR_epoll_wait:
                ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
                break;
#endif
            default:
                ret = -TARGET_ENOSYS;
            }
            /* On success, byte-swap each returned event back into the
             * guest's buffer. */
            if (!is_error(ret)) {
                int i;
                for (i = 0; i <
ret; i++) { 9158 target_ep[i].events = tswap32(ep[i].events); 9159 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 9160 } 9161 } 9162 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 9163 break; 9164 } 9165 #endif 9166 #endif 9167 #ifdef TARGET_NR_prlimit64 9168 case TARGET_NR_prlimit64: 9169 { 9170 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 9171 struct target_rlimit64 *target_rnew, *target_rold; 9172 struct host_rlimit64 rnew, rold, *rnewp = 0; 9173 if (arg3) { 9174 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 9175 goto efault; 9176 } 9177 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 9178 rnew.rlim_max = tswap64(target_rnew->rlim_max); 9179 unlock_user_struct(target_rnew, arg3, 0); 9180 rnewp = &rnew; 9181 } 9182 9183 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0)); 9184 if (!is_error(ret) && arg4) { 9185 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9186 goto efault; 9187 } 9188 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9189 target_rold->rlim_max = tswap64(rold.rlim_max); 9190 unlock_user_struct(target_rold, arg4, 1); 9191 } 9192 break; 9193 } 9194 #endif 9195 #ifdef TARGET_NR_gethostname 9196 case TARGET_NR_gethostname: 9197 { 9198 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9199 if (name) { 9200 ret = get_errno(gethostname(name, arg2)); 9201 unlock_user(name, arg1, arg2); 9202 } else { 9203 ret = -TARGET_EFAULT; 9204 } 9205 break; 9206 } 9207 #endif 9208 #ifdef TARGET_NR_atomic_cmpxchg_32 9209 case TARGET_NR_atomic_cmpxchg_32: 9210 { 9211 /* should use start_exclusive from main.c */ 9212 abi_ulong mem_value; 9213 if (get_user_u32(mem_value, arg6)) { 9214 target_siginfo_t info; 9215 info.si_signo = SIGSEGV; 9216 info.si_errno = 0; 9217 info.si_code = TARGET_SEGV_MAPERR; 9218 info._sifields._sigfault._addr = arg6; 9219 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 9220 ret = 0xdeadbeef; 9221 9222 } 9223 if (mem_value == arg2) 9224 
put_user_u32(arg1, arg6);
            /* cmpxchg convention: return the value that was in memory;
             * the guest compares it against the expected value itself. */
            ret = mem_value;
            break;
        }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        {
            /* Like the kernel implementation and the qemu arm barrier, no-op this? */
            break;
        }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
        {
            /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

            struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
            struct target_sigevent *ptarget_sevp;
            struct target_timer_t *ptarget_timer;

            int clkid = arg1;
            /* Timer ids handed to the guest are indices into the
             * g_posix_timers table, tagged with 0xcafe0000 below. */
            int timer_index = next_free_host_timer();

            if (timer_index < 0) {
                ret = -TARGET_EAGAIN;
            } else {
                timer_t *phtimer = g_posix_timers  + timer_index;

                if (arg2) {
                    if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                        goto efault;
                    }

                    /* Only sigev_signo and sigev_notify are translated here;
                     * sigev_value is not copied across.
                     * NOTE(review): ptarget_sevp is never unlock_user'd --
                     * verify whether that leaks a lock_user mapping. */
                    host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
                    host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

                    phost_sevp = &host_sevp;
                }

                ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
                if (ret) {
                    phtimer = NULL;
                } else {
                    if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                        goto efault;
                    }
                    /* Tag the table index so stray guest values are easy to
                     * spot; the 0xffff mask below recovers the index. */
                    ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
                    unlock_user_struct(ptarget_timer, arg3, 1);
                }
            }
            break;
        }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
        {
            /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
             * struct itimerspec * old_value */
            /* Recover the table index from the tagged guest timer id. */
            arg1 &= 0xffff;
            if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
                ret = -TARGET_EINVAL;
            } else {
                timer_t htimer = g_posix_timers[arg1];
                struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

                target_to_host_itimerspec(&hspec_new, arg3);
                ret = get_errno(
                    timer_settime(htimer, arg2,
&hspec_new, &hspec_old)); 9295 host_to_target_itimerspec(arg2, &hspec_old); 9296 } 9297 break; 9298 } 9299 #endif 9300 9301 #ifdef TARGET_NR_timer_gettime 9302 case TARGET_NR_timer_gettime: 9303 { 9304 /* args: timer_t timerid, struct itimerspec *curr_value */ 9305 arg1 &= 0xffff; 9306 if (!arg2) { 9307 return -TARGET_EFAULT; 9308 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9309 ret = -TARGET_EINVAL; 9310 } else { 9311 timer_t htimer = g_posix_timers[arg1]; 9312 struct itimerspec hspec; 9313 ret = get_errno(timer_gettime(htimer, &hspec)); 9314 9315 if (host_to_target_itimerspec(arg2, &hspec)) { 9316 ret = -TARGET_EFAULT; 9317 } 9318 } 9319 break; 9320 } 9321 #endif 9322 9323 #ifdef TARGET_NR_timer_getoverrun 9324 case TARGET_NR_timer_getoverrun: 9325 { 9326 /* args: timer_t timerid */ 9327 arg1 &= 0xffff; 9328 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9329 ret = -TARGET_EINVAL; 9330 } else { 9331 timer_t htimer = g_posix_timers[arg1]; 9332 ret = get_errno(timer_getoverrun(htimer)); 9333 } 9334 break; 9335 } 9336 #endif 9337 9338 #ifdef TARGET_NR_timer_delete 9339 case TARGET_NR_timer_delete: 9340 { 9341 /* args: timer_t timerid */ 9342 arg1 &= 0xffff; 9343 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) { 9344 ret = -TARGET_EINVAL; 9345 } else { 9346 timer_t htimer = g_posix_timers[arg1]; 9347 ret = get_errno(timer_delete(htimer)); 9348 g_posix_timers[arg1] = 0; 9349 } 9350 break; 9351 } 9352 #endif 9353 9354 default: 9355 unimplemented: 9356 gemu_log("qemu: Unsupported syscall: %d\n", num); 9357 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 9358 unimplemented_nowarn: 9359 #endif 9360 ret = -TARGET_ENOSYS; 9361 break; 9362 } 9363 fail: 9364 #ifdef DEBUG 9365 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 9366 #endif 9367 if(do_strace) 9368 print_syscall_ret(num, ret); 9369 return ret; 9370 efault: 9371 ret = -TARGET_EFAULT; 9372 goto 
fail; 9373 } 9374