1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <stdarg.h> 23 #include <string.h> 24 #include <elf.h> 25 #include <endian.h> 26 #include <errno.h> 27 #include <unistd.h> 28 #include <fcntl.h> 29 #include <time.h> 30 #include <limits.h> 31 #include <grp.h> 32 #include <sys/types.h> 33 #include <sys/ipc.h> 34 #include <sys/msg.h> 35 #include <sys/wait.h> 36 #include <sys/time.h> 37 #include <sys/stat.h> 38 #include <sys/mount.h> 39 #include <sys/file.h> 40 #include <sys/fsuid.h> 41 #include <sys/personality.h> 42 #include <sys/prctl.h> 43 #include <sys/resource.h> 44 #include <sys/mman.h> 45 #include <sys/swap.h> 46 #include <linux/capability.h> 47 #include <signal.h> 48 #include <sched.h> 49 #ifdef __ia64__ 50 int __clone2(int (*fn)(void *), void *child_stack_base, 51 size_t stack_size, int flags, void *arg, ...); 52 #endif 53 #include <sys/socket.h> 54 #include <sys/un.h> 55 #include <sys/uio.h> 56 #include <sys/poll.h> 57 #include <sys/times.h> 58 #include <sys/shm.h> 59 #include <sys/sem.h> 60 #include <sys/statfs.h> 61 #include <utime.h> 62 #include <sys/sysinfo.h> 63 //#include <sys/user.h> 64 #include <netinet/ip.h> 65 #include <netinet/tcp.h> 66 #include <linux/wireless.h> 67 #include 
<linux/icmp.h> 68 #include "qemu-common.h" 69 #ifdef CONFIG_TIMERFD 70 #include <sys/timerfd.h> 71 #endif 72 #ifdef TARGET_GPROF 73 #include <sys/gmon.h> 74 #endif 75 #ifdef CONFIG_EVENTFD 76 #include <sys/eventfd.h> 77 #endif 78 #ifdef CONFIG_EPOLL 79 #include <sys/epoll.h> 80 #endif 81 #ifdef CONFIG_ATTR 82 #include "qemu/xattr.h" 83 #endif 84 #ifdef CONFIG_SENDFILE 85 #include <sys/sendfile.h> 86 #endif 87 88 #define termios host_termios 89 #define winsize host_winsize 90 #define termio host_termio 91 #define sgttyb host_sgttyb /* same as target */ 92 #define tchars host_tchars /* same as target */ 93 #define ltchars host_ltchars /* same as target */ 94 95 #include <linux/termios.h> 96 #include <linux/unistd.h> 97 #include <linux/cdrom.h> 98 #include <linux/hdreg.h> 99 #include <linux/soundcard.h> 100 #include <linux/kd.h> 101 #include <linux/mtio.h> 102 #include <linux/fs.h> 103 #if defined(CONFIG_FIEMAP) 104 #include <linux/fiemap.h> 105 #endif 106 #include <linux/fb.h> 107 #include <linux/vt.h> 108 #include <linux/dm-ioctl.h> 109 #include <linux/reboot.h> 110 #include <linux/route.h> 111 #include <linux/filter.h> 112 #include <linux/blkpg.h> 113 #include "linux_loop.h" 114 #include "uname.h" 115 116 #include "qemu.h" 117 118 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \ 119 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID) 120 121 //#define DEBUG 122 123 //#include <linux/msdos_fs.h> 124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 126 127 128 #undef _syscall0 129 #undef _syscall1 130 #undef _syscall2 131 #undef _syscall3 132 #undef _syscall4 133 #undef _syscall5 134 #undef _syscall6 135 136 #define _syscall0(type,name) \ 137 static type name (void) \ 138 { \ 139 return syscall(__NR_##name); \ 140 } 141 142 #define _syscall1(type,name,type1,arg1) \ 143 static type name (type1 arg1) \ 144 { \ 145 return syscall(__NR_##name, arg1); \ 146 } 147 148 
/*
 * Host syscall wrappers: each _syscallN(type, name, ...) macro expands to a
 * static function "name" that issues the raw host syscall via syscall(2),
 * bypassing any libc wrapper.  Return value/errno follow the host syscall(2)
 * convention.
 */
#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}


/* Map the sys_* names used below onto the host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#ifdef __NR_getdents
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif

/* Translation table between target and host open(2)/fcntl(2) flag bits;
 * each entry is (target mask, target bits, host mask, host bits), and the
 * list is terminated by an all-zero entry. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

/* getcwd(3) wrapper following the kernel getcwd convention: returns the
 * string length including the NUL terminator, or -1 with errno set. */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}

static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}

#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Use the glibc wrappers when available; a NULL pathname means "operate on
 * the fd itself", which glibc exposes as futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither glibc support nor the raw syscall: report ENOSYS to the guest. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the glibc inotify API so the syscall dispatcher can
 * call a uniform sys_* name. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.  A zero entry
 * means the slot is free. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock?
     */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* Claim the slot with a non-zero placeholder; the caller
             * overwrites it with the real timer handle. */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    /* -1: all slots in use. */
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};

/* Convert a host errno into the target errno; values with no explicit
 * table entry pass through unchanged. */
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

/* Inverse of host_to_target_errno(); the table is filled in syscall_init(). */
static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

/* Fold a host call result into the target convention: -1 becomes the
 * negated target errno, any other value is returned unchanged. */
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int
/* Non-zero when RET lies in the kernel error-return window (the last 4096
 * values), i.e. it encodes a negative errno. */
is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

/* Return the host strerror() text for a *target* errno, or NULL when the
 * value is outside the translation table's range. */
char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

/* Translate a host socket type (plus SOCK_CLOEXEC/SOCK_NONBLOCK modifier
 * bits, when the host defines them) into the target encoding. */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Guest heap state for the brk(2) emulation below. */
static abi_ulong target_brk;           /* current guest break */
static abi_ulong target_original_brk;  /* break at program start; floor for brk */
static abi_ulong brk_page;             /* first page above the reserved heap */

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

/* Read an fd_set covering N descriptors from guest memory, converting it
 * one abi_ulong word (TARGET_ABI_BITS bits) at a time. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset(), but a NULL guest pointer yields *fds_ptr ==
 * NULL so select() can be given an absent set. */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Write an fd_set of N descriptors back to guest memory, the inverse of
 * copy_from_user_fdset(). */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host HZ to the target HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Copy a host struct rusage into guest memory, byteswapping each field. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline
/* Convert a target rlimit value to the host rlim_t; anything that is the
 * target's infinity marker, or does not fit in rlim_t, maps to
 * RLIM_INFINITY. */
rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

/* Inverse conversion: host rlim_t to the target (byteswapped) encoding,
 * saturating to the target infinity when the value doesn't fit. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

/* Map a target RLIMIT_* resource code to the host code; unknown codes are
 * passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a struct timeval from guest memory into TV. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write TV out to a struct timeval in guest memory. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

/* Read a struct timezone from guest memory into TZ. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory into ATTR. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write ATTR out to a struct mq_attr in guest memory. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        /* Copy the (kernel-modified) sets and timeout back out. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

/* pipe2(2) when the host build supports it, host -ENOSYS otherwise. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long
do_pipe(void *cpu_env, abi_ulong pipedes,
        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    /* flags != 0 requires the pipe2() path; plain pipe() otherwise. */
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a register, first fd as the result. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic convention: store both fds into the guest int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Convert a guest ip_mreq/ip_mreqn (multicast membership request) into the
 * host struct.  The imr_ifindex field only exists in the larger ip_mreqn
 * form, hence the length check before swapping it. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* s_addr fields are kept in network byte order; no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Copy a guest sockaddr of 'len' bytes into the host buffer 'addr',
 * byte-swapping the family and (for AF_PACKET) the link-layer fields.
 * The caller must provide a host buffer of at least len + 1 bytes so the
 * AF_UNIX path below can extend the length by one. */
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte non-zero but the next one is the terminator:
             * include the terminator in the length. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        /* Fix up the multi-byte AF_PACKET fields in place after the copy. */
        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Copy a host sockaddr back into guest memory, swapping the family field
 * to guest byte order.  NOTE(review): other multi-byte fields (e.g.
 * AF_PACKET's sll_ifindex) are not swapped on this return path. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert the guest's control-message (SCM_*) buffer attached to a
 * target_msghdr into host form inside msgh.  Only SCM_RIGHTS payloads are
 * translated element-wise; other types are copied verbatim with a log
 * message. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
1188 if (msg_controllen < sizeof (struct target_cmsghdr)) 1189 goto the_end; 1190 target_cmsg_addr = tswapal(target_msgh->msg_control); 1191 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1192 if (!target_cmsg) 1193 return -TARGET_EFAULT; 1194 1195 while (cmsg && target_cmsg) { 1196 void *data = CMSG_DATA(cmsg); 1197 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1198 1199 int len = tswapal(target_cmsg->cmsg_len) 1200 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr)); 1201 1202 space += CMSG_SPACE(len); 1203 if (space > msgh->msg_controllen) { 1204 space -= CMSG_SPACE(len); 1205 gemu_log("Host cmsg overflow\n"); 1206 break; 1207 } 1208 1209 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1210 cmsg->cmsg_level = SOL_SOCKET; 1211 } else { 1212 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1213 } 1214 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1215 cmsg->cmsg_len = CMSG_LEN(len); 1216 1217 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) { 1218 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); 1219 memcpy(data, target_data, len); 1220 } else { 1221 int *fd = (int *)data; 1222 int *target_fd = (int *)target_data; 1223 int i, numfds = len / sizeof(int); 1224 1225 for (i = 0; i < numfds; i++) 1226 fd[i] = tswap32(target_fd[i]); 1227 } 1228 1229 cmsg = CMSG_NXTHDR(msgh, cmsg); 1230 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1231 } 1232 unlock_user(target_cmsg, target_cmsg_addr, 0); 1233 the_end: 1234 msgh->msg_controllen = space; 1235 return 0; 1236 } 1237 1238 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1239 struct msghdr *msgh) 1240 { 1241 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1242 abi_long msg_controllen; 1243 abi_ulong target_cmsg_addr; 1244 struct target_cmsghdr *target_cmsg; 1245 socklen_t space = 0; 1246 1247 msg_controllen = tswapal(target_msgh->msg_controllen); 1248 if (msg_controllen < sizeof 
(struct target_cmsghdr)) 1249 goto the_end; 1250 target_cmsg_addr = tswapal(target_msgh->msg_control); 1251 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1252 if (!target_cmsg) 1253 return -TARGET_EFAULT; 1254 1255 while (cmsg && target_cmsg) { 1256 void *data = CMSG_DATA(cmsg); 1257 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1258 1259 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr)); 1260 1261 space += TARGET_CMSG_SPACE(len); 1262 if (space > msg_controllen) { 1263 space -= TARGET_CMSG_SPACE(len); 1264 gemu_log("Target cmsg overflow\n"); 1265 break; 1266 } 1267 1268 if (cmsg->cmsg_level == SOL_SOCKET) { 1269 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1270 } else { 1271 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1272 } 1273 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1274 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len)); 1275 1276 switch (cmsg->cmsg_level) { 1277 case SOL_SOCKET: 1278 switch (cmsg->cmsg_type) { 1279 case SCM_RIGHTS: 1280 { 1281 int *fd = (int *)data; 1282 int *target_fd = (int *)target_data; 1283 int i, numfds = len / sizeof(int); 1284 1285 for (i = 0; i < numfds; i++) 1286 target_fd[i] = tswap32(fd[i]); 1287 break; 1288 } 1289 case SO_TIMESTAMP: 1290 { 1291 struct timeval *tv = (struct timeval *)data; 1292 struct target_timeval *target_tv = 1293 (struct target_timeval *)target_data; 1294 1295 if (len != sizeof(struct timeval)) 1296 goto unimplemented; 1297 1298 /* copy struct timeval to target */ 1299 target_tv->tv_sec = tswapal(tv->tv_sec); 1300 target_tv->tv_usec = tswapal(tv->tv_usec); 1301 break; 1302 } 1303 case SCM_CREDENTIALS: 1304 { 1305 struct ucred *cred = (struct ucred *)data; 1306 struct target_ucred *target_cred = 1307 (struct target_ucred *)target_data; 1308 1309 __put_user(cred->pid, &target_cred->pid); 1310 __put_user(cred->uid, &target_cred->uid); 1311 __put_user(cred->gid, &target_cred->gid); 1312 break; 1313 } 1314 default: 1315 goto 
unimplemented; 1316 } 1317 break; 1318 1319 default: 1320 unimplemented: 1321 gemu_log("Unsupported ancillary data: %d/%d\n", 1322 cmsg->cmsg_level, cmsg->cmsg_type); 1323 memcpy(target_data, data, len); 1324 } 1325 1326 cmsg = CMSG_NXTHDR(msgh, cmsg); 1327 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg); 1328 } 1329 unlock_user(target_cmsg, target_cmsg_addr, space); 1330 the_end: 1331 target_msgh->msg_controllen = tswapal(space); 1332 return 0; 1333 } 1334 1335 /* do_setsockopt() Must return target values and target errnos. */ 1336 static abi_long do_setsockopt(int sockfd, int level, int optname, 1337 abi_ulong optval_addr, socklen_t optlen) 1338 { 1339 abi_long ret; 1340 int val; 1341 struct ip_mreqn *ip_mreq; 1342 struct ip_mreq_source *ip_mreq_source; 1343 1344 switch(level) { 1345 case SOL_TCP: 1346 /* TCP options all take an 'int' value. */ 1347 if (optlen < sizeof(uint32_t)) 1348 return -TARGET_EINVAL; 1349 1350 if (get_user_u32(val, optval_addr)) 1351 return -TARGET_EFAULT; 1352 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1353 break; 1354 case SOL_IP: 1355 switch(optname) { 1356 case IP_TOS: 1357 case IP_TTL: 1358 case IP_HDRINCL: 1359 case IP_ROUTER_ALERT: 1360 case IP_RECVOPTS: 1361 case IP_RETOPTS: 1362 case IP_PKTINFO: 1363 case IP_MTU_DISCOVER: 1364 case IP_RECVERR: 1365 case IP_RECVTOS: 1366 #ifdef IP_FREEBIND 1367 case IP_FREEBIND: 1368 #endif 1369 case IP_MULTICAST_TTL: 1370 case IP_MULTICAST_LOOP: 1371 val = 0; 1372 if (optlen >= sizeof(uint32_t)) { 1373 if (get_user_u32(val, optval_addr)) 1374 return -TARGET_EFAULT; 1375 } else if (optlen >= 1) { 1376 if (get_user_u8(val, optval_addr)) 1377 return -TARGET_EFAULT; 1378 } 1379 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 1380 break; 1381 case IP_ADD_MEMBERSHIP: 1382 case IP_DROP_MEMBERSHIP: 1383 if (optlen < sizeof (struct target_ip_mreq) || 1384 optlen > sizeof (struct target_ip_mreqn)) 1385 return -TARGET_EINVAL; 1386 1387 ip_mreq = 
(struct ip_mreqn *) alloca(optlen); 1388 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 1389 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 1390 break; 1391 1392 case IP_BLOCK_SOURCE: 1393 case IP_UNBLOCK_SOURCE: 1394 case IP_ADD_SOURCE_MEMBERSHIP: 1395 case IP_DROP_SOURCE_MEMBERSHIP: 1396 if (optlen != sizeof (struct target_ip_mreq_source)) 1397 return -TARGET_EINVAL; 1398 1399 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1400 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 1401 unlock_user (ip_mreq_source, optval_addr, 0); 1402 break; 1403 1404 default: 1405 goto unimplemented; 1406 } 1407 break; 1408 case SOL_IPV6: 1409 switch (optname) { 1410 case IPV6_MTU_DISCOVER: 1411 case IPV6_MTU: 1412 case IPV6_V6ONLY: 1413 case IPV6_RECVPKTINFO: 1414 val = 0; 1415 if (optlen < sizeof(uint32_t)) { 1416 return -TARGET_EINVAL; 1417 } 1418 if (get_user_u32(val, optval_addr)) { 1419 return -TARGET_EFAULT; 1420 } 1421 ret = get_errno(setsockopt(sockfd, level, optname, 1422 &val, sizeof(val))); 1423 break; 1424 default: 1425 goto unimplemented; 1426 } 1427 break; 1428 case SOL_RAW: 1429 switch (optname) { 1430 case ICMP_FILTER: 1431 /* struct icmp_filter takes an u32 value */ 1432 if (optlen < sizeof(uint32_t)) { 1433 return -TARGET_EINVAL; 1434 } 1435 1436 if (get_user_u32(val, optval_addr)) { 1437 return -TARGET_EFAULT; 1438 } 1439 ret = get_errno(setsockopt(sockfd, level, optname, 1440 &val, sizeof(val))); 1441 break; 1442 1443 default: 1444 goto unimplemented; 1445 } 1446 break; 1447 case TARGET_SOL_SOCKET: 1448 switch (optname) { 1449 case TARGET_SO_RCVTIMEO: 1450 { 1451 struct timeval tv; 1452 1453 optname = SO_RCVTIMEO; 1454 1455 set_timeout: 1456 if (optlen != sizeof(struct target_timeval)) { 1457 return -TARGET_EINVAL; 1458 } 1459 1460 if (copy_from_user_timeval(&tv, optval_addr)) { 1461 return -TARGET_EFAULT; 1462 } 1463 1464 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 1465 &tv, 
sizeof(tv))); 1466 return ret; 1467 } 1468 case TARGET_SO_SNDTIMEO: 1469 optname = SO_SNDTIMEO; 1470 goto set_timeout; 1471 case TARGET_SO_ATTACH_FILTER: 1472 { 1473 struct target_sock_fprog *tfprog; 1474 struct target_sock_filter *tfilter; 1475 struct sock_fprog fprog; 1476 struct sock_filter *filter; 1477 int i; 1478 1479 if (optlen != sizeof(*tfprog)) { 1480 return -TARGET_EINVAL; 1481 } 1482 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 1483 return -TARGET_EFAULT; 1484 } 1485 if (!lock_user_struct(VERIFY_READ, tfilter, 1486 tswapal(tfprog->filter), 0)) { 1487 unlock_user_struct(tfprog, optval_addr, 1); 1488 return -TARGET_EFAULT; 1489 } 1490 1491 fprog.len = tswap16(tfprog->len); 1492 filter = malloc(fprog.len * sizeof(*filter)); 1493 if (filter == NULL) { 1494 unlock_user_struct(tfilter, tfprog->filter, 1); 1495 unlock_user_struct(tfprog, optval_addr, 1); 1496 return -TARGET_ENOMEM; 1497 } 1498 for (i = 0; i < fprog.len; i++) { 1499 filter[i].code = tswap16(tfilter[i].code); 1500 filter[i].jt = tfilter[i].jt; 1501 filter[i].jf = tfilter[i].jf; 1502 filter[i].k = tswap32(tfilter[i].k); 1503 } 1504 fprog.filter = filter; 1505 1506 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 1507 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 1508 free(filter); 1509 1510 unlock_user_struct(tfilter, tfprog->filter, 1); 1511 unlock_user_struct(tfprog, optval_addr, 1); 1512 return ret; 1513 } 1514 case TARGET_SO_BINDTODEVICE: 1515 { 1516 char *dev_ifname, *addr_ifname; 1517 1518 if (optlen > IFNAMSIZ - 1) { 1519 optlen = IFNAMSIZ - 1; 1520 } 1521 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 1522 if (!dev_ifname) { 1523 return -TARGET_EFAULT; 1524 } 1525 optname = SO_BINDTODEVICE; 1526 addr_ifname = alloca(IFNAMSIZ); 1527 memcpy(addr_ifname, dev_ifname, optlen); 1528 addr_ifname[optlen] = 0; 1529 ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen)); 1530 unlock_user (dev_ifname, optval_addr, 0); 1531 return ret; 1532 } 1533 /* 
Options with 'int' argument. */ 1534 case TARGET_SO_DEBUG: 1535 optname = SO_DEBUG; 1536 break; 1537 case TARGET_SO_REUSEADDR: 1538 optname = SO_REUSEADDR; 1539 break; 1540 case TARGET_SO_TYPE: 1541 optname = SO_TYPE; 1542 break; 1543 case TARGET_SO_ERROR: 1544 optname = SO_ERROR; 1545 break; 1546 case TARGET_SO_DONTROUTE: 1547 optname = SO_DONTROUTE; 1548 break; 1549 case TARGET_SO_BROADCAST: 1550 optname = SO_BROADCAST; 1551 break; 1552 case TARGET_SO_SNDBUF: 1553 optname = SO_SNDBUF; 1554 break; 1555 case TARGET_SO_SNDBUFFORCE: 1556 optname = SO_SNDBUFFORCE; 1557 break; 1558 case TARGET_SO_RCVBUF: 1559 optname = SO_RCVBUF; 1560 break; 1561 case TARGET_SO_RCVBUFFORCE: 1562 optname = SO_RCVBUFFORCE; 1563 break; 1564 case TARGET_SO_KEEPALIVE: 1565 optname = SO_KEEPALIVE; 1566 break; 1567 case TARGET_SO_OOBINLINE: 1568 optname = SO_OOBINLINE; 1569 break; 1570 case TARGET_SO_NO_CHECK: 1571 optname = SO_NO_CHECK; 1572 break; 1573 case TARGET_SO_PRIORITY: 1574 optname = SO_PRIORITY; 1575 break; 1576 #ifdef SO_BSDCOMPAT 1577 case TARGET_SO_BSDCOMPAT: 1578 optname = SO_BSDCOMPAT; 1579 break; 1580 #endif 1581 case TARGET_SO_PASSCRED: 1582 optname = SO_PASSCRED; 1583 break; 1584 case TARGET_SO_PASSSEC: 1585 optname = SO_PASSSEC; 1586 break; 1587 case TARGET_SO_TIMESTAMP: 1588 optname = SO_TIMESTAMP; 1589 break; 1590 case TARGET_SO_RCVLOWAT: 1591 optname = SO_RCVLOWAT; 1592 break; 1593 break; 1594 default: 1595 goto unimplemented; 1596 } 1597 if (optlen < sizeof(uint32_t)) 1598 return -TARGET_EINVAL; 1599 1600 if (get_user_u32(val, optval_addr)) 1601 return -TARGET_EFAULT; 1602 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 1603 break; 1604 default: 1605 unimplemented: 1606 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname); 1607 ret = -TARGET_ENOPROTOOPT; 1608 } 1609 return ret; 1610 } 1611 1612 /* do_getsockopt() Must return target values and target errnos. 
*/ 1613 static abi_long do_getsockopt(int sockfd, int level, int optname, 1614 abi_ulong optval_addr, abi_ulong optlen) 1615 { 1616 abi_long ret; 1617 int len, val; 1618 socklen_t lv; 1619 1620 switch(level) { 1621 case TARGET_SOL_SOCKET: 1622 level = SOL_SOCKET; 1623 switch (optname) { 1624 /* These don't just return a single integer */ 1625 case TARGET_SO_LINGER: 1626 case TARGET_SO_RCVTIMEO: 1627 case TARGET_SO_SNDTIMEO: 1628 case TARGET_SO_PEERNAME: 1629 goto unimplemented; 1630 case TARGET_SO_PEERCRED: { 1631 struct ucred cr; 1632 socklen_t crlen; 1633 struct target_ucred *tcr; 1634 1635 if (get_user_u32(len, optlen)) { 1636 return -TARGET_EFAULT; 1637 } 1638 if (len < 0) { 1639 return -TARGET_EINVAL; 1640 } 1641 1642 crlen = sizeof(cr); 1643 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 1644 &cr, &crlen)); 1645 if (ret < 0) { 1646 return ret; 1647 } 1648 if (len > crlen) { 1649 len = crlen; 1650 } 1651 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 1652 return -TARGET_EFAULT; 1653 } 1654 __put_user(cr.pid, &tcr->pid); 1655 __put_user(cr.uid, &tcr->uid); 1656 __put_user(cr.gid, &tcr->gid); 1657 unlock_user_struct(tcr, optval_addr, 1); 1658 if (put_user_u32(len, optlen)) { 1659 return -TARGET_EFAULT; 1660 } 1661 break; 1662 } 1663 /* Options with 'int' argument. 
*/ 1664 case TARGET_SO_DEBUG: 1665 optname = SO_DEBUG; 1666 goto int_case; 1667 case TARGET_SO_REUSEADDR: 1668 optname = SO_REUSEADDR; 1669 goto int_case; 1670 case TARGET_SO_TYPE: 1671 optname = SO_TYPE; 1672 goto int_case; 1673 case TARGET_SO_ERROR: 1674 optname = SO_ERROR; 1675 goto int_case; 1676 case TARGET_SO_DONTROUTE: 1677 optname = SO_DONTROUTE; 1678 goto int_case; 1679 case TARGET_SO_BROADCAST: 1680 optname = SO_BROADCAST; 1681 goto int_case; 1682 case TARGET_SO_SNDBUF: 1683 optname = SO_SNDBUF; 1684 goto int_case; 1685 case TARGET_SO_RCVBUF: 1686 optname = SO_RCVBUF; 1687 goto int_case; 1688 case TARGET_SO_KEEPALIVE: 1689 optname = SO_KEEPALIVE; 1690 goto int_case; 1691 case TARGET_SO_OOBINLINE: 1692 optname = SO_OOBINLINE; 1693 goto int_case; 1694 case TARGET_SO_NO_CHECK: 1695 optname = SO_NO_CHECK; 1696 goto int_case; 1697 case TARGET_SO_PRIORITY: 1698 optname = SO_PRIORITY; 1699 goto int_case; 1700 #ifdef SO_BSDCOMPAT 1701 case TARGET_SO_BSDCOMPAT: 1702 optname = SO_BSDCOMPAT; 1703 goto int_case; 1704 #endif 1705 case TARGET_SO_PASSCRED: 1706 optname = SO_PASSCRED; 1707 goto int_case; 1708 case TARGET_SO_TIMESTAMP: 1709 optname = SO_TIMESTAMP; 1710 goto int_case; 1711 case TARGET_SO_RCVLOWAT: 1712 optname = SO_RCVLOWAT; 1713 goto int_case; 1714 case TARGET_SO_ACCEPTCONN: 1715 optname = SO_ACCEPTCONN; 1716 goto int_case; 1717 default: 1718 goto int_case; 1719 } 1720 break; 1721 case SOL_TCP: 1722 /* TCP options all take an 'int' value. 
*/ 1723 int_case: 1724 if (get_user_u32(len, optlen)) 1725 return -TARGET_EFAULT; 1726 if (len < 0) 1727 return -TARGET_EINVAL; 1728 lv = sizeof(lv); 1729 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1730 if (ret < 0) 1731 return ret; 1732 if (optname == SO_TYPE) { 1733 val = host_to_target_sock_type(val); 1734 } 1735 if (len > lv) 1736 len = lv; 1737 if (len == 4) { 1738 if (put_user_u32(val, optval_addr)) 1739 return -TARGET_EFAULT; 1740 } else { 1741 if (put_user_u8(val, optval_addr)) 1742 return -TARGET_EFAULT; 1743 } 1744 if (put_user_u32(len, optlen)) 1745 return -TARGET_EFAULT; 1746 break; 1747 case SOL_IP: 1748 switch(optname) { 1749 case IP_TOS: 1750 case IP_TTL: 1751 case IP_HDRINCL: 1752 case IP_ROUTER_ALERT: 1753 case IP_RECVOPTS: 1754 case IP_RETOPTS: 1755 case IP_PKTINFO: 1756 case IP_MTU_DISCOVER: 1757 case IP_RECVERR: 1758 case IP_RECVTOS: 1759 #ifdef IP_FREEBIND 1760 case IP_FREEBIND: 1761 #endif 1762 case IP_MULTICAST_TTL: 1763 case IP_MULTICAST_LOOP: 1764 if (get_user_u32(len, optlen)) 1765 return -TARGET_EFAULT; 1766 if (len < 0) 1767 return -TARGET_EINVAL; 1768 lv = sizeof(lv); 1769 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 1770 if (ret < 0) 1771 return ret; 1772 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 1773 len = 1; 1774 if (put_user_u32(len, optlen) 1775 || put_user_u8(val, optval_addr)) 1776 return -TARGET_EFAULT; 1777 } else { 1778 if (len > sizeof(int)) 1779 len = sizeof(int); 1780 if (put_user_u32(len, optlen) 1781 || put_user_u32(val, optval_addr)) 1782 return -TARGET_EFAULT; 1783 } 1784 break; 1785 default: 1786 ret = -TARGET_ENOPROTOOPT; 1787 break; 1788 } 1789 break; 1790 default: 1791 unimplemented: 1792 gemu_log("getsockopt level=%d optname=%d not yet supported\n", 1793 level, optname); 1794 ret = -TARGET_EOPNOTSUPP; 1795 break; 1796 } 1797 return ret; 1798 } 1799 1800 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 1801 int count, int copy) 1802 { 1803 
struct target_iovec *target_vec; 1804 struct iovec *vec; 1805 abi_ulong total_len, max_len; 1806 int i; 1807 int err = 0; 1808 bool bad_address = false; 1809 1810 if (count == 0) { 1811 errno = 0; 1812 return NULL; 1813 } 1814 if (count < 0 || count > IOV_MAX) { 1815 errno = EINVAL; 1816 return NULL; 1817 } 1818 1819 vec = calloc(count, sizeof(struct iovec)); 1820 if (vec == NULL) { 1821 errno = ENOMEM; 1822 return NULL; 1823 } 1824 1825 target_vec = lock_user(VERIFY_READ, target_addr, 1826 count * sizeof(struct target_iovec), 1); 1827 if (target_vec == NULL) { 1828 err = EFAULT; 1829 goto fail2; 1830 } 1831 1832 /* ??? If host page size > target page size, this will result in a 1833 value larger than what we can actually support. */ 1834 max_len = 0x7fffffff & TARGET_PAGE_MASK; 1835 total_len = 0; 1836 1837 for (i = 0; i < count; i++) { 1838 abi_ulong base = tswapal(target_vec[i].iov_base); 1839 abi_long len = tswapal(target_vec[i].iov_len); 1840 1841 if (len < 0) { 1842 err = EINVAL; 1843 goto fail; 1844 } else if (len == 0) { 1845 /* Zero length pointer is ignored. */ 1846 vec[i].iov_base = 0; 1847 } else { 1848 vec[i].iov_base = lock_user(type, base, len, copy); 1849 /* If the first buffer pointer is bad, this is a fault. But 1850 * subsequent bad buffers will result in a partial write; this 1851 * is realized by filling the vector with null pointers and 1852 * zero lengths. 
*/ 1853 if (!vec[i].iov_base) { 1854 if (i == 0) { 1855 err = EFAULT; 1856 goto fail; 1857 } else { 1858 bad_address = true; 1859 } 1860 } 1861 if (bad_address) { 1862 len = 0; 1863 } 1864 if (len > max_len - total_len) { 1865 len = max_len - total_len; 1866 } 1867 } 1868 vec[i].iov_len = len; 1869 total_len += len; 1870 } 1871 1872 unlock_user(target_vec, target_addr, 0); 1873 return vec; 1874 1875 fail: 1876 unlock_user(target_vec, target_addr, 0); 1877 fail2: 1878 free(vec); 1879 errno = err; 1880 return NULL; 1881 } 1882 1883 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 1884 int count, int copy) 1885 { 1886 struct target_iovec *target_vec; 1887 int i; 1888 1889 target_vec = lock_user(VERIFY_READ, target_addr, 1890 count * sizeof(struct target_iovec), 1); 1891 if (target_vec) { 1892 for (i = 0; i < count; i++) { 1893 abi_ulong base = tswapal(target_vec[i].iov_base); 1894 abi_long len = tswapal(target_vec[i].iov_base); 1895 if (len < 0) { 1896 break; 1897 } 1898 unlock_user(vec[i].iov_base, base, copy ? 
vec[i].iov_len : 0); 1899 } 1900 unlock_user(target_vec, target_addr, 0); 1901 } 1902 1903 free(vec); 1904 } 1905 1906 static inline int target_to_host_sock_type(int *type) 1907 { 1908 int host_type = 0; 1909 int target_type = *type; 1910 1911 switch (target_type & TARGET_SOCK_TYPE_MASK) { 1912 case TARGET_SOCK_DGRAM: 1913 host_type = SOCK_DGRAM; 1914 break; 1915 case TARGET_SOCK_STREAM: 1916 host_type = SOCK_STREAM; 1917 break; 1918 default: 1919 host_type = target_type & TARGET_SOCK_TYPE_MASK; 1920 break; 1921 } 1922 if (target_type & TARGET_SOCK_CLOEXEC) { 1923 #if defined(SOCK_CLOEXEC) 1924 host_type |= SOCK_CLOEXEC; 1925 #else 1926 return -TARGET_EINVAL; 1927 #endif 1928 } 1929 if (target_type & TARGET_SOCK_NONBLOCK) { 1930 #if defined(SOCK_NONBLOCK) 1931 host_type |= SOCK_NONBLOCK; 1932 #elif !defined(O_NONBLOCK) 1933 return -TARGET_EINVAL; 1934 #endif 1935 } 1936 *type = host_type; 1937 return 0; 1938 } 1939 1940 /* Try to emulate socket type flags after socket creation. */ 1941 static int sock_flags_fixup(int fd, int target_type) 1942 { 1943 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 1944 if (target_type & TARGET_SOCK_NONBLOCK) { 1945 int flags = fcntl(fd, F_GETFL); 1946 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 1947 close(fd); 1948 return -TARGET_EINVAL; 1949 } 1950 } 1951 #endif 1952 return fd; 1953 } 1954 1955 /* do_socket() Must return target values and target errnos. */ 1956 static abi_long do_socket(int domain, int type, int protocol) 1957 { 1958 int target_type = type; 1959 int ret; 1960 1961 ret = target_to_host_sock_type(&type); 1962 if (ret) { 1963 return ret; 1964 } 1965 1966 if (domain == PF_NETLINK) 1967 return -TARGET_EAFNOSUPPORT; 1968 ret = get_errno(socket(domain, type, protocol)); 1969 if (ret >= 0) { 1970 ret = sock_flags_fixup(ret, target_type); 1971 } 1972 return ret; 1973 } 1974 1975 /* do_bind() Must return target values and target errnos. 
*/ 1976 static abi_long do_bind(int sockfd, abi_ulong target_addr, 1977 socklen_t addrlen) 1978 { 1979 void *addr; 1980 abi_long ret; 1981 1982 if ((int)addrlen < 0) { 1983 return -TARGET_EINVAL; 1984 } 1985 1986 addr = alloca(addrlen+1); 1987 1988 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 1989 if (ret) 1990 return ret; 1991 1992 return get_errno(bind(sockfd, addr, addrlen)); 1993 } 1994 1995 /* do_connect() Must return target values and target errnos. */ 1996 static abi_long do_connect(int sockfd, abi_ulong target_addr, 1997 socklen_t addrlen) 1998 { 1999 void *addr; 2000 abi_long ret; 2001 2002 if ((int)addrlen < 0) { 2003 return -TARGET_EINVAL; 2004 } 2005 2006 addr = alloca(addrlen+1); 2007 2008 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2009 if (ret) 2010 return ret; 2011 2012 return get_errno(connect(sockfd, addr, addrlen)); 2013 } 2014 2015 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 2016 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 2017 int flags, int send) 2018 { 2019 abi_long ret, len; 2020 struct msghdr msg; 2021 int count; 2022 struct iovec *vec; 2023 abi_ulong target_vec; 2024 2025 if (msgp->msg_name) { 2026 msg.msg_namelen = tswap32(msgp->msg_namelen); 2027 msg.msg_name = alloca(msg.msg_namelen+1); 2028 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name), 2029 msg.msg_namelen); 2030 if (ret) { 2031 goto out2; 2032 } 2033 } else { 2034 msg.msg_name = NULL; 2035 msg.msg_namelen = 0; 2036 } 2037 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 2038 msg.msg_control = alloca(msg.msg_controllen); 2039 msg.msg_flags = tswap32(msgp->msg_flags); 2040 2041 count = tswapal(msgp->msg_iovlen); 2042 target_vec = tswapal(msgp->msg_iov); 2043 vec = lock_iovec(send ? 
VERIFY_READ : VERIFY_WRITE, 2044 target_vec, count, send); 2045 if (vec == NULL) { 2046 ret = -host_to_target_errno(errno); 2047 goto out2; 2048 } 2049 msg.msg_iovlen = count; 2050 msg.msg_iov = vec; 2051 2052 if (send) { 2053 ret = target_to_host_cmsg(&msg, msgp); 2054 if (ret == 0) 2055 ret = get_errno(sendmsg(fd, &msg, flags)); 2056 } else { 2057 ret = get_errno(recvmsg(fd, &msg, flags)); 2058 if (!is_error(ret)) { 2059 len = ret; 2060 ret = host_to_target_cmsg(msgp, &msg); 2061 if (!is_error(ret)) { 2062 msgp->msg_namelen = tswap32(msg.msg_namelen); 2063 if (msg.msg_name != NULL) { 2064 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 2065 msg.msg_name, msg.msg_namelen); 2066 if (ret) { 2067 goto out; 2068 } 2069 } 2070 2071 ret = len; 2072 } 2073 } 2074 } 2075 2076 out: 2077 unlock_iovec(vec, target_vec, count, !send); 2078 out2: 2079 return ret; 2080 } 2081 2082 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 2083 int flags, int send) 2084 { 2085 abi_long ret; 2086 struct target_msghdr *msgp; 2087 2088 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 2089 msgp, 2090 target_msg, 2091 send ? 1 : 0)) { 2092 return -TARGET_EFAULT; 2093 } 2094 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 2095 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 2096 return ret; 2097 } 2098 2099 #ifdef TARGET_NR_sendmmsg 2100 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 2101 * so it might not have this *mmsg-specific flag either. 
2102 */ 2103 #ifndef MSG_WAITFORONE 2104 #define MSG_WAITFORONE 0x10000 2105 #endif 2106 2107 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 2108 unsigned int vlen, unsigned int flags, 2109 int send) 2110 { 2111 struct target_mmsghdr *mmsgp; 2112 abi_long ret = 0; 2113 int i; 2114 2115 if (vlen > UIO_MAXIOV) { 2116 vlen = UIO_MAXIOV; 2117 } 2118 2119 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 2120 if (!mmsgp) { 2121 return -TARGET_EFAULT; 2122 } 2123 2124 for (i = 0; i < vlen; i++) { 2125 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 2126 if (is_error(ret)) { 2127 break; 2128 } 2129 mmsgp[i].msg_len = tswap32(ret); 2130 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 2131 if (flags & MSG_WAITFORONE) { 2132 flags |= MSG_DONTWAIT; 2133 } 2134 } 2135 2136 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 2137 2138 /* Return number of datagrams sent if we sent any at all; 2139 * otherwise return the error. 2140 */ 2141 if (i) { 2142 return i; 2143 } 2144 return ret; 2145 } 2146 #endif 2147 2148 /* If we don't have a system accept4() then just call accept. 2149 * The callsites to do_accept4() will ensure that they don't 2150 * pass a non-zero flags argument in this config. 2151 */ 2152 #ifndef CONFIG_ACCEPT4 2153 static inline int accept4(int sockfd, struct sockaddr *addr, 2154 socklen_t *addrlen, int flags) 2155 { 2156 assert(flags == 0); 2157 return accept(sockfd, addr, addrlen); 2158 } 2159 #endif 2160 2161 /* do_accept4() Must return target values and target errnos. 
*/ 2162 static abi_long do_accept4(int fd, abi_ulong target_addr, 2163 abi_ulong target_addrlen_addr, int flags) 2164 { 2165 socklen_t addrlen; 2166 void *addr; 2167 abi_long ret; 2168 int host_flags; 2169 2170 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 2171 2172 if (target_addr == 0) { 2173 return get_errno(accept4(fd, NULL, NULL, host_flags)); 2174 } 2175 2176 /* linux returns EINVAL if addrlen pointer is invalid */ 2177 if (get_user_u32(addrlen, target_addrlen_addr)) 2178 return -TARGET_EINVAL; 2179 2180 if ((int)addrlen < 0) { 2181 return -TARGET_EINVAL; 2182 } 2183 2184 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2185 return -TARGET_EINVAL; 2186 2187 addr = alloca(addrlen); 2188 2189 ret = get_errno(accept4(fd, addr, &addrlen, host_flags)); 2190 if (!is_error(ret)) { 2191 host_to_target_sockaddr(target_addr, addr, addrlen); 2192 if (put_user_u32(addrlen, target_addrlen_addr)) 2193 ret = -TARGET_EFAULT; 2194 } 2195 return ret; 2196 } 2197 2198 /* do_getpeername() Must return target values and target errnos. */ 2199 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2200 abi_ulong target_addrlen_addr) 2201 { 2202 socklen_t addrlen; 2203 void *addr; 2204 abi_long ret; 2205 2206 if (get_user_u32(addrlen, target_addrlen_addr)) 2207 return -TARGET_EFAULT; 2208 2209 if ((int)addrlen < 0) { 2210 return -TARGET_EINVAL; 2211 } 2212 2213 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2214 return -TARGET_EFAULT; 2215 2216 addr = alloca(addrlen); 2217 2218 ret = get_errno(getpeername(fd, addr, &addrlen)); 2219 if (!is_error(ret)) { 2220 host_to_target_sockaddr(target_addr, addr, addrlen); 2221 if (put_user_u32(addrlen, target_addrlen_addr)) 2222 ret = -TARGET_EFAULT; 2223 } 2224 return ret; 2225 } 2226 2227 /* do_getsockname() Must return target values and target errnos. 
*/ 2228 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2229 abi_ulong target_addrlen_addr) 2230 { 2231 socklen_t addrlen; 2232 void *addr; 2233 abi_long ret; 2234 2235 if (get_user_u32(addrlen, target_addrlen_addr)) 2236 return -TARGET_EFAULT; 2237 2238 if ((int)addrlen < 0) { 2239 return -TARGET_EINVAL; 2240 } 2241 2242 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2243 return -TARGET_EFAULT; 2244 2245 addr = alloca(addrlen); 2246 2247 ret = get_errno(getsockname(fd, addr, &addrlen)); 2248 if (!is_error(ret)) { 2249 host_to_target_sockaddr(target_addr, addr, addrlen); 2250 if (put_user_u32(addrlen, target_addrlen_addr)) 2251 ret = -TARGET_EFAULT; 2252 } 2253 return ret; 2254 } 2255 2256 /* do_socketpair() Must return target values and target errnos. */ 2257 static abi_long do_socketpair(int domain, int type, int protocol, 2258 abi_ulong target_tab_addr) 2259 { 2260 int tab[2]; 2261 abi_long ret; 2262 2263 target_to_host_sock_type(&type); 2264 2265 ret = get_errno(socketpair(domain, type, protocol, tab)); 2266 if (!is_error(ret)) { 2267 if (put_user_s32(tab[0], target_tab_addr) 2268 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2269 ret = -TARGET_EFAULT; 2270 } 2271 return ret; 2272 } 2273 2274 /* do_sendto() Must return target values and target errnos. 
*/ 2275 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2276 abi_ulong target_addr, socklen_t addrlen) 2277 { 2278 void *addr; 2279 void *host_msg; 2280 abi_long ret; 2281 2282 if ((int)addrlen < 0) { 2283 return -TARGET_EINVAL; 2284 } 2285 2286 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2287 if (!host_msg) 2288 return -TARGET_EFAULT; 2289 if (target_addr) { 2290 addr = alloca(addrlen+1); 2291 ret = target_to_host_sockaddr(addr, target_addr, addrlen); 2292 if (ret) { 2293 unlock_user(host_msg, msg, 0); 2294 return ret; 2295 } 2296 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2297 } else { 2298 ret = get_errno(send(fd, host_msg, len, flags)); 2299 } 2300 unlock_user(host_msg, msg, 0); 2301 return ret; 2302 } 2303 2304 /* do_recvfrom() Must return target values and target errnos. */ 2305 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2306 abi_ulong target_addr, 2307 abi_ulong target_addrlen) 2308 { 2309 socklen_t addrlen; 2310 void *addr; 2311 void *host_msg; 2312 abi_long ret; 2313 2314 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2315 if (!host_msg) 2316 return -TARGET_EFAULT; 2317 if (target_addr) { 2318 if (get_user_u32(addrlen, target_addrlen)) { 2319 ret = -TARGET_EFAULT; 2320 goto fail; 2321 } 2322 if ((int)addrlen < 0) { 2323 ret = -TARGET_EINVAL; 2324 goto fail; 2325 } 2326 addr = alloca(addrlen); 2327 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2328 } else { 2329 addr = NULL; /* To keep compiler quiet. 
*/ 2330 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2331 } 2332 if (!is_error(ret)) { 2333 if (target_addr) { 2334 host_to_target_sockaddr(target_addr, addr, addrlen); 2335 if (put_user_u32(addrlen, target_addrlen)) { 2336 ret = -TARGET_EFAULT; 2337 goto fail; 2338 } 2339 } 2340 unlock_user(host_msg, msg, len); 2341 } else { 2342 fail: 2343 unlock_user(host_msg, msg, 0); 2344 } 2345 return ret; 2346 } 2347 2348 #ifdef TARGET_NR_socketcall 2349 /* do_socketcall() Must return target values and target errnos. */ 2350 static abi_long do_socketcall(int num, abi_ulong vptr) 2351 { 2352 static const unsigned ac[] = { /* number of arguments per call */ 2353 [SOCKOP_socket] = 3, /* domain, type, protocol */ 2354 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */ 2355 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */ 2356 [SOCKOP_listen] = 2, /* sockfd, backlog */ 2357 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */ 2358 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */ 2359 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */ 2360 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */ 2361 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */ 2362 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */ 2363 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */ 2364 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2365 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2366 [SOCKOP_shutdown] = 2, /* sockfd, how */ 2367 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */ 2368 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */ 2369 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2370 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2371 }; 2372 abi_long a[6]; /* max 6 args */ 2373 2374 /* first, collect the arguments in a[] according to ac[] */ 2375 if (num >= 0 && num < ARRAY_SIZE(ac)) { 2376 unsigned i; 2377 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */ 2378 for (i = 0; i < 
ac[num]; ++i) { 2379 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 2380 return -TARGET_EFAULT; 2381 } 2382 } 2383 } 2384 2385 /* now when we have the args, actually handle the call */ 2386 switch (num) { 2387 case SOCKOP_socket: /* domain, type, protocol */ 2388 return do_socket(a[0], a[1], a[2]); 2389 case SOCKOP_bind: /* sockfd, addr, addrlen */ 2390 return do_bind(a[0], a[1], a[2]); 2391 case SOCKOP_connect: /* sockfd, addr, addrlen */ 2392 return do_connect(a[0], a[1], a[2]); 2393 case SOCKOP_listen: /* sockfd, backlog */ 2394 return get_errno(listen(a[0], a[1])); 2395 case SOCKOP_accept: /* sockfd, addr, addrlen */ 2396 return do_accept4(a[0], a[1], a[2], 0); 2397 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */ 2398 return do_accept4(a[0], a[1], a[2], a[3]); 2399 case SOCKOP_getsockname: /* sockfd, addr, addrlen */ 2400 return do_getsockname(a[0], a[1], a[2]); 2401 case SOCKOP_getpeername: /* sockfd, addr, addrlen */ 2402 return do_getpeername(a[0], a[1], a[2]); 2403 case SOCKOP_socketpair: /* domain, type, protocol, tab */ 2404 return do_socketpair(a[0], a[1], a[2], a[3]); 2405 case SOCKOP_send: /* sockfd, msg, len, flags */ 2406 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 2407 case SOCKOP_recv: /* sockfd, msg, len, flags */ 2408 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 2409 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */ 2410 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 2411 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */ 2412 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 2413 case SOCKOP_shutdown: /* sockfd, how */ 2414 return get_errno(shutdown(a[0], a[1])); 2415 case SOCKOP_sendmsg: /* sockfd, msg, flags */ 2416 return do_sendrecvmsg(a[0], a[1], a[2], 1); 2417 case SOCKOP_recvmsg: /* sockfd, msg, flags */ 2418 return do_sendrecvmsg(a[0], a[1], a[2], 0); 2419 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */ 2420 return do_setsockopt(a[0], a[1], 
a[2], a[3], a[4]); 2421 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */ 2422 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 2423 default: 2424 gemu_log("Unsupported socketcall: %d\n", num); 2425 return -TARGET_ENOSYS; 2426 } 2427 } 2428 #endif 2429 2430 #define N_SHM_REGIONS 32 2431 2432 static struct shm_region { 2433 abi_ulong start; 2434 abi_ulong size; 2435 } shm_regions[N_SHM_REGIONS]; 2436 2437 struct target_semid_ds 2438 { 2439 struct target_ipc_perm sem_perm; 2440 abi_ulong sem_otime; 2441 #if !defined(TARGET_PPC64) 2442 abi_ulong __unused1; 2443 #endif 2444 abi_ulong sem_ctime; 2445 #if !defined(TARGET_PPC64) 2446 abi_ulong __unused2; 2447 #endif 2448 abi_ulong sem_nsems; 2449 abi_ulong __unused3; 2450 abi_ulong __unused4; 2451 }; 2452 2453 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2454 abi_ulong target_addr) 2455 { 2456 struct target_ipc_perm *target_ip; 2457 struct target_semid_ds *target_sd; 2458 2459 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2460 return -TARGET_EFAULT; 2461 target_ip = &(target_sd->sem_perm); 2462 host_ip->__key = tswap32(target_ip->__key); 2463 host_ip->uid = tswap32(target_ip->uid); 2464 host_ip->gid = tswap32(target_ip->gid); 2465 host_ip->cuid = tswap32(target_ip->cuid); 2466 host_ip->cgid = tswap32(target_ip->cgid); 2467 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2468 host_ip->mode = tswap32(target_ip->mode); 2469 #else 2470 host_ip->mode = tswap16(target_ip->mode); 2471 #endif 2472 #if defined(TARGET_PPC) 2473 host_ip->__seq = tswap32(target_ip->__seq); 2474 #else 2475 host_ip->__seq = tswap16(target_ip->__seq); 2476 #endif 2477 unlock_user_struct(target_sd, target_addr, 0); 2478 return 0; 2479 } 2480 2481 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2482 struct ipc_perm *host_ip) 2483 { 2484 struct target_ipc_perm *target_ip; 2485 struct target_semid_ds *target_sd; 2486 2487 if 
(!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2488 return -TARGET_EFAULT; 2489 target_ip = &(target_sd->sem_perm); 2490 target_ip->__key = tswap32(host_ip->__key); 2491 target_ip->uid = tswap32(host_ip->uid); 2492 target_ip->gid = tswap32(host_ip->gid); 2493 target_ip->cuid = tswap32(host_ip->cuid); 2494 target_ip->cgid = tswap32(host_ip->cgid); 2495 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2496 target_ip->mode = tswap32(host_ip->mode); 2497 #else 2498 target_ip->mode = tswap16(host_ip->mode); 2499 #endif 2500 #if defined(TARGET_PPC) 2501 target_ip->__seq = tswap32(host_ip->__seq); 2502 #else 2503 target_ip->__seq = tswap16(host_ip->__seq); 2504 #endif 2505 unlock_user_struct(target_sd, target_addr, 1); 2506 return 0; 2507 } 2508 2509 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2510 abi_ulong target_addr) 2511 { 2512 struct target_semid_ds *target_sd; 2513 2514 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2515 return -TARGET_EFAULT; 2516 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2517 return -TARGET_EFAULT; 2518 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2519 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2520 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 2521 unlock_user_struct(target_sd, target_addr, 0); 2522 return 0; 2523 } 2524 2525 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2526 struct semid_ds *host_sd) 2527 { 2528 struct target_semid_ds *target_sd; 2529 2530 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2531 return -TARGET_EFAULT; 2532 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2533 return -TARGET_EFAULT; 2534 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2535 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2536 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2537 unlock_user_struct(target_sd, target_addr, 1); 2538 return 0; 2539 } 2540 2541 struct 
target_seminfo { 2542 int semmap; 2543 int semmni; 2544 int semmns; 2545 int semmnu; 2546 int semmsl; 2547 int semopm; 2548 int semume; 2549 int semusz; 2550 int semvmx; 2551 int semaem; 2552 }; 2553 2554 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2555 struct seminfo *host_seminfo) 2556 { 2557 struct target_seminfo *target_seminfo; 2558 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2559 return -TARGET_EFAULT; 2560 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2561 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2562 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2563 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2564 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2565 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2566 __put_user(host_seminfo->semume, &target_seminfo->semume); 2567 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2568 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2569 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2570 unlock_user_struct(target_seminfo, target_addr, 1); 2571 return 0; 2572 } 2573 2574 union semun { 2575 int val; 2576 struct semid_ds *buf; 2577 unsigned short *array; 2578 struct seminfo *__buf; 2579 }; 2580 2581 union target_semun { 2582 int val; 2583 abi_ulong buf; 2584 abi_ulong array; 2585 abi_ulong __buf; 2586 }; 2587 2588 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2589 abi_ulong target_addr) 2590 { 2591 int nsems; 2592 unsigned short *array; 2593 union semun semun; 2594 struct semid_ds semid_ds; 2595 int i, ret; 2596 2597 semun.buf = &semid_ds; 2598 2599 ret = semctl(semid, 0, IPC_STAT, semun); 2600 if (ret == -1) 2601 return get_errno(ret); 2602 2603 nsems = semid_ds.sem_nsems; 2604 2605 *host_array = malloc(nsems*sizeof(unsigned short)); 2606 if (!*host_array) { 2607 return -TARGET_ENOMEM; 2608 } 2609 array = 
lock_user(VERIFY_READ, target_addr, 2610 nsems*sizeof(unsigned short), 1); 2611 if (!array) { 2612 free(*host_array); 2613 return -TARGET_EFAULT; 2614 } 2615 2616 for(i=0; i<nsems; i++) { 2617 __get_user((*host_array)[i], &array[i]); 2618 } 2619 unlock_user(array, target_addr, 0); 2620 2621 return 0; 2622 } 2623 2624 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2625 unsigned short **host_array) 2626 { 2627 int nsems; 2628 unsigned short *array; 2629 union semun semun; 2630 struct semid_ds semid_ds; 2631 int i, ret; 2632 2633 semun.buf = &semid_ds; 2634 2635 ret = semctl(semid, 0, IPC_STAT, semun); 2636 if (ret == -1) 2637 return get_errno(ret); 2638 2639 nsems = semid_ds.sem_nsems; 2640 2641 array = lock_user(VERIFY_WRITE, target_addr, 2642 nsems*sizeof(unsigned short), 0); 2643 if (!array) 2644 return -TARGET_EFAULT; 2645 2646 for(i=0; i<nsems; i++) { 2647 __put_user((*host_array)[i], &array[i]); 2648 } 2649 free(*host_array); 2650 unlock_user(array, target_addr, 1); 2651 2652 return 0; 2653 } 2654 2655 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2656 union target_semun target_su) 2657 { 2658 union semun arg; 2659 struct semid_ds dsarg; 2660 unsigned short *array = NULL; 2661 struct seminfo seminfo; 2662 abi_long ret = -TARGET_EINVAL; 2663 abi_long err; 2664 cmd &= 0xff; 2665 2666 switch( cmd ) { 2667 case GETVAL: 2668 case SETVAL: 2669 /* In 64 bit cross-endian situations, we will erroneously pick up 2670 * the wrong half of the union for the "val" element. To rectify 2671 * this, the entire 8-byte structure is byteswapped, followed by 2672 * a swap of the 4 byte val field. In other cases, the data is 2673 * already in proper host byte order. 
*/ 2674 if (sizeof(target_su.val) != (sizeof(target_su.buf))) { 2675 target_su.buf = tswapal(target_su.buf); 2676 arg.val = tswap32(target_su.val); 2677 } else { 2678 arg.val = target_su.val; 2679 } 2680 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2681 break; 2682 case GETALL: 2683 case SETALL: 2684 err = target_to_host_semarray(semid, &array, target_su.array); 2685 if (err) 2686 return err; 2687 arg.array = array; 2688 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2689 err = host_to_target_semarray(semid, target_su.array, &array); 2690 if (err) 2691 return err; 2692 break; 2693 case IPC_STAT: 2694 case IPC_SET: 2695 case SEM_STAT: 2696 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2697 if (err) 2698 return err; 2699 arg.buf = &dsarg; 2700 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2701 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2702 if (err) 2703 return err; 2704 break; 2705 case IPC_INFO: 2706 case SEM_INFO: 2707 arg.__buf = &seminfo; 2708 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2709 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2710 if (err) 2711 return err; 2712 break; 2713 case IPC_RMID: 2714 case GETPID: 2715 case GETNCNT: 2716 case GETZCNT: 2717 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2718 break; 2719 } 2720 2721 return ret; 2722 } 2723 2724 struct target_sembuf { 2725 unsigned short sem_num; 2726 short sem_op; 2727 short sem_flg; 2728 }; 2729 2730 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2731 abi_ulong target_addr, 2732 unsigned nsops) 2733 { 2734 struct target_sembuf *target_sembuf; 2735 int i; 2736 2737 target_sembuf = lock_user(VERIFY_READ, target_addr, 2738 nsops*sizeof(struct target_sembuf), 1); 2739 if (!target_sembuf) 2740 return -TARGET_EFAULT; 2741 2742 for(i=0; i<nsops; i++) { 2743 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2744 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2745 __get_user(host_sembuf[i].sem_flg, 
&target_sembuf[i].sem_flg); 2746 } 2747 2748 unlock_user(target_sembuf, target_addr, 0); 2749 2750 return 0; 2751 } 2752 2753 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2754 { 2755 struct sembuf sops[nsops]; 2756 2757 if (target_to_host_sembuf(sops, ptr, nsops)) 2758 return -TARGET_EFAULT; 2759 2760 return get_errno(semop(semid, sops, nsops)); 2761 } 2762 2763 struct target_msqid_ds 2764 { 2765 struct target_ipc_perm msg_perm; 2766 abi_ulong msg_stime; 2767 #if TARGET_ABI_BITS == 32 2768 abi_ulong __unused1; 2769 #endif 2770 abi_ulong msg_rtime; 2771 #if TARGET_ABI_BITS == 32 2772 abi_ulong __unused2; 2773 #endif 2774 abi_ulong msg_ctime; 2775 #if TARGET_ABI_BITS == 32 2776 abi_ulong __unused3; 2777 #endif 2778 abi_ulong __msg_cbytes; 2779 abi_ulong msg_qnum; 2780 abi_ulong msg_qbytes; 2781 abi_ulong msg_lspid; 2782 abi_ulong msg_lrpid; 2783 abi_ulong __unused4; 2784 abi_ulong __unused5; 2785 }; 2786 2787 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2788 abi_ulong target_addr) 2789 { 2790 struct target_msqid_ds *target_md; 2791 2792 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2793 return -TARGET_EFAULT; 2794 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2795 return -TARGET_EFAULT; 2796 host_md->msg_stime = tswapal(target_md->msg_stime); 2797 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2798 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2799 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2800 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2801 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2802 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2803 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2804 unlock_user_struct(target_md, target_addr, 0); 2805 return 0; 2806 } 2807 2808 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2809 struct msqid_ds *host_md) 2810 { 2811 struct target_msqid_ds *target_md; 2812 2813 
if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2814 return -TARGET_EFAULT; 2815 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2816 return -TARGET_EFAULT; 2817 target_md->msg_stime = tswapal(host_md->msg_stime); 2818 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2819 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2820 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 2821 target_md->msg_qnum = tswapal(host_md->msg_qnum); 2822 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 2823 target_md->msg_lspid = tswapal(host_md->msg_lspid); 2824 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 2825 unlock_user_struct(target_md, target_addr, 1); 2826 return 0; 2827 } 2828 2829 struct target_msginfo { 2830 int msgpool; 2831 int msgmap; 2832 int msgmax; 2833 int msgmnb; 2834 int msgmni; 2835 int msgssz; 2836 int msgtql; 2837 unsigned short int msgseg; 2838 }; 2839 2840 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 2841 struct msginfo *host_msginfo) 2842 { 2843 struct target_msginfo *target_msginfo; 2844 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 2845 return -TARGET_EFAULT; 2846 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 2847 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 2848 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 2849 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 2850 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 2851 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 2852 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 2853 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 2854 unlock_user_struct(target_msginfo, target_addr, 1); 2855 return 0; 2856 } 2857 2858 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 2859 { 2860 struct msqid_ds dsarg; 2861 struct msginfo msginfo; 2862 abi_long ret = -TARGET_EINVAL; 2863 2864 cmd &= 0xff; 2865 2866 switch 
(cmd) { 2867 case IPC_STAT: 2868 case IPC_SET: 2869 case MSG_STAT: 2870 if (target_to_host_msqid_ds(&dsarg,ptr)) 2871 return -TARGET_EFAULT; 2872 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 2873 if (host_to_target_msqid_ds(ptr,&dsarg)) 2874 return -TARGET_EFAULT; 2875 break; 2876 case IPC_RMID: 2877 ret = get_errno(msgctl(msgid, cmd, NULL)); 2878 break; 2879 case IPC_INFO: 2880 case MSG_INFO: 2881 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 2882 if (host_to_target_msginfo(ptr, &msginfo)) 2883 return -TARGET_EFAULT; 2884 break; 2885 } 2886 2887 return ret; 2888 } 2889 2890 struct target_msgbuf { 2891 abi_long mtype; 2892 char mtext[1]; 2893 }; 2894 2895 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 2896 ssize_t msgsz, int msgflg) 2897 { 2898 struct target_msgbuf *target_mb; 2899 struct msgbuf *host_mb; 2900 abi_long ret = 0; 2901 2902 if (msgsz < 0) { 2903 return -TARGET_EINVAL; 2904 } 2905 2906 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 2907 return -TARGET_EFAULT; 2908 host_mb = malloc(msgsz+sizeof(long)); 2909 if (!host_mb) { 2910 unlock_user_struct(target_mb, msgp, 0); 2911 return -TARGET_ENOMEM; 2912 } 2913 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 2914 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 2915 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 2916 free(host_mb); 2917 unlock_user_struct(target_mb, msgp, 0); 2918 2919 return ret; 2920 } 2921 2922 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 2923 unsigned int msgsz, abi_long msgtyp, 2924 int msgflg) 2925 { 2926 struct target_msgbuf *target_mb; 2927 char *target_mtext; 2928 struct msgbuf *host_mb; 2929 abi_long ret = 0; 2930 2931 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 2932 return -TARGET_EFAULT; 2933 2934 host_mb = g_malloc(msgsz+sizeof(long)); 2935 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 2936 2937 if (ret > 0) { 2938 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 2939 
target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 2940 if (!target_mtext) { 2941 ret = -TARGET_EFAULT; 2942 goto end; 2943 } 2944 memcpy(target_mb->mtext, host_mb->mtext, ret); 2945 unlock_user(target_mtext, target_mtext_addr, ret); 2946 } 2947 2948 target_mb->mtype = tswapal(host_mb->mtype); 2949 2950 end: 2951 if (target_mb) 2952 unlock_user_struct(target_mb, msgp, 1); 2953 g_free(host_mb); 2954 return ret; 2955 } 2956 2957 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 2958 abi_ulong target_addr) 2959 { 2960 struct target_shmid_ds *target_sd; 2961 2962 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2963 return -TARGET_EFAULT; 2964 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 2965 return -TARGET_EFAULT; 2966 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2967 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 2968 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2969 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2970 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2971 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 2972 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2973 unlock_user_struct(target_sd, target_addr, 0); 2974 return 0; 2975 } 2976 2977 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 2978 struct shmid_ds *host_sd) 2979 { 2980 struct target_shmid_ds *target_sd; 2981 2982 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2983 return -TARGET_EFAULT; 2984 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 2985 return -TARGET_EFAULT; 2986 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 2987 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 2988 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 2989 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 2990 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 2991 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 
2992 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 2993 unlock_user_struct(target_sd, target_addr, 1); 2994 return 0; 2995 } 2996 2997 struct target_shminfo { 2998 abi_ulong shmmax; 2999 abi_ulong shmmin; 3000 abi_ulong shmmni; 3001 abi_ulong shmseg; 3002 abi_ulong shmall; 3003 }; 3004 3005 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 3006 struct shminfo *host_shminfo) 3007 { 3008 struct target_shminfo *target_shminfo; 3009 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 3010 return -TARGET_EFAULT; 3011 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 3012 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 3013 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 3014 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 3015 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 3016 unlock_user_struct(target_shminfo, target_addr, 1); 3017 return 0; 3018 } 3019 3020 struct target_shm_info { 3021 int used_ids; 3022 abi_ulong shm_tot; 3023 abi_ulong shm_rss; 3024 abi_ulong shm_swp; 3025 abi_ulong swap_attempts; 3026 abi_ulong swap_successes; 3027 }; 3028 3029 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 3030 struct shm_info *host_shm_info) 3031 { 3032 struct target_shm_info *target_shm_info; 3033 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 3034 return -TARGET_EFAULT; 3035 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 3036 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 3037 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 3038 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 3039 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 3040 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 3041 unlock_user_struct(target_shm_info, target_addr, 1); 3042 return 0; 3043 } 3044 3045 static inline abi_long do_shmctl(int shmid, 
int cmd, abi_long buf) 3046 { 3047 struct shmid_ds dsarg; 3048 struct shminfo shminfo; 3049 struct shm_info shm_info; 3050 abi_long ret = -TARGET_EINVAL; 3051 3052 cmd &= 0xff; 3053 3054 switch(cmd) { 3055 case IPC_STAT: 3056 case IPC_SET: 3057 case SHM_STAT: 3058 if (target_to_host_shmid_ds(&dsarg, buf)) 3059 return -TARGET_EFAULT; 3060 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 3061 if (host_to_target_shmid_ds(buf, &dsarg)) 3062 return -TARGET_EFAULT; 3063 break; 3064 case IPC_INFO: 3065 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 3066 if (host_to_target_shminfo(buf, &shminfo)) 3067 return -TARGET_EFAULT; 3068 break; 3069 case SHM_INFO: 3070 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 3071 if (host_to_target_shm_info(buf, &shm_info)) 3072 return -TARGET_EFAULT; 3073 break; 3074 case IPC_RMID: 3075 case SHM_LOCK: 3076 case SHM_UNLOCK: 3077 ret = get_errno(shmctl(shmid, cmd, NULL)); 3078 break; 3079 } 3080 3081 return ret; 3082 } 3083 3084 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 3085 { 3086 abi_long raddr; 3087 void *host_raddr; 3088 struct shmid_ds shm_info; 3089 int i,ret; 3090 3091 /* find out the length of the shared memory segment */ 3092 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 3093 if (is_error(ret)) { 3094 /* can't get length, bail out */ 3095 return ret; 3096 } 3097 3098 mmap_lock(); 3099 3100 if (shmaddr) 3101 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3102 else { 3103 abi_ulong mmap_start; 3104 3105 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3106 3107 if (mmap_start == -1) { 3108 errno = ENOMEM; 3109 host_raddr = (void *)-1; 3110 } else 3111 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3112 } 3113 3114 if (host_raddr == (void *)-1) { 3115 mmap_unlock(); 3116 return get_errno((long)host_raddr); 3117 } 3118 raddr=h2g((unsigned long)host_raddr); 3119 3120 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3121 PAGE_VALID | 
PAGE_READ | 3122 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3123 3124 for (i = 0; i < N_SHM_REGIONS; i++) { 3125 if (shm_regions[i].start == 0) { 3126 shm_regions[i].start = raddr; 3127 shm_regions[i].size = shm_info.shm_segsz; 3128 break; 3129 } 3130 } 3131 3132 mmap_unlock(); 3133 return raddr; 3134 3135 } 3136 3137 static inline abi_long do_shmdt(abi_ulong shmaddr) 3138 { 3139 int i; 3140 3141 for (i = 0; i < N_SHM_REGIONS; ++i) { 3142 if (shm_regions[i].start == shmaddr) { 3143 shm_regions[i].start = 0; 3144 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3145 break; 3146 } 3147 } 3148 3149 return get_errno(shmdt(g2h(shmaddr))); 3150 } 3151 3152 #ifdef TARGET_NR_ipc 3153 /* ??? This only works with linear mappings. */ 3154 /* do_ipc() must return target values and target errnos. */ 3155 static abi_long do_ipc(unsigned int call, abi_long first, 3156 abi_long second, abi_long third, 3157 abi_long ptr, abi_long fifth) 3158 { 3159 int version; 3160 abi_long ret = 0; 3161 3162 version = call >> 16; 3163 call &= 0xffff; 3164 3165 switch (call) { 3166 case IPCOP_semop: 3167 ret = do_semop(first, ptr, second); 3168 break; 3169 3170 case IPCOP_semget: 3171 ret = get_errno(semget(first, second, third)); 3172 break; 3173 3174 case IPCOP_semctl: { 3175 /* The semun argument to semctl is passed by value, so dereference the 3176 * ptr argument. 
*/ 3177 abi_ulong atptr; 3178 get_user_ual(atptr, ptr); 3179 ret = do_semctl(first, second, third, 3180 (union target_semun) atptr); 3181 break; 3182 } 3183 3184 case IPCOP_msgget: 3185 ret = get_errno(msgget(first, second)); 3186 break; 3187 3188 case IPCOP_msgsnd: 3189 ret = do_msgsnd(first, ptr, second, third); 3190 break; 3191 3192 case IPCOP_msgctl: 3193 ret = do_msgctl(first, second, ptr); 3194 break; 3195 3196 case IPCOP_msgrcv: 3197 switch (version) { 3198 case 0: 3199 { 3200 struct target_ipc_kludge { 3201 abi_long msgp; 3202 abi_long msgtyp; 3203 } *tmp; 3204 3205 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3206 ret = -TARGET_EFAULT; 3207 break; 3208 } 3209 3210 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3211 3212 unlock_user_struct(tmp, ptr, 0); 3213 break; 3214 } 3215 default: 3216 ret = do_msgrcv(first, ptr, second, fifth, third); 3217 } 3218 break; 3219 3220 case IPCOP_shmat: 3221 switch (version) { 3222 default: 3223 { 3224 abi_ulong raddr; 3225 raddr = do_shmat(first, ptr, second); 3226 if (is_error(raddr)) 3227 return get_errno(raddr); 3228 if (put_user_ual(raddr, third)) 3229 return -TARGET_EFAULT; 3230 break; 3231 } 3232 case 1: 3233 ret = -TARGET_EINVAL; 3234 break; 3235 } 3236 break; 3237 case IPCOP_shmdt: 3238 ret = do_shmdt(ptr); 3239 break; 3240 3241 case IPCOP_shmget: 3242 /* IPC_* flag values are the same on all linux platforms */ 3243 ret = get_errno(shmget(first, second, third)); 3244 break; 3245 3246 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3247 case IPCOP_shmctl: 3248 ret = do_shmctl(first, second, ptr); 3249 break; 3250 default: 3251 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3252 ret = -TARGET_ENOSYS; 3253 break; 3254 } 3255 return ret; 3256 } 3257 #endif 3258 3259 /* kernel structure types definitions */ 3260 3261 #define STRUCT(name, ...) 
STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
/* First expansion of syscall_types.h: define an enum constant STRUCT_<name>
 * for every structure described in that header. */
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type-description array struct_<name>_def[]
 * per structure; STRUCT_SPECIAL entries get hand-written converters instead. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a hand-written ioctl emulation routine. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

/* One row of the ioctl translation table (filled from ioctls.h). */
struct IOCTLEntry {
    unsigned int target_cmd;    /* guest-side ioctl number */
    unsigned int host_cmd;      /* host-side ioctl number */
    const char *name;           /* stringified command, for logging */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* non-NULL => special-case handler */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the scratch conversion buffer used by do_ioctl(). */
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Emulate FS_IOC_FIEMAP: convert the guest's struct fiemap (plus its
 * trailing fiemap_extent array) to host layout, issue the ioctl, and
 * convert the results back. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject extent counts whose output size computation would overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif

/* Emulate SIOCGIFCONF: the guest passes a struct ifconf whose ifc_buf
 * points at an array of target-format struct ifreq; convert to host
 * layout, run the ioctl, then convert the resulting ifreq array back. */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    /* Remember the guest buffer pointer; ifc_buf is repointed at host
     * memory for the ioctl and restored afterwards. */
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
        if (!host_ifconf) {
            return -TARGET_ENOMEM;
        }
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
        free_buf = 1;
    }
    host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        /* NOTE(review): if this lock_user fails after the big-buffer
         * malloc above, host_ifconf is leaked — confirm and consider a
         * goto-cleanup path. */
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        /* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        /* NOTE(review): argptr is not NULL-checked before use here —
         * a bad guest ifc_buf would make thunk_convert write through
         * NULL; verify against upstream. */
        for (i = 0; i < nb_ifreq ; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        }
        unlock_user(argptr, target_ifc_buf,
                    target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}

/* Emulate device-mapper ioctls (DM_*): the argument is a struct dm_ioctl
 * header followed by a command-specific variable-length payload at
 * data_start; both directions need per-command conversion. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            abi_long cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    /* NOTE(review): data_size comes straight from the guest and the
     * "* 2" headroom is a heuristic — confirm this cannot overflow or
     * under-allocate for any DM command. */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* NOTE(review): argptr is not NULL-checked before the memcpy cases
     * below — verify against upstream. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* leading 64-bit sector number needs byteswapping */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        /* Payload is target_count packed (dm_target_spec, params-string)
         * records; convert each and repack with host-side next offsets. */
        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Walk the linked list of dm_name_list records, converting
             * each and recomputing next offsets for the target layout. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
cur_data += vers->next; 3674 remaining_data -= vers->next; 3675 if (!next) { 3676 break; 3677 } 3678 vers = (void*)vers + next; 3679 } 3680 break; 3681 } 3682 default: 3683 ret = -TARGET_EINVAL; 3684 goto out; 3685 } 3686 unlock_user(argptr, guest_data, guest_data_size); 3687 3688 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3689 if (!argptr) { 3690 ret = -TARGET_EFAULT; 3691 goto out; 3692 } 3693 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3694 unlock_user(argptr, arg, target_size); 3695 } 3696 out: 3697 g_free(big_buf); 3698 return ret; 3699 } 3700 3701 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3702 abi_long cmd, abi_long arg) 3703 { 3704 void *argptr; 3705 int target_size; 3706 const argtype *arg_type = ie->arg_type; 3707 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 3708 abi_long ret; 3709 3710 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 3711 struct blkpg_partition host_part; 3712 3713 /* Read and convert blkpg */ 3714 arg_type++; 3715 target_size = thunk_type_size(arg_type, 0); 3716 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3717 if (!argptr) { 3718 ret = -TARGET_EFAULT; 3719 goto out; 3720 } 3721 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3722 unlock_user(argptr, arg, 0); 3723 3724 switch (host_blkpg->op) { 3725 case BLKPG_ADD_PARTITION: 3726 case BLKPG_DEL_PARTITION: 3727 /* payload is struct blkpg_partition */ 3728 break; 3729 default: 3730 /* Unknown opcode */ 3731 ret = -TARGET_EINVAL; 3732 goto out; 3733 } 3734 3735 /* Read and convert blkpg->data */ 3736 arg = (abi_long)(uintptr_t)host_blkpg->data; 3737 target_size = thunk_type_size(part_arg_type, 0); 3738 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3739 if (!argptr) { 3740 ret = -TARGET_EFAULT; 3741 goto out; 3742 } 3743 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 3744 unlock_user(argptr, arg, 0); 3745 3746 /* Swizzle the data pointer to our local copy and 
call! */ 3747 host_blkpg->data = &host_part; 3748 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg)); 3749 3750 out: 3751 return ret; 3752 } 3753 3754 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3755 int fd, abi_long cmd, abi_long arg) 3756 { 3757 const argtype *arg_type = ie->arg_type; 3758 const StructEntry *se; 3759 const argtype *field_types; 3760 const int *dst_offsets, *src_offsets; 3761 int target_size; 3762 void *argptr; 3763 abi_ulong *target_rt_dev_ptr; 3764 unsigned long *host_rt_dev_ptr; 3765 abi_long ret; 3766 int i; 3767 3768 assert(ie->access == IOC_W); 3769 assert(*arg_type == TYPE_PTR); 3770 arg_type++; 3771 assert(*arg_type == TYPE_STRUCT); 3772 target_size = thunk_type_size(arg_type, 0); 3773 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3774 if (!argptr) { 3775 return -TARGET_EFAULT; 3776 } 3777 arg_type++; 3778 assert(*arg_type == (int)STRUCT_rtentry); 3779 se = struct_entries + *arg_type++; 3780 assert(se->convert[0] == NULL); 3781 /* convert struct here to be able to catch rt_dev string */ 3782 field_types = se->field_types; 3783 dst_offsets = se->field_offsets[THUNK_HOST]; 3784 src_offsets = se->field_offsets[THUNK_TARGET]; 3785 for (i = 0; i < se->nb_fields; i++) { 3786 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3787 assert(*field_types == TYPE_PTRVOID); 3788 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3789 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3790 if (*target_rt_dev_ptr != 0) { 3791 *host_rt_dev_ptr = (unsigned long)lock_user_string( 3792 tswapal(*target_rt_dev_ptr)); 3793 if (!*host_rt_dev_ptr) { 3794 unlock_user(argptr, arg, 0); 3795 return -TARGET_EFAULT; 3796 } 3797 } else { 3798 *host_rt_dev_ptr = 0; 3799 } 3800 field_types++; 3801 continue; 3802 } 3803 field_types = thunk_convert(buf_temp + dst_offsets[i], 3804 argptr + src_offsets[i], 3805 field_types, THUNK_HOST); 3806 } 3807 unlock_user(argptr, arg, 0); 3808 3809 ret = get_errno(ioctl(fd, 
ie->host_cmd, buf_temp)); 3810 if (*host_rt_dev_ptr != 0) { 3811 unlock_user((void *)*host_rt_dev_ptr, 3812 *target_rt_dev_ptr, 0); 3813 } 3814 return ret; 3815 } 3816 3817 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 3818 int fd, abi_long cmd, abi_long arg) 3819 { 3820 int sig = target_to_host_signal(arg); 3821 return get_errno(ioctl(fd, ie->host_cmd, sig)); 3822 } 3823 3824 static IOCTLEntry ioctl_entries[] = { 3825 #define IOCTL(cmd, access, ...) \ 3826 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 3827 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 3828 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 3829 #include "ioctls.h" 3830 { 0, 0, }, 3831 }; 3832 3833 /* ??? Implement proper locking for ioctls. */ 3834 /* do_ioctl() Must return target values and target errnos. */ 3835 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg) 3836 { 3837 const IOCTLEntry *ie; 3838 const argtype *arg_type; 3839 abi_long ret; 3840 uint8_t buf_temp[MAX_STRUCT_SIZE]; 3841 int target_size; 3842 void *argptr; 3843 3844 ie = ioctl_entries; 3845 for(;;) { 3846 if (ie->target_cmd == 0) { 3847 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 3848 return -TARGET_ENOSYS; 3849 } 3850 if (ie->target_cmd == cmd) 3851 break; 3852 ie++; 3853 } 3854 arg_type = ie->arg_type; 3855 #if defined(DEBUG) 3856 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 3857 #endif 3858 if (ie->do_ioctl) { 3859 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 3860 } 3861 3862 switch(arg_type[0]) { 3863 case TYPE_NULL: 3864 /* no argument */ 3865 ret = get_errno(ioctl(fd, ie->host_cmd)); 3866 break; 3867 case TYPE_PTRVOID: 3868 case TYPE_INT: 3869 /* int argment */ 3870 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 3871 break; 3872 case TYPE_PTR: 3873 arg_type++; 3874 target_size = thunk_type_size(arg_type, 0); 3875 switch(ie->access) { 3876 case IOC_R: 3877 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3878 if 
(!is_error(ret)) { 3879 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3880 if (!argptr) 3881 return -TARGET_EFAULT; 3882 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3883 unlock_user(argptr, arg, target_size); 3884 } 3885 break; 3886 case IOC_W: 3887 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3888 if (!argptr) 3889 return -TARGET_EFAULT; 3890 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3891 unlock_user(argptr, arg, 0); 3892 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3893 break; 3894 default: 3895 case IOC_RW: 3896 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3897 if (!argptr) 3898 return -TARGET_EFAULT; 3899 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3900 unlock_user(argptr, arg, 0); 3901 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3902 if (!is_error(ret)) { 3903 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3904 if (!argptr) 3905 return -TARGET_EFAULT; 3906 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3907 unlock_user(argptr, arg, target_size); 3908 } 3909 break; 3910 } 3911 break; 3912 default: 3913 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 3914 (long)cmd, arg_type[0]); 3915 ret = -TARGET_ENOSYS; 3916 break; 3917 } 3918 return ret; 3919 } 3920 3921 static const bitmask_transtbl iflag_tbl[] = { 3922 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 3923 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 3924 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 3925 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 3926 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 3927 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 3928 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 3929 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 3930 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 3931 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 3932 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 3933 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 3934 { TARGET_IXOFF, TARGET_IXOFF, 
IXOFF, IXOFF }, 3935 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 3936 { 0, 0, 0, 0 } 3937 }; 3938 3939 static const bitmask_transtbl oflag_tbl[] = { 3940 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 3941 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 3942 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 3943 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 3944 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 3945 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 3946 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 3947 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 3948 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 3949 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 3950 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 3951 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 3952 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 3953 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 3954 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 3955 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 3956 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 3957 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 3958 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 3959 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 3960 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 3961 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 3962 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 3963 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 3964 { 0, 0, 0, 0 } 3965 }; 3966 3967 static const bitmask_transtbl cflag_tbl[] = { 3968 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 3969 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 3970 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 3971 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 3972 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 3973 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 3974 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 3975 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 3976 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 3977 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 3978 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 3979 { TARGET_CBAUD, TARGET_B2400, CBAUD, 
B2400 }, 3980 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 3981 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 3982 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 3983 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 3984 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 3985 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 3986 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 3987 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 3988 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 3989 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 3990 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 3991 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 3992 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 3993 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 3994 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 3995 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 3996 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 3997 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 3998 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 3999 { 0, 0, 0, 0 } 4000 }; 4001 4002 static const bitmask_transtbl lflag_tbl[] = { 4003 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 4004 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 4005 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 4006 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 4007 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 4008 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 4009 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 4010 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 4011 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 4012 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 4013 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 4014 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 4015 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 4016 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 4017 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 4018 { 0, 0, 0, 0 } 4019 }; 4020 4021 static void target_to_host_termios (void *dst, const void *src) 4022 
{ 4023 struct host_termios *host = dst; 4024 const struct target_termios *target = src; 4025 4026 host->c_iflag = 4027 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 4028 host->c_oflag = 4029 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 4030 host->c_cflag = 4031 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 4032 host->c_lflag = 4033 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 4034 host->c_line = target->c_line; 4035 4036 memset(host->c_cc, 0, sizeof(host->c_cc)); 4037 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 4038 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 4039 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 4040 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 4041 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 4042 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 4043 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 4044 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 4045 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 4046 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 4047 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 4048 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 4049 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 4050 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 4051 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 4052 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 4053 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 4054 } 4055 4056 static void host_to_target_termios (void *dst, const void *src) 4057 { 4058 struct target_termios *target = dst; 4059 const struct host_termios *host = src; 4060 4061 target->c_iflag = 4062 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 4063 target->c_oflag = 4064 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 4065 target->c_cflag = 4066 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 4067 target->c_lflag = 4068 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 4069 target->c_line = 
host->c_line; 4070 4071 memset(target->c_cc, 0, sizeof(target->c_cc)); 4072 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 4073 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 4074 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 4075 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 4076 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 4077 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 4078 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 4079 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 4080 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 4081 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 4082 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 4083 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 4084 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 4085 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 4086 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 4087 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 4088 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 4089 } 4090 4091 static const StructEntry struct_termios_def = { 4092 .convert = { host_to_target_termios, target_to_host_termios }, 4093 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 4094 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 4095 }; 4096 4097 static bitmask_transtbl mmap_flags_tbl[] = { 4098 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 4099 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 4100 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 4101 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 4102 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 4103 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 4104 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 4105 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 4106 { TARGET_MAP_NORESERVE, 
          TARGET_MAP_NORESERVE, MAP_NORESERVE,
	  MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};

#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Copy up to bytecount bytes of the emulated LDT out to guest memory at
 * ptr; returns the number of bytes copied (0 if no LDT exists yet). */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/* Install one LDT entry from a guest modify_ldt_ldt_s descriptor,
 * packing the fields into the two 32-bit halves of an x86 segment
 * descriptor exactly as the Linux kernel does. oldmode selects the
 * legacy modify_ldt(1) semantics. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the descriptor attribute bits from flags. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/attributes into the two descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...
*/ 4220 install: 4221 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4222 lp[0] = tswap32(entry_1); 4223 lp[1] = tswap32(entry_2); 4224 return 0; 4225 } 4226 4227 /* specific and weird i386 syscalls */ 4228 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4229 unsigned long bytecount) 4230 { 4231 abi_long ret; 4232 4233 switch (func) { 4234 case 0: 4235 ret = read_ldt(ptr, bytecount); 4236 break; 4237 case 1: 4238 ret = write_ldt(env, ptr, bytecount, 1); 4239 break; 4240 case 0x11: 4241 ret = write_ldt(env, ptr, bytecount, 0); 4242 break; 4243 default: 4244 ret = -TARGET_ENOSYS; 4245 break; 4246 } 4247 return ret; 4248 } 4249 4250 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4251 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4252 { 4253 uint64_t *gdt_table = g2h(env->gdt.base); 4254 struct target_modify_ldt_ldt_s ldt_info; 4255 struct target_modify_ldt_ldt_s *target_ldt_info; 4256 int seg_32bit, contents, read_exec_only, limit_in_pages; 4257 int seg_not_present, useable, lm; 4258 uint32_t *lp, entry_1, entry_2; 4259 int i; 4260 4261 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4262 if (!target_ldt_info) 4263 return -TARGET_EFAULT; 4264 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4265 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4266 ldt_info.limit = tswap32(target_ldt_info->limit); 4267 ldt_info.flags = tswap32(target_ldt_info->flags); 4268 if (ldt_info.entry_number == -1) { 4269 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4270 if (gdt_table[i] == 0) { 4271 ldt_info.entry_number = i; 4272 target_ldt_info->entry_number = tswap32(i); 4273 break; 4274 } 4275 } 4276 } 4277 unlock_user_struct(target_ldt_info, ptr, 1); 4278 4279 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4280 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4281 return -TARGET_EINVAL; 4282 seg_32bit = ldt_info.flags & 1; 4283 contents = (ldt_info.flags >> 1) & 3; 
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        /* Conforming code segments must be marked not-present. */
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* Emulate get_thread_area(2): read the TLS descriptor selected by the
 * guest user_desc's entry_number and unpack it back into user_desc form. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Inverse of the packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */

#ifndef TARGET_ABI32
/* Emulate x86-64 arch_prctl(2): get/set the FS or GS segment base. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector, then set the base directly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */

#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the child thread. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

/* Entry point of a CLONE_VM child thread: publish the tid, signal the
 * parent, wait for it to finish setup, then enter the CPU loop. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.
*/
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM clones become host
 * pthreads sharing the address space; anything else becomes a host
 * fork().  vfork is emulated with fork. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* The NPTL tid/tls flags are handled here, not by the host clone. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the attr-setup return values are overwritten and
           effectively ignored; only pthread_create's result is checked. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.
We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags... */
/* Map a target fcntl command constant to the host's, or -TARGET_EINVAL
 * for commands we do not translate. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
	case TARGET_F_DUPFD:
	case TARGET_F_GETFD:
	case TARGET_F_SETFD:
	case TARGET_F_GETFL:
	case TARGET_F_SETFL:
        /* Same value on target and host for these. */
        return cmd;
        case TARGET_F_GETLK:
	    return F_GETLK;
	case TARGET_F_SETLK:
	    return F_SETLK;
	case TARGET_F_SETLKW:
	    return F_SETLKW;
	case TARGET_F_GETOWN:
	    return F_GETOWN;
	case TARGET_F_SETOWN:
	    return F_SETOWN;
	case TARGET_F_GETSIG:
	    return F_GETSIG;
	case TARGET_F_SETSIG:
	    return F_SETSIG;
#if TARGET_ABI_BITS == 32
        case TARGET_F_GETLK64:
	    return F_GETLK64;
	case TARGET_F_SETLK64:
	    return F_SETLK64;
	case TARGET_F_SETLKW64:
	    return F_SETLKW64;
#endif
        case TARGET_F_SETLEASE:
            return F_SETLEASE;
        case TARGET_F_GETLEASE:
            return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
        case TARGET_F_DUPFD_CLOEXEC:
            return F_DUPFD_CLOEXEC;
#endif
        case TARGET_F_NOTIFY:
            return F_NOTIFY;
#ifdef F_GETOWN_EX
        case TARGET_F_GETOWN_EX:
            return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
        case TARGET_F_SETOWN_EX:
            return F_SETOWN_EX;
#endif
        default:
            return -TARGET_EINVAL;
    }
    return -TARGET_EINVAL;
}

#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Translation table for flock l_type values (used via *_bitmask()). */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};

/* Emulate fcntl(2): translate the command and any struct arguments
 * (flock, flock64, f_owner_ex) between target and host layouts. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* Copy the (possibly updated) lock description back. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Integer argument, passed through unchanged. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit range (overflow maps to 65534). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Clamp a 32-bit gid to the 16-bit range (overflow maps to 65534). */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid/gid ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* One-time init: register thunk struct types, build the target-to-host
 * errno table, and patch ioctl size fields that were left as "all ones". */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#if TARGET_ABI_BITS == 32
static inline uint64_t
target_offset64(uint32_t word0, uint32_t word1)
{
    /* Combine a 64-bit offset passed in two 32-bit registers; which
       register holds the high half depends on target endianness. */
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    /* 64-bit ABI: the offset already fits in the first argument. */
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* truncate64 wrapper: honours register-pair alignment requirements of
 * some ABIs by shifting the argument pair. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64 wrapper: same register-pair handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

/* Read a target struct timespec from guest memory into host form. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

/* Write a host struct timespec into guest memory in target form. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

/* Read a target struct itimerspec from guest memory into host form. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
                                                 abi_ulong target_addr)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    host_itspec->it_interval.tv_sec =
        tswapal(target_itspec->it_interval.tv_sec);
    host_itspec->it_interval.tv_nsec =
        tswapal(target_itspec->it_interval.tv_nsec);
    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 1);
    return 0;
}

/* Write a host struct itimerspec into guest memory in target form. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    struct target_itimerspec *target_itspec;

    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);

    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);

    unlock_user_struct(target_itspec, target_addr, 0);
    return 0;
}

/* Read a target struct sigevent from guest memory into host form,
 * translating the signal number. */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
/* Translate target mlockall() flag bits to the host's. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif

#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Convert a host struct stat into the guest's stat64 layout at
 * target_addr (with a special case for the ARM EABI layout). */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.
*/
/* Emulate futex(2) on the guest address uaddr, assuming host and target
 * share FUTEX_* constants.  val is byteswapped only where the kernel
 * compares it against guest memory. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so swap it. */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.
*/ 5176 int host_to_target_waitstatus(int status) 5177 { 5178 if (WIFSIGNALED(status)) { 5179 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 5180 } 5181 if (WIFSTOPPED(status)) { 5182 return (host_to_target_signal(WSTOPSIG(status)) << 8) 5183 | (status & 0xff); 5184 } 5185 return status; 5186 } 5187 5188 static int open_self_cmdline(void *cpu_env, int fd) 5189 { 5190 int fd_orig = -1; 5191 bool word_skipped = false; 5192 5193 fd_orig = open("/proc/self/cmdline", O_RDONLY); 5194 if (fd_orig < 0) { 5195 return fd_orig; 5196 } 5197 5198 while (true) { 5199 ssize_t nb_read; 5200 char buf[128]; 5201 char *cp_buf = buf; 5202 5203 nb_read = read(fd_orig, buf, sizeof(buf)); 5204 if (nb_read < 0) { 5205 fd_orig = close(fd_orig); 5206 return -1; 5207 } else if (nb_read == 0) { 5208 break; 5209 } 5210 5211 if (!word_skipped) { 5212 /* Skip the first string, which is the path to qemu-*-static 5213 instead of the actual command. */ 5214 cp_buf = memchr(buf, 0, sizeof(buf)); 5215 if (cp_buf) { 5216 /* Null byte found, skip one string */ 5217 cp_buf++; 5218 nb_read -= cp_buf - buf; 5219 word_skipped = true; 5220 } 5221 } 5222 5223 if (word_skipped) { 5224 if (write(fd, cp_buf, nb_read) != nb_read) { 5225 close(fd_orig); 5226 return -1; 5227 } 5228 } 5229 } 5230 5231 return close(fd_orig); 5232 } 5233 5234 static int open_self_maps(void *cpu_env, int fd) 5235 { 5236 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5237 TaskState *ts = cpu->opaque; 5238 FILE *fp; 5239 char *line = NULL; 5240 size_t len = 0; 5241 ssize_t read; 5242 5243 fp = fopen("/proc/self/maps", "r"); 5244 if (fp == NULL) { 5245 return -EACCES; 5246 } 5247 5248 while ((read = getline(&line, &len, fp)) != -1) { 5249 int fields, dev_maj, dev_min, inode; 5250 uint64_t min, max, offset; 5251 char flag_r, flag_w, flag_x, flag_p; 5252 char path[512] = ""; 5253 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5254 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5255 
&flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5256 5257 if ((fields < 10) || (fields > 11)) { 5258 continue; 5259 } 5260 if (h2g_valid(min)) { 5261 int flags = page_get_flags(h2g(min)); 5262 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX); 5263 if (page_check_range(h2g(min), max - min, flags) == -1) { 5264 continue; 5265 } 5266 if (h2g(min) == ts->info->stack_limit) { 5267 pstrcpy(path, sizeof(path), " [stack]"); 5268 } 5269 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5270 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 5271 h2g(min), h2g(max - 1) + 1, flag_r, flag_w, 5272 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5273 path[0] ? " " : "", path); 5274 } 5275 } 5276 5277 free(line); 5278 fclose(fp); 5279 5280 return 0; 5281 } 5282 5283 static int open_self_stat(void *cpu_env, int fd) 5284 { 5285 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5286 TaskState *ts = cpu->opaque; 5287 abi_ulong start_stack = ts->info->start_stack; 5288 int i; 5289 5290 for (i = 0; i < 44; i++) { 5291 char buf[128]; 5292 int len; 5293 uint64_t val = 0; 5294 5295 if (i == 0) { 5296 /* pid */ 5297 val = getpid(); 5298 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5299 } else if (i == 1) { 5300 /* app name */ 5301 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5302 } else if (i == 27) { 5303 /* stack bottom */ 5304 val = start_stack; 5305 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5306 } else { 5307 /* for the rest, there is MasterCard */ 5308 snprintf(buf, sizeof(buf), "0%c", i == 43 ? 
'\n' : ' '); 5309 } 5310 5311 len = strlen(buf); 5312 if (write(fd, buf, len) != len) { 5313 return -1; 5314 } 5315 } 5316 5317 return 0; 5318 } 5319 5320 static int open_self_auxv(void *cpu_env, int fd) 5321 { 5322 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5323 TaskState *ts = cpu->opaque; 5324 abi_ulong auxv = ts->info->saved_auxv; 5325 abi_ulong len = ts->info->auxv_len; 5326 char *ptr; 5327 5328 /* 5329 * Auxiliary vector is stored in target process stack. 5330 * read in whole auxv vector and copy it to file 5331 */ 5332 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5333 if (ptr != NULL) { 5334 while (len > 0) { 5335 ssize_t r; 5336 r = write(fd, ptr, len); 5337 if (r <= 0) { 5338 break; 5339 } 5340 len -= r; 5341 ptr += r; 5342 } 5343 lseek(fd, 0, SEEK_SET); 5344 unlock_user(ptr, auxv, len); 5345 } 5346 5347 return 0; 5348 } 5349 5350 static int is_proc_myself(const char *filename, const char *entry) 5351 { 5352 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5353 filename += strlen("/proc/"); 5354 if (!strncmp(filename, "self/", strlen("self/"))) { 5355 filename += strlen("self/"); 5356 } else if (*filename >= '1' && *filename <= '9') { 5357 char myself[80]; 5358 snprintf(myself, sizeof(myself), "%d/", getpid()); 5359 if (!strncmp(filename, myself, strlen(myself))) { 5360 filename += strlen(myself); 5361 } else { 5362 return 0; 5363 } 5364 } else { 5365 return 0; 5366 } 5367 if (!strcmp(filename, entry)) { 5368 return 1; 5369 } 5370 } 5371 return 0; 5372 } 5373 5374 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5375 static int is_proc(const char *filename, const char *entry) 5376 { 5377 return strcmp(filename, entry) == 0; 5378 } 5379 5380 static int open_net_route(void *cpu_env, int fd) 5381 { 5382 FILE *fp; 5383 char *line = NULL; 5384 size_t len = 0; 5385 ssize_t read; 5386 5387 fp = fopen("/proc/net/route", "r"); 5388 if (fp == NULL) { 5389 return -EACCES; 5390 } 5391 5392 /* read header */ 5393 5394 read = 
getline(&line, &len, fp); 5395 dprintf(fd, "%s", line); 5396 5397 /* read routes */ 5398 5399 while ((read = getline(&line, &len, fp)) != -1) { 5400 char iface[16]; 5401 uint32_t dest, gw, mask; 5402 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5403 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5404 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5405 &mask, &mtu, &window, &irtt); 5406 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5407 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5408 metric, tswap32(mask), mtu, window, irtt); 5409 } 5410 5411 free(line); 5412 fclose(fp); 5413 5414 return 0; 5415 } 5416 #endif 5417 5418 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 5419 { 5420 struct fake_open { 5421 const char *filename; 5422 int (*fill)(void *cpu_env, int fd); 5423 int (*cmp)(const char *s1, const char *s2); 5424 }; 5425 const struct fake_open *fake_open; 5426 static const struct fake_open fakes[] = { 5427 { "maps", open_self_maps, is_proc_myself }, 5428 { "stat", open_self_stat, is_proc_myself }, 5429 { "auxv", open_self_auxv, is_proc_myself }, 5430 { "cmdline", open_self_cmdline, is_proc_myself }, 5431 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5432 { "/proc/net/route", open_net_route, is_proc }, 5433 #endif 5434 { NULL, NULL, NULL } 5435 }; 5436 5437 if (is_proc_myself(pathname, "exe")) { 5438 int execfd = qemu_getauxval(AT_EXECFD); 5439 return execfd ? 
execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode)); 5440 } 5441 5442 for (fake_open = fakes; fake_open->filename; fake_open++) { 5443 if (fake_open->cmp(pathname, fake_open->filename)) { 5444 break; 5445 } 5446 } 5447 5448 if (fake_open->filename) { 5449 const char *tmpdir; 5450 char filename[PATH_MAX]; 5451 int fd, r; 5452 5453 /* create temporary file to map stat to */ 5454 tmpdir = getenv("TMPDIR"); 5455 if (!tmpdir) 5456 tmpdir = "/tmp"; 5457 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5458 fd = mkstemp(filename); 5459 if (fd < 0) { 5460 return fd; 5461 } 5462 unlink(filename); 5463 5464 if ((r = fake_open->fill(cpu_env, fd))) { 5465 close(fd); 5466 return r; 5467 } 5468 lseek(fd, 0, SEEK_SET); 5469 5470 return fd; 5471 } 5472 5473 return get_errno(sys_openat(dirfd, path(pathname), flags, mode)); 5474 } 5475 5476 #define TIMER_MAGIC 0x0caf0000 5477 #define TIMER_MAGIC_MASK 0xffff0000 5478 5479 /* Convert QEMU provided timer ID back to internal 16bit index format */ 5480 static target_timer_t get_timer_id(abi_long arg) 5481 { 5482 target_timer_t timerid = arg; 5483 5484 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 5485 return -TARGET_EINVAL; 5486 } 5487 5488 timerid &= 0xffff; 5489 5490 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 5491 return -TARGET_EINVAL; 5492 } 5493 5494 return timerid; 5495 } 5496 5497 /* do_syscall() should always have a single exit point at the end so 5498 that actions, such as logging of syscall results, can be performed. 5499 All errnos that do_syscall() returns must be -TARGET_<errcode>. 
*/ 5500 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5501 abi_long arg2, abi_long arg3, abi_long arg4, 5502 abi_long arg5, abi_long arg6, abi_long arg7, 5503 abi_long arg8) 5504 { 5505 CPUState *cpu = ENV_GET_CPU(cpu_env); 5506 abi_long ret; 5507 struct stat st; 5508 struct statfs stfs; 5509 void *p; 5510 5511 #ifdef DEBUG 5512 gemu_log("syscall %d", num); 5513 #endif 5514 if(do_strace) 5515 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5516 5517 switch(num) { 5518 case TARGET_NR_exit: 5519 /* In old applications this may be used to implement _exit(2). 5520 However in threaded applictions it is used for thread termination, 5521 and _exit_group is used for application termination. 5522 Do thread termination if we have more then one thread. */ 5523 /* FIXME: This probably breaks if a signal arrives. We should probably 5524 be disabling signals. */ 5525 if (CPU_NEXT(first_cpu)) { 5526 TaskState *ts; 5527 5528 cpu_list_lock(); 5529 /* Remove the CPU from the list. */ 5530 QTAILQ_REMOVE(&cpus, cpu, node); 5531 cpu_list_unlock(); 5532 ts = cpu->opaque; 5533 if (ts->child_tidptr) { 5534 put_user_u32(0, ts->child_tidptr); 5535 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5536 NULL, NULL, 0); 5537 } 5538 thread_cpu = NULL; 5539 object_unref(OBJECT(cpu)); 5540 g_free(ts); 5541 pthread_exit(NULL); 5542 } 5543 #ifdef TARGET_GPROF 5544 _mcleanup(); 5545 #endif 5546 gdb_exit(cpu_env, arg1); 5547 _exit(arg1); 5548 ret = 0; /* avoid warning */ 5549 break; 5550 case TARGET_NR_read: 5551 if (arg3 == 0) 5552 ret = 0; 5553 else { 5554 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5555 goto efault; 5556 ret = get_errno(read(arg1, p, arg3)); 5557 unlock_user(p, arg2, ret); 5558 } 5559 break; 5560 case TARGET_NR_write: 5561 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5562 goto efault; 5563 ret = get_errno(write(arg1, p, arg3)); 5564 unlock_user(p, arg2, 0); 5565 break; 5566 case TARGET_NR_open: 5567 if (!(p = lock_user_string(arg1))) 5568 goto 
efault; 5569 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 5570 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5571 arg3)); 5572 unlock_user(p, arg1, 0); 5573 break; 5574 case TARGET_NR_openat: 5575 if (!(p = lock_user_string(arg2))) 5576 goto efault; 5577 ret = get_errno(do_openat(cpu_env, arg1, p, 5578 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5579 arg4)); 5580 unlock_user(p, arg2, 0); 5581 break; 5582 case TARGET_NR_close: 5583 ret = get_errno(close(arg1)); 5584 break; 5585 case TARGET_NR_brk: 5586 ret = do_brk(arg1); 5587 break; 5588 case TARGET_NR_fork: 5589 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5590 break; 5591 #ifdef TARGET_NR_waitpid 5592 case TARGET_NR_waitpid: 5593 { 5594 int status; 5595 ret = get_errno(waitpid(arg1, &status, arg3)); 5596 if (!is_error(ret) && arg2 && ret 5597 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5598 goto efault; 5599 } 5600 break; 5601 #endif 5602 #ifdef TARGET_NR_waitid 5603 case TARGET_NR_waitid: 5604 { 5605 siginfo_t info; 5606 info.si_pid = 0; 5607 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5608 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5609 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5610 goto efault; 5611 host_to_target_siginfo(p, &info); 5612 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5613 } 5614 } 5615 break; 5616 #endif 5617 #ifdef TARGET_NR_creat /* not on alpha */ 5618 case TARGET_NR_creat: 5619 if (!(p = lock_user_string(arg1))) 5620 goto efault; 5621 ret = get_errno(creat(p, arg2)); 5622 unlock_user(p, arg1, 0); 5623 break; 5624 #endif 5625 case TARGET_NR_link: 5626 { 5627 void * p2; 5628 p = lock_user_string(arg1); 5629 p2 = lock_user_string(arg2); 5630 if (!p || !p2) 5631 ret = -TARGET_EFAULT; 5632 else 5633 ret = get_errno(link(p, p2)); 5634 unlock_user(p2, arg2, 0); 5635 unlock_user(p, arg1, 0); 5636 } 5637 break; 5638 #if defined(TARGET_NR_linkat) 5639 case TARGET_NR_linkat: 5640 { 5641 void * p2 = NULL; 5642 if (!arg2 || 
!arg4) 5643 goto efault; 5644 p = lock_user_string(arg2); 5645 p2 = lock_user_string(arg4); 5646 if (!p || !p2) 5647 ret = -TARGET_EFAULT; 5648 else 5649 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 5650 unlock_user(p, arg2, 0); 5651 unlock_user(p2, arg4, 0); 5652 } 5653 break; 5654 #endif 5655 case TARGET_NR_unlink: 5656 if (!(p = lock_user_string(arg1))) 5657 goto efault; 5658 ret = get_errno(unlink(p)); 5659 unlock_user(p, arg1, 0); 5660 break; 5661 #if defined(TARGET_NR_unlinkat) 5662 case TARGET_NR_unlinkat: 5663 if (!(p = lock_user_string(arg2))) 5664 goto efault; 5665 ret = get_errno(unlinkat(arg1, p, arg3)); 5666 unlock_user(p, arg2, 0); 5667 break; 5668 #endif 5669 case TARGET_NR_execve: 5670 { 5671 char **argp, **envp; 5672 int argc, envc; 5673 abi_ulong gp; 5674 abi_ulong guest_argp; 5675 abi_ulong guest_envp; 5676 abi_ulong addr; 5677 char **q; 5678 int total_size = 0; 5679 5680 argc = 0; 5681 guest_argp = arg2; 5682 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 5683 if (get_user_ual(addr, gp)) 5684 goto efault; 5685 if (!addr) 5686 break; 5687 argc++; 5688 } 5689 envc = 0; 5690 guest_envp = arg3; 5691 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 5692 if (get_user_ual(addr, gp)) 5693 goto efault; 5694 if (!addr) 5695 break; 5696 envc++; 5697 } 5698 5699 argp = alloca((argc + 1) * sizeof(void *)); 5700 envp = alloca((envc + 1) * sizeof(void *)); 5701 5702 for (gp = guest_argp, q = argp; gp; 5703 gp += sizeof(abi_ulong), q++) { 5704 if (get_user_ual(addr, gp)) 5705 goto execve_efault; 5706 if (!addr) 5707 break; 5708 if (!(*q = lock_user_string(addr))) 5709 goto execve_efault; 5710 total_size += strlen(*q) + 1; 5711 } 5712 *q = NULL; 5713 5714 for (gp = guest_envp, q = envp; gp; 5715 gp += sizeof(abi_ulong), q++) { 5716 if (get_user_ual(addr, gp)) 5717 goto execve_efault; 5718 if (!addr) 5719 break; 5720 if (!(*q = lock_user_string(addr))) 5721 goto execve_efault; 5722 total_size += strlen(*q) + 1; 5723 } 5724 *q = NULL; 5725 5726 /* 
This case will not be caught by the host's execve() if its 5727 page size is bigger than the target's. */ 5728 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { 5729 ret = -TARGET_E2BIG; 5730 goto execve_end; 5731 } 5732 if (!(p = lock_user_string(arg1))) 5733 goto execve_efault; 5734 ret = get_errno(execve(p, argp, envp)); 5735 unlock_user(p, arg1, 0); 5736 5737 goto execve_end; 5738 5739 execve_efault: 5740 ret = -TARGET_EFAULT; 5741 5742 execve_end: 5743 for (gp = guest_argp, q = argp; *q; 5744 gp += sizeof(abi_ulong), q++) { 5745 if (get_user_ual(addr, gp) 5746 || !addr) 5747 break; 5748 unlock_user(*q, addr, 0); 5749 } 5750 for (gp = guest_envp, q = envp; *q; 5751 gp += sizeof(abi_ulong), q++) { 5752 if (get_user_ual(addr, gp) 5753 || !addr) 5754 break; 5755 unlock_user(*q, addr, 0); 5756 } 5757 } 5758 break; 5759 case TARGET_NR_chdir: 5760 if (!(p = lock_user_string(arg1))) 5761 goto efault; 5762 ret = get_errno(chdir(p)); 5763 unlock_user(p, arg1, 0); 5764 break; 5765 #ifdef TARGET_NR_time 5766 case TARGET_NR_time: 5767 { 5768 time_t host_time; 5769 ret = get_errno(time(&host_time)); 5770 if (!is_error(ret) 5771 && arg1 5772 && put_user_sal(host_time, arg1)) 5773 goto efault; 5774 } 5775 break; 5776 #endif 5777 case TARGET_NR_mknod: 5778 if (!(p = lock_user_string(arg1))) 5779 goto efault; 5780 ret = get_errno(mknod(p, arg2, arg3)); 5781 unlock_user(p, arg1, 0); 5782 break; 5783 #if defined(TARGET_NR_mknodat) 5784 case TARGET_NR_mknodat: 5785 if (!(p = lock_user_string(arg2))) 5786 goto efault; 5787 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 5788 unlock_user(p, arg2, 0); 5789 break; 5790 #endif 5791 case TARGET_NR_chmod: 5792 if (!(p = lock_user_string(arg1))) 5793 goto efault; 5794 ret = get_errno(chmod(p, arg2)); 5795 unlock_user(p, arg1, 0); 5796 break; 5797 #ifdef TARGET_NR_break 5798 case TARGET_NR_break: 5799 goto unimplemented; 5800 #endif 5801 #ifdef TARGET_NR_oldstat 5802 case TARGET_NR_oldstat: 5803 goto unimplemented; 5804 #endif 5805 case 
TARGET_NR_lseek: 5806 ret = get_errno(lseek(arg1, arg2, arg3)); 5807 break; 5808 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 5809 /* Alpha specific */ 5810 case TARGET_NR_getxpid: 5811 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 5812 ret = get_errno(getpid()); 5813 break; 5814 #endif 5815 #ifdef TARGET_NR_getpid 5816 case TARGET_NR_getpid: 5817 ret = get_errno(getpid()); 5818 break; 5819 #endif 5820 case TARGET_NR_mount: 5821 { 5822 /* need to look at the data field */ 5823 void *p2, *p3; 5824 5825 if (arg1) { 5826 p = lock_user_string(arg1); 5827 if (!p) { 5828 goto efault; 5829 } 5830 } else { 5831 p = NULL; 5832 } 5833 5834 p2 = lock_user_string(arg2); 5835 if (!p2) { 5836 if (arg1) { 5837 unlock_user(p, arg1, 0); 5838 } 5839 goto efault; 5840 } 5841 5842 if (arg3) { 5843 p3 = lock_user_string(arg3); 5844 if (!p3) { 5845 if (arg1) { 5846 unlock_user(p, arg1, 0); 5847 } 5848 unlock_user(p2, arg2, 0); 5849 goto efault; 5850 } 5851 } else { 5852 p3 = NULL; 5853 } 5854 5855 /* FIXME - arg5 should be locked, but it isn't clear how to 5856 * do that since it's not guaranteed to be a NULL-terminated 5857 * string. 
5858 */ 5859 if (!arg5) { 5860 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 5861 } else { 5862 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 5863 } 5864 ret = get_errno(ret); 5865 5866 if (arg1) { 5867 unlock_user(p, arg1, 0); 5868 } 5869 unlock_user(p2, arg2, 0); 5870 if (arg3) { 5871 unlock_user(p3, arg3, 0); 5872 } 5873 } 5874 break; 5875 #ifdef TARGET_NR_umount 5876 case TARGET_NR_umount: 5877 if (!(p = lock_user_string(arg1))) 5878 goto efault; 5879 ret = get_errno(umount(p)); 5880 unlock_user(p, arg1, 0); 5881 break; 5882 #endif 5883 #ifdef TARGET_NR_stime /* not on alpha */ 5884 case TARGET_NR_stime: 5885 { 5886 time_t host_time; 5887 if (get_user_sal(host_time, arg1)) 5888 goto efault; 5889 ret = get_errno(stime(&host_time)); 5890 } 5891 break; 5892 #endif 5893 case TARGET_NR_ptrace: 5894 goto unimplemented; 5895 #ifdef TARGET_NR_alarm /* not on alpha */ 5896 case TARGET_NR_alarm: 5897 ret = alarm(arg1); 5898 break; 5899 #endif 5900 #ifdef TARGET_NR_oldfstat 5901 case TARGET_NR_oldfstat: 5902 goto unimplemented; 5903 #endif 5904 #ifdef TARGET_NR_pause /* not on alpha */ 5905 case TARGET_NR_pause: 5906 ret = get_errno(pause()); 5907 break; 5908 #endif 5909 #ifdef TARGET_NR_utime 5910 case TARGET_NR_utime: 5911 { 5912 struct utimbuf tbuf, *host_tbuf; 5913 struct target_utimbuf *target_tbuf; 5914 if (arg2) { 5915 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 5916 goto efault; 5917 tbuf.actime = tswapal(target_tbuf->actime); 5918 tbuf.modtime = tswapal(target_tbuf->modtime); 5919 unlock_user_struct(target_tbuf, arg2, 0); 5920 host_tbuf = &tbuf; 5921 } else { 5922 host_tbuf = NULL; 5923 } 5924 if (!(p = lock_user_string(arg1))) 5925 goto efault; 5926 ret = get_errno(utime(p, host_tbuf)); 5927 unlock_user(p, arg1, 0); 5928 } 5929 break; 5930 #endif 5931 case TARGET_NR_utimes: 5932 { 5933 struct timeval *tvp, tv[2]; 5934 if (arg2) { 5935 if (copy_from_user_timeval(&tv[0], arg2) 5936 || copy_from_user_timeval(&tv[1], 5937 arg2 + 
sizeof(struct target_timeval))) 5938 goto efault; 5939 tvp = tv; 5940 } else { 5941 tvp = NULL; 5942 } 5943 if (!(p = lock_user_string(arg1))) 5944 goto efault; 5945 ret = get_errno(utimes(p, tvp)); 5946 unlock_user(p, arg1, 0); 5947 } 5948 break; 5949 #if defined(TARGET_NR_futimesat) 5950 case TARGET_NR_futimesat: 5951 { 5952 struct timeval *tvp, tv[2]; 5953 if (arg3) { 5954 if (copy_from_user_timeval(&tv[0], arg3) 5955 || copy_from_user_timeval(&tv[1], 5956 arg3 + sizeof(struct target_timeval))) 5957 goto efault; 5958 tvp = tv; 5959 } else { 5960 tvp = NULL; 5961 } 5962 if (!(p = lock_user_string(arg2))) 5963 goto efault; 5964 ret = get_errno(futimesat(arg1, path(p), tvp)); 5965 unlock_user(p, arg2, 0); 5966 } 5967 break; 5968 #endif 5969 #ifdef TARGET_NR_stty 5970 case TARGET_NR_stty: 5971 goto unimplemented; 5972 #endif 5973 #ifdef TARGET_NR_gtty 5974 case TARGET_NR_gtty: 5975 goto unimplemented; 5976 #endif 5977 case TARGET_NR_access: 5978 if (!(p = lock_user_string(arg1))) 5979 goto efault; 5980 ret = get_errno(access(path(p), arg2)); 5981 unlock_user(p, arg1, 0); 5982 break; 5983 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 5984 case TARGET_NR_faccessat: 5985 if (!(p = lock_user_string(arg2))) 5986 goto efault; 5987 ret = get_errno(faccessat(arg1, p, arg3, 0)); 5988 unlock_user(p, arg2, 0); 5989 break; 5990 #endif 5991 #ifdef TARGET_NR_nice /* not on alpha */ 5992 case TARGET_NR_nice: 5993 ret = get_errno(nice(arg1)); 5994 break; 5995 #endif 5996 #ifdef TARGET_NR_ftime 5997 case TARGET_NR_ftime: 5998 goto unimplemented; 5999 #endif 6000 case TARGET_NR_sync: 6001 sync(); 6002 ret = 0; 6003 break; 6004 case TARGET_NR_kill: 6005 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 6006 break; 6007 case TARGET_NR_rename: 6008 { 6009 void *p2; 6010 p = lock_user_string(arg1); 6011 p2 = lock_user_string(arg2); 6012 if (!p || !p2) 6013 ret = -TARGET_EFAULT; 6014 else 6015 ret = get_errno(rename(p, p2)); 6016 unlock_user(p2, arg2, 0); 6017 
unlock_user(p, arg1, 0); 6018 } 6019 break; 6020 #if defined(TARGET_NR_renameat) 6021 case TARGET_NR_renameat: 6022 { 6023 void *p2; 6024 p = lock_user_string(arg2); 6025 p2 = lock_user_string(arg4); 6026 if (!p || !p2) 6027 ret = -TARGET_EFAULT; 6028 else 6029 ret = get_errno(renameat(arg1, p, arg3, p2)); 6030 unlock_user(p2, arg4, 0); 6031 unlock_user(p, arg2, 0); 6032 } 6033 break; 6034 #endif 6035 case TARGET_NR_mkdir: 6036 if (!(p = lock_user_string(arg1))) 6037 goto efault; 6038 ret = get_errno(mkdir(p, arg2)); 6039 unlock_user(p, arg1, 0); 6040 break; 6041 #if defined(TARGET_NR_mkdirat) 6042 case TARGET_NR_mkdirat: 6043 if (!(p = lock_user_string(arg2))) 6044 goto efault; 6045 ret = get_errno(mkdirat(arg1, p, arg3)); 6046 unlock_user(p, arg2, 0); 6047 break; 6048 #endif 6049 case TARGET_NR_rmdir: 6050 if (!(p = lock_user_string(arg1))) 6051 goto efault; 6052 ret = get_errno(rmdir(p)); 6053 unlock_user(p, arg1, 0); 6054 break; 6055 case TARGET_NR_dup: 6056 ret = get_errno(dup(arg1)); 6057 break; 6058 case TARGET_NR_pipe: 6059 ret = do_pipe(cpu_env, arg1, 0, 0); 6060 break; 6061 #ifdef TARGET_NR_pipe2 6062 case TARGET_NR_pipe2: 6063 ret = do_pipe(cpu_env, arg1, 6064 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 6065 break; 6066 #endif 6067 case TARGET_NR_times: 6068 { 6069 struct target_tms *tmsp; 6070 struct tms tms; 6071 ret = get_errno(times(&tms)); 6072 if (arg1) { 6073 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 6074 if (!tmsp) 6075 goto efault; 6076 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 6077 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 6078 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 6079 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 6080 } 6081 if (!is_error(ret)) 6082 ret = host_to_target_clock_t(ret); 6083 } 6084 break; 6085 #ifdef TARGET_NR_prof 6086 case TARGET_NR_prof: 6087 goto unimplemented; 6088 #endif 6089 #ifdef 
TARGET_NR_signal 6090 case TARGET_NR_signal: 6091 goto unimplemented; 6092 #endif 6093 case TARGET_NR_acct: 6094 if (arg1 == 0) { 6095 ret = get_errno(acct(NULL)); 6096 } else { 6097 if (!(p = lock_user_string(arg1))) 6098 goto efault; 6099 ret = get_errno(acct(path(p))); 6100 unlock_user(p, arg1, 0); 6101 } 6102 break; 6103 #ifdef TARGET_NR_umount2 6104 case TARGET_NR_umount2: 6105 if (!(p = lock_user_string(arg1))) 6106 goto efault; 6107 ret = get_errno(umount2(p, arg2)); 6108 unlock_user(p, arg1, 0); 6109 break; 6110 #endif 6111 #ifdef TARGET_NR_lock 6112 case TARGET_NR_lock: 6113 goto unimplemented; 6114 #endif 6115 case TARGET_NR_ioctl: 6116 ret = do_ioctl(arg1, arg2, arg3); 6117 break; 6118 case TARGET_NR_fcntl: 6119 ret = do_fcntl(arg1, arg2, arg3); 6120 break; 6121 #ifdef TARGET_NR_mpx 6122 case TARGET_NR_mpx: 6123 goto unimplemented; 6124 #endif 6125 case TARGET_NR_setpgid: 6126 ret = get_errno(setpgid(arg1, arg2)); 6127 break; 6128 #ifdef TARGET_NR_ulimit 6129 case TARGET_NR_ulimit: 6130 goto unimplemented; 6131 #endif 6132 #ifdef TARGET_NR_oldolduname 6133 case TARGET_NR_oldolduname: 6134 goto unimplemented; 6135 #endif 6136 case TARGET_NR_umask: 6137 ret = get_errno(umask(arg1)); 6138 break; 6139 case TARGET_NR_chroot: 6140 if (!(p = lock_user_string(arg1))) 6141 goto efault; 6142 ret = get_errno(chroot(p)); 6143 unlock_user(p, arg1, 0); 6144 break; 6145 case TARGET_NR_ustat: 6146 goto unimplemented; 6147 case TARGET_NR_dup2: 6148 ret = get_errno(dup2(arg1, arg2)); 6149 break; 6150 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 6151 case TARGET_NR_dup3: 6152 ret = get_errno(dup3(arg1, arg2, arg3)); 6153 break; 6154 #endif 6155 #ifdef TARGET_NR_getppid /* not on alpha */ 6156 case TARGET_NR_getppid: 6157 ret = get_errno(getppid()); 6158 break; 6159 #endif 6160 case TARGET_NR_getpgrp: 6161 ret = get_errno(getpgrp()); 6162 break; 6163 case TARGET_NR_setsid: 6164 ret = get_errno(setsid()); 6165 break; 6166 #ifdef TARGET_NR_sigaction 6167 case 
TARGET_NR_sigaction: 6168 { 6169 #if defined(TARGET_ALPHA) 6170 struct target_sigaction act, oact, *pact = 0; 6171 struct target_old_sigaction *old_act; 6172 if (arg2) { 6173 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6174 goto efault; 6175 act._sa_handler = old_act->_sa_handler; 6176 target_siginitset(&act.sa_mask, old_act->sa_mask); 6177 act.sa_flags = old_act->sa_flags; 6178 act.sa_restorer = 0; 6179 unlock_user_struct(old_act, arg2, 0); 6180 pact = &act; 6181 } 6182 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6183 if (!is_error(ret) && arg3) { 6184 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6185 goto efault; 6186 old_act->_sa_handler = oact._sa_handler; 6187 old_act->sa_mask = oact.sa_mask.sig[0]; 6188 old_act->sa_flags = oact.sa_flags; 6189 unlock_user_struct(old_act, arg3, 1); 6190 } 6191 #elif defined(TARGET_MIPS) 6192 struct target_sigaction act, oact, *pact, *old_act; 6193 6194 if (arg2) { 6195 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6196 goto efault; 6197 act._sa_handler = old_act->_sa_handler; 6198 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 6199 act.sa_flags = old_act->sa_flags; 6200 unlock_user_struct(old_act, arg2, 0); 6201 pact = &act; 6202 } else { 6203 pact = NULL; 6204 } 6205 6206 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6207 6208 if (!is_error(ret) && arg3) { 6209 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6210 goto efault; 6211 old_act->_sa_handler = oact._sa_handler; 6212 old_act->sa_flags = oact.sa_flags; 6213 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 6214 old_act->sa_mask.sig[1] = 0; 6215 old_act->sa_mask.sig[2] = 0; 6216 old_act->sa_mask.sig[3] = 0; 6217 unlock_user_struct(old_act, arg3, 1); 6218 } 6219 #else 6220 struct target_old_sigaction *old_act; 6221 struct target_sigaction act, oact, *pact; 6222 if (arg2) { 6223 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6224 goto efault; 6225 act._sa_handler = old_act->_sa_handler; 6226 
target_siginitset(&act.sa_mask, old_act->sa_mask); 6227 act.sa_flags = old_act->sa_flags; 6228 act.sa_restorer = old_act->sa_restorer; 6229 unlock_user_struct(old_act, arg2, 0); 6230 pact = &act; 6231 } else { 6232 pact = NULL; 6233 } 6234 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6235 if (!is_error(ret) && arg3) { 6236 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6237 goto efault; 6238 old_act->_sa_handler = oact._sa_handler; 6239 old_act->sa_mask = oact.sa_mask.sig[0]; 6240 old_act->sa_flags = oact.sa_flags; 6241 old_act->sa_restorer = oact.sa_restorer; 6242 unlock_user_struct(old_act, arg3, 1); 6243 } 6244 #endif 6245 } 6246 break; 6247 #endif 6248 case TARGET_NR_rt_sigaction: 6249 { 6250 #if defined(TARGET_ALPHA) 6251 struct target_sigaction act, oact, *pact = 0; 6252 struct target_rt_sigaction *rt_act; 6253 /* ??? arg4 == sizeof(sigset_t). */ 6254 if (arg2) { 6255 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 6256 goto efault; 6257 act._sa_handler = rt_act->_sa_handler; 6258 act.sa_mask = rt_act->sa_mask; 6259 act.sa_flags = rt_act->sa_flags; 6260 act.sa_restorer = arg5; 6261 unlock_user_struct(rt_act, arg2, 0); 6262 pact = &act; 6263 } 6264 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6265 if (!is_error(ret) && arg3) { 6266 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 6267 goto efault; 6268 rt_act->_sa_handler = oact._sa_handler; 6269 rt_act->sa_mask = oact.sa_mask; 6270 rt_act->sa_flags = oact.sa_flags; 6271 unlock_user_struct(rt_act, arg3, 1); 6272 } 6273 #else 6274 struct target_sigaction *act; 6275 struct target_sigaction *oact; 6276 6277 if (arg2) { 6278 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 6279 goto efault; 6280 } else 6281 act = NULL; 6282 if (arg3) { 6283 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 6284 ret = -TARGET_EFAULT; 6285 goto rt_sigaction_fail; 6286 } 6287 } else 6288 oact = NULL; 6289 ret = get_errno(do_sigaction(arg1, act, oact)); 6290 rt_sigaction_fail: 6291 if (act) 6292 
unlock_user_struct(act, arg2, 0); 6293 if (oact) 6294 unlock_user_struct(oact, arg3, 1); 6295 #endif 6296 } 6297 break; 6298 #ifdef TARGET_NR_sgetmask /* not on alpha */ 6299 case TARGET_NR_sgetmask: 6300 { 6301 sigset_t cur_set; 6302 abi_ulong target_set; 6303 do_sigprocmask(0, NULL, &cur_set); 6304 host_to_target_old_sigset(&target_set, &cur_set); 6305 ret = target_set; 6306 } 6307 break; 6308 #endif 6309 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6310 case TARGET_NR_ssetmask: 6311 { 6312 sigset_t set, oset, cur_set; 6313 abi_ulong target_set = arg1; 6314 do_sigprocmask(0, NULL, &cur_set); 6315 target_to_host_old_sigset(&set, &target_set); 6316 sigorset(&set, &set, &cur_set); 6317 do_sigprocmask(SIG_SETMASK, &set, &oset); 6318 host_to_target_old_sigset(&target_set, &oset); 6319 ret = target_set; 6320 } 6321 break; 6322 #endif 6323 #ifdef TARGET_NR_sigprocmask 6324 case TARGET_NR_sigprocmask: 6325 { 6326 #if defined(TARGET_ALPHA) 6327 sigset_t set, oldset; 6328 abi_ulong mask; 6329 int how; 6330 6331 switch (arg1) { 6332 case TARGET_SIG_BLOCK: 6333 how = SIG_BLOCK; 6334 break; 6335 case TARGET_SIG_UNBLOCK: 6336 how = SIG_UNBLOCK; 6337 break; 6338 case TARGET_SIG_SETMASK: 6339 how = SIG_SETMASK; 6340 break; 6341 default: 6342 ret = -TARGET_EINVAL; 6343 goto fail; 6344 } 6345 mask = arg2; 6346 target_to_host_old_sigset(&set, &mask); 6347 6348 ret = get_errno(do_sigprocmask(how, &set, &oldset)); 6349 if (!is_error(ret)) { 6350 host_to_target_old_sigset(&mask, &oldset); 6351 ret = mask; 6352 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6353 } 6354 #else 6355 sigset_t set, oldset, *set_ptr; 6356 int how; 6357 6358 if (arg2) { 6359 switch (arg1) { 6360 case TARGET_SIG_BLOCK: 6361 how = SIG_BLOCK; 6362 break; 6363 case TARGET_SIG_UNBLOCK: 6364 how = SIG_UNBLOCK; 6365 break; 6366 case TARGET_SIG_SETMASK: 6367 how = SIG_SETMASK; 6368 break; 6369 default: 6370 ret = -TARGET_EINVAL; 6371 goto fail; 6372 } 6373 if (!(p = lock_user(VERIFY_READ, arg2, 
sizeof(target_sigset_t), 1))) 6374 goto efault; 6375 target_to_host_old_sigset(&set, p); 6376 unlock_user(p, arg2, 0); 6377 set_ptr = &set; 6378 } else { 6379 how = 0; 6380 set_ptr = NULL; 6381 } 6382 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6383 if (!is_error(ret) && arg3) { 6384 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6385 goto efault; 6386 host_to_target_old_sigset(p, &oldset); 6387 unlock_user(p, arg3, sizeof(target_sigset_t)); 6388 } 6389 #endif 6390 } 6391 break; 6392 #endif 6393 case TARGET_NR_rt_sigprocmask: 6394 { 6395 int how = arg1; 6396 sigset_t set, oldset, *set_ptr; 6397 6398 if (arg2) { 6399 switch(how) { 6400 case TARGET_SIG_BLOCK: 6401 how = SIG_BLOCK; 6402 break; 6403 case TARGET_SIG_UNBLOCK: 6404 how = SIG_UNBLOCK; 6405 break; 6406 case TARGET_SIG_SETMASK: 6407 how = SIG_SETMASK; 6408 break; 6409 default: 6410 ret = -TARGET_EINVAL; 6411 goto fail; 6412 } 6413 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6414 goto efault; 6415 target_to_host_sigset(&set, p); 6416 unlock_user(p, arg2, 0); 6417 set_ptr = &set; 6418 } else { 6419 how = 0; 6420 set_ptr = NULL; 6421 } 6422 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6423 if (!is_error(ret) && arg3) { 6424 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6425 goto efault; 6426 host_to_target_sigset(p, &oldset); 6427 unlock_user(p, arg3, sizeof(target_sigset_t)); 6428 } 6429 } 6430 break; 6431 #ifdef TARGET_NR_sigpending 6432 case TARGET_NR_sigpending: 6433 { 6434 sigset_t set; 6435 ret = get_errno(sigpending(&set)); 6436 if (!is_error(ret)) { 6437 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6438 goto efault; 6439 host_to_target_old_sigset(p, &set); 6440 unlock_user(p, arg1, sizeof(target_sigset_t)); 6441 } 6442 } 6443 break; 6444 #endif 6445 case TARGET_NR_rt_sigpending: 6446 { 6447 sigset_t set; 6448 ret = get_errno(sigpending(&set)); 6449 if (!is_error(ret)) { 6450 if (!(p = 
lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6451 goto efault; 6452 host_to_target_sigset(p, &set); 6453 unlock_user(p, arg1, sizeof(target_sigset_t)); 6454 } 6455 } 6456 break; 6457 #ifdef TARGET_NR_sigsuspend 6458 case TARGET_NR_sigsuspend: 6459 { 6460 sigset_t set; 6461 #if defined(TARGET_ALPHA) 6462 abi_ulong mask = arg1; 6463 target_to_host_old_sigset(&set, &mask); 6464 #else 6465 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6466 goto efault; 6467 target_to_host_old_sigset(&set, p); 6468 unlock_user(p, arg1, 0); 6469 #endif 6470 ret = get_errno(sigsuspend(&set)); 6471 } 6472 break; 6473 #endif 6474 case TARGET_NR_rt_sigsuspend: 6475 { 6476 sigset_t set; 6477 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6478 goto efault; 6479 target_to_host_sigset(&set, p); 6480 unlock_user(p, arg1, 0); 6481 ret = get_errno(sigsuspend(&set)); 6482 } 6483 break; 6484 case TARGET_NR_rt_sigtimedwait: 6485 { 6486 sigset_t set; 6487 struct timespec uts, *puts; 6488 siginfo_t uinfo; 6489 6490 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6491 goto efault; 6492 target_to_host_sigset(&set, p); 6493 unlock_user(p, arg1, 0); 6494 if (arg3) { 6495 puts = &uts; 6496 target_to_host_timespec(puts, arg3); 6497 } else { 6498 puts = NULL; 6499 } 6500 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6501 if (!is_error(ret)) { 6502 if (arg2) { 6503 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 6504 0); 6505 if (!p) { 6506 goto efault; 6507 } 6508 host_to_target_siginfo(p, &uinfo); 6509 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6510 } 6511 ret = host_to_target_signal(ret); 6512 } 6513 } 6514 break; 6515 case TARGET_NR_rt_sigqueueinfo: 6516 { 6517 siginfo_t uinfo; 6518 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6519 goto efault; 6520 target_to_host_siginfo(&uinfo, p); 6521 unlock_user(p, arg1, 0); 6522 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 6523 } 
        break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding must be done */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding must be done */
        ret = do_rt_sigreturn(cpu_env);
        break;
    case TARGET_NR_sethostname:
        /* arg1 is a guest string, arg2 its length. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setrlimit:
        /* Convert the target rlimit struct and resource id to host form. */
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                goto efault;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
        }
        break;
    case TARGET_NR_getrlimit:
        /* Inverse of setrlimit: fetch host limits, write back target form. */
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    goto efault;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        break;
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* host_to_target_rusage() writes *arg2 and returns 0/-EFAULT. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        break;
    case TARGET_NR_gettimeofday:
        /* The timezone argument is not supported; only the timeval is. */
        {
            struct timeval tv;
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            /* Both pointer arguments are optional; NULL means "unchanged". */
            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    goto efault;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    goto efault;
                }
                ptz = &tz;
            }

            ret = get_errno(settimeofday(ptv, ptz));
        }
        break;
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_S390X) || defined(TARGET_ALPHA)
        /* These targets pass the five select args directly in registers. */
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#else
        /* Old-style select: a single pointer to a block of five args. */
        {
            struct target_sel_arg_struct *sel;
            abi_ulong inp, outp, exp, tvp;
            long nsel;

            if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
                goto efault;
            nsel = tswapal(sel->n);
            inp = tswapal(sel->inp);
            outp = tswapal(sel->outp);
            exp = tswapal(sel->exp);
            tvp = tswapal(sel->tvp);
            unlock_user_struct(sel, arg1, 0);
            ret = do_select(nsel, inp, outp, exp, tvp);
        }
#endif
        break;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
6645 */ 6646 sigset_t set; 6647 struct { 6648 sigset_t *set; 6649 size_t size; 6650 } sig, *sig_ptr; 6651 6652 abi_ulong arg_sigset, arg_sigsize, *arg7; 6653 target_sigset_t *target_sigset; 6654 6655 n = arg1; 6656 rfd_addr = arg2; 6657 wfd_addr = arg3; 6658 efd_addr = arg4; 6659 ts_addr = arg5; 6660 6661 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 6662 if (ret) { 6663 goto fail; 6664 } 6665 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 6666 if (ret) { 6667 goto fail; 6668 } 6669 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 6670 if (ret) { 6671 goto fail; 6672 } 6673 6674 /* 6675 * This takes a timespec, and not a timeval, so we cannot 6676 * use the do_select() helper ... 6677 */ 6678 if (ts_addr) { 6679 if (target_to_host_timespec(&ts, ts_addr)) { 6680 goto efault; 6681 } 6682 ts_ptr = &ts; 6683 } else { 6684 ts_ptr = NULL; 6685 } 6686 6687 /* Extract the two packed args for the sigset */ 6688 if (arg6) { 6689 sig_ptr = &sig; 6690 sig.size = _NSIG / 8; 6691 6692 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 6693 if (!arg7) { 6694 goto efault; 6695 } 6696 arg_sigset = tswapal(arg7[0]); 6697 arg_sigsize = tswapal(arg7[1]); 6698 unlock_user(arg7, arg6, 0); 6699 6700 if (arg_sigset) { 6701 sig.set = &set; 6702 if (arg_sigsize != sizeof(*target_sigset)) { 6703 /* Like the kernel, we enforce correct size sigsets */ 6704 ret = -TARGET_EINVAL; 6705 goto fail; 6706 } 6707 target_sigset = lock_user(VERIFY_READ, arg_sigset, 6708 sizeof(*target_sigset), 1); 6709 if (!target_sigset) { 6710 goto efault; 6711 } 6712 target_to_host_sigset(&set, target_sigset); 6713 unlock_user(target_sigset, arg_sigset, 0); 6714 } else { 6715 sig.set = NULL; 6716 } 6717 } else { 6718 sig_ptr = NULL; 6719 } 6720 6721 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 6722 ts_ptr, sig_ptr)); 6723 6724 if (!is_error(ret)) { 6725 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 6726 goto efault; 6727 if (wfd_addr && 
                    copy_to_user_fdset(wfd_addr, &wfds, n))
                    goto efault;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    goto efault;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_symlink:
        /* Two guest path strings: create symlink arg2 pointing at arg1. */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            /* unlock_user() tolerates a NULL pointer, so this is safe even
             * when one of the locks failed.
             */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        /* Like symlink, but the new path (arg3) is relative to dirfd arg2. */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
        goto unimplemented;
#endif
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Reading /proc/self/exe: report the emulated binary's
                 * path rather than that of the qemu binary itself.
                 */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string.
*/ 6791 memcpy(p2, real, ret); 6792 } 6793 } else { 6794 ret = get_errno(readlink(path(p), p2, arg3)); 6795 } 6796 unlock_user(p2, arg2, ret); 6797 unlock_user(p, arg1, 0); 6798 } 6799 break; 6800 #if defined(TARGET_NR_readlinkat) 6801 case TARGET_NR_readlinkat: 6802 { 6803 void *p2; 6804 p = lock_user_string(arg2); 6805 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 6806 if (!p || !p2) { 6807 ret = -TARGET_EFAULT; 6808 } else if (is_proc_myself((const char *)p, "exe")) { 6809 char real[PATH_MAX], *temp; 6810 temp = realpath(exec_path, real); 6811 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 6812 snprintf((char *)p2, arg4, "%s", real); 6813 } else { 6814 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 6815 } 6816 unlock_user(p2, arg3, ret); 6817 unlock_user(p, arg2, 0); 6818 } 6819 break; 6820 #endif 6821 #ifdef TARGET_NR_uselib 6822 case TARGET_NR_uselib: 6823 goto unimplemented; 6824 #endif 6825 #ifdef TARGET_NR_swapon 6826 case TARGET_NR_swapon: 6827 if (!(p = lock_user_string(arg1))) 6828 goto efault; 6829 ret = get_errno(swapon(p, arg2)); 6830 unlock_user(p, arg1, 0); 6831 break; 6832 #endif 6833 case TARGET_NR_reboot: 6834 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 6835 /* arg4 must be ignored in all other cases */ 6836 p = lock_user_string(arg4); 6837 if (!p) { 6838 goto efault; 6839 } 6840 ret = get_errno(reboot(arg1, arg2, arg3, p)); 6841 unlock_user(p, arg4, 0); 6842 } else { 6843 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 6844 } 6845 break; 6846 #ifdef TARGET_NR_readdir 6847 case TARGET_NR_readdir: 6848 goto unimplemented; 6849 #endif 6850 #ifdef TARGET_NR_mmap 6851 case TARGET_NR_mmap: 6852 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 6853 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 6854 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 6855 || defined(TARGET_S390X) 6856 { 6857 abi_ulong *v; 6858 abi_ulong v1, v2, v3, v4, v5, v6; 6859 if (!(v = lock_user(VERIFY_READ, arg1, 6 * 
                                sizeof(abi_ulong), 1)))
                goto efault;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* All other targets pass the six mmap args in registers. */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        break;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the file offset in units of 2^MMAP_SHIFT bytes. */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6 << MMAP_SHIFT));
        break;
#endif
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
        break;
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Widen the request to cover from the stack limit up, and
                 * drop the GROWSDOWN flag the host may not accept here.
                 */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.
     */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() translates the guest address to the host mapping. */
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
        break;
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
        break;
#endif
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* The flags argument is not passed through to the host call. */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.
         */
        ret = 20 - ret;
#endif
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
        goto unimplemented;
#endif
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared tail: also entered via goto from TARGET_NR_fstatfs below;
         * converts the host statfs result in `stfs` into target layout at
         * arg2.
         */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        break;
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        /* Shared tail: also entered via goto from TARGET_NR_fstatfs64 below.
         * Note the output buffer is arg3 here (arg2 is the buffer size).
         */
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        break;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket call; the real args live in memory at arg2. */
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags. */
        ret = do_accept4(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
#ifdef CONFIG_ACCEPT4
        ret = do_accept4(arg1, arg2, arg3, arg4);
#else
        goto unimplemented;
#endif
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret =
            get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Last arg: 0 = receive, 1 = send. */
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address. */
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
        break;
    /* NOTE(review): recvmmsg is only compiled when TARGET_NR_sendmmsg is
     * defined — presumably the two are always defined together; verify for
     * any target that gains only one of them.
     */
    case TARGET_NR_recvmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
        break;
#endif

    case TARGET_NR_syslog:
        /* NOTE(review): some syslog actions (e.g. the console-control ones)
         * take a NULL buffer; lock_user_string(arg2) would make those fail
         * with EFAULT — confirm whether any guest relies on them.
         */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
        break;

    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if
                    (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Write the old value back only if the guest asked for it. */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
        do_stat:
            /* Shared tail: stat/lstat jump here to convert the host struct
             * stat in `st` to the target layout at arg2.
             */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    goto efault;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        break;
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
        goto unimplemented;
#endif
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
        goto unimplemented;
#endif
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
        break;
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
        goto unimplemented;
#endif
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter do_syscall with shifted arguments. */
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                         arg6, arg7, arg8, 0);
        break;
#endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back when a child was actually reaped
                 * (ret != 0) and the guest supplied a status pointer.
                 */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        goto efault;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        break;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    goto efault;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        break;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point (i386 etc.). */
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = do_semop(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        break;
#ifdef __NR_exit_group
    /* new thread calls */
    case TARGET_NR_exit_group:
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        /* Let an attached gdb see the exit code before the process dies. */
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated.
                 */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        goto unimplemented;
    /* Kernel module syscalls: all deliberately unimplemented. */
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* No host _llseek: emulate with a plain 64-bit lseek, combining
             * the high (arg2) and low (arg3) halves of the offset.
             */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, store the resulting offset at *arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
case TARGET_NR_getdents: 7503 #ifdef __NR_getdents 7504 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7505 { 7506 struct target_dirent *target_dirp; 7507 struct linux_dirent *dirp; 7508 abi_long count = arg3; 7509 7510 dirp = malloc(count); 7511 if (!dirp) { 7512 ret = -TARGET_ENOMEM; 7513 goto fail; 7514 } 7515 7516 ret = get_errno(sys_getdents(arg1, dirp, count)); 7517 if (!is_error(ret)) { 7518 struct linux_dirent *de; 7519 struct target_dirent *tde; 7520 int len = ret; 7521 int reclen, treclen; 7522 int count1, tnamelen; 7523 7524 count1 = 0; 7525 de = dirp; 7526 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7527 goto efault; 7528 tde = target_dirp; 7529 while (len > 0) { 7530 reclen = de->d_reclen; 7531 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7532 assert(tnamelen >= 0); 7533 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7534 assert(count1 + treclen <= count); 7535 tde->d_reclen = tswap16(treclen); 7536 tde->d_ino = tswapal(de->d_ino); 7537 tde->d_off = tswapal(de->d_off); 7538 memcpy(tde->d_name, de->d_name, tnamelen); 7539 de = (struct linux_dirent *)((char *)de + reclen); 7540 len -= reclen; 7541 tde = (struct target_dirent *)((char *)tde + treclen); 7542 count1 += treclen; 7543 } 7544 ret = count1; 7545 unlock_user(target_dirp, arg2, ret); 7546 } 7547 free(dirp); 7548 } 7549 #else 7550 { 7551 struct linux_dirent *dirp; 7552 abi_long count = arg3; 7553 7554 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7555 goto efault; 7556 ret = get_errno(sys_getdents(arg1, dirp, count)); 7557 if (!is_error(ret)) { 7558 struct linux_dirent *de; 7559 int len = ret; 7560 int reclen; 7561 de = dirp; 7562 while (len > 0) { 7563 reclen = de->d_reclen; 7564 if (reclen > len) 7565 break; 7566 de->d_reclen = tswap16(reclen); 7567 tswapls(&de->d_ino); 7568 tswapls(&de->d_off); 7569 de = (struct linux_dirent *)((char *)de + reclen); 7570 len -= reclen; 7571 } 7572 } 7573 unlock_user(dirp, arg2, ret); 7574 } 7575 #endif 
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                goto efault;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    /* Snapshot all source fields before any write below. */
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2: NUL terminator plus the trailing d_type byte. */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        break;
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /* Byteswap the 64-bit dirents in place for the target. */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if
(!is_error(ret)) { 7641 struct linux_dirent64 *de; 7642 int len = ret; 7643 int reclen; 7644 de = dirp; 7645 while (len > 0) { 7646 reclen = de->d_reclen; 7647 if (reclen > len) 7648 break; 7649 de->d_reclen = tswap16(reclen); 7650 tswap64s((uint64_t *)&de->d_ino); 7651 tswap64s((uint64_t *)&de->d_off); 7652 de = (struct linux_dirent64 *)((char *)de + reclen); 7653 len -= reclen; 7654 } 7655 } 7656 unlock_user(dirp, arg2, ret); 7657 } 7658 break; 7659 #endif /* TARGET_NR_getdents64 */ 7660 #if defined(TARGET_NR__newselect) 7661 case TARGET_NR__newselect: 7662 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7663 break; 7664 #endif 7665 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 7666 # ifdef TARGET_NR_poll 7667 case TARGET_NR_poll: 7668 # endif 7669 # ifdef TARGET_NR_ppoll 7670 case TARGET_NR_ppoll: 7671 # endif 7672 { 7673 struct target_pollfd *target_pfd; 7674 unsigned int nfds = arg2; 7675 int timeout = arg3; 7676 struct pollfd *pfd; 7677 unsigned int i; 7678 7679 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); 7680 if (!target_pfd) 7681 goto efault; 7682 7683 pfd = alloca(sizeof(struct pollfd) * nfds); 7684 for(i = 0; i < nfds; i++) { 7685 pfd[i].fd = tswap32(target_pfd[i].fd); 7686 pfd[i].events = tswap16(target_pfd[i].events); 7687 } 7688 7689 # ifdef TARGET_NR_ppoll 7690 if (num == TARGET_NR_ppoll) { 7691 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 7692 target_sigset_t *target_set; 7693 sigset_t _set, *set = &_set; 7694 7695 if (arg3) { 7696 if (target_to_host_timespec(timeout_ts, arg3)) { 7697 unlock_user(target_pfd, arg1, 0); 7698 goto efault; 7699 } 7700 } else { 7701 timeout_ts = NULL; 7702 } 7703 7704 if (arg4) { 7705 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 7706 if (!target_set) { 7707 unlock_user(target_pfd, arg1, 0); 7708 goto efault; 7709 } 7710 target_to_host_sigset(set, target_set); 7711 } else { 7712 set = NULL; 7713 } 7714 7715 ret = get_errno(sys_ppoll(pfd, 
nfds, timeout_ts, set, _NSIG/8)); 7716 7717 if (!is_error(ret) && arg3) { 7718 host_to_target_timespec(arg3, timeout_ts); 7719 } 7720 if (arg4) { 7721 unlock_user(target_set, arg4, 0); 7722 } 7723 } else 7724 # endif 7725 ret = get_errno(poll(pfd, nfds, timeout)); 7726 7727 if (!is_error(ret)) { 7728 for(i = 0; i < nfds; i++) { 7729 target_pfd[i].revents = tswap16(pfd[i].revents); 7730 } 7731 } 7732 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 7733 } 7734 break; 7735 #endif 7736 case TARGET_NR_flock: 7737 /* NOTE: the flock constant seems to be the same for every 7738 Linux platform */ 7739 ret = get_errno(flock(arg1, arg2)); 7740 break; 7741 case TARGET_NR_readv: 7742 { 7743 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 7744 if (vec != NULL) { 7745 ret = get_errno(readv(arg1, vec, arg3)); 7746 unlock_iovec(vec, arg2, arg3, 1); 7747 } else { 7748 ret = -host_to_target_errno(errno); 7749 } 7750 } 7751 break; 7752 case TARGET_NR_writev: 7753 { 7754 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 7755 if (vec != NULL) { 7756 ret = get_errno(writev(arg1, vec, arg3)); 7757 unlock_iovec(vec, arg2, arg3, 0); 7758 } else { 7759 ret = -host_to_target_errno(errno); 7760 } 7761 } 7762 break; 7763 case TARGET_NR_getsid: 7764 ret = get_errno(getsid(arg1)); 7765 break; 7766 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 7767 case TARGET_NR_fdatasync: 7768 ret = get_errno(fdatasync(arg1)); 7769 break; 7770 #endif 7771 case TARGET_NR__sysctl: 7772 /* We don't implement this, but ENOTDIR is always a safe 7773 return value. */ 7774 ret = -TARGET_ENOTDIR; 7775 break; 7776 case TARGET_NR_sched_getaffinity: 7777 { 7778 unsigned int mask_size; 7779 unsigned long *mask; 7780 7781 /* 7782 * sched_getaffinity needs multiples of ulong, so need to take 7783 * care of mismatches between target ulong and host ulong sizes. 
7784 */ 7785 if (arg2 & (sizeof(abi_ulong) - 1)) { 7786 ret = -TARGET_EINVAL; 7787 break; 7788 } 7789 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7790 7791 mask = alloca(mask_size); 7792 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 7793 7794 if (!is_error(ret)) { 7795 if (ret > arg2) { 7796 /* More data returned than the caller's buffer will fit. 7797 * This only happens if sizeof(abi_long) < sizeof(long) 7798 * and the caller passed us a buffer holding an odd number 7799 * of abi_longs. If the host kernel is actually using the 7800 * extra 4 bytes then fail EINVAL; otherwise we can just 7801 * ignore them and only copy the interesting part. 7802 */ 7803 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 7804 if (numcpus > arg2 * 8) { 7805 ret = -TARGET_EINVAL; 7806 break; 7807 } 7808 ret = arg2; 7809 } 7810 7811 if (copy_to_user(arg3, mask, ret)) { 7812 goto efault; 7813 } 7814 } 7815 } 7816 break; 7817 case TARGET_NR_sched_setaffinity: 7818 { 7819 unsigned int mask_size; 7820 unsigned long *mask; 7821 7822 /* 7823 * sched_setaffinity needs multiples of ulong, so need to take 7824 * care of mismatches between target ulong and host ulong sizes. 
7825 */ 7826 if (arg2 & (sizeof(abi_ulong) - 1)) { 7827 ret = -TARGET_EINVAL; 7828 break; 7829 } 7830 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 7831 7832 mask = alloca(mask_size); 7833 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 7834 goto efault; 7835 } 7836 memcpy(mask, p, arg2); 7837 unlock_user_struct(p, arg2, 0); 7838 7839 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 7840 } 7841 break; 7842 case TARGET_NR_sched_setparam: 7843 { 7844 struct sched_param *target_schp; 7845 struct sched_param schp; 7846 7847 if (arg2 == 0) { 7848 return -TARGET_EINVAL; 7849 } 7850 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 7851 goto efault; 7852 schp.sched_priority = tswap32(target_schp->sched_priority); 7853 unlock_user_struct(target_schp, arg2, 0); 7854 ret = get_errno(sched_setparam(arg1, &schp)); 7855 } 7856 break; 7857 case TARGET_NR_sched_getparam: 7858 { 7859 struct sched_param *target_schp; 7860 struct sched_param schp; 7861 7862 if (arg2 == 0) { 7863 return -TARGET_EINVAL; 7864 } 7865 ret = get_errno(sched_getparam(arg1, &schp)); 7866 if (!is_error(ret)) { 7867 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 7868 goto efault; 7869 target_schp->sched_priority = tswap32(schp.sched_priority); 7870 unlock_user_struct(target_schp, arg2, 1); 7871 } 7872 } 7873 break; 7874 case TARGET_NR_sched_setscheduler: 7875 { 7876 struct sched_param *target_schp; 7877 struct sched_param schp; 7878 if (arg3 == 0) { 7879 return -TARGET_EINVAL; 7880 } 7881 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 7882 goto efault; 7883 schp.sched_priority = tswap32(target_schp->sched_priority); 7884 unlock_user_struct(target_schp, arg3, 0); 7885 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 7886 } 7887 break; 7888 case TARGET_NR_sched_getscheduler: 7889 ret = get_errno(sched_getscheduler(arg1)); 7890 break; 7891 case TARGET_NR_sched_yield: 7892 ret = get_errno(sched_yield()); 7893 break; 7894 case 
TARGET_NR_sched_get_priority_max: 7895 ret = get_errno(sched_get_priority_max(arg1)); 7896 break; 7897 case TARGET_NR_sched_get_priority_min: 7898 ret = get_errno(sched_get_priority_min(arg1)); 7899 break; 7900 case TARGET_NR_sched_rr_get_interval: 7901 { 7902 struct timespec ts; 7903 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 7904 if (!is_error(ret)) { 7905 ret = host_to_target_timespec(arg2, &ts); 7906 } 7907 } 7908 break; 7909 case TARGET_NR_nanosleep: 7910 { 7911 struct timespec req, rem; 7912 target_to_host_timespec(&req, arg1); 7913 ret = get_errno(nanosleep(&req, &rem)); 7914 if (is_error(ret) && arg2) { 7915 host_to_target_timespec(arg2, &rem); 7916 } 7917 } 7918 break; 7919 #ifdef TARGET_NR_query_module 7920 case TARGET_NR_query_module: 7921 goto unimplemented; 7922 #endif 7923 #ifdef TARGET_NR_nfsservctl 7924 case TARGET_NR_nfsservctl: 7925 goto unimplemented; 7926 #endif 7927 case TARGET_NR_prctl: 7928 switch (arg1) { 7929 case PR_GET_PDEATHSIG: 7930 { 7931 int deathsig; 7932 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 7933 if (!is_error(ret) && arg2 7934 && put_user_ual(deathsig, arg2)) { 7935 goto efault; 7936 } 7937 break; 7938 } 7939 #ifdef PR_GET_NAME 7940 case PR_GET_NAME: 7941 { 7942 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 7943 if (!name) { 7944 goto efault; 7945 } 7946 ret = get_errno(prctl(arg1, (unsigned long)name, 7947 arg3, arg4, arg5)); 7948 unlock_user(name, arg2, 16); 7949 break; 7950 } 7951 case PR_SET_NAME: 7952 { 7953 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 7954 if (!name) { 7955 goto efault; 7956 } 7957 ret = get_errno(prctl(arg1, (unsigned long)name, 7958 arg3, arg4, arg5)); 7959 unlock_user(name, arg2, 0); 7960 break; 7961 } 7962 #endif 7963 default: 7964 /* Most prctl options have no pointer arguments */ 7965 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 7966 break; 7967 } 7968 break; 7969 #ifdef TARGET_NR_arch_prctl 7970 case TARGET_NR_arch_prctl: 7971 #if defined(TARGET_I386) && 
!defined(TARGET_ABI32) 7972 ret = do_arch_prctl(cpu_env, arg1, arg2); 7973 break; 7974 #else 7975 goto unimplemented; 7976 #endif 7977 #endif 7978 #ifdef TARGET_NR_pread64 7979 case TARGET_NR_pread64: 7980 if (regpairs_aligned(cpu_env)) { 7981 arg4 = arg5; 7982 arg5 = arg6; 7983 } 7984 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 7985 goto efault; 7986 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 7987 unlock_user(p, arg2, ret); 7988 break; 7989 case TARGET_NR_pwrite64: 7990 if (regpairs_aligned(cpu_env)) { 7991 arg4 = arg5; 7992 arg5 = arg6; 7993 } 7994 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 7995 goto efault; 7996 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 7997 unlock_user(p, arg2, 0); 7998 break; 7999 #endif 8000 case TARGET_NR_getcwd: 8001 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 8002 goto efault; 8003 ret = get_errno(sys_getcwd1(p, arg2)); 8004 unlock_user(p, arg1, ret); 8005 break; 8006 case TARGET_NR_capget: 8007 case TARGET_NR_capset: 8008 { 8009 struct target_user_cap_header *target_header; 8010 struct target_user_cap_data *target_data = NULL; 8011 struct __user_cap_header_struct header; 8012 struct __user_cap_data_struct data[2]; 8013 struct __user_cap_data_struct *dataptr = NULL; 8014 int i, target_datalen; 8015 int data_items = 1; 8016 8017 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 8018 goto efault; 8019 } 8020 header.version = tswap32(target_header->version); 8021 header.pid = tswap32(target_header->pid); 8022 8023 if (header.version != _LINUX_CAPABILITY_VERSION) { 8024 /* Version 2 and up takes pointer to two user_data structs */ 8025 data_items = 2; 8026 } 8027 8028 target_datalen = sizeof(*target_data) * data_items; 8029 8030 if (arg2) { 8031 if (num == TARGET_NR_capget) { 8032 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 8033 } else { 8034 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 8035 } 8036 if (!target_data) { 
8037 unlock_user_struct(target_header, arg1, 0); 8038 goto efault; 8039 } 8040 8041 if (num == TARGET_NR_capset) { 8042 for (i = 0; i < data_items; i++) { 8043 data[i].effective = tswap32(target_data[i].effective); 8044 data[i].permitted = tswap32(target_data[i].permitted); 8045 data[i].inheritable = tswap32(target_data[i].inheritable); 8046 } 8047 } 8048 8049 dataptr = data; 8050 } 8051 8052 if (num == TARGET_NR_capget) { 8053 ret = get_errno(capget(&header, dataptr)); 8054 } else { 8055 ret = get_errno(capset(&header, dataptr)); 8056 } 8057 8058 /* The kernel always updates version for both capget and capset */ 8059 target_header->version = tswap32(header.version); 8060 unlock_user_struct(target_header, arg1, 1); 8061 8062 if (arg2) { 8063 if (num == TARGET_NR_capget) { 8064 for (i = 0; i < data_items; i++) { 8065 target_data[i].effective = tswap32(data[i].effective); 8066 target_data[i].permitted = tswap32(data[i].permitted); 8067 target_data[i].inheritable = tswap32(data[i].inheritable); 8068 } 8069 unlock_user(target_data, arg2, target_datalen); 8070 } else { 8071 unlock_user(target_data, arg2, 0); 8072 } 8073 } 8074 break; 8075 } 8076 case TARGET_NR_sigaltstack: 8077 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ 8078 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ 8079 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) 8080 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 8081 break; 8082 #else 8083 goto unimplemented; 8084 #endif 8085 8086 #ifdef CONFIG_SENDFILE 8087 case TARGET_NR_sendfile: 8088 { 8089 off_t *offp = NULL; 8090 off_t off; 8091 if (arg3) { 8092 ret = get_user_sal(off, arg3); 8093 if (is_error(ret)) { 8094 break; 8095 } 8096 offp = &off; 8097 } 8098 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 8099 if (!is_error(ret) && arg3) { 8100 abi_long ret2 = put_user_sal(off, arg3); 8101 if (is_error(ret2)) { 8102 ret = ret2; 8103 } 
8104 } 8105 break; 8106 } 8107 #ifdef TARGET_NR_sendfile64 8108 case TARGET_NR_sendfile64: 8109 { 8110 off_t *offp = NULL; 8111 off_t off; 8112 if (arg3) { 8113 ret = get_user_s64(off, arg3); 8114 if (is_error(ret)) { 8115 break; 8116 } 8117 offp = &off; 8118 } 8119 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 8120 if (!is_error(ret) && arg3) { 8121 abi_long ret2 = put_user_s64(off, arg3); 8122 if (is_error(ret2)) { 8123 ret = ret2; 8124 } 8125 } 8126 break; 8127 } 8128 #endif 8129 #else 8130 case TARGET_NR_sendfile: 8131 #ifdef TARGET_NR_sendfile64 8132 case TARGET_NR_sendfile64: 8133 #endif 8134 goto unimplemented; 8135 #endif 8136 8137 #ifdef TARGET_NR_getpmsg 8138 case TARGET_NR_getpmsg: 8139 goto unimplemented; 8140 #endif 8141 #ifdef TARGET_NR_putpmsg 8142 case TARGET_NR_putpmsg: 8143 goto unimplemented; 8144 #endif 8145 #ifdef TARGET_NR_vfork 8146 case TARGET_NR_vfork: 8147 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 8148 0, 0, 0, 0)); 8149 break; 8150 #endif 8151 #ifdef TARGET_NR_ugetrlimit 8152 case TARGET_NR_ugetrlimit: 8153 { 8154 struct rlimit rlim; 8155 int resource = target_to_host_resource(arg1); 8156 ret = get_errno(getrlimit(resource, &rlim)); 8157 if (!is_error(ret)) { 8158 struct target_rlimit *target_rlim; 8159 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 8160 goto efault; 8161 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 8162 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 8163 unlock_user_struct(target_rlim, arg2, 1); 8164 } 8165 break; 8166 } 8167 #endif 8168 #ifdef TARGET_NR_truncate64 8169 case TARGET_NR_truncate64: 8170 if (!(p = lock_user_string(arg1))) 8171 goto efault; 8172 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 8173 unlock_user(p, arg1, 0); 8174 break; 8175 #endif 8176 #ifdef TARGET_NR_ftruncate64 8177 case TARGET_NR_ftruncate64: 8178 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 8179 break; 8180 #endif 8181 #ifdef TARGET_NR_stat64 8182 
    case TARGET_NR_stat64:
        /* 64-bit stat family: run the host call into the function-scope
         * scratch "st", then convert to the target's stat64 layout.
         */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        /* As stat64, but does not follow a final symlink. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* Directory-fd relative stat; arg4 carries the AT_* flags. */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
    case TARGET_NR_lchown:
        /* 16-bit-uid variant: widen the guest IDs for the host call. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* Narrow the host ID back to the target's 16-bit uid space. */
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1),
low2highuid(arg2))); 8250 break; 8251 case TARGET_NR_setregid: 8252 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 8253 break; 8254 case TARGET_NR_getgroups: 8255 { 8256 int gidsetsize = arg1; 8257 target_id *target_grouplist; 8258 gid_t *grouplist; 8259 int i; 8260 8261 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8262 ret = get_errno(getgroups(gidsetsize, grouplist)); 8263 if (gidsetsize == 0) 8264 break; 8265 if (!is_error(ret)) { 8266 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 8267 if (!target_grouplist) 8268 goto efault; 8269 for(i = 0;i < ret; i++) 8270 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 8271 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 8272 } 8273 } 8274 break; 8275 case TARGET_NR_setgroups: 8276 { 8277 int gidsetsize = arg1; 8278 target_id *target_grouplist; 8279 gid_t *grouplist = NULL; 8280 int i; 8281 if (gidsetsize) { 8282 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8283 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 8284 if (!target_grouplist) { 8285 ret = -TARGET_EFAULT; 8286 goto fail; 8287 } 8288 for (i = 0; i < gidsetsize; i++) { 8289 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 8290 } 8291 unlock_user(target_grouplist, arg2, 0); 8292 } 8293 ret = get_errno(setgroups(gidsetsize, grouplist)); 8294 } 8295 break; 8296 case TARGET_NR_fchown: 8297 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 8298 break; 8299 #if defined(TARGET_NR_fchownat) 8300 case TARGET_NR_fchownat: 8301 if (!(p = lock_user_string(arg2))) 8302 goto efault; 8303 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 8304 low2highgid(arg4), arg5)); 8305 unlock_user(p, arg2, 0); 8306 break; 8307 #endif 8308 #ifdef TARGET_NR_setresuid 8309 case TARGET_NR_setresuid: 8310 ret = get_errno(setresuid(low2highuid(arg1), 8311 low2highuid(arg2), 8312 low2highuid(arg3))); 8313 break; 8314 #endif 8315 #ifdef 
TARGET_NR_getresuid 8316 case TARGET_NR_getresuid: 8317 { 8318 uid_t ruid, euid, suid; 8319 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8320 if (!is_error(ret)) { 8321 if (put_user_id(high2lowuid(ruid), arg1) 8322 || put_user_id(high2lowuid(euid), arg2) 8323 || put_user_id(high2lowuid(suid), arg3)) 8324 goto efault; 8325 } 8326 } 8327 break; 8328 #endif 8329 #ifdef TARGET_NR_getresgid 8330 case TARGET_NR_setresgid: 8331 ret = get_errno(setresgid(low2highgid(arg1), 8332 low2highgid(arg2), 8333 low2highgid(arg3))); 8334 break; 8335 #endif 8336 #ifdef TARGET_NR_getresgid 8337 case TARGET_NR_getresgid: 8338 { 8339 gid_t rgid, egid, sgid; 8340 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8341 if (!is_error(ret)) { 8342 if (put_user_id(high2lowgid(rgid), arg1) 8343 || put_user_id(high2lowgid(egid), arg2) 8344 || put_user_id(high2lowgid(sgid), arg3)) 8345 goto efault; 8346 } 8347 } 8348 break; 8349 #endif 8350 case TARGET_NR_chown: 8351 if (!(p = lock_user_string(arg1))) 8352 goto efault; 8353 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 8354 unlock_user(p, arg1, 0); 8355 break; 8356 case TARGET_NR_setuid: 8357 ret = get_errno(setuid(low2highuid(arg1))); 8358 break; 8359 case TARGET_NR_setgid: 8360 ret = get_errno(setgid(low2highgid(arg1))); 8361 break; 8362 case TARGET_NR_setfsuid: 8363 ret = get_errno(setfsuid(arg1)); 8364 break; 8365 case TARGET_NR_setfsgid: 8366 ret = get_errno(setfsgid(arg1)); 8367 break; 8368 8369 #ifdef TARGET_NR_lchown32 8370 case TARGET_NR_lchown32: 8371 if (!(p = lock_user_string(arg1))) 8372 goto efault; 8373 ret = get_errno(lchown(p, arg2, arg3)); 8374 unlock_user(p, arg1, 0); 8375 break; 8376 #endif 8377 #ifdef TARGET_NR_getuid32 8378 case TARGET_NR_getuid32: 8379 ret = get_errno(getuid()); 8380 break; 8381 #endif 8382 8383 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 8384 /* Alpha specific */ 8385 case TARGET_NR_getxuid: 8386 { 8387 uid_t euid; 8388 euid=geteuid(); 8389 ((CPUAlphaState 
*)cpu_env)->ir[IR_A4]=euid; 8390 } 8391 ret = get_errno(getuid()); 8392 break; 8393 #endif 8394 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 8395 /* Alpha specific */ 8396 case TARGET_NR_getxgid: 8397 { 8398 uid_t egid; 8399 egid=getegid(); 8400 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 8401 } 8402 ret = get_errno(getgid()); 8403 break; 8404 #endif 8405 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 8406 /* Alpha specific */ 8407 case TARGET_NR_osf_getsysinfo: 8408 ret = -TARGET_EOPNOTSUPP; 8409 switch (arg1) { 8410 case TARGET_GSI_IEEE_FP_CONTROL: 8411 { 8412 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8413 8414 /* Copied from linux ieee_fpcr_to_swcr. */ 8415 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8416 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8417 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8418 | SWCR_TRAP_ENABLE_DZE 8419 | SWCR_TRAP_ENABLE_OVF); 8420 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8421 | SWCR_TRAP_ENABLE_INE); 8422 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8423 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8424 8425 if (put_user_u64 (swcr, arg2)) 8426 goto efault; 8427 ret = 0; 8428 } 8429 break; 8430 8431 /* case GSI_IEEE_STATE_AT_SIGNAL: 8432 -- Not implemented in linux kernel. 8433 case GSI_UACPROC: 8434 -- Retrieves current unaligned access state; not much used. 8435 case GSI_PROC_TYPE: 8436 -- Retrieves implver information; surely not used. 8437 case GSI_GET_HWRPB: 8438 -- Grabs a copy of the HWRPB; surely not used. 
8439 */ 8440 } 8441 break; 8442 #endif 8443 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8444 /* Alpha specific */ 8445 case TARGET_NR_osf_setsysinfo: 8446 ret = -TARGET_EOPNOTSUPP; 8447 switch (arg1) { 8448 case TARGET_SSI_IEEE_FP_CONTROL: 8449 { 8450 uint64_t swcr, fpcr, orig_fpcr; 8451 8452 if (get_user_u64 (swcr, arg2)) { 8453 goto efault; 8454 } 8455 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8456 fpcr = orig_fpcr & FPCR_DYN_MASK; 8457 8458 /* Copied from linux ieee_swcr_to_fpcr. */ 8459 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8460 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8461 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8462 | SWCR_TRAP_ENABLE_DZE 8463 | SWCR_TRAP_ENABLE_OVF)) << 48; 8464 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8465 | SWCR_TRAP_ENABLE_INE)) << 57; 8466 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8467 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8468 8469 cpu_alpha_store_fpcr(cpu_env, fpcr); 8470 ret = 0; 8471 } 8472 break; 8473 8474 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8475 { 8476 uint64_t exc, fpcr, orig_fpcr; 8477 int si_code; 8478 8479 if (get_user_u64(exc, arg2)) { 8480 goto efault; 8481 } 8482 8483 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8484 8485 /* We only add to the exception status here. */ 8486 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8487 8488 cpu_alpha_store_fpcr(cpu_env, fpcr); 8489 ret = 0; 8490 8491 /* Old exceptions are not signaled. */ 8492 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8493 8494 /* If any exceptions set by this call, 8495 and are unmasked, send a signal. 
*/ 8496 si_code = 0; 8497 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8498 si_code = TARGET_FPE_FLTRES; 8499 } 8500 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8501 si_code = TARGET_FPE_FLTUND; 8502 } 8503 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8504 si_code = TARGET_FPE_FLTOVF; 8505 } 8506 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8507 si_code = TARGET_FPE_FLTDIV; 8508 } 8509 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8510 si_code = TARGET_FPE_FLTINV; 8511 } 8512 if (si_code != 0) { 8513 target_siginfo_t info; 8514 info.si_signo = SIGFPE; 8515 info.si_errno = 0; 8516 info.si_code = si_code; 8517 info._sifields._sigfault._addr 8518 = ((CPUArchState *)cpu_env)->pc; 8519 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8520 } 8521 } 8522 break; 8523 8524 /* case SSI_NVPAIRS: 8525 -- Used with SSIN_UACPROC to enable unaligned accesses. 8526 case SSI_IEEE_STATE_AT_SIGNAL: 8527 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8528 -- Not implemented in linux kernel 8529 */ 8530 } 8531 break; 8532 #endif 8533 #ifdef TARGET_NR_osf_sigprocmask 8534 /* Alpha specific. 
*/ 8535 case TARGET_NR_osf_sigprocmask: 8536 { 8537 abi_ulong mask; 8538 int how; 8539 sigset_t set, oldset; 8540 8541 switch(arg1) { 8542 case TARGET_SIG_BLOCK: 8543 how = SIG_BLOCK; 8544 break; 8545 case TARGET_SIG_UNBLOCK: 8546 how = SIG_UNBLOCK; 8547 break; 8548 case TARGET_SIG_SETMASK: 8549 how = SIG_SETMASK; 8550 break; 8551 default: 8552 ret = -TARGET_EINVAL; 8553 goto fail; 8554 } 8555 mask = arg2; 8556 target_to_host_old_sigset(&set, &mask); 8557 do_sigprocmask(how, &set, &oldset); 8558 host_to_target_old_sigset(&mask, &oldset); 8559 ret = mask; 8560 } 8561 break; 8562 #endif 8563 8564 #ifdef TARGET_NR_getgid32 8565 case TARGET_NR_getgid32: 8566 ret = get_errno(getgid()); 8567 break; 8568 #endif 8569 #ifdef TARGET_NR_geteuid32 8570 case TARGET_NR_geteuid32: 8571 ret = get_errno(geteuid()); 8572 break; 8573 #endif 8574 #ifdef TARGET_NR_getegid32 8575 case TARGET_NR_getegid32: 8576 ret = get_errno(getegid()); 8577 break; 8578 #endif 8579 #ifdef TARGET_NR_setreuid32 8580 case TARGET_NR_setreuid32: 8581 ret = get_errno(setreuid(arg1, arg2)); 8582 break; 8583 #endif 8584 #ifdef TARGET_NR_setregid32 8585 case TARGET_NR_setregid32: 8586 ret = get_errno(setregid(arg1, arg2)); 8587 break; 8588 #endif 8589 #ifdef TARGET_NR_getgroups32 8590 case TARGET_NR_getgroups32: 8591 { 8592 int gidsetsize = arg1; 8593 uint32_t *target_grouplist; 8594 gid_t *grouplist; 8595 int i; 8596 8597 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8598 ret = get_errno(getgroups(gidsetsize, grouplist)); 8599 if (gidsetsize == 0) 8600 break; 8601 if (!is_error(ret)) { 8602 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 8603 if (!target_grouplist) { 8604 ret = -TARGET_EFAULT; 8605 goto fail; 8606 } 8607 for(i = 0;i < ret; i++) 8608 target_grouplist[i] = tswap32(grouplist[i]); 8609 unlock_user(target_grouplist, arg2, gidsetsize * 4); 8610 } 8611 } 8612 break; 8613 #endif 8614 #ifdef TARGET_NR_setgroups32 8615 case TARGET_NR_setgroups32: 8616 { 8617 int gidsetsize = arg1; 
8618 uint32_t *target_grouplist; 8619 gid_t *grouplist; 8620 int i; 8621 8622 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8623 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 8624 if (!target_grouplist) { 8625 ret = -TARGET_EFAULT; 8626 goto fail; 8627 } 8628 for(i = 0;i < gidsetsize; i++) 8629 grouplist[i] = tswap32(target_grouplist[i]); 8630 unlock_user(target_grouplist, arg2, 0); 8631 ret = get_errno(setgroups(gidsetsize, grouplist)); 8632 } 8633 break; 8634 #endif 8635 #ifdef TARGET_NR_fchown32 8636 case TARGET_NR_fchown32: 8637 ret = get_errno(fchown(arg1, arg2, arg3)); 8638 break; 8639 #endif 8640 #ifdef TARGET_NR_setresuid32 8641 case TARGET_NR_setresuid32: 8642 ret = get_errno(setresuid(arg1, arg2, arg3)); 8643 break; 8644 #endif 8645 #ifdef TARGET_NR_getresuid32 8646 case TARGET_NR_getresuid32: 8647 { 8648 uid_t ruid, euid, suid; 8649 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8650 if (!is_error(ret)) { 8651 if (put_user_u32(ruid, arg1) 8652 || put_user_u32(euid, arg2) 8653 || put_user_u32(suid, arg3)) 8654 goto efault; 8655 } 8656 } 8657 break; 8658 #endif 8659 #ifdef TARGET_NR_setresgid32 8660 case TARGET_NR_setresgid32: 8661 ret = get_errno(setresgid(arg1, arg2, arg3)); 8662 break; 8663 #endif 8664 #ifdef TARGET_NR_getresgid32 8665 case TARGET_NR_getresgid32: 8666 { 8667 gid_t rgid, egid, sgid; 8668 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8669 if (!is_error(ret)) { 8670 if (put_user_u32(rgid, arg1) 8671 || put_user_u32(egid, arg2) 8672 || put_user_u32(sgid, arg3)) 8673 goto efault; 8674 } 8675 } 8676 break; 8677 #endif 8678 #ifdef TARGET_NR_chown32 8679 case TARGET_NR_chown32: 8680 if (!(p = lock_user_string(arg1))) 8681 goto efault; 8682 ret = get_errno(chown(p, arg2, arg3)); 8683 unlock_user(p, arg1, 0); 8684 break; 8685 #endif 8686 #ifdef TARGET_NR_setuid32 8687 case TARGET_NR_setuid32: 8688 ret = get_errno(setuid(arg1)); 8689 break; 8690 #endif 8691 #ifdef TARGET_NR_setgid32 8692 case TARGET_NR_setgid32: 8693 
ret = get_errno(setgid(arg1)); 8694 break; 8695 #endif 8696 #ifdef TARGET_NR_setfsuid32 8697 case TARGET_NR_setfsuid32: 8698 ret = get_errno(setfsuid(arg1)); 8699 break; 8700 #endif 8701 #ifdef TARGET_NR_setfsgid32 8702 case TARGET_NR_setfsgid32: 8703 ret = get_errno(setfsgid(arg1)); 8704 break; 8705 #endif 8706 8707 case TARGET_NR_pivot_root: 8708 goto unimplemented; 8709 #ifdef TARGET_NR_mincore 8710 case TARGET_NR_mincore: 8711 { 8712 void *a; 8713 ret = -TARGET_EFAULT; 8714 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 8715 goto efault; 8716 if (!(p = lock_user_string(arg3))) 8717 goto mincore_fail; 8718 ret = get_errno(mincore(a, arg2, p)); 8719 unlock_user(p, arg3, ret); 8720 mincore_fail: 8721 unlock_user(a, arg1, 0); 8722 } 8723 break; 8724 #endif 8725 #ifdef TARGET_NR_arm_fadvise64_64 8726 case TARGET_NR_arm_fadvise64_64: 8727 { 8728 /* 8729 * arm_fadvise64_64 looks like fadvise64_64 but 8730 * with different argument order 8731 */ 8732 abi_long temp; 8733 temp = arg3; 8734 arg3 = arg4; 8735 arg4 = temp; 8736 } 8737 #endif 8738 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 8739 #ifdef TARGET_NR_fadvise64_64 8740 case TARGET_NR_fadvise64_64: 8741 #endif 8742 #ifdef TARGET_NR_fadvise64 8743 case TARGET_NR_fadvise64: 8744 #endif 8745 #ifdef TARGET_S390X 8746 switch (arg4) { 8747 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 8748 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 8749 case 6: arg4 = POSIX_FADV_DONTNEED; break; 8750 case 7: arg4 = POSIX_FADV_NOREUSE; break; 8751 default: break; 8752 } 8753 #endif 8754 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 8755 break; 8756 #endif 8757 #ifdef TARGET_NR_madvise 8758 case TARGET_NR_madvise: 8759 /* A straight passthrough may not be safe because qemu sometimes 8760 turns private file-backed mappings into anonymous mappings. 8761 This will break MADV_DONTNEED. 
8762 This is a hint, so ignoring and returning success is ok. */ 8763 ret = get_errno(0); 8764 break; 8765 #endif 8766 #if TARGET_ABI_BITS == 32 8767 case TARGET_NR_fcntl64: 8768 { 8769 int cmd; 8770 struct flock64 fl; 8771 struct target_flock64 *target_fl; 8772 #ifdef TARGET_ARM 8773 struct target_eabi_flock64 *target_efl; 8774 #endif 8775 8776 cmd = target_to_host_fcntl_cmd(arg2); 8777 if (cmd == -TARGET_EINVAL) { 8778 ret = cmd; 8779 break; 8780 } 8781 8782 switch(arg2) { 8783 case TARGET_F_GETLK64: 8784 #ifdef TARGET_ARM 8785 if (((CPUARMState *)cpu_env)->eabi) { 8786 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8787 goto efault; 8788 fl.l_type = tswap16(target_efl->l_type); 8789 fl.l_whence = tswap16(target_efl->l_whence); 8790 fl.l_start = tswap64(target_efl->l_start); 8791 fl.l_len = tswap64(target_efl->l_len); 8792 fl.l_pid = tswap32(target_efl->l_pid); 8793 unlock_user_struct(target_efl, arg3, 0); 8794 } else 8795 #endif 8796 { 8797 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8798 goto efault; 8799 fl.l_type = tswap16(target_fl->l_type); 8800 fl.l_whence = tswap16(target_fl->l_whence); 8801 fl.l_start = tswap64(target_fl->l_start); 8802 fl.l_len = tswap64(target_fl->l_len); 8803 fl.l_pid = tswap32(target_fl->l_pid); 8804 unlock_user_struct(target_fl, arg3, 0); 8805 } 8806 ret = get_errno(fcntl(arg1, cmd, &fl)); 8807 if (ret == 0) { 8808 #ifdef TARGET_ARM 8809 if (((CPUARMState *)cpu_env)->eabi) { 8810 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 8811 goto efault; 8812 target_efl->l_type = tswap16(fl.l_type); 8813 target_efl->l_whence = tswap16(fl.l_whence); 8814 target_efl->l_start = tswap64(fl.l_start); 8815 target_efl->l_len = tswap64(fl.l_len); 8816 target_efl->l_pid = tswap32(fl.l_pid); 8817 unlock_user_struct(target_efl, arg3, 1); 8818 } else 8819 #endif 8820 { 8821 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 8822 goto efault; 8823 target_fl->l_type = tswap16(fl.l_type); 8824 target_fl->l_whence = 
tswap16(fl.l_whence); 8825 target_fl->l_start = tswap64(fl.l_start); 8826 target_fl->l_len = tswap64(fl.l_len); 8827 target_fl->l_pid = tswap32(fl.l_pid); 8828 unlock_user_struct(target_fl, arg3, 1); 8829 } 8830 } 8831 break; 8832 8833 case TARGET_F_SETLK64: 8834 case TARGET_F_SETLKW64: 8835 #ifdef TARGET_ARM 8836 if (((CPUARMState *)cpu_env)->eabi) { 8837 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 8838 goto efault; 8839 fl.l_type = tswap16(target_efl->l_type); 8840 fl.l_whence = tswap16(target_efl->l_whence); 8841 fl.l_start = tswap64(target_efl->l_start); 8842 fl.l_len = tswap64(target_efl->l_len); 8843 fl.l_pid = tswap32(target_efl->l_pid); 8844 unlock_user_struct(target_efl, arg3, 0); 8845 } else 8846 #endif 8847 { 8848 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 8849 goto efault; 8850 fl.l_type = tswap16(target_fl->l_type); 8851 fl.l_whence = tswap16(target_fl->l_whence); 8852 fl.l_start = tswap64(target_fl->l_start); 8853 fl.l_len = tswap64(target_fl->l_len); 8854 fl.l_pid = tswap32(target_fl->l_pid); 8855 unlock_user_struct(target_fl, arg3, 0); 8856 } 8857 ret = get_errno(fcntl(arg1, cmd, &fl)); 8858 break; 8859 default: 8860 ret = do_fcntl(arg1, arg2, arg3); 8861 break; 8862 } 8863 break; 8864 } 8865 #endif 8866 #ifdef TARGET_NR_cacheflush 8867 case TARGET_NR_cacheflush: 8868 /* self-modifying code is handled automatically, so nothing needed */ 8869 ret = 0; 8870 break; 8871 #endif 8872 #ifdef TARGET_NR_security 8873 case TARGET_NR_security: 8874 goto unimplemented; 8875 #endif 8876 #ifdef TARGET_NR_getpagesize 8877 case TARGET_NR_getpagesize: 8878 ret = TARGET_PAGE_SIZE; 8879 break; 8880 #endif 8881 case TARGET_NR_gettid: 8882 ret = get_errno(gettid()); 8883 break; 8884 #ifdef TARGET_NR_readahead 8885 case TARGET_NR_readahead: 8886 #if TARGET_ABI_BITS == 32 8887 if (regpairs_aligned(cpu_env)) { 8888 arg2 = arg3; 8889 arg3 = arg4; 8890 arg4 = arg5; 8891 } 8892 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 
8893 #else 8894 ret = get_errno(readahead(arg1, arg2, arg3)); 8895 #endif 8896 break; 8897 #endif 8898 #ifdef CONFIG_ATTR 8899 #ifdef TARGET_NR_setxattr 8900 case TARGET_NR_listxattr: 8901 case TARGET_NR_llistxattr: 8902 { 8903 void *p, *b = 0; 8904 if (arg2) { 8905 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8906 if (!b) { 8907 ret = -TARGET_EFAULT; 8908 break; 8909 } 8910 } 8911 p = lock_user_string(arg1); 8912 if (p) { 8913 if (num == TARGET_NR_listxattr) { 8914 ret = get_errno(listxattr(p, b, arg3)); 8915 } else { 8916 ret = get_errno(llistxattr(p, b, arg3)); 8917 } 8918 } else { 8919 ret = -TARGET_EFAULT; 8920 } 8921 unlock_user(p, arg1, 0); 8922 unlock_user(b, arg2, arg3); 8923 break; 8924 } 8925 case TARGET_NR_flistxattr: 8926 { 8927 void *b = 0; 8928 if (arg2) { 8929 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 8930 if (!b) { 8931 ret = -TARGET_EFAULT; 8932 break; 8933 } 8934 } 8935 ret = get_errno(flistxattr(arg1, b, arg3)); 8936 unlock_user(b, arg2, arg3); 8937 break; 8938 } 8939 case TARGET_NR_setxattr: 8940 case TARGET_NR_lsetxattr: 8941 { 8942 void *p, *n, *v = 0; 8943 if (arg3) { 8944 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8945 if (!v) { 8946 ret = -TARGET_EFAULT; 8947 break; 8948 } 8949 } 8950 p = lock_user_string(arg1); 8951 n = lock_user_string(arg2); 8952 if (p && n) { 8953 if (num == TARGET_NR_setxattr) { 8954 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 8955 } else { 8956 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 8957 } 8958 } else { 8959 ret = -TARGET_EFAULT; 8960 } 8961 unlock_user(p, arg1, 0); 8962 unlock_user(n, arg2, 0); 8963 unlock_user(v, arg3, 0); 8964 } 8965 break; 8966 case TARGET_NR_fsetxattr: 8967 { 8968 void *n, *v = 0; 8969 if (arg3) { 8970 v = lock_user(VERIFY_READ, arg3, arg4, 1); 8971 if (!v) { 8972 ret = -TARGET_EFAULT; 8973 break; 8974 } 8975 } 8976 n = lock_user_string(arg2); 8977 if (n) { 8978 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 8979 } else { 8980 ret = -TARGET_EFAULT; 8981 } 8982 unlock_user(n, 
arg2, 0); 8983 unlock_user(v, arg3, 0); 8984 } 8985 break; 8986 case TARGET_NR_getxattr: 8987 case TARGET_NR_lgetxattr: 8988 { 8989 void *p, *n, *v = 0; 8990 if (arg3) { 8991 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 8992 if (!v) { 8993 ret = -TARGET_EFAULT; 8994 break; 8995 } 8996 } 8997 p = lock_user_string(arg1); 8998 n = lock_user_string(arg2); 8999 if (p && n) { 9000 if (num == TARGET_NR_getxattr) { 9001 ret = get_errno(getxattr(p, n, v, arg4)); 9002 } else { 9003 ret = get_errno(lgetxattr(p, n, v, arg4)); 9004 } 9005 } else { 9006 ret = -TARGET_EFAULT; 9007 } 9008 unlock_user(p, arg1, 0); 9009 unlock_user(n, arg2, 0); 9010 unlock_user(v, arg3, arg4); 9011 } 9012 break; 9013 case TARGET_NR_fgetxattr: 9014 { 9015 void *n, *v = 0; 9016 if (arg3) { 9017 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9018 if (!v) { 9019 ret = -TARGET_EFAULT; 9020 break; 9021 } 9022 } 9023 n = lock_user_string(arg2); 9024 if (n) { 9025 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 9026 } else { 9027 ret = -TARGET_EFAULT; 9028 } 9029 unlock_user(n, arg2, 0); 9030 unlock_user(v, arg3, arg4); 9031 } 9032 break; 9033 case TARGET_NR_removexattr: 9034 case TARGET_NR_lremovexattr: 9035 { 9036 void *p, *n; 9037 p = lock_user_string(arg1); 9038 n = lock_user_string(arg2); 9039 if (p && n) { 9040 if (num == TARGET_NR_removexattr) { 9041 ret = get_errno(removexattr(p, n)); 9042 } else { 9043 ret = get_errno(lremovexattr(p, n)); 9044 } 9045 } else { 9046 ret = -TARGET_EFAULT; 9047 } 9048 unlock_user(p, arg1, 0); 9049 unlock_user(n, arg2, 0); 9050 } 9051 break; 9052 case TARGET_NR_fremovexattr: 9053 { 9054 void *n; 9055 n = lock_user_string(arg2); 9056 if (n) { 9057 ret = get_errno(fremovexattr(arg1, n)); 9058 } else { 9059 ret = -TARGET_EFAULT; 9060 } 9061 unlock_user(n, arg2, 0); 9062 } 9063 break; 9064 #endif 9065 #endif /* CONFIG_ATTR */ 9066 #ifdef TARGET_NR_set_thread_area 9067 case TARGET_NR_set_thread_area: 9068 #if defined(TARGET_MIPS) 9069 ((CPUMIPSState *) 
cpu_env)->active_tc.CP0_UserLocal = arg1; 9070 ret = 0; 9071 break; 9072 #elif defined(TARGET_CRIS) 9073 if (arg1 & 0xff) 9074 ret = -TARGET_EINVAL; 9075 else { 9076 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 9077 ret = 0; 9078 } 9079 break; 9080 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 9081 ret = do_set_thread_area(cpu_env, arg1); 9082 break; 9083 #elif defined(TARGET_M68K) 9084 { 9085 TaskState *ts = cpu->opaque; 9086 ts->tp_value = arg1; 9087 ret = 0; 9088 break; 9089 } 9090 #else 9091 goto unimplemented_nowarn; 9092 #endif 9093 #endif 9094 #ifdef TARGET_NR_get_thread_area 9095 case TARGET_NR_get_thread_area: 9096 #if defined(TARGET_I386) && defined(TARGET_ABI32) 9097 ret = do_get_thread_area(cpu_env, arg1); 9098 break; 9099 #elif defined(TARGET_M68K) 9100 { 9101 TaskState *ts = cpu->opaque; 9102 ret = ts->tp_value; 9103 break; 9104 } 9105 #else 9106 goto unimplemented_nowarn; 9107 #endif 9108 #endif 9109 #ifdef TARGET_NR_getdomainname 9110 case TARGET_NR_getdomainname: 9111 goto unimplemented_nowarn; 9112 #endif 9113 9114 #ifdef TARGET_NR_clock_gettime 9115 case TARGET_NR_clock_gettime: 9116 { 9117 struct timespec ts; 9118 ret = get_errno(clock_gettime(arg1, &ts)); 9119 if (!is_error(ret)) { 9120 host_to_target_timespec(arg2, &ts); 9121 } 9122 break; 9123 } 9124 #endif 9125 #ifdef TARGET_NR_clock_getres 9126 case TARGET_NR_clock_getres: 9127 { 9128 struct timespec ts; 9129 ret = get_errno(clock_getres(arg1, &ts)); 9130 if (!is_error(ret)) { 9131 host_to_target_timespec(arg2, &ts); 9132 } 9133 break; 9134 } 9135 #endif 9136 #ifdef TARGET_NR_clock_nanosleep 9137 case TARGET_NR_clock_nanosleep: 9138 { 9139 struct timespec ts; 9140 target_to_host_timespec(&ts, arg3); 9141 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL)); 9142 if (arg4) 9143 host_to_target_timespec(arg4, &ts); 9144 9145 #if defined(TARGET_PPC) 9146 /* clock_nanosleep is odd in that it returns positive errno values. 
9147 * On PPC, CR0 bit 3 should be set in such a situation. */ 9148 if (ret) { 9149 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 9150 } 9151 #endif 9152 break; 9153 } 9154 #endif 9155 9156 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 9157 case TARGET_NR_set_tid_address: 9158 ret = get_errno(set_tid_address((int *)g2h(arg1))); 9159 break; 9160 #endif 9161 9162 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 9163 case TARGET_NR_tkill: 9164 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 9165 break; 9166 #endif 9167 9168 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 9169 case TARGET_NR_tgkill: 9170 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 9171 target_to_host_signal(arg3))); 9172 break; 9173 #endif 9174 9175 #ifdef TARGET_NR_set_robust_list 9176 case TARGET_NR_set_robust_list: 9177 case TARGET_NR_get_robust_list: 9178 /* The ABI for supporting robust futexes has userspace pass 9179 * the kernel a pointer to a linked list which is updated by 9180 * userspace after the syscall; the list is walked by the kernel 9181 * when the thread exits. Since the linked list in QEMU guest 9182 * memory isn't a valid linked list for the host and we have 9183 * no way to reliably intercept the thread-death event, we can't 9184 * support these. Silently return ENOSYS so that guest userspace 9185 * falls back to a non-robust futex implementation (which should 9186 * be OK except in the corner case of the guest crashing while 9187 * holding a mutex that is shared with another process via 9188 * shared memory). 
9189 */ 9190 goto unimplemented_nowarn; 9191 #endif 9192 9193 #if defined(TARGET_NR_utimensat) 9194 case TARGET_NR_utimensat: 9195 { 9196 struct timespec *tsp, ts[2]; 9197 if (!arg3) { 9198 tsp = NULL; 9199 } else { 9200 target_to_host_timespec(ts, arg3); 9201 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 9202 tsp = ts; 9203 } 9204 if (!arg2) 9205 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 9206 else { 9207 if (!(p = lock_user_string(arg2))) { 9208 ret = -TARGET_EFAULT; 9209 goto fail; 9210 } 9211 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 9212 unlock_user(p, arg2, 0); 9213 } 9214 } 9215 break; 9216 #endif 9217 case TARGET_NR_futex: 9218 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 9219 break; 9220 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 9221 case TARGET_NR_inotify_init: 9222 ret = get_errno(sys_inotify_init()); 9223 break; 9224 #endif 9225 #ifdef CONFIG_INOTIFY1 9226 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 9227 case TARGET_NR_inotify_init1: 9228 ret = get_errno(sys_inotify_init1(arg1)); 9229 break; 9230 #endif 9231 #endif 9232 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 9233 case TARGET_NR_inotify_add_watch: 9234 p = lock_user_string(arg2); 9235 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 9236 unlock_user(p, arg2, 0); 9237 break; 9238 #endif 9239 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 9240 case TARGET_NR_inotify_rm_watch: 9241 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 9242 break; 9243 #endif 9244 9245 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 9246 case TARGET_NR_mq_open: 9247 { 9248 struct mq_attr posix_mq_attr, *attrp; 9249 9250 p = lock_user_string(arg1 - 1); 9251 if (arg4 != 0) { 9252 copy_from_user_mq_attr (&posix_mq_attr, arg4); 9253 attrp = &posix_mq_attr; 9254 } else { 9255 attrp = 0; 9256 } 9257 ret = get_errno(mq_open(p, arg2, arg3, attrp)); 
9258 unlock_user (p, arg1, 0); 9259 } 9260 break; 9261 9262 case TARGET_NR_mq_unlink: 9263 p = lock_user_string(arg1 - 1); 9264 ret = get_errno(mq_unlink(p)); 9265 unlock_user (p, arg1, 0); 9266 break; 9267 9268 case TARGET_NR_mq_timedsend: 9269 { 9270 struct timespec ts; 9271 9272 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9273 if (arg5 != 0) { 9274 target_to_host_timespec(&ts, arg5); 9275 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 9276 host_to_target_timespec(arg5, &ts); 9277 } 9278 else 9279 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 9280 unlock_user (p, arg2, arg3); 9281 } 9282 break; 9283 9284 case TARGET_NR_mq_timedreceive: 9285 { 9286 struct timespec ts; 9287 unsigned int prio; 9288 9289 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9290 if (arg5 != 0) { 9291 target_to_host_timespec(&ts, arg5); 9292 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 9293 host_to_target_timespec(arg5, &ts); 9294 } 9295 else 9296 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 9297 unlock_user (p, arg2, arg3); 9298 if (arg4 != 0) 9299 put_user_u32(prio, arg4); 9300 } 9301 break; 9302 9303 /* Not implemented for now... 
*/ 9304 /* case TARGET_NR_mq_notify: */ 9305 /* break; */ 9306 9307 case TARGET_NR_mq_getsetattr: 9308 { 9309 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 9310 ret = 0; 9311 if (arg3 != 0) { 9312 ret = mq_getattr(arg1, &posix_mq_attr_out); 9313 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 9314 } 9315 if (arg2 != 0) { 9316 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 9317 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 9318 } 9319 9320 } 9321 break; 9322 #endif 9323 9324 #ifdef CONFIG_SPLICE 9325 #ifdef TARGET_NR_tee 9326 case TARGET_NR_tee: 9327 { 9328 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 9329 } 9330 break; 9331 #endif 9332 #ifdef TARGET_NR_splice 9333 case TARGET_NR_splice: 9334 { 9335 loff_t loff_in, loff_out; 9336 loff_t *ploff_in = NULL, *ploff_out = NULL; 9337 if(arg2) { 9338 get_user_u64(loff_in, arg2); 9339 ploff_in = &loff_in; 9340 } 9341 if(arg4) { 9342 get_user_u64(loff_out, arg2); 9343 ploff_out = &loff_out; 9344 } 9345 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 9346 } 9347 break; 9348 #endif 9349 #ifdef TARGET_NR_vmsplice 9350 case TARGET_NR_vmsplice: 9351 { 9352 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 9353 if (vec != NULL) { 9354 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 9355 unlock_iovec(vec, arg2, arg3, 0); 9356 } else { 9357 ret = -host_to_target_errno(errno); 9358 } 9359 } 9360 break; 9361 #endif 9362 #endif /* CONFIG_SPLICE */ 9363 #ifdef CONFIG_EVENTFD 9364 #if defined(TARGET_NR_eventfd) 9365 case TARGET_NR_eventfd: 9366 ret = get_errno(eventfd(arg1, 0)); 9367 break; 9368 #endif 9369 #if defined(TARGET_NR_eventfd2) 9370 case TARGET_NR_eventfd2: 9371 { 9372 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 9373 if (arg2 & TARGET_O_NONBLOCK) { 9374 host_flags |= O_NONBLOCK; 9375 } 9376 if (arg2 & TARGET_O_CLOEXEC) { 9377 host_flags |= O_CLOEXEC; 9378 } 9379 ret = get_errno(eventfd(arg1, host_flags)); 9380 break; 9381 } 9382 #endif 9383 #endif 
/* CONFIG_EVENTFD */ 9384 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 9385 case TARGET_NR_fallocate: 9386 #if TARGET_ABI_BITS == 32 9387 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 9388 target_offset64(arg5, arg6))); 9389 #else 9390 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 9391 #endif 9392 break; 9393 #endif 9394 #if defined(CONFIG_SYNC_FILE_RANGE) 9395 #if defined(TARGET_NR_sync_file_range) 9396 case TARGET_NR_sync_file_range: 9397 #if TARGET_ABI_BITS == 32 9398 #if defined(TARGET_MIPS) 9399 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9400 target_offset64(arg5, arg6), arg7)); 9401 #else 9402 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 9403 target_offset64(arg4, arg5), arg6)); 9404 #endif /* !TARGET_MIPS */ 9405 #else 9406 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 9407 #endif 9408 break; 9409 #endif 9410 #if defined(TARGET_NR_sync_file_range2) 9411 case TARGET_NR_sync_file_range2: 9412 /* This is like sync_file_range but the arguments are reordered */ 9413 #if TARGET_ABI_BITS == 32 9414 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9415 target_offset64(arg5, arg6), arg2)); 9416 #else 9417 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 9418 #endif 9419 break; 9420 #endif 9421 #endif 9422 #if defined(CONFIG_EPOLL) 9423 #if defined(TARGET_NR_epoll_create) 9424 case TARGET_NR_epoll_create: 9425 ret = get_errno(epoll_create(arg1)); 9426 break; 9427 #endif 9428 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 9429 case TARGET_NR_epoll_create1: 9430 ret = get_errno(epoll_create1(arg1)); 9431 break; 9432 #endif 9433 #if defined(TARGET_NR_epoll_ctl) 9434 case TARGET_NR_epoll_ctl: 9435 { 9436 struct epoll_event ep; 9437 struct epoll_event *epp = 0; 9438 if (arg4) { 9439 struct target_epoll_event *target_ep; 9440 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 9441 goto efault; 9442 } 9443 ep.events = 
tswap32(target_ep->events); 9444 /* The epoll_data_t union is just opaque data to the kernel, 9445 * so we transfer all 64 bits across and need not worry what 9446 * actual data type it is. 9447 */ 9448 ep.data.u64 = tswap64(target_ep->data.u64); 9449 unlock_user_struct(target_ep, arg4, 0); 9450 epp = &ep; 9451 } 9452 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 9453 break; 9454 } 9455 #endif 9456 9457 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 9458 #define IMPLEMENT_EPOLL_PWAIT 9459 #endif 9460 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 9461 #if defined(TARGET_NR_epoll_wait) 9462 case TARGET_NR_epoll_wait: 9463 #endif 9464 #if defined(IMPLEMENT_EPOLL_PWAIT) 9465 case TARGET_NR_epoll_pwait: 9466 #endif 9467 { 9468 struct target_epoll_event *target_ep; 9469 struct epoll_event *ep; 9470 int epfd = arg1; 9471 int maxevents = arg3; 9472 int timeout = arg4; 9473 9474 target_ep = lock_user(VERIFY_WRITE, arg2, 9475 maxevents * sizeof(struct target_epoll_event), 1); 9476 if (!target_ep) { 9477 goto efault; 9478 } 9479 9480 ep = alloca(maxevents * sizeof(struct epoll_event)); 9481 9482 switch (num) { 9483 #if defined(IMPLEMENT_EPOLL_PWAIT) 9484 case TARGET_NR_epoll_pwait: 9485 { 9486 target_sigset_t *target_set; 9487 sigset_t _set, *set = &_set; 9488 9489 if (arg5) { 9490 target_set = lock_user(VERIFY_READ, arg5, 9491 sizeof(target_sigset_t), 1); 9492 if (!target_set) { 9493 unlock_user(target_ep, arg2, 0); 9494 goto efault; 9495 } 9496 target_to_host_sigset(set, target_set); 9497 unlock_user(target_set, arg5, 0); 9498 } else { 9499 set = NULL; 9500 } 9501 9502 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 9503 break; 9504 } 9505 #endif 9506 #if defined(TARGET_NR_epoll_wait) 9507 case TARGET_NR_epoll_wait: 9508 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 9509 break; 9510 #endif 9511 default: 9512 ret = -TARGET_ENOSYS; 9513 } 9514 if (!is_error(ret)) { 9515 int i; 9516 for (i = 0; i < 
ret; i++) { 9517 target_ep[i].events = tswap32(ep[i].events); 9518 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 9519 } 9520 } 9521 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 9522 break; 9523 } 9524 #endif 9525 #endif 9526 #ifdef TARGET_NR_prlimit64 9527 case TARGET_NR_prlimit64: 9528 { 9529 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 9530 struct target_rlimit64 *target_rnew, *target_rold; 9531 struct host_rlimit64 rnew, rold, *rnewp = 0; 9532 if (arg3) { 9533 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 9534 goto efault; 9535 } 9536 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 9537 rnew.rlim_max = tswap64(target_rnew->rlim_max); 9538 unlock_user_struct(target_rnew, arg3, 0); 9539 rnewp = &rnew; 9540 } 9541 9542 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0)); 9543 if (!is_error(ret) && arg4) { 9544 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 9545 goto efault; 9546 } 9547 target_rold->rlim_cur = tswap64(rold.rlim_cur); 9548 target_rold->rlim_max = tswap64(rold.rlim_max); 9549 unlock_user_struct(target_rold, arg4, 1); 9550 } 9551 break; 9552 } 9553 #endif 9554 #ifdef TARGET_NR_gethostname 9555 case TARGET_NR_gethostname: 9556 { 9557 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9558 if (name) { 9559 ret = get_errno(gethostname(name, arg2)); 9560 unlock_user(name, arg1, arg2); 9561 } else { 9562 ret = -TARGET_EFAULT; 9563 } 9564 break; 9565 } 9566 #endif 9567 #ifdef TARGET_NR_atomic_cmpxchg_32 9568 case TARGET_NR_atomic_cmpxchg_32: 9569 { 9570 /* should use start_exclusive from main.c */ 9571 abi_ulong mem_value; 9572 if (get_user_u32(mem_value, arg6)) { 9573 target_siginfo_t info; 9574 info.si_signo = SIGSEGV; 9575 info.si_errno = 0; 9576 info.si_code = TARGET_SEGV_MAPERR; 9577 info._sifields._sigfault._addr = arg6; 9578 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 9579 ret = 0xdeadbeef; 9580 9581 } 9582 if (mem_value == arg2) 9583 
put_user_u32(arg1, arg6); 9584 ret = mem_value; 9585 break; 9586 } 9587 #endif 9588 #ifdef TARGET_NR_atomic_barrier 9589 case TARGET_NR_atomic_barrier: 9590 { 9591 /* Like the kernel implementation and the qemu arm barrier, no-op this? */ 9592 ret = 0; 9593 break; 9594 } 9595 #endif 9596 9597 #ifdef TARGET_NR_timer_create 9598 case TARGET_NR_timer_create: 9599 { 9600 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 9601 9602 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 9603 9604 int clkid = arg1; 9605 int timer_index = next_free_host_timer(); 9606 9607 if (timer_index < 0) { 9608 ret = -TARGET_EAGAIN; 9609 } else { 9610 timer_t *phtimer = g_posix_timers + timer_index; 9611 9612 if (arg2) { 9613 phost_sevp = &host_sevp; 9614 ret = target_to_host_sigevent(phost_sevp, arg2); 9615 if (ret != 0) { 9616 break; 9617 } 9618 } 9619 9620 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 9621 if (ret) { 9622 phtimer = NULL; 9623 } else { 9624 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 9625 goto efault; 9626 } 9627 } 9628 } 9629 break; 9630 } 9631 #endif 9632 9633 #ifdef TARGET_NR_timer_settime 9634 case TARGET_NR_timer_settime: 9635 { 9636 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 9637 * struct itimerspec * old_value */ 9638 target_timer_t timerid = get_timer_id(arg1); 9639 9640 if (timerid < 0) { 9641 ret = timerid; 9642 } else if (arg3 == 0) { 9643 ret = -TARGET_EINVAL; 9644 } else { 9645 timer_t htimer = g_posix_timers[timerid]; 9646 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 9647 9648 target_to_host_itimerspec(&hspec_new, arg3); 9649 ret = get_errno( 9650 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 9651 host_to_target_itimerspec(arg2, &hspec_old); 9652 } 9653 break; 9654 } 9655 #endif 9656 9657 #ifdef TARGET_NR_timer_gettime 9658 case TARGET_NR_timer_gettime: 9659 { 9660 /* args: timer_t timerid, struct itimerspec *curr_value */ 9661 target_timer_t 
timerid = get_timer_id(arg1); 9662 9663 if (timerid < 0) { 9664 ret = timerid; 9665 } else if (!arg2) { 9666 ret = -TARGET_EFAULT; 9667 } else { 9668 timer_t htimer = g_posix_timers[timerid]; 9669 struct itimerspec hspec; 9670 ret = get_errno(timer_gettime(htimer, &hspec)); 9671 9672 if (host_to_target_itimerspec(arg2, &hspec)) { 9673 ret = -TARGET_EFAULT; 9674 } 9675 } 9676 break; 9677 } 9678 #endif 9679 9680 #ifdef TARGET_NR_timer_getoverrun 9681 case TARGET_NR_timer_getoverrun: 9682 { 9683 /* args: timer_t timerid */ 9684 target_timer_t timerid = get_timer_id(arg1); 9685 9686 if (timerid < 0) { 9687 ret = timerid; 9688 } else { 9689 timer_t htimer = g_posix_timers[timerid]; 9690 ret = get_errno(timer_getoverrun(htimer)); 9691 } 9692 break; 9693 } 9694 #endif 9695 9696 #ifdef TARGET_NR_timer_delete 9697 case TARGET_NR_timer_delete: 9698 { 9699 /* args: timer_t timerid */ 9700 target_timer_t timerid = get_timer_id(arg1); 9701 9702 if (timerid < 0) { 9703 ret = timerid; 9704 } else { 9705 timer_t htimer = g_posix_timers[timerid]; 9706 ret = get_errno(timer_delete(htimer)); 9707 g_posix_timers[timerid] = 0; 9708 } 9709 break; 9710 } 9711 #endif 9712 9713 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 9714 case TARGET_NR_timerfd_create: 9715 ret = get_errno(timerfd_create(arg1, 9716 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 9717 break; 9718 #endif 9719 9720 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 9721 case TARGET_NR_timerfd_gettime: 9722 { 9723 struct itimerspec its_curr; 9724 9725 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 9726 9727 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 9728 goto efault; 9729 } 9730 } 9731 break; 9732 #endif 9733 9734 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 9735 case TARGET_NR_timerfd_settime: 9736 { 9737 struct itimerspec its_new, its_old, *p_new; 9738 9739 if (arg3) { 9740 if (target_to_host_itimerspec(&its_new, arg3)) { 9741 goto efault; 9742 
} 9743 p_new = &its_new; 9744 } else { 9745 p_new = NULL; 9746 } 9747 9748 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 9749 9750 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 9751 goto efault; 9752 } 9753 } 9754 break; 9755 #endif 9756 9757 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 9758 case TARGET_NR_ioprio_get: 9759 ret = get_errno(ioprio_get(arg1, arg2)); 9760 break; 9761 #endif 9762 9763 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 9764 case TARGET_NR_ioprio_set: 9765 ret = get_errno(ioprio_set(arg1, arg2, arg3)); 9766 break; 9767 #endif 9768 9769 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 9770 case TARGET_NR_setns: 9771 ret = get_errno(setns(arg1, arg2)); 9772 break; 9773 #endif 9774 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 9775 case TARGET_NR_unshare: 9776 ret = get_errno(unshare(arg1)); 9777 break; 9778 #endif 9779 9780 default: 9781 unimplemented: 9782 gemu_log("qemu: Unsupported syscall: %d\n", num); 9783 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) 9784 unimplemented_nowarn: 9785 #endif 9786 ret = -TARGET_ENOSYS; 9787 break; 9788 } 9789 fail: 9790 #ifdef DEBUG 9791 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); 9792 #endif 9793 if(do_strace) 9794 print_syscall_ret(num, ret); 9795 return ret; 9796 efault: 9797 ret = -TARGET_EFAULT; 9798 goto fail; 9799 } 9800