xref: /qemu/linux-user/syscall.c (revision b74c89815841abd80cca9d2bba13b19afb62d1ca)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include <elf.h>
30 #include <endian.h>
31 #include <grp.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/mount.h>
36 #include <sys/file.h>
37 #include <sys/fsuid.h>
38 #include <sys/personality.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
41 #include <sys/swap.h>
42 #include <linux/capability.h>
43 #include <sched.h>
44 #include <sys/timex.h>
45 #include <sys/socket.h>
46 #include <linux/sockios.h>
47 #include <sys/un.h>
48 #include <sys/uio.h>
49 #include <poll.h>
50 #include <sys/times.h>
51 #include <sys/shm.h>
52 #include <sys/sem.h>
53 #include <sys/statfs.h>
54 #include <utime.h>
55 #include <sys/sysinfo.h>
56 #include <sys/signalfd.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61 #include <linux/wireless.h>
62 #include <linux/icmp.h>
63 #include <linux/icmpv6.h>
64 #include <linux/if_tun.h>
65 #include <linux/in6.h>
66 #include <linux/errqueue.h>
67 #include <linux/random.h>
68 #ifdef CONFIG_TIMERFD
69 #include <sys/timerfd.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
83 #ifdef HAVE_SYS_KCOV_H
84 #include <sys/kcov.h>
85 #endif
86 
87 #define termios host_termios
88 #define winsize host_winsize
89 #define termio host_termio
90 #define sgttyb host_sgttyb /* same as target */
91 #define tchars host_tchars /* same as target */
92 #define ltchars host_ltchars /* same as target */
93 
94 #include <linux/termios.h>
95 #include <linux/unistd.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #include <linux/fd.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #if defined(CONFIG_USBFS)
108 #include <linux/usbdevice_fs.h>
109 #include <linux/usb/ch9.h>
110 #endif
111 #include <linux/vt.h>
112 #include <linux/dm-ioctl.h>
113 #include <linux/reboot.h>
114 #include <linux/route.h>
115 #include <linux/filter.h>
116 #include <linux/blkpg.h>
117 #include <netpacket/packet.h>
118 #include <linux/netlink.h>
119 #include <linux/if_alg.h>
120 #include <linux/rtc.h>
121 #include <sound/asound.h>
122 #ifdef HAVE_BTRFS_H
123 #include <linux/btrfs.h>
124 #endif
125 #ifdef HAVE_DRM_H
126 #include <libdrm/drm.h>
127 #include <libdrm/i915_drm.h>
128 #endif
129 #include "linux_loop.h"
130 #include "uname.h"
131 
132 #include "qemu.h"
133 #include "user-internals.h"
134 #include "strace.h"
135 #include "signal-common.h"
136 #include "loader.h"
137 #include "user-mmap.h"
138 #include "user/page-protection.h"
139 #include "user/safe-syscall.h"
140 #include "qemu/guest-random.h"
141 #include "qemu/selfmap.h"
142 #include "user/syscall-trace.h"
143 #include "special-errno.h"
144 #include "qapi/error.h"
145 #include "fd-trans.h"
146 #include "user/cpu_loop.h"
147 
148 #ifndef CLONE_IO
149 #define CLONE_IO                0x80000000      /* Clone io context */
150 #endif
151 
152 /* We can't directly call the host clone syscall, because this will
153  * badly confuse libc (breaking mutexes, for example). So we must
154  * divide clone flags into:
155  *  * flag combinations that look like pthread_create()
156  *  * flag combinations that look like fork()
157  *  * flags we can implement within QEMU itself
158  *  * flags we can't support and will return an error for
159  */
160 /* For thread creation, all these flags must be present; for
161  * fork, none must be present.
162  */
163 #define CLONE_THREAD_FLAGS                              \
164     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
165      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
166 
167 /* These flags are ignored:
168  * CLONE_DETACHED is now ignored by the kernel;
169  * CLONE_IO is just an optimisation hint to the I/O scheduler
170  */
171 #define CLONE_IGNORED_FLAGS                     \
172     (CLONE_DETACHED | CLONE_IO)
173 
174 #ifndef CLONE_PIDFD
175 # define CLONE_PIDFD 0x00001000
176 #endif
177 
178 /* Flags for fork which we can implement within QEMU itself */
179 #define CLONE_OPTIONAL_FORK_FLAGS               \
180     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
181      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
182 
183 /* Flags for thread creation which we can implement within QEMU itself */
184 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
185     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
186      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
187 
188 #define CLONE_INVALID_FORK_FLAGS                                        \
189     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
190 
191 #define CLONE_INVALID_THREAD_FLAGS                                      \
192     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
193        CLONE_IGNORED_FLAGS))
194 
195 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
196  * have almost all been allocated. We cannot support any of
197  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
198  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
199  * The checks against the invalid thread masks above will catch these.
200  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
201  */
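
/*
 * Illustrative sketch (not part of the original code, never compiled):
 * how a guest clone() flags value can be classified with the masks
 * above.  The helper name and return codes are hypothetical; do_fork()
 * performs the equivalent checks inline.
 */
#if 0
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* looks like pthread_create(): reject anything we can't honour */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -1 : 1;
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* looks like fork(): only CSIGNAL, optional and ignored bits */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -1 : 0;
    }
    /* a partial set of the thread-creation flags is unsupported */
    return -1;
}
#endif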
202 
203 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
204  * once. This exercises the codepaths for restart.
205  */
206 //#define DEBUG_ERESTARTSYS
207 
208 //#include <linux/msdos_fs.h>
209 #define VFAT_IOCTL_READDIR_BOTH \
210     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
211 #define VFAT_IOCTL_READDIR_SHORT \
212     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
213 
214 #undef _syscall0
215 #undef _syscall1
216 #undef _syscall2
217 #undef _syscall3
218 #undef _syscall4
219 #undef _syscall5
220 #undef _syscall6
221 
222 #define _syscall0(type,name)		\
223 static type name (void)			\
224 {					\
225 	return syscall(__NR_##name);	\
226 }
227 
228 #define _syscall1(type,name,type1,arg1)		\
229 static type name (type1 arg1)			\
230 {						\
231 	return syscall(__NR_##name, arg1);	\
232 }
233 
234 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
235 static type name (type1 arg1,type2 arg2)		\
236 {							\
237 	return syscall(__NR_##name, arg1, arg2);	\
238 }
239 
240 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
241 static type name (type1 arg1,type2 arg2,type3 arg3)		\
242 {								\
243 	return syscall(__NR_##name, arg1, arg2, arg3);		\
244 }
245 
246 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
250 }
251 
252 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5)							\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
255 {										\
256 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
257 }
258 
259 
260 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
261 		  type5,arg5,type6,arg6)					\
262 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
263                   type6 arg6)							\
264 {										\
265 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
266 }
267 
268 
269 #define __NR_sys_uname __NR_uname
270 #define __NR_sys_getcwd1 __NR_getcwd
271 #define __NR_sys_getdents __NR_getdents
272 #define __NR_sys_getdents64 __NR_getdents64
273 #define __NR_sys_getpriority __NR_getpriority
274 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
275 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
276 #define __NR_sys_syslog __NR_syslog
277 #if defined(__NR_futex)
278 # define __NR_sys_futex __NR_futex
279 #endif
280 #if defined(__NR_futex_time64)
281 # define __NR_sys_futex_time64 __NR_futex_time64
282 #endif
283 #define __NR_sys_statx __NR_statx
284 
285 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
286 #define __NR__llseek __NR_lseek
287 #endif
288 
289 /* Newer kernel ports have llseek() instead of _llseek() */
290 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
291 #define TARGET_NR__llseek TARGET_NR_llseek
292 #endif
293 
294 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
295 #ifndef TARGET_O_NONBLOCK_MASK
296 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
297 #endif
298 
299 #define __NR_sys_gettid __NR_gettid
300 _syscall0(int, sys_gettid)
301 
302 /* For the 64-bit guest on 32-bit host case we must emulate
303  * getdents using getdents64, because otherwise the host
304  * might hand us back more dirent records than we can fit
305  * into the guest buffer after structure format conversion.
306  * In all other cases we emulate getdents with the host getdents if it is available.
307  */
308 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
309 #define EMULATE_GETDENTS_WITH_GETDENTS
310 #endif
311 
312 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
313 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
314 #endif
315 #if (defined(TARGET_NR_getdents) && \
316       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
317     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
318 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
319 #endif
320 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
321 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
322           loff_t *, res, unsigned int, wh);
323 #endif
324 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
325 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
326           siginfo_t *, uinfo)
327 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
328 #ifdef __NR_exit_group
329 _syscall1(int,exit_group,int,error_code)
330 #endif
331 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
332 #define __NR_sys_close_range __NR_close_range
333 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
334 #ifndef CLOSE_RANGE_CLOEXEC
335 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
336 #endif
337 #endif
338 #if defined(__NR_futex)
339 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
340           const struct timespec *,timeout,int *,uaddr2,int,val3)
341 #endif
342 #if defined(__NR_futex_time64)
343 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
347 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
348 #endif
349 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
350 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
351                              unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
354 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
355 #endif
356 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
357 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
358           unsigned long *, user_mask_ptr);
359 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
360 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
361           unsigned long *, user_mask_ptr);
362 /* sched_attr is not defined in glibc */
363 struct sched_attr {
364     uint32_t size;
365     uint32_t sched_policy;
366     uint64_t sched_flags;
367     int32_t sched_nice;
368     uint32_t sched_priority;
369     uint64_t sched_runtime;
370     uint64_t sched_deadline;
371     uint64_t sched_period;
372     uint32_t sched_util_min;
373     uint32_t sched_util_max;
374 };
375 #define __NR_sys_sched_getattr __NR_sched_getattr
376 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
377           unsigned int, size, unsigned int, flags);
378 #define __NR_sys_sched_setattr __NR_sched_setattr
379 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
380           unsigned int, flags);
381 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
382 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
383 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
384 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
385           const struct sched_param *, param);
386 #define __NR_sys_sched_getparam __NR_sched_getparam
387 _syscall2(int, sys_sched_getparam, pid_t, pid,
388           struct sched_param *, param);
389 #define __NR_sys_sched_setparam __NR_sched_setparam
390 _syscall2(int, sys_sched_setparam, pid_t, pid,
391           const struct sched_param *, param);
392 #define __NR_sys_getcpu __NR_getcpu
393 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
394 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
395           void *, arg);
396 _syscall2(int, capget, struct __user_cap_header_struct *, header,
397           struct __user_cap_data_struct *, data);
398 _syscall2(int, capset, struct __user_cap_header_struct *, header,
399           struct __user_cap_data_struct *, data);
400 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
401 _syscall2(int, ioprio_get, int, which, int, who)
402 #endif
403 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
404 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
405 #endif
406 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
407 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
408 #endif
409 
410 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
411 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
412           unsigned long, idx1, unsigned long, idx2)
413 #endif
414 
415 /*
416  * It is assumed that struct statx is architecture independent.
417  */
418 #if defined(TARGET_NR_statx) && defined(__NR_statx)
419 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
420           unsigned int, mask, struct target_statx *, statxbuf)
421 #endif
422 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
423 _syscall2(int, membarrier, int, cmd, int, flags)
424 #endif
425 
426 static const bitmask_transtbl fcntl_flags_tbl[] = {
427   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
428   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
429   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
430   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
431   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
432   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
433   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
434   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
435   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
436   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
437   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
438   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
439   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
440 #if defined(O_DIRECT)
441   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
442 #endif
443 #if defined(O_NOATIME)
444   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
445 #endif
446 #if defined(O_CLOEXEC)
447   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
448 #endif
449 #if defined(O_PATH)
450   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
451 #endif
452 #if defined(O_TMPFILE)
453   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
454 #endif
455   /* Don't terminate the list prematurely on 64-bit host+guest.  */
456 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
457   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
458 #endif
459 };
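
/*
 * Illustrative sketch (not compiled): how a table such as
 * fcntl_flags_tbl is applied.  The bitmask_transtbl field names
 * (target_mask, target_bits, host_bits) are assumed from its
 * definition elsewhere in linux-user; the real conversions go through
 * the target_to_host_bitmask()/host_to_target_bitmask() helpers.
 */
#if 0
static int example_target_to_host_open_flags(int target_flags)
{
    int host_flags = 0;

    for (size_t i = 0; i < ARRAY_SIZE(fcntl_flags_tbl); i++) {
        const bitmask_transtbl *e = &fcntl_flags_tbl[i];

        /* e.g. TARGET_O_APPEND set in the guest flags turns on O_APPEND */
        if ((target_flags & e->target_mask) == e->target_bits) {
            host_flags |= e->host_bits;
        }
    }
    return host_flags;
}
#endif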
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485                          int newfd, const char *new, int flags)
486 {
487     if (flags == 0) {
488         return renameat(oldfd, old, newfd, new);
489     }
490     errno = ENOSYS;
491     return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be the one used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
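
/*
 * Illustrative sketch (not compiled): how the slot allocator above is
 * typically used.  The helper name is hypothetical, the real
 * timer_create handler further down also passes a sigevent for guest
 * notification, and get_errno() is defined a little further down.
 */
#if 0
static abi_long example_alloc_guest_timer(void)
{
    int timerid = next_free_host_timer();
    abi_long ret;

    if (timerid < 0) {
        return -TARGET_EAGAIN;          /* all GUEST_TIMER_MAX slots in use */
    }
    ret = get_errno(timer_create(CLOCK_REALTIME, NULL,
                                 &g_posix_timers[timerid]));
    if (ret) {
        free_host_timer_slot(timerid);  /* release the reserved slot */
        return ret;
    }
    return timerid;                     /* index into g_posix_timers[] */
}
#endif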
543 #endif
544 
545 static inline int host_to_target_errno(int host_errno)
546 {
547     switch (host_errno) {
548 #define E(X)  case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551     default:
552         return host_errno;
553     }
554 }
555 
556 static inline int target_to_host_errno(int target_errno)
557 {
558     switch (target_errno) {
559 #define E(X)  case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562     default:
563         return target_errno;
564     }
565 }
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
605 /*
606  * Copies a target struct to a host struct, in a way that guarantees
607  * backwards-compatibility for struct syscall arguments.
608  *
609  * Similar to kernels uaccess.h:copy_struct_from_user()
610  */
611 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
612 {
613     size_t size = MIN(ksize, usize);
614     size_t rest = MAX(ksize, usize) - size;
615 
616     /* Deal with trailing bytes. */
617     if (usize < ksize) {
618         memset(dst + size, 0, rest);
619     } else if (usize > ksize) {
620         int ret = check_zeroed_user(src, ksize, usize);
621         if (ret <= 0) {
622             return ret ?: -TARGET_E2BIG;
623         }
624     }
625     /* Copy the interoperable parts of the struct. */
626     if (copy_from_user(dst, src, size)) {
627         return -TARGET_EFAULT;
628     }
629     return 0;
630 }
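
/*
 * Illustrative sketch (not compiled): a typical caller of
 * copy_struct_from_user() for an extensible struct such as sched_attr,
 * where the guest supplies its own idea of the structure size.  The
 * handler name is hypothetical and, unlike the real handler, this
 * skips the per-field byte swapping needed for cross-endian guests.
 */
#if 0
static abi_long example_sched_setattr(pid_t pid, abi_ulong attr_addr,
                                      unsigned int usize, unsigned int flags)
{
    struct sched_attr attr;
    abi_long ret;

    /* zero-fills a short guest struct, rejects unknown non-zero tail */
    ret = copy_struct_from_user(&attr, sizeof(attr), attr_addr, usize);
    if (ret) {
        return ret;
    }
    return get_errno(sys_sched_setattr(pid, &attr, flags));
}
#endif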
631 
632 #define safe_syscall0(type, name) \
633 static type safe_##name(void) \
634 { \
635     return safe_syscall(__NR_##name); \
636 }
637 
638 #define safe_syscall1(type, name, type1, arg1) \
639 static type safe_##name(type1 arg1) \
640 { \
641     return safe_syscall(__NR_##name, arg1); \
642 }
643 
644 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
645 static type safe_##name(type1 arg1, type2 arg2) \
646 { \
647     return safe_syscall(__NR_##name, arg1, arg2); \
648 }
649 
650 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
651 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
654 }
655 
656 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
657     type4, arg4) \
658 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
659 { \
660     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
661 }
662 
663 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
664     type4, arg4, type5, arg5) \
665 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
666     type5 arg5) \
667 { \
668     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
669 }
670 
671 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
672     type4, arg4, type5, arg5, type6, arg6) \
673 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
674     type5 arg5, type6 arg6) \
675 { \
676     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
677 }
678 
679 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
680 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
681 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
682               int, flags, mode_t, mode)
683 
684 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
685               const struct open_how_ver0 *, how, size_t, size)
686 
687 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
688 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
689               struct rusage *, rusage)
690 #endif
691 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
692               int, options, struct rusage *, rusage)
693 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
694 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
695               char **, argv, char **, envp, int, flags)
696 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
697     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 #endif
701 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
702 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
703               struct timespec *, tsp, const sigset_t *, sigmask,
704               size_t, sigsetsize)
705 #endif
706 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
707               int, maxevents, int, timeout, const sigset_t *, sigmask,
708               size_t, sigsetsize)
709 #if defined(__NR_futex)
710 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
711               const struct timespec *,timeout,int *,uaddr2,int,val3)
712 #endif
713 #if defined(__NR_futex_time64)
714 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
715               const struct timespec *,timeout,int *,uaddr2,int,val3)
716 #endif
717 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
718 safe_syscall2(int, kill, pid_t, pid, int, sig)
719 safe_syscall2(int, tkill, int, tid, int, sig)
720 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
721 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
722 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
723 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
724               unsigned long, pos_l, unsigned long, pos_h)
725 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
726               unsigned long, pos_l, unsigned long, pos_h)
727 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
728               socklen_t, addrlen)
729 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
730               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
731 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
732               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
733 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
734 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
735 safe_syscall2(int, flock, int, fd, int, operation)
736 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
737 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
738               const struct timespec *, uts, size_t, sigsetsize)
739 #endif
740 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
741               int, flags)
742 #if defined(TARGET_NR_nanosleep)
743 safe_syscall2(int, nanosleep, const struct timespec *, req,
744               struct timespec *, rem)
745 #endif
746 #if defined(TARGET_NR_clock_nanosleep) || \
747     defined(TARGET_NR_clock_nanosleep_time64)
748 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
749               const struct timespec *, req, struct timespec *, rem)
750 #endif
751 #ifdef __NR_ipc
752 #ifdef __s390x__
753 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
754               void *, ptr)
755 #else
756 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
757               void *, ptr, long, fifth)
758 #endif
759 #endif
760 #ifdef __NR_msgsnd
761 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
762               int, flags)
763 #endif
764 #ifdef __NR_msgrcv
765 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
766               long, msgtype, int, flags)
767 #endif
768 #ifdef __NR_semtimedop
769 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
770               unsigned, nsops, const struct timespec *, timeout)
771 #endif
772 #if defined(TARGET_NR_mq_timedsend) || \
773     defined(TARGET_NR_mq_timedsend_time64)
774 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
775               size_t, len, unsigned, prio, const struct timespec *, timeout)
776 #endif
777 #if defined(TARGET_NR_mq_timedreceive) || \
778     defined(TARGET_NR_mq_timedreceive_time64)
779 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
780               size_t, len, unsigned *, prio, const struct timespec *, timeout)
781 #endif
782 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
783 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
784               int, outfd, loff_t *, poutoff, size_t, length,
785               unsigned int, flags)
786 #endif
787 
788 /* We do ioctl like this rather than via safe_syscall3 to preserve the
789  * "third argument might be integer or pointer or not present" behaviour of
790  * the libc function.
791  */
792 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
793 /* Similarly for fcntl. Since we always build with LFS enabled,
794  * we should be using the 64-bit structures automatically.
795  */
796 #ifdef __NR_fcntl64
797 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
798 #else
799 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
800 #endif
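
/*
 * Illustrative example (not compiled): how the safe_*() wrappers are
 * combined with get_errno() in the handlers below.  The helper and its
 * arguments are hypothetical.
 */
#if 0
static abi_long example_get_fd_flags(int fd)
{
    /* safe_fcntl() lets pending guest signals interrupt the call so it
     * can be restarted safely; get_errno() converts a failing return
     * into a negative TARGET_* errno for the guest. */
    return get_errno(safe_fcntl(fd, F_GETFL));
}
#endif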
801 
802 static inline int host_to_target_sock_type(int host_type)
803 {
804     int target_type;
805 
806     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
807     case SOCK_DGRAM:
808         target_type = TARGET_SOCK_DGRAM;
809         break;
810     case SOCK_STREAM:
811         target_type = TARGET_SOCK_STREAM;
812         break;
813     default:
814         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
815         break;
816     }
817 
818 #if defined(SOCK_CLOEXEC)
819     if (host_type & SOCK_CLOEXEC) {
820         target_type |= TARGET_SOCK_CLOEXEC;
821     }
822 #endif
823 
824 #if defined(SOCK_NONBLOCK)
825     if (host_type & SOCK_NONBLOCK) {
826         target_type |= TARGET_SOCK_NONBLOCK;
827     }
828 #endif
829 
830     return target_type;
831 }
832 
833 static abi_ulong target_brk, initial_target_brk;
834 
835 void target_set_brk(abi_ulong new_brk)
836 {
837     target_brk = TARGET_PAGE_ALIGN(new_brk);
838     initial_target_brk = target_brk;
839 }
840 
841 /* do_brk() must return target values and target errnos. */
842 abi_long do_brk(abi_ulong brk_val)
843 {
844     abi_long mapped_addr;
845     abi_ulong new_brk;
846     abi_ulong old_brk;
847 
848     /* brk pointers are always untagged */
849 
850     /* do not allow shrinking below the initial brk value */
851     if (brk_val < initial_target_brk) {
852         return target_brk;
853     }
854 
855     new_brk = TARGET_PAGE_ALIGN(brk_val);
856     old_brk = TARGET_PAGE_ALIGN(target_brk);
857 
858     /* new and old target_brk might be on the same page */
859     if (new_brk == old_brk) {
860         target_brk = brk_val;
861         return target_brk;
862     }
863 
864     /* Release heap if necessary */
865     if (new_brk < old_brk) {
866         target_munmap(new_brk, old_brk - new_brk);
867 
868         target_brk = brk_val;
869         return target_brk;
870     }
871 
872     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
873                               PROT_READ | PROT_WRITE,
874                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
875                               -1, 0);
876 
877     if (mapped_addr == old_brk) {
878         target_brk = brk_val;
879         return target_brk;
880     }
881 
882 #if defined(TARGET_ALPHA)
883     /* We (partially) emulate OSF/1 on Alpha, which requires we
884        return a proper errno, not an unchanged brk value.  */
885     return -TARGET_ENOMEM;
886 #endif
887     /* For everything else, return the previous break. */
888     return target_brk;
889 }
890 
891 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
892     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
893 static inline abi_long copy_from_user_fdset(fd_set *fds,
894                                             abi_ulong target_fds_addr,
895                                             int n)
896 {
897     int i, nw, j, k;
898     abi_ulong b, *target_fds;
899 
900     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
901     if (!(target_fds = lock_user(VERIFY_READ,
902                                  target_fds_addr,
903                                  sizeof(abi_ulong) * nw,
904                                  1)))
905         return -TARGET_EFAULT;
906 
907     FD_ZERO(fds);
908     k = 0;
909     for (i = 0; i < nw; i++) {
910         /* grab the abi_ulong */
911         __get_user(b, &target_fds[i]);
912         for (j = 0; j < TARGET_ABI_BITS; j++) {
913             /* check the bit inside the abi_ulong */
914             if ((b >> j) & 1)
915                 FD_SET(k, fds);
916             k++;
917         }
918     }
919 
920     unlock_user(target_fds, target_fds_addr, 0);
921 
922     return 0;
923 }
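
/*
 * Layout example: with TARGET_ABI_BITS == 32, guest fd 35 is bit
 * 35 % 32 == 3 of target_fds[35 / 32 == 1], which the loop above
 * turns into FD_SET(35, fds) on the host.
 */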
924 
925 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
926                                                  abi_ulong target_fds_addr,
927                                                  int n)
928 {
929     if (target_fds_addr) {
930         if (copy_from_user_fdset(fds, target_fds_addr, n))
931             return -TARGET_EFAULT;
932         *fds_ptr = fds;
933     } else {
934         *fds_ptr = NULL;
935     }
936     return 0;
937 }
938 
939 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
940                                           const fd_set *fds,
941                                           int n)
942 {
943     int i, nw, j, k;
944     abi_long v;
945     abi_ulong *target_fds;
946 
947     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
948     if (!(target_fds = lock_user(VERIFY_WRITE,
949                                  target_fds_addr,
950                                  sizeof(abi_ulong) * nw,
951                                  0)))
952         return -TARGET_EFAULT;
953 
954     k = 0;
955     for (i = 0; i < nw; i++) {
956         v = 0;
957         for (j = 0; j < TARGET_ABI_BITS; j++) {
958             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
959             k++;
960         }
961         __put_user(v, &target_fds[i]);
962     }
963 
964     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
965 
966     return 0;
967 }
968 #endif
969 
970 #if defined(__alpha__)
971 #define HOST_HZ 1024
972 #else
973 #define HOST_HZ 100
974 #endif
975 
976 static inline abi_long host_to_target_clock_t(long ticks)
977 {
978 #if HOST_HZ == TARGET_HZ
979     return ticks;
980 #else
981     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
982 #endif
983 }
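
/*
 * Example: on an Alpha host (HOST_HZ == 1024) emulating a 100 Hz
 * target, 2048 host ticks are reported as 2048 * 100 / 1024 = 200
 * target ticks.
 */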
984 
985 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
986                                              const struct rusage *rusage)
987 {
988     struct target_rusage *target_rusage;
989 
990     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
991         return -TARGET_EFAULT;
992     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
993     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
994     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
995     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
996     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
997     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
998     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
999     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1000     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1001     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1002     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1003     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1004     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1005     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1006     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1007     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1008     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1009     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1010     unlock_user_struct(target_rusage, target_addr, 1);
1011 
1012     return 0;
1013 }
1014 
1015 #ifdef TARGET_NR_setrlimit
1016 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1017 {
1018     abi_ulong target_rlim_swap;
1019     rlim_t result;
1020 
1021     target_rlim_swap = tswapal(target_rlim);
1022     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1023         return RLIM_INFINITY;
1024 
1025     result = target_rlim_swap;
1026     if (target_rlim_swap != (rlim_t)result)
1027         return RLIM_INFINITY;
1028 
1029     return result;
1030 }
1031 #endif
1032 
1033 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1034 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1035 {
1036     abi_ulong target_rlim_swap;
1037     abi_ulong result;
1038 
1039     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1040         target_rlim_swap = TARGET_RLIM_INFINITY;
1041     else
1042         target_rlim_swap = rlim;
1043     result = tswapal(target_rlim_swap);
1044 
1045     return result;
1046 }
1047 #endif
1048 
1049 static inline int target_to_host_resource(int code)
1050 {
1051     switch (code) {
1052     case TARGET_RLIMIT_AS:
1053         return RLIMIT_AS;
1054     case TARGET_RLIMIT_CORE:
1055         return RLIMIT_CORE;
1056     case TARGET_RLIMIT_CPU:
1057         return RLIMIT_CPU;
1058     case TARGET_RLIMIT_DATA:
1059         return RLIMIT_DATA;
1060     case TARGET_RLIMIT_FSIZE:
1061         return RLIMIT_FSIZE;
1062     case TARGET_RLIMIT_LOCKS:
1063         return RLIMIT_LOCKS;
1064     case TARGET_RLIMIT_MEMLOCK:
1065         return RLIMIT_MEMLOCK;
1066     case TARGET_RLIMIT_MSGQUEUE:
1067         return RLIMIT_MSGQUEUE;
1068     case TARGET_RLIMIT_NICE:
1069         return RLIMIT_NICE;
1070     case TARGET_RLIMIT_NOFILE:
1071         return RLIMIT_NOFILE;
1072     case TARGET_RLIMIT_NPROC:
1073         return RLIMIT_NPROC;
1074     case TARGET_RLIMIT_RSS:
1075         return RLIMIT_RSS;
1076     case TARGET_RLIMIT_RTPRIO:
1077         return RLIMIT_RTPRIO;
1078 #ifdef RLIMIT_RTTIME
1079     case TARGET_RLIMIT_RTTIME:
1080         return RLIMIT_RTTIME;
1081 #endif
1082     case TARGET_RLIMIT_SIGPENDING:
1083         return RLIMIT_SIGPENDING;
1084     case TARGET_RLIMIT_STACK:
1085         return RLIMIT_STACK;
1086     default:
1087         return code;
1088     }
1089 }
1090 
1091 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1092                                               abi_ulong target_tv_addr)
1093 {
1094     struct target_timeval *target_tv;
1095 
1096     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1097         return -TARGET_EFAULT;
1098     }
1099 
1100     __get_user(tv->tv_sec, &target_tv->tv_sec);
1101     __get_user(tv->tv_usec, &target_tv->tv_usec);
1102 
1103     unlock_user_struct(target_tv, target_tv_addr, 0);
1104 
1105     return 0;
1106 }
1107 
1108 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1109                                             const struct timeval *tv)
1110 {
1111     struct target_timeval *target_tv;
1112 
1113     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1114         return -TARGET_EFAULT;
1115     }
1116 
1117     __put_user(tv->tv_sec, &target_tv->tv_sec);
1118     __put_user(tv->tv_usec, &target_tv->tv_usec);
1119 
1120     unlock_user_struct(target_tv, target_tv_addr, 1);
1121 
1122     return 0;
1123 }
1124 
1125 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1126 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1127                                                 abi_ulong target_tv_addr)
1128 {
1129     struct target__kernel_sock_timeval *target_tv;
1130 
1131     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1132         return -TARGET_EFAULT;
1133     }
1134 
1135     __get_user(tv->tv_sec, &target_tv->tv_sec);
1136     __get_user(tv->tv_usec, &target_tv->tv_usec);
1137 
1138     unlock_user_struct(target_tv, target_tv_addr, 0);
1139 
1140     return 0;
1141 }
1142 #endif
1143 
1144 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1145                                               const struct timeval *tv)
1146 {
1147     struct target__kernel_sock_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1150         return -TARGET_EFAULT;
1151     }
1152 
1153     __put_user(tv->tv_sec, &target_tv->tv_sec);
1154     __put_user(tv->tv_usec, &target_tv->tv_usec);
1155 
1156     unlock_user_struct(target_tv, target_tv_addr, 1);
1157 
1158     return 0;
1159 }
1160 
1161 #if defined(TARGET_NR_futex) || \
1162     defined(TARGET_NR_rt_sigtimedwait) || \
1163     defined(TARGET_NR_pselect6) || \
1164     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1165     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1166     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1167     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1168     defined(TARGET_NR_timer_settime) || \
1169     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1170 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1171                                                abi_ulong target_addr)
1172 {
1173     struct target_timespec *target_ts;
1174 
1175     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1176         return -TARGET_EFAULT;
1177     }
1178     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1179     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1180     unlock_user_struct(target_ts, target_addr, 0);
1181     return 0;
1182 }
1183 #endif
1184 
1185 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1186     defined(TARGET_NR_timer_settime64) || \
1187     defined(TARGET_NR_mq_timedsend_time64) || \
1188     defined(TARGET_NR_mq_timedreceive_time64) || \
1189     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1190     defined(TARGET_NR_clock_nanosleep_time64) || \
1191     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1192     defined(TARGET_NR_utimensat) || \
1193     defined(TARGET_NR_utimensat_time64) || \
1194     defined(TARGET_NR_semtimedop_time64) || \
1195     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1196 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1197                                                  abi_ulong target_addr)
1198 {
1199     struct target__kernel_timespec *target_ts;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1202         return -TARGET_EFAULT;
1203     }
1204     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1205     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1206     /* in 32-bit mode, this drops the padding */
1207     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1208     unlock_user_struct(target_ts, target_addr, 0);
1209     return 0;
1210 }
1211 #endif
1212 
1213 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1214                                                struct timespec *host_ts)
1215 {
1216     struct target_timespec *target_ts;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1219         return -TARGET_EFAULT;
1220     }
1221     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1222     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1223     unlock_user_struct(target_ts, target_addr, 1);
1224     return 0;
1225 }
1226 
1227 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1228                                                  struct timespec *host_ts)
1229 {
1230     struct target__kernel_timespec *target_ts;
1231 
1232     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1233         return -TARGET_EFAULT;
1234     }
1235     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1236     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1237     unlock_user_struct(target_ts, target_addr, 1);
1238     return 0;
1239 }
1240 
1241 #if defined(TARGET_NR_gettimeofday)
1242 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1243                                              struct timezone *tz)
1244 {
1245     struct target_timezone *target_tz;
1246 
1247     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1248         return -TARGET_EFAULT;
1249     }
1250 
1251     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1252     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1253 
1254     unlock_user_struct(target_tz, target_tz_addr, 1);
1255 
1256     return 0;
1257 }
1258 #endif
1259 
1260 #if defined(TARGET_NR_settimeofday)
1261 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1262                                                abi_ulong target_tz_addr)
1263 {
1264     struct target_timezone *target_tz;
1265 
1266     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1267         return -TARGET_EFAULT;
1268     }
1269 
1270     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1271     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1272 
1273     unlock_user_struct(target_tz, target_tz_addr, 0);
1274 
1275     return 0;
1276 }
1277 #endif
1278 
1279 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1280 #include <mqueue.h>
1281 
1282 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1283                                               abi_ulong target_mq_attr_addr)
1284 {
1285     struct target_mq_attr *target_mq_attr;
1286 
1287     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1288                           target_mq_attr_addr, 1))
1289         return -TARGET_EFAULT;
1290 
1291     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1292     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1293     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1294     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1295 
1296     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1297 
1298     return 0;
1299 }
1300 
1301 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1302                                             const struct mq_attr *attr)
1303 {
1304     struct target_mq_attr *target_mq_attr;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1307                           target_mq_attr_addr, 0))
1308         return -TARGET_EFAULT;
1309 
1310     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1311     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1312     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1313     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1314 
1315     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1316 
1317     return 0;
1318 }
1319 #endif
1320 
1321 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1322 /* do_select() must return target values and target errnos. */
1323 static abi_long do_select(int n,
1324                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1325                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1326 {
1327     fd_set rfds, wfds, efds;
1328     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1329     struct timeval tv;
1330     struct timespec ts, *ts_ptr;
1331     abi_long ret;
1332 
1333     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1334     if (ret) {
1335         return ret;
1336     }
1337     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1338     if (ret) {
1339         return ret;
1340     }
1341     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1342     if (ret) {
1343         return ret;
1344     }
1345 
1346     if (target_tv_addr) {
1347         if (copy_from_user_timeval(&tv, target_tv_addr))
1348             return -TARGET_EFAULT;
1349         ts.tv_sec = tv.tv_sec;
1350         ts.tv_nsec = tv.tv_usec * 1000;
1351         ts_ptr = &ts;
1352     } else {
1353         ts_ptr = NULL;
1354     }
1355 
1356     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1357                                   ts_ptr, NULL));
1358 
1359     if (!is_error(ret)) {
1360         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1361             return -TARGET_EFAULT;
1362         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1363             return -TARGET_EFAULT;
1364         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1365             return -TARGET_EFAULT;
1366 
1367         if (target_tv_addr) {
1368             tv.tv_sec = ts.tv_sec;
1369             tv.tv_usec = ts.tv_nsec / 1000;
1370             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1371                 return -TARGET_EFAULT;
1372             }
1373         }
1374     }
1375 
1376     return ret;
1377 }
1378 
1379 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1380 static abi_long do_old_select(abi_ulong arg1)
1381 {
1382     struct target_sel_arg_struct *sel;
1383     abi_ulong inp, outp, exp, tvp;
1384     long nsel;
1385 
1386     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1387         return -TARGET_EFAULT;
1388     }
1389 
1390     nsel = tswapal(sel->n);
1391     inp = tswapal(sel->inp);
1392     outp = tswapal(sel->outp);
1393     exp = tswapal(sel->exp);
1394     tvp = tswapal(sel->tvp);
1395 
1396     unlock_user_struct(sel, arg1, 0);
1397 
1398     return do_select(nsel, inp, outp, exp, tvp);
1399 }
1400 #endif
1401 #endif
1402 
1403 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1404 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1405                             abi_long arg4, abi_long arg5, abi_long arg6,
1406                             bool time64)
1407 {
1408     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1409     fd_set rfds, wfds, efds;
1410     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1411     struct timespec ts, *ts_ptr;
1412     abi_long ret;
1413 
1414     /*
1415      * The 6th arg is actually two args smashed together,
1416      * so we cannot use the C library.
1417      */
1418     struct {
1419         sigset_t *set;
1420         size_t size;
1421     } sig, *sig_ptr;
1422 
1423     abi_ulong arg_sigset, arg_sigsize, *arg7;
1424 
1425     n = arg1;
1426     rfd_addr = arg2;
1427     wfd_addr = arg3;
1428     efd_addr = arg4;
1429     ts_addr = arg5;
1430 
1431     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1432     if (ret) {
1433         return ret;
1434     }
1435     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1436     if (ret) {
1437         return ret;
1438     }
1439     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1440     if (ret) {
1441         return ret;
1442     }
1443 
1444     /*
1445      * This takes a timespec, and not a timeval, so we cannot
1446      * use the do_select() helper ...
1447      */
1448     if (ts_addr) {
1449         if (time64) {
1450             if (target_to_host_timespec64(&ts, ts_addr)) {
1451                 return -TARGET_EFAULT;
1452             }
1453         } else {
1454             if (target_to_host_timespec(&ts, ts_addr)) {
1455                 return -TARGET_EFAULT;
1456             }
1457         }
1458         ts_ptr = &ts;
1459     } else {
1460         ts_ptr = NULL;
1461     }
1462 
1463     /* Extract the two packed args for the sigset */
1464     sig_ptr = NULL;
1465     if (arg6) {
1466         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1467         if (!arg7) {
1468             return -TARGET_EFAULT;
1469         }
1470         arg_sigset = tswapal(arg7[0]);
1471         arg_sigsize = tswapal(arg7[1]);
1472         unlock_user(arg7, arg6, 0);
1473 
1474         if (arg_sigset) {
1475             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1476             if (ret != 0) {
1477                 return ret;
1478             }
1479             sig_ptr = &sig;
1480             sig.size = SIGSET_T_SIZE;
1481         }
1482     }
1483 
1484     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1485                                   ts_ptr, sig_ptr));
1486 
1487     if (sig_ptr) {
1488         finish_sigsuspend_mask(ret);
1489     }
1490 
1491     if (!is_error(ret)) {
1492         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1493             return -TARGET_EFAULT;
1494         }
1495         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1496             return -TARGET_EFAULT;
1497         }
1498         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1499             return -TARGET_EFAULT;
1500         }
1501         if (time64) {
1502             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1503                 return -TARGET_EFAULT;
1504             }
1505         } else {
1506             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1507                 return -TARGET_EFAULT;
1508             }
1509         }
1510     }
1511     return ret;
1512 }
1513 #endif
1514 
1515 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1516     defined(TARGET_NR_ppoll_time64)
1517 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1518                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1519 {
1520     struct target_pollfd *target_pfd;
1521     unsigned int nfds = arg2;
1522     struct pollfd *pfd;
1523     unsigned int i;
1524     abi_long ret;
1525 
1526     pfd = NULL;
1527     target_pfd = NULL;
1528     if (nfds) {
1529         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1530             return -TARGET_EINVAL;
1531         }
1532         target_pfd = lock_user(VERIFY_WRITE, arg1,
1533                                sizeof(struct target_pollfd) * nfds, 1);
1534         if (!target_pfd) {
1535             return -TARGET_EFAULT;
1536         }
1537 
1538         pfd = alloca(sizeof(struct pollfd) * nfds);
1539         for (i = 0; i < nfds; i++) {
1540             pfd[i].fd = tswap32(target_pfd[i].fd);
1541             pfd[i].events = tswap16(target_pfd[i].events);
1542         }
1543     }
1544     if (ppoll) {
1545         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1546         sigset_t *set = NULL;
1547 
1548         if (arg3) {
1549             if (time64) {
1550                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1551                     unlock_user(target_pfd, arg1, 0);
1552                     return -TARGET_EFAULT;
1553                 }
1554             } else {
1555                 if (target_to_host_timespec(timeout_ts, arg3)) {
1556                     unlock_user(target_pfd, arg1, 0);
1557                     return -TARGET_EFAULT;
1558                 }
1559             }
1560         } else {
1561             timeout_ts = NULL;
1562         }
1563 
1564         if (arg4) {
1565             ret = process_sigsuspend_mask(&set, arg4, arg5);
1566             if (ret != 0) {
1567                 unlock_user(target_pfd, arg1, 0);
1568                 return ret;
1569             }
1570         }
1571 
1572         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1573                                    set, SIGSET_T_SIZE));
1574 
1575         if (set) {
1576             finish_sigsuspend_mask(ret);
1577         }
1578         if (!is_error(ret) && arg3) {
1579             if (time64) {
1580                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1581                     return -TARGET_EFAULT;
1582                 }
1583             } else {
1584                 if (host_to_target_timespec(arg3, timeout_ts)) {
1585                     return -TARGET_EFAULT;
1586                 }
1587             }
1588         }
1589     } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* A negative poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1602     }
1603 
1604     if (!is_error(ret)) {
1605         for (i = 0; i < nfds; i++) {
1606             target_pfd[i].revents = tswap16(pfd[i].revents);
1607         }
1608     }
1609     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1610     return ret;
1611 }
1612 #endif
1613 
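/* Create a pipe on behalf of the guest using the host pipe2(). For the
 * legacy pipe syscall some targets return the second descriptor in a CPU
 * register instead of storing both into the guest array; see the
 * per-target cases below.
 */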
1614 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1615                         int flags, int is_pipe2)
1616 {
1617     int host_pipe[2];
1618     abi_long ret;
1619     ret = pipe2(host_pipe, flags);
1620 
1621     if (is_error(ret))
1622         return get_errno(ret);
1623 
1624     /* Several targets have special calling conventions for the original
1625        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1626     if (!is_pipe2) {
1627 #if defined(TARGET_ALPHA)
1628         cpu_env->ir[IR_A4] = host_pipe[1];
1629         return host_pipe[0];
1630 #elif defined(TARGET_MIPS)
1631         cpu_env->active_tc.gpr[3] = host_pipe[1];
1632         return host_pipe[0];
1633 #elif defined(TARGET_SH4)
1634         cpu_env->gregs[1] = host_pipe[1];
1635         return host_pipe[0];
1636 #elif defined(TARGET_SPARC)
1637         cpu_env->regwptr[1] = host_pipe[1];
1638         return host_pipe[0];
1639 #endif
1640     }
1641 
1642     if (put_user_s32(host_pipe[0], pipedes)
1643         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1644         return -TARGET_EFAULT;
1645     return get_errno(ret);
1646 }
1647 
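/* Copy a sockaddr from guest memory at target_addr into the host buffer
 * 'addr', byte-swapping the family-specific fields of AF_NETLINK,
 * AF_PACKET and AF_INET6 addresses and fixing up unterminated AF_UNIX
 * sun_path lengths. A per-fd translator registered via fd_trans takes
 * precedence when present.
 */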
1648 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1649                                                abi_ulong target_addr,
1650                                                socklen_t len)
1651 {
1652     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1653     sa_family_t sa_family;
1654     struct target_sockaddr *target_saddr;
1655 
1656     if (fd_trans_target_to_host_addr(fd)) {
1657         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1658     }
1659 
1660     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1661     if (!target_saddr)
1662         return -TARGET_EFAULT;
1663 
1664     sa_family = tswap16(target_saddr->sa_family);
1665 
    /* The caller might send an incomplete sun_path; sun_path must be
     * terminated by \0 (see the manual page), but unfortunately it is
     * quite common to specify the sockaddr_un length as
     * "strlen(x->sun_path)" when it should be "strlen(...) + 1".
     * We fix that up here if needed; the Linux kernel applies a
     * similar workaround.
     */
1673 
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char *)target_saddr;

            if (cp[len - 1] && !cp[len]) {
                len++;
            }
        }
        if (len > unix_maxlen) {
            len = unix_maxlen;
        }
    }
1684 
1685     memcpy(addr, target_saddr, len);
1686     addr->sa_family = sa_family;
1687     if (sa_family == AF_NETLINK) {
1688         struct sockaddr_nl *nladdr;
1689 
1690         nladdr = (struct sockaddr_nl *)addr;
1691         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1692         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1693     } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1699     } else if (sa_family == AF_INET6) {
1700         struct sockaddr_in6 *in6addr;
1701 
1702         in6addr = (struct sockaddr_in6 *)addr;
1703         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1704     }
1705     unlock_user(target_saddr, target_addr, 0);
1706 
1707     return 0;
1708 }
1709 
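/* Copy a host sockaddr back to guest memory at target_addr, swapping the
 * family field and the byte order of AF_NETLINK, AF_PACKET and AF_INET6
 * specific members when the guest buffer is large enough to hold them.
 */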
1710 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1711                                                struct sockaddr *addr,
1712                                                socklen_t len)
1713 {
1714     struct target_sockaddr *target_saddr;
1715 
1716     if (len == 0) {
1717         return 0;
1718     }
1719     assert(addr);
1720 
1721     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1722     if (!target_saddr)
1723         return -TARGET_EFAULT;
1724     memcpy(target_saddr, addr, len);
1725     if (len >= offsetof(struct target_sockaddr, sa_family) +
1726         sizeof(target_saddr->sa_family)) {
1727         target_saddr->sa_family = tswap16(addr->sa_family);
1728     }
1729     if (addr->sa_family == AF_NETLINK &&
1730         len >= sizeof(struct target_sockaddr_nl)) {
1731         struct target_sockaddr_nl *target_nl =
1732                (struct target_sockaddr_nl *)target_saddr;
1733         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1734         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1735     } else if (addr->sa_family == AF_PACKET) {
1736         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1737         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1738         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1739     } else if (addr->sa_family == AF_INET6 &&
1740                len >= sizeof(struct target_sockaddr_in6)) {
1741         struct target_sockaddr_in6 *target_in6 =
1742                (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1744     }
1745     unlock_user(target_saddr, target_addr, len);
1746 
1747     return 0;
1748 }
1749 
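/* Convert the ancillary data (control messages) of a guest msghdr into
 * the host msghdr. SCM_RIGHTS file descriptors, SCM_CREDENTIALS and
 * SOL_ALG payloads are converted explicitly; anything else is copied
 * verbatim and logged as unsupported.
 */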
1750 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1751                                            struct target_msghdr *target_msgh)
1752 {
1753     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1754     abi_long msg_controllen;
1755     abi_ulong target_cmsg_addr;
1756     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1757     socklen_t space = 0;
1758 
1759     msg_controllen = tswapal(target_msgh->msg_controllen);
1760     if (msg_controllen < sizeof (struct target_cmsghdr))
1761         goto the_end;
1762     target_cmsg_addr = tswapal(target_msgh->msg_control);
1763     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1764     target_cmsg_start = target_cmsg;
1765     if (!target_cmsg)
1766         return -TARGET_EFAULT;
1767 
1768     while (cmsg && target_cmsg) {
1769         void *data = CMSG_DATA(cmsg);
1770         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1771 
1772         int len = tswapal(target_cmsg->cmsg_len)
1773             - sizeof(struct target_cmsghdr);
1774 
1775         space += CMSG_SPACE(len);
1776         if (space > msgh->msg_controllen) {
1777             space -= CMSG_SPACE(len);
1778             /* This is a QEMU bug, since we allocated the payload
1779              * area ourselves (unlike overflow in host-to-target
1780              * conversion, which is just the guest giving us a buffer
1781              * that's too small). It can't happen for the payload types
1782              * we currently support; if it becomes an issue in future
1783              * we would need to improve our allocation strategy to
1784              * something more intelligent than "twice the size of the
1785              * target buffer we're reading from".
1786              */
1787             qemu_log_mask(LOG_UNIMP,
1788                           ("Unsupported ancillary data %d/%d: "
1789                            "unhandled msg size\n"),
1790                           tswap32(target_cmsg->cmsg_level),
1791                           tswap32(target_cmsg->cmsg_type));
1792             break;
1793         }
1794 
1795         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1796             cmsg->cmsg_level = SOL_SOCKET;
1797         } else {
1798             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1799         }
1800         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1801         cmsg->cmsg_len = CMSG_LEN(len);
1802 
1803         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1804             int *fd = (int *)data;
1805             int *target_fd = (int *)target_data;
1806             int i, numfds = len / sizeof(int);
1807 
1808             for (i = 0; i < numfds; i++) {
1809                 __get_user(fd[i], target_fd + i);
1810             }
1811         } else if (cmsg->cmsg_level == SOL_SOCKET
1812                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1813             struct ucred *cred = (struct ucred *)data;
1814             struct target_ucred *target_cred =
1815                 (struct target_ucred *)target_data;
1816 
1817             __get_user(cred->pid, &target_cred->pid);
1818             __get_user(cred->uid, &target_cred->uid);
1819             __get_user(cred->gid, &target_cred->gid);
1820         } else if (cmsg->cmsg_level == SOL_ALG) {
1821             uint32_t *dst = (uint32_t *)data;
1822 
1823             memcpy(dst, target_data, len);
1824             /* fix endianness of first 32-bit word */
1825             if (len >= sizeof(uint32_t)) {
1826                 *dst = tswap32(*dst);
1827             }
1828         } else {
1829             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1830                           cmsg->cmsg_level, cmsg->cmsg_type);
1831             memcpy(data, target_data, len);
1832         }
1833 
1834         cmsg = CMSG_NXTHDR(msgh, cmsg);
1835         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1836                                          target_cmsg_start);
1837     }
1838     unlock_user(target_cmsg, target_cmsg_addr, 0);
1839  the_end:
1840     msgh->msg_controllen = space;
1841     return 0;
1842 }
1843 
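/* Convert host ancillary data back into the guest msghdr, truncating (and
 * reporting MSG_CTRUNC) when the guest control buffer is too small.
 * Payloads whose target layout differs from the host layout, such as
 * SO_TIMESTAMP, are resized and converted field by field.
 */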
1844 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1845                                            struct msghdr *msgh)
1846 {
1847     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1848     abi_long msg_controllen;
1849     abi_ulong target_cmsg_addr;
1850     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1851     socklen_t space = 0;
1852 
1853     msg_controllen = tswapal(target_msgh->msg_controllen);
1854     if (msg_controllen < sizeof (struct target_cmsghdr))
1855         goto the_end;
1856     target_cmsg_addr = tswapal(target_msgh->msg_control);
1857     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1858     target_cmsg_start = target_cmsg;
1859     if (!target_cmsg)
1860         return -TARGET_EFAULT;
1861 
1862     while (cmsg && target_cmsg) {
1863         void *data = CMSG_DATA(cmsg);
1864         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1865 
1866         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1867         int tgt_len, tgt_space;
1868 
1869         /* We never copy a half-header but may copy half-data;
1870          * this is Linux's behaviour in put_cmsg(). Note that
1871          * truncation here is a guest problem (which we report
1872          * to the guest via the CTRUNC bit), unlike truncation
1873          * in target_to_host_cmsg, which is a QEMU bug.
1874          */
1875         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1876             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1877             break;
1878         }
1879 
1880         if (cmsg->cmsg_level == SOL_SOCKET) {
1881             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1882         } else {
1883             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1884         }
1885         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1886 
1887         /* Payload types which need a different size of payload on
1888          * the target must adjust tgt_len here.
1889          */
1890         tgt_len = len;
1891         switch (cmsg->cmsg_level) {
1892         case SOL_SOCKET:
1893             switch (cmsg->cmsg_type) {
1894             case SO_TIMESTAMP:
1895                 tgt_len = sizeof(struct target_timeval);
1896                 break;
1897             default:
1898                 break;
1899             }
1900             break;
1901         default:
1902             break;
1903         }
1904 
1905         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1906             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1907             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1908         }
1909 
1910         /* We must now copy-and-convert len bytes of payload
1911          * into tgt_len bytes of destination space. Bear in mind
1912          * that in both source and destination we may be dealing
1913          * with a truncated value!
1914          */
1915         switch (cmsg->cmsg_level) {
1916         case SOL_SOCKET:
1917             switch (cmsg->cmsg_type) {
1918             case SCM_RIGHTS:
1919             {
1920                 int *fd = (int *)data;
1921                 int *target_fd = (int *)target_data;
1922                 int i, numfds = tgt_len / sizeof(int);
1923 
1924                 for (i = 0; i < numfds; i++) {
1925                     __put_user(fd[i], target_fd + i);
1926                 }
1927                 break;
1928             }
1929             case SO_TIMESTAMP:
1930             {
1931                 struct timeval *tv = (struct timeval *)data;
1932                 struct target_timeval *target_tv =
1933                     (struct target_timeval *)target_data;
1934 
1935                 if (len != sizeof(struct timeval) ||
1936                     tgt_len != sizeof(struct target_timeval)) {
1937                     goto unimplemented;
1938                 }
1939 
1940                 /* copy struct timeval to target */
1941                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1942                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1943                 break;
1944             }
1945             case SCM_CREDENTIALS:
1946             {
1947                 struct ucred *cred = (struct ucred *)data;
1948                 struct target_ucred *target_cred =
1949                     (struct target_ucred *)target_data;
1950 
1951                 __put_user(cred->pid, &target_cred->pid);
1952                 __put_user(cred->uid, &target_cred->uid);
1953                 __put_user(cred->gid, &target_cred->gid);
1954                 break;
1955             }
1956             default:
1957                 goto unimplemented;
1958             }
1959             break;
1960 
1961         case SOL_IP:
1962             switch (cmsg->cmsg_type) {
1963             case IP_TTL:
1964             {
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1967 
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1971                 }
1972                 __put_user(*v, t_int);
1973                 break;
1974             }
1975             case IP_RECVERR:
1976             {
1977                 struct errhdr_t {
1978                    struct sock_extended_err ee;
1979                    struct sockaddr_in offender;
1980                 };
1981                 struct errhdr_t *errh = (struct errhdr_t *)data;
1982                 struct errhdr_t *target_errh =
1983                     (struct errhdr_t *)target_data;
1984 
1985                 if (len != sizeof(struct errhdr_t) ||
1986                     tgt_len != sizeof(struct errhdr_t)) {
1987                     goto unimplemented;
1988                 }
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
1999             }
2000             default:
2001                 goto unimplemented;
2002             }
2003             break;
2004 
2005         case SOL_IPV6:
2006             switch (cmsg->cmsg_type) {
2007             case IPV6_HOPLIMIT:
2008             {
2009                 uint32_t *v = (uint32_t *)data;
2010                 uint32_t *t_int = (uint32_t *)target_data;
2011 
2012                 if (len != sizeof(uint32_t) ||
2013                     tgt_len != sizeof(uint32_t)) {
2014                     goto unimplemented;
2015                 }
2016                 __put_user(*v, t_int);
2017                 break;
2018             }
2019             case IPV6_RECVERR:
2020             {
2021                 struct errhdr6_t {
2022                    struct sock_extended_err ee;
2023                    struct sockaddr_in6 offender;
2024                 };
2025                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2026                 struct errhdr6_t *target_errh =
2027                     (struct errhdr6_t *)target_data;
2028 
2029                 if (len != sizeof(struct errhdr6_t) ||
2030                     tgt_len != sizeof(struct errhdr6_t)) {
2031                     goto unimplemented;
2032                 }
2033                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2034                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2035                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2036                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2037                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2038                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2039                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2040                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2041                     (void *) &errh->offender, sizeof(errh->offender));
2042                 break;
2043             }
2044             default:
2045                 goto unimplemented;
2046             }
2047             break;
2048 
2049         default:
2050         unimplemented:
2051             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2052                           cmsg->cmsg_level, cmsg->cmsg_type);
2053             memcpy(target_data, data, MIN(len, tgt_len));
2054             if (tgt_len > len) {
2055                 memset(target_data + len, 0, tgt_len - len);
2056             }
2057         }
2058 
2059         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2060         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2061         if (msg_controllen < tgt_space) {
2062             tgt_space = msg_controllen;
2063         }
2064         msg_controllen -= tgt_space;
2065         space += tgt_space;
2066         cmsg = CMSG_NXTHDR(msgh, cmsg);
2067         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2068                                          target_cmsg_start);
2069     }
2070     unlock_user(target_cmsg, target_cmsg_addr, space);
2071  the_end:
2072     target_msgh->msg_controllen = tswapal(space);
2073     return 0;
2074 }
2075 
/* do_setsockopt() must return target values and target errnos. */
2077 static abi_long do_setsockopt(int sockfd, int level, int optname,
2078                               abi_ulong optval_addr, socklen_t optlen)
2079 {
2080     abi_long ret;
2081     int val;
2082 
2083     switch(level) {
2084     case SOL_TCP:
2085     case SOL_UDP:
2086         /* TCP and UDP options all take an 'int' value.  */
2087         if (optlen < sizeof(uint32_t))
2088             return -TARGET_EINVAL;
2089 
2090         if (get_user_u32(val, optval_addr))
2091             return -TARGET_EFAULT;
2092         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2093         break;
2094     case SOL_IP:
2095         switch(optname) {
2096         case IP_TOS:
2097         case IP_TTL:
2098         case IP_HDRINCL:
2099         case IP_ROUTER_ALERT:
2100         case IP_RECVOPTS:
2101         case IP_RETOPTS:
2102         case IP_PKTINFO:
2103         case IP_MTU_DISCOVER:
2104         case IP_RECVERR:
2105         case IP_RECVTTL:
2106         case IP_RECVTOS:
2107 #ifdef IP_FREEBIND
2108         case IP_FREEBIND:
2109 #endif
2110         case IP_MULTICAST_TTL:
2111         case IP_MULTICAST_LOOP:
2112             val = 0;
2113             if (optlen >= sizeof(uint32_t)) {
2114                 if (get_user_u32(val, optval_addr))
2115                     return -TARGET_EFAULT;
2116             } else if (optlen >= 1) {
2117                 if (get_user_u8(val, optval_addr))
2118                     return -TARGET_EFAULT;
2119             }
2120             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2121             break;
2122         case IP_ADD_MEMBERSHIP:
2123         case IP_DROP_MEMBERSHIP:
2124         {
2125             struct ip_mreqn ip_mreq;
2126             struct target_ip_mreqn *target_smreqn;
2127 
2128             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2129                               sizeof(struct target_ip_mreq));
2130 
2131             if (optlen < sizeof (struct target_ip_mreq) ||
2132                 optlen > sizeof (struct target_ip_mreqn)) {
2133                 return -TARGET_EINVAL;
2134             }
2135 
2136             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2137             if (!target_smreqn) {
2138                 return -TARGET_EFAULT;
2139             }
2140             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2141             ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2142             if (optlen == sizeof(struct target_ip_mreqn)) {
2143                 ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2144                 optlen = sizeof(struct ip_mreqn);
2145             }
2146             unlock_user(target_smreqn, optval_addr, 0);
2147 
2148             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2149             break;
2150         }
2151         case IP_BLOCK_SOURCE:
2152         case IP_UNBLOCK_SOURCE:
2153         case IP_ADD_SOURCE_MEMBERSHIP:
2154         case IP_DROP_SOURCE_MEMBERSHIP:
2155         {
2156             struct ip_mreq_source *ip_mreq_source;
2157 
2158             if (optlen != sizeof (struct target_ip_mreq_source))
2159                 return -TARGET_EINVAL;
2160 
2161             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2162             if (!ip_mreq_source) {
2163                 return -TARGET_EFAULT;
2164             }
2165             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
2167             break;
2168         }
2169         default:
2170             goto unimplemented;
2171         }
2172         break;
2173     case SOL_IPV6:
2174         switch (optname) {
2175         case IPV6_MTU_DISCOVER:
2176         case IPV6_MTU:
2177         case IPV6_V6ONLY:
2178         case IPV6_RECVPKTINFO:
2179         case IPV6_UNICAST_HOPS:
2180         case IPV6_MULTICAST_HOPS:
2181         case IPV6_MULTICAST_LOOP:
2182         case IPV6_RECVERR:
2183         case IPV6_RECVHOPLIMIT:
2184         case IPV6_2292HOPLIMIT:
2185         case IPV6_CHECKSUM:
2186         case IPV6_ADDRFORM:
2187         case IPV6_2292PKTINFO:
2188         case IPV6_RECVTCLASS:
2189         case IPV6_RECVRTHDR:
2190         case IPV6_2292RTHDR:
2191         case IPV6_RECVHOPOPTS:
2192         case IPV6_2292HOPOPTS:
2193         case IPV6_RECVDSTOPTS:
2194         case IPV6_2292DSTOPTS:
2195         case IPV6_TCLASS:
2196         case IPV6_ADDR_PREFERENCES:
2197 #ifdef IPV6_RECVPATHMTU
2198         case IPV6_RECVPATHMTU:
2199 #endif
2200 #ifdef IPV6_TRANSPARENT
2201         case IPV6_TRANSPARENT:
2202 #endif
2203 #ifdef IPV6_FREEBIND
2204         case IPV6_FREEBIND:
2205 #endif
2206 #ifdef IPV6_RECVORIGDSTADDR
2207         case IPV6_RECVORIGDSTADDR:
2208 #endif
2209             val = 0;
2210             if (optlen < sizeof(uint32_t)) {
2211                 return -TARGET_EINVAL;
2212             }
2213             if (get_user_u32(val, optval_addr)) {
2214                 return -TARGET_EFAULT;
2215             }
2216             ret = get_errno(setsockopt(sockfd, level, optname,
2217                                        &val, sizeof(val)));
2218             break;
2219         case IPV6_PKTINFO:
2220         {
2221             struct in6_pktinfo pki;
2222 
2223             if (optlen < sizeof(pki)) {
2224                 return -TARGET_EINVAL;
2225             }
2226 
2227             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2228                 return -TARGET_EFAULT;
2229             }
2230 
2231             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2232 
2233             ret = get_errno(setsockopt(sockfd, level, optname,
2234                                        &pki, sizeof(pki)));
2235             break;
2236         }
2237         case IPV6_ADD_MEMBERSHIP:
2238         case IPV6_DROP_MEMBERSHIP:
2239         {
2240             struct ipv6_mreq ipv6mreq;
2241 
2242             if (optlen < sizeof(ipv6mreq)) {
2243                 return -TARGET_EINVAL;
2244             }
2245 
2246             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2247                 return -TARGET_EFAULT;
2248             }
2249 
2250             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2251 
2252             ret = get_errno(setsockopt(sockfd, level, optname,
2253                                        &ipv6mreq, sizeof(ipv6mreq)));
2254             break;
2255         }
2256         default:
2257             goto unimplemented;
2258         }
2259         break;
2260     case SOL_ICMPV6:
2261         switch (optname) {
2262         case ICMPV6_FILTER:
2263         {
2264             struct icmp6_filter icmp6f;
2265 
2266             if (optlen > sizeof(icmp6f)) {
2267                 optlen = sizeof(icmp6f);
2268             }
2269 
2270             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2271                 return -TARGET_EFAULT;
2272             }
2273 
2274             for (val = 0; val < 8; val++) {
2275                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2276             }
2277 
2278             ret = get_errno(setsockopt(sockfd, level, optname,
2279                                        &icmp6f, optlen));
2280             break;
2281         }
2282         default:
2283             goto unimplemented;
2284         }
2285         break;
2286     case SOL_RAW:
2287         switch (optname) {
2288         case ICMP_FILTER:
2289         case IPV6_CHECKSUM:
            /* These take a u32 value.  */
2291             if (optlen < sizeof(uint32_t)) {
2292                 return -TARGET_EINVAL;
2293             }
2294 
2295             if (get_user_u32(val, optval_addr)) {
2296                 return -TARGET_EFAULT;
2297             }
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        &val, sizeof(val)));
2300             break;
2301 
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2307     case SOL_ALG:
2308         switch (optname) {
2309         case ALG_SET_KEY:
2310         {
2311             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2312             if (!alg_key) {
2313                 return -TARGET_EFAULT;
2314             }
2315             ret = get_errno(setsockopt(sockfd, level, optname,
2316                                        alg_key, optlen));
2317             unlock_user(alg_key, optval_addr, optlen);
2318             break;
2319         }
2320         case ALG_SET_AEAD_AUTHSIZE:
2321         {
2322             ret = get_errno(setsockopt(sockfd, level, optname,
2323                                        NULL, optlen));
2324             break;
2325         }
2326         default:
2327             goto unimplemented;
2328         }
2329         break;
2330 #endif
2331     case TARGET_SOL_SOCKET:
2332         switch (optname) {
2333         case TARGET_SO_RCVTIMEO:
2334         case TARGET_SO_SNDTIMEO:
2335         {
2336                 struct timeval tv;
2337 
2338                 if (optlen != sizeof(struct target_timeval)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341 
2342                 if (copy_from_user_timeval(&tv, optval_addr)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345 
2346                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2347                                 optname == TARGET_SO_RCVTIMEO ?
2348                                     SO_RCVTIMEO : SO_SNDTIMEO,
2349                                 &tv, sizeof(tv)));
2350                 return ret;
2351         }
2352         case TARGET_SO_ATTACH_FILTER:
2353         {
2354                 struct target_sock_fprog *tfprog;
2355                 struct target_sock_filter *tfilter;
2356                 struct sock_fprog fprog;
2357                 struct sock_filter *filter;
2358                 int i;
2359 
2360                 if (optlen != sizeof(*tfprog)) {
2361                     return -TARGET_EINVAL;
2362                 }
2363                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2364                     return -TARGET_EFAULT;
2365                 }
2366                 if (!lock_user_struct(VERIFY_READ, tfilter,
2367                                       tswapal(tfprog->filter), 0)) {
2368                     unlock_user_struct(tfprog, optval_addr, 1);
2369                     return -TARGET_EFAULT;
2370                 }
2371 
2372                 fprog.len = tswap16(tfprog->len);
2373                 filter = g_try_new(struct sock_filter, fprog.len);
2374                 if (filter == NULL) {
2375                     unlock_user_struct(tfilter, tfprog->filter, 1);
2376                     unlock_user_struct(tfprog, optval_addr, 1);
2377                     return -TARGET_ENOMEM;
2378                 }
2379                 for (i = 0; i < fprog.len; i++) {
2380                     filter[i].code = tswap16(tfilter[i].code);
2381                     filter[i].jt = tfilter[i].jt;
2382                     filter[i].jf = tfilter[i].jf;
2383                     filter[i].k = tswap32(tfilter[i].k);
2384                 }
2385                 fprog.filter = filter;
2386 
2387                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2388                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2389                 g_free(filter);
2390 
2391                 unlock_user_struct(tfilter, tfprog->filter, 1);
2392                 unlock_user_struct(tfprog, optval_addr, 1);
2393                 return ret;
2394         }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user(dev_ifname, optval_addr, 0);
                return ret;
        }
2415         case TARGET_SO_LINGER:
2416         {
2417                 struct linger lg;
2418                 struct target_linger *tlg;
2419 
2420                 if (optlen != sizeof(struct target_linger)) {
2421                     return -TARGET_EINVAL;
2422                 }
2423                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2424                     return -TARGET_EFAULT;
2425                 }
2426                 __get_user(lg.l_onoff, &tlg->l_onoff);
2427                 __get_user(lg.l_linger, &tlg->l_linger);
2428                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2429                                 &lg, sizeof(lg)));
2430                 unlock_user_struct(tlg, optval_addr, 0);
2431                 return ret;
2432         }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                   &val, sizeof(val)));
        break;
2508 #ifdef SOL_NETLINK
2509     case SOL_NETLINK:
2510         switch (optname) {
2511         case NETLINK_PKTINFO:
2512         case NETLINK_ADD_MEMBERSHIP:
2513         case NETLINK_DROP_MEMBERSHIP:
2514         case NETLINK_BROADCAST_ERROR:
2515         case NETLINK_NO_ENOBUFS:
2516 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2517         case NETLINK_LISTEN_ALL_NSID:
2518         case NETLINK_CAP_ACK:
2519 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2520 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2521         case NETLINK_EXT_ACK:
2522 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2523 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2524         case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2526             break;
2527         default:
2528             goto unimplemented;
2529         }
2530         val = 0;
2531         if (optlen < sizeof(uint32_t)) {
2532             return -TARGET_EINVAL;
2533         }
2534         if (get_user_u32(val, optval_addr)) {
2535             return -TARGET_EFAULT;
2536         }
2537         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2538                                    sizeof(val)));
2539         break;
2540 #endif /* SOL_NETLINK */
2541     default:
2542     unimplemented:
2543         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2544                       level, optname);
2545         ret = -TARGET_ENOPROTOOPT;
2546     }
2547     return ret;
2548 }
2549 
/* do_getsockopt() must return target values and target errnos. */
2551 static abi_long do_getsockopt(int sockfd, int level, int optname,
2552                               abi_ulong optval_addr, abi_ulong optlen)
2553 {
2554     abi_long ret;
2555     int len, val;
2556     socklen_t lv;
2557 
2558     switch(level) {
2559     case TARGET_SOL_SOCKET:
2560         level = SOL_SOCKET;
2561         switch (optname) {
2562         /* These don't just return a single integer */
2563         case TARGET_SO_PEERNAME:
2564             goto unimplemented;
2565         case TARGET_SO_RCVTIMEO: {
2566             struct timeval tv;
2567             socklen_t tvlen;
2568 
2569             optname = SO_RCVTIMEO;
2570 
2571 get_timeout:
2572             if (get_user_u32(len, optlen)) {
2573                 return -TARGET_EFAULT;
2574             }
2575             if (len < 0) {
2576                 return -TARGET_EINVAL;
2577             }
2578 
2579             tvlen = sizeof(tv);
2580             ret = get_errno(getsockopt(sockfd, level, optname,
2581                                        &tv, &tvlen));
2582             if (ret < 0) {
2583                 return ret;
2584             }
2585             if (len > sizeof(struct target_timeval)) {
2586                 len = sizeof(struct target_timeval);
2587             }
2588             if (copy_to_user_timeval(optval_addr, &tv)) {
2589                 return -TARGET_EFAULT;
2590             }
2591             if (put_user_u32(len, optlen)) {
2592                 return -TARGET_EFAULT;
2593             }
2594             break;
2595         }
2596         case TARGET_SO_SNDTIMEO:
2597             optname = SO_SNDTIMEO;
2598             goto get_timeout;
2599         case TARGET_SO_PEERCRED: {
2600             struct ucred cr;
2601             socklen_t crlen;
2602             struct target_ucred *tcr;
2603 
2604             if (get_user_u32(len, optlen)) {
2605                 return -TARGET_EFAULT;
2606             }
2607             if (len < 0) {
2608                 return -TARGET_EINVAL;
2609             }
2610 
2611             crlen = sizeof(cr);
2612             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2613                                        &cr, &crlen));
2614             if (ret < 0) {
2615                 return ret;
2616             }
2617             if (len > crlen) {
2618                 len = crlen;
2619             }
2620             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2621                 return -TARGET_EFAULT;
2622             }
2623             __put_user(cr.pid, &tcr->pid);
2624             __put_user(cr.uid, &tcr->uid);
2625             __put_user(cr.gid, &tcr->gid);
2626             unlock_user_struct(tcr, optval_addr, 1);
2627             if (put_user_u32(len, optlen)) {
2628                 return -TARGET_EFAULT;
2629             }
2630             break;
2631         }
2632         case TARGET_SO_PEERSEC: {
2633             char *name;
2634 
2635             if (get_user_u32(len, optlen)) {
2636                 return -TARGET_EFAULT;
2637             }
2638             if (len < 0) {
2639                 return -TARGET_EINVAL;
2640             }
2641             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2642             if (!name) {
2643                 return -TARGET_EFAULT;
2644             }
2645             lv = len;
2646             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2647                                        name, &lv));
2648             if (put_user_u32(lv, optlen)) {
2649                 ret = -TARGET_EFAULT;
2650             }
2651             unlock_user(name, optval_addr, lv);
2652             break;
2653         }
2654         case TARGET_SO_LINGER:
2655         {
2656             struct linger lg;
2657             socklen_t lglen;
2658             struct target_linger *tlg;
2659 
2660             if (get_user_u32(len, optlen)) {
2661                 return -TARGET_EFAULT;
2662             }
2663             if (len < 0) {
2664                 return -TARGET_EINVAL;
2665             }
2666 
2667             lglen = sizeof(lg);
2668             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2669                                        &lg, &lglen));
2670             if (ret < 0) {
2671                 return ret;
2672             }
2673             if (len > lglen) {
2674                 len = lglen;
2675             }
2676             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2677                 return -TARGET_EFAULT;
2678             }
2679             __put_user(lg.l_onoff, &tlg->l_onoff);
2680             __put_user(lg.l_linger, &tlg->l_linger);
2681             unlock_user_struct(tlg, optval_addr, 1);
2682             if (put_user_u32(len, optlen)) {
2683                 return -TARGET_EFAULT;
2684             }
2685             break;
2686         }
2687         /* Options with 'int' argument.  */
2688         case TARGET_SO_DEBUG:
2689             optname = SO_DEBUG;
2690             goto int_case;
2691         case TARGET_SO_REUSEADDR:
2692             optname = SO_REUSEADDR;
2693             goto int_case;
2694 #ifdef SO_REUSEPORT
2695         case TARGET_SO_REUSEPORT:
2696             optname = SO_REUSEPORT;
2697             goto int_case;
2698 #endif
2699         case TARGET_SO_TYPE:
2700             optname = SO_TYPE;
2701             goto int_case;
2702         case TARGET_SO_ERROR:
2703             optname = SO_ERROR;
2704             goto int_case;
2705         case TARGET_SO_DONTROUTE:
2706             optname = SO_DONTROUTE;
2707             goto int_case;
2708         case TARGET_SO_BROADCAST:
2709             optname = SO_BROADCAST;
2710             goto int_case;
2711         case TARGET_SO_SNDBUF:
2712             optname = SO_SNDBUF;
2713             goto int_case;
2714         case TARGET_SO_RCVBUF:
2715             optname = SO_RCVBUF;
2716             goto int_case;
2717         case TARGET_SO_KEEPALIVE:
2718             optname = SO_KEEPALIVE;
2719             goto int_case;
2720         case TARGET_SO_OOBINLINE:
2721             optname = SO_OOBINLINE;
2722             goto int_case;
2723         case TARGET_SO_NO_CHECK:
2724             optname = SO_NO_CHECK;
2725             goto int_case;
2726         case TARGET_SO_PRIORITY:
2727             optname = SO_PRIORITY;
2728             goto int_case;
2729 #ifdef SO_BSDCOMPAT
2730         case TARGET_SO_BSDCOMPAT:
2731             optname = SO_BSDCOMPAT;
2732             goto int_case;
2733 #endif
2734         case TARGET_SO_PASSCRED:
2735             optname = SO_PASSCRED;
2736             goto int_case;
2737         case TARGET_SO_TIMESTAMP:
2738             optname = SO_TIMESTAMP;
2739             goto int_case;
2740         case TARGET_SO_RCVLOWAT:
2741             optname = SO_RCVLOWAT;
2742             goto int_case;
2743         case TARGET_SO_ACCEPTCONN:
2744             optname = SO_ACCEPTCONN;
2745             goto int_case;
2746         case TARGET_SO_PROTOCOL:
2747             optname = SO_PROTOCOL;
2748             goto int_case;
2749         case TARGET_SO_DOMAIN:
2750             optname = SO_DOMAIN;
2751             goto int_case;
2752         default:
2753             goto int_case;
2754         }
2755         break;
2756     case SOL_TCP:
2757     case SOL_UDP:
2758         /* TCP and UDP options all take an 'int' value.  */
2759     int_case:
2760         if (get_user_u32(len, optlen))
2761             return -TARGET_EFAULT;
2762         if (len < 0)
2763             return -TARGET_EINVAL;
2764         lv = sizeof(lv);
2765         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2766         if (ret < 0)
2767             return ret;
2768         switch (optname) {
2769         case SO_TYPE:
2770             val = host_to_target_sock_type(val);
2771             break;
2772         case SO_ERROR:
2773             val = host_to_target_errno(val);
2774             break;
2775         }
2776         if (len > lv)
2777             len = lv;
2778         if (len == 4) {
2779             if (put_user_u32(val, optval_addr))
2780                 return -TARGET_EFAULT;
2781         } else {
2782             if (put_user_u8(val, optval_addr))
2783                 return -TARGET_EFAULT;
2784         }
2785         if (put_user_u32(len, optlen))
2786             return -TARGET_EFAULT;
2787         break;
2788     case SOL_IP:
2789         switch(optname) {
2790         case IP_TOS:
2791         case IP_TTL:
2792         case IP_HDRINCL:
2793         case IP_ROUTER_ALERT:
2794         case IP_RECVOPTS:
2795         case IP_RETOPTS:
2796         case IP_PKTINFO:
2797         case IP_MTU_DISCOVER:
2798         case IP_RECVERR:
2799         case IP_RECVTOS:
2800 #ifdef IP_FREEBIND
2801         case IP_FREEBIND:
2802 #endif
2803         case IP_MULTICAST_TTL:
2804         case IP_MULTICAST_LOOP:
2805             if (get_user_u32(len, optlen))
2806                 return -TARGET_EFAULT;
2807             if (len < 0)
2808                 return -TARGET_EINVAL;
2809             lv = sizeof(lv);
2810             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2811             if (ret < 0)
2812                 return ret;
2813             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2814                 len = 1;
2815                 if (put_user_u32(len, optlen)
2816                     || put_user_u8(val, optval_addr))
2817                     return -TARGET_EFAULT;
2818             } else {
2819                 if (len > sizeof(int))
2820                     len = sizeof(int);
2821                 if (put_user_u32(len, optlen)
2822                     || put_user_u32(val, optval_addr))
2823                     return -TARGET_EFAULT;
2824             }
2825             break;
2826         default:
2827             ret = -TARGET_ENOPROTOOPT;
2828             break;
2829         }
2830         break;
2831     case SOL_IPV6:
2832         switch (optname) {
2833         case IPV6_MTU_DISCOVER:
2834         case IPV6_MTU:
2835         case IPV6_V6ONLY:
2836         case IPV6_RECVPKTINFO:
2837         case IPV6_UNICAST_HOPS:
2838         case IPV6_MULTICAST_HOPS:
2839         case IPV6_MULTICAST_LOOP:
2840         case IPV6_RECVERR:
2841         case IPV6_RECVHOPLIMIT:
2842         case IPV6_2292HOPLIMIT:
2843         case IPV6_CHECKSUM:
2844         case IPV6_ADDRFORM:
2845         case IPV6_2292PKTINFO:
2846         case IPV6_RECVTCLASS:
2847         case IPV6_RECVRTHDR:
2848         case IPV6_2292RTHDR:
2849         case IPV6_RECVHOPOPTS:
2850         case IPV6_2292HOPOPTS:
2851         case IPV6_RECVDSTOPTS:
2852         case IPV6_2292DSTOPTS:
2853         case IPV6_TCLASS:
2854         case IPV6_ADDR_PREFERENCES:
2855 #ifdef IPV6_RECVPATHMTU
2856         case IPV6_RECVPATHMTU:
2857 #endif
2858 #ifdef IPV6_TRANSPARENT
2859         case IPV6_TRANSPARENT:
2860 #endif
2861 #ifdef IPV6_FREEBIND
2862         case IPV6_FREEBIND:
2863 #endif
2864 #ifdef IPV6_RECVORIGDSTADDR
2865         case IPV6_RECVORIGDSTADDR:
2866 #endif
2867             if (get_user_u32(len, optlen))
2868                 return -TARGET_EFAULT;
2869             if (len < 0)
2870                 return -TARGET_EINVAL;
2871             lv = sizeof(lv);
2872             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2873             if (ret < 0)
2874                 return ret;
2875             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2876                 len = 1;
2877                 if (put_user_u32(len, optlen)
2878                     || put_user_u8(val, optval_addr))
2879                     return -TARGET_EFAULT;
2880             } else {
2881                 if (len > sizeof(int))
2882                     len = sizeof(int);
2883                 if (put_user_u32(len, optlen)
2884                     || put_user_u32(val, optval_addr))
2885                     return -TARGET_EFAULT;
2886             }
2887             break;
2888         default:
2889             ret = -TARGET_ENOPROTOOPT;
2890             break;
2891         }
2892         break;
2893 #ifdef SOL_NETLINK
2894     case SOL_NETLINK:
2895         switch (optname) {
2896         case NETLINK_PKTINFO:
2897         case NETLINK_BROADCAST_ERROR:
2898         case NETLINK_NO_ENOBUFS:
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2900         case NETLINK_LISTEN_ALL_NSID:
2901         case NETLINK_CAP_ACK:
2902 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2904         case NETLINK_EXT_ACK:
2905 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2906 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2907         case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2909             if (get_user_u32(len, optlen)) {
2910                 return -TARGET_EFAULT;
2911             }
2912             if (len != sizeof(val)) {
2913                 return -TARGET_EINVAL;
2914             }
2915             lv = len;
2916             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2917             if (ret < 0) {
2918                 return ret;
2919             }
2920             if (put_user_u32(lv, optlen)
2921                 || put_user_u32(val, optval_addr)) {
2922                 return -TARGET_EFAULT;
2923             }
2924             break;
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2926         case NETLINK_LIST_MEMBERSHIPS:
2927         {
2928             uint32_t *results;
2929             int i;
2930             if (get_user_u32(len, optlen)) {
2931                 return -TARGET_EFAULT;
2932             }
2933             if (len < 0) {
2934                 return -TARGET_EINVAL;
2935             }
2936             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2937             if (!results && len > 0) {
2938                 return -TARGET_EFAULT;
2939             }
2940             lv = len;
2941             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2942             if (ret < 0) {
2943                 unlock_user(results, optval_addr, 0);
2944                 return ret;
2945             }
2946             /* swap host endianness to target endianness. */
2947             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2948                 results[i] = tswap32(results[i]);
2949             }
2950             if (put_user_u32(lv, optlen)) {
2951                 return -TARGET_EFAULT;
2952             }
2953             unlock_user(results, optval_addr, 0);
2954             break;
2955         }
2956 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2957         default:
2958             goto unimplemented;
2959         }
2960         break;
2961 #endif /* SOL_NETLINK */
2962     default:
2963     unimplemented:
2964         qemu_log_mask(LOG_UNIMP,
2965                       "getsockopt level=%d optname=%d not yet supported\n",
2966                       level, optname);
2967         ret = -TARGET_EOPNOTSUPP;
2968         break;
2969     }
2970     return ret;
2971 }
2972 
/* Convert a target low/high pair representing a file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits,
 * as the kernel doesn't handle them either.
 */
2977 static void target_to_host_low_high(abi_ulong tlow,
2978                                     abi_ulong thigh,
2979                                     unsigned long *hlow,
2980                                     unsigned long *hhigh)
2981 {
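    /* The double half-width shifts are deliberate: a single shift by
     * TARGET_LONG_BITS (or HOST_LONG_BITS) would be undefined behaviour
     * when that equals the width of the type. With 32-bit longs the two
     * shifts combine into the expected 32-bit shift; with 64-bit longs
     * the high part is simply shifted out, as the low part already holds
     * the whole offset.
     */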
2982     uint64_t off = tlow |
2983         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2984         TARGET_LONG_BITS / 2;
2985 
2986     *hlow = off;
2987     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2988 }
2989 
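/* Lock a guest iovec array into host memory and return a matching host
 * iovec array. On failure NULL is returned with errno set (errno == 0
 * denotes an empty vector). A bad buffer address after the first entry is
 * not an error: the remaining entries are given zero length so that the
 * syscall performs a partial transfer instead of failing outright.
 */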
2990 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2991                                 abi_ulong count, int copy)
2992 {
2993     struct target_iovec *target_vec;
2994     struct iovec *vec;
2995     abi_ulong total_len, max_len;
2996     int i;
2997     int err = 0;
2998     bool bad_address = false;
2999 
3000     if (count == 0) {
3001         errno = 0;
3002         return NULL;
3003     }
3004     if (count > IOV_MAX) {
3005         errno = EINVAL;
3006         return NULL;
3007     }
3008 
3009     vec = g_try_new0(struct iovec, count);
3010     if (vec == NULL) {
3011         errno = ENOMEM;
3012         return NULL;
3013     }
3014 
3015     target_vec = lock_user(VERIFY_READ, target_addr,
3016                            count * sizeof(struct target_iovec), 1);
3017     if (target_vec == NULL) {
3018         err = EFAULT;
3019         goto fail2;
3020     }
3021 
3022     /* ??? If host page size > target page size, this will result in a
3023        value larger than what we can actually support.  */
3024     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3025     total_len = 0;
3026 
3027     for (i = 0; i < count; i++) {
3028         abi_ulong base = tswapal(target_vec[i].iov_base);
3029         abi_long len = tswapal(target_vec[i].iov_len);
3030 
3031         if (len < 0) {
3032             err = EINVAL;
3033             goto fail;
3034         } else if (len == 0) {
3035             /* Zero length pointer is ignored.  */
3036             vec[i].iov_base = 0;
3037         } else {
3038             vec[i].iov_base = lock_user(type, base, len, copy);
3039             /* If the first buffer pointer is bad, this is a fault.  But
3040              * subsequent bad buffers will result in a partial write; this
3041              * is realized by filling the vector with null pointers and
3042              * zero lengths. */
3043             if (!vec[i].iov_base) {
3044                 if (i == 0) {
3045                     err = EFAULT;
3046                     goto fail;
3047                 } else {
3048                     bad_address = true;
3049                 }
3050             }
3051             if (bad_address) {
3052                 len = 0;
3053             }
3054             if (len > max_len - total_len) {
3055                 len = max_len - total_len;
3056             }
3057         }
3058         vec[i].iov_len = len;
3059         total_len += len;
3060     }
3061 
3062     unlock_user(target_vec, target_addr, 0);
3063     return vec;
3064 
3065  fail:
3066     while (--i >= 0) {
3067         if (tswapal(target_vec[i].iov_len) > 0) {
3068             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3069         }
3070     }
3071     unlock_user(target_vec, target_addr, 0);
3072  fail2:
3073     g_free(vec);
3074     errno = err;
3075     return NULL;
3076 }
3077 
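/* Undo lock_iovec(): unlock every guest buffer, copying data back to the
 * guest when 'copy' is set, and free the host iovec array.
 */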
3078 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3079                          abi_ulong count, int copy)
3080 {
3081     struct target_iovec *target_vec;
3082     int i;
3083 
3084     target_vec = lock_user(VERIFY_READ, target_addr,
3085                            count * sizeof(struct target_iovec), 1);
3086     if (target_vec) {
3087         for (i = 0; i < count; i++) {
3088             abi_ulong base = tswapal(target_vec[i].iov_base);
3089             abi_long len = tswapal(target_vec[i].iov_len);
3090             if (len < 0) {
3091                 break;
3092             }
3093             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3094         }
3095         unlock_user(target_vec, target_addr, 0);
3096     }
3097 
3098     g_free(vec);
3099 }
3100 
3101 static inline int target_to_host_sock_type(int *type)
3102 {
3103     int host_type = 0;
3104     int target_type = *type;
3105 
3106     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3107     case TARGET_SOCK_DGRAM:
3108         host_type = SOCK_DGRAM;
3109         break;
3110     case TARGET_SOCK_STREAM:
3111         host_type = SOCK_STREAM;
3112         break;
3113     default:
3114         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3115         break;
3116     }
3117     if (target_type & TARGET_SOCK_CLOEXEC) {
3118 #if defined(SOCK_CLOEXEC)
3119         host_type |= SOCK_CLOEXEC;
3120 #else
3121         return -TARGET_EINVAL;
3122 #endif
3123     }
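    /*
     * TARGET_SOCK_NONBLOCK is handled in two stages: if the host has
     * SOCK_NONBLOCK we can pass it straight to socket(); otherwise, as long
     * as O_NONBLOCK exists, the flag is applied after creation by
     * sock_flags_fixup() below.  Only if neither is available do we give up
     * with -TARGET_EINVAL.
     */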
3124     if (target_type & TARGET_SOCK_NONBLOCK) {
3125 #if defined(SOCK_NONBLOCK)
3126         host_type |= SOCK_NONBLOCK;
3127 #elif !defined(O_NONBLOCK)
3128         return -TARGET_EINVAL;
3129 #endif
3130     }
3131     *type = host_type;
3132     return 0;
3133 }
3134 
3135 /* Try to emulate socket type flags after socket creation.  */
3136 static int sock_flags_fixup(int fd, int target_type)
3137 {
3138 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3139     if (target_type & TARGET_SOCK_NONBLOCK) {
3140         int flags = fcntl(fd, F_GETFL);
3141         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3142             close(fd);
3143             return -TARGET_EINVAL;
3144         }
3145     }
3146 #endif
3147     return fd;
3148 }
3149 
3150 /* do_socket() Must return target values and target errnos. */
3151 static abi_long do_socket(int domain, int type, int protocol)
3152 {
3153     int target_type = type;
3154     int ret;
3155 
3156     ret = target_to_host_sock_type(&type);
3157     if (ret) {
3158         return ret;
3159     }
3160 
3161     if (domain == PF_NETLINK && !(
3162 #ifdef CONFIG_RTNETLINK
3163          protocol == NETLINK_ROUTE ||
3164 #endif
3165          protocol == NETLINK_KOBJECT_UEVENT ||
3166          protocol == NETLINK_AUDIT)) {
3167         return -TARGET_EPROTONOSUPPORT;
3168     }
3169 
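    /*
     * Packet sockets take their protocol in network byte order as produced
     * by the guest's htons(); when guest and host endianness differ that
     * numeric value is not what the host kernel expects, so byteswap it
     * here (tswap16() is a no-op when the byte orders already match).
     */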
3170     if (domain == AF_PACKET ||
3171         (domain == AF_INET && type == SOCK_PACKET)) {
3172         protocol = tswap16(protocol);
3173     }
3174 
3175     ret = get_errno(socket(domain, type, protocol));
3176     if (ret >= 0) {
3177         ret = sock_flags_fixup(ret, target_type);
3178         if (type == SOCK_PACKET) {
3179             /* Handle an obsolete case:
3180              * if the socket type is SOCK_PACKET, bind by name.
3181              */
3182             fd_trans_register(ret, &target_packet_trans);
3183         } else if (domain == PF_NETLINK) {
3184             switch (protocol) {
3185 #ifdef CONFIG_RTNETLINK
3186             case NETLINK_ROUTE:
3187                 fd_trans_register(ret, &target_netlink_route_trans);
3188                 break;
3189 #endif
3190             case NETLINK_KOBJECT_UEVENT:
3191                 /* nothing to do: messages are strings */
3192                 break;
3193             case NETLINK_AUDIT:
3194                 fd_trans_register(ret, &target_netlink_audit_trans);
3195                 break;
3196             default:
3197                 g_assert_not_reached();
3198             }
3199         }
3200     }
3201     return ret;
3202 }
3203 
3204 /* do_bind() Must return target values and target errnos. */
3205 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3206                         socklen_t addrlen)
3207 {
3208     void *addr;
3209     abi_long ret;
3210 
3211     if ((int)addrlen < 0) {
3212         return -TARGET_EINVAL;
3213     }
3214 
3215     addr = alloca(addrlen+1);
3216 
3217     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3218     if (ret)
3219         return ret;
3220 
3221     return get_errno(bind(sockfd, addr, addrlen));
3222 }
3223 
3224 /* do_connect() Must return target values and target errnos. */
3225 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3226                            socklen_t addrlen)
3227 {
3228     void *addr;
3229     abi_long ret;
3230 
3231     if ((int)addrlen < 0) {
3232         return -TARGET_EINVAL;
3233     }
3234 
3235     addr = alloca(addrlen+1);
3236 
3237     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3238     if (ret)
3239         return ret;
3240 
3241     return get_errno(safe_connect(sockfd, addr, addrlen));
3242 }
3243 
3244 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3245 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3246                                       int flags, int send)
3247 {
3248     abi_long ret, len;
3249     struct msghdr msg;
3250     abi_ulong count;
3251     struct iovec *vec;
3252     abi_ulong target_vec;
3253 
3254     if (msgp->msg_name) {
3255         msg.msg_namelen = tswap32(msgp->msg_namelen);
3256         msg.msg_name = alloca(msg.msg_namelen+1);
3257         ret = target_to_host_sockaddr(fd, msg.msg_name,
3258                                       tswapal(msgp->msg_name),
3259                                       msg.msg_namelen);
3260         if (ret == -TARGET_EFAULT) {
3261             /* For connected sockets msg_name and msg_namelen must
3262              * be ignored, so returning EFAULT immediately is wrong.
3263              * Instead, pass a bad msg_name to the host kernel, and
3264              * let it decide whether to return EFAULT or not.
3265              */
3266             msg.msg_name = (void *)-1;
3267         } else if (ret) {
3268             goto out2;
3269         }
3270     } else {
3271         msg.msg_name = NULL;
3272         msg.msg_namelen = 0;
3273     }
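    /*
     * Reserve twice the target control length: the host cmsg encoding
     * (header size and alignment) can be larger than the guest's, so the
     * conversion done by target_to_host_cmsg() may need extra room.
     */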
3274     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3275     msg.msg_control = alloca(msg.msg_controllen);
3276     memset(msg.msg_control, 0, msg.msg_controllen);
3277 
3278     msg.msg_flags = tswap32(msgp->msg_flags);
3279 
3280     count = tswapal(msgp->msg_iovlen);
3281     target_vec = tswapal(msgp->msg_iov);
3282 
3283     if (count > IOV_MAX) {
3284         /* sendmsg/recvmsg return a different errno for this condition than
3285          * readv/writev do, so we must catch it here before lock_iovec() does.
3286          */
3287         ret = -TARGET_EMSGSIZE;
3288         goto out2;
3289     }
3290 
3291     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3292                      target_vec, count, send);
3293     if (vec == NULL) {
3294         ret = -host_to_target_errno(errno);
3295         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3296         if (!send || ret) {
3297             goto out2;
3298         }
3299     }
3300     msg.msg_iovlen = count;
3301     msg.msg_iov = vec;
3302 
3303     if (send) {
3304         if (fd_trans_target_to_host_data(fd)) {
3305             void *host_msg;
3306 
3307             host_msg = g_malloc(msg.msg_iov->iov_len);
3308             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3309             ret = fd_trans_target_to_host_data(fd)(host_msg,
3310                                                    msg.msg_iov->iov_len);
3311             if (ret >= 0) {
3312                 msg.msg_iov->iov_base = host_msg;
3313                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3314             }
3315             g_free(host_msg);
3316         } else {
3317             ret = target_to_host_cmsg(&msg, msgp);
3318             if (ret == 0) {
3319                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3320             }
3321         }
3322     } else {
3323         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3324         if (!is_error(ret)) {
3325             len = ret;
3326             if (fd_trans_host_to_target_data(fd)) {
3327                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3328                                                MIN(msg.msg_iov->iov_len, len));
3329             }
3330             if (!is_error(ret)) {
3331                 ret = host_to_target_cmsg(msgp, &msg);
3332             }
3333             if (!is_error(ret)) {
3334                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3335                 msgp->msg_flags = tswap32(msg.msg_flags);
3336                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3337                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3338                                     msg.msg_name, msg.msg_namelen);
3339                     if (ret) {
3340                         goto out;
3341                     }
3342                 }
3343 
3344                 ret = len;
3345             }
3346         }
3347     }
3348 
3349 out:
3350     if (vec) {
3351         unlock_iovec(vec, target_vec, count, !send);
3352     }
3353 out2:
3354     return ret;
3355 }
3356 
3357 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3358                                int flags, int send)
3359 {
3360     abi_long ret;
3361     struct target_msghdr *msgp;
3362 
3363     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3364                           msgp,
3365                           target_msg,
3366                           send ? 1 : 0)) {
3367         return -TARGET_EFAULT;
3368     }
3369     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3370     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3371     return ret;
3372 }
3373 
3374 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3375  * so it might not have this *mmsg-specific flag either.
3376  */
3377 #ifndef MSG_WAITFORONE
3378 #define MSG_WAITFORONE 0x10000
3379 #endif
3380 
3381 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3382                                 unsigned int vlen, unsigned int flags,
3383                                 int send)
3384 {
3385     struct target_mmsghdr *mmsgp;
3386     abi_long ret = 0;
3387     int i;
3388 
3389     if (vlen > UIO_MAXIOV) {
3390         vlen = UIO_MAXIOV;
3391     }
3392 
3393     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3394     if (!mmsgp) {
3395         return -TARGET_EFAULT;
3396     }
3397 
3398     for (i = 0; i < vlen; i++) {
3399         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3400         if (is_error(ret)) {
3401             break;
3402         }
3403         mmsgp[i].msg_len = tswap32(ret);
3404         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3405         if (flags & MSG_WAITFORONE) {
3406             flags |= MSG_DONTWAIT;
3407         }
3408     }
3409 
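    /*
     * Only the first i entries were processed, so copy back only that many
     * msg_len fields to the guest; the remaining entries are left untouched,
     * as the kernel would leave them.
     */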
3410     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3411 
3412     /* Return the number of datagrams sent or received if we handled
3413      * any at all; otherwise return the error.
3414      */
3415     if (i) {
3416         return i;
3417     }
3418     return ret;
3419 }
3420 
3421 /* do_accept4() Must return target values and target errnos. */
3422 static abi_long do_accept4(int fd, abi_ulong target_addr,
3423                            abi_ulong target_addrlen_addr, int flags)
3424 {
3425     socklen_t addrlen, ret_addrlen;
3426     void *addr;
3427     abi_long ret;
3428     int host_flags;
3429 
3430     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3431         return -TARGET_EINVAL;
3432     }
3433 
3434     host_flags = 0;
3435     if (flags & TARGET_SOCK_NONBLOCK) {
3436         host_flags |= SOCK_NONBLOCK;
3437     }
3438     if (flags & TARGET_SOCK_CLOEXEC) {
3439         host_flags |= SOCK_CLOEXEC;
3440     }
3441 
3442     if (target_addr == 0) {
3443         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3444     }
3445 
3446     /* Linux returns EFAULT if the addrlen pointer is invalid */
3447     if (get_user_u32(addrlen, target_addrlen_addr))
3448         return -TARGET_EFAULT;
3449 
3450     if ((int)addrlen < 0) {
3451         return -TARGET_EINVAL;
3452     }
3453 
3454     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3455         return -TARGET_EFAULT;
3456     }
3457 
3458     addr = alloca(addrlen);
3459 
3460     ret_addrlen = addrlen;
3461     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3462     if (!is_error(ret)) {
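        /*
         * Match kernel accept() semantics: copy back at most addrlen bytes
         * of the peer address, but report the full (possibly larger) length
         * so the guest can detect truncation.
         */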
3463         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3464         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3465             ret = -TARGET_EFAULT;
3466         }
3467     }
3468     return ret;
3469 }
3470 
3471 /* do_getpeername() Must return target values and target errnos. */
3472 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3473                                abi_ulong target_addrlen_addr)
3474 {
3475     socklen_t addrlen, ret_addrlen;
3476     void *addr;
3477     abi_long ret;
3478 
3479     if (get_user_u32(addrlen, target_addrlen_addr))
3480         return -TARGET_EFAULT;
3481 
3482     if ((int)addrlen < 0) {
3483         return -TARGET_EINVAL;
3484     }
3485 
3486     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3487         return -TARGET_EFAULT;
3488     }
3489 
3490     addr = alloca(addrlen);
3491 
3492     ret_addrlen = addrlen;
3493     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3494     if (!is_error(ret)) {
3495         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3496         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3497             ret = -TARGET_EFAULT;
3498         }
3499     }
3500     return ret;
3501 }
3502 
3503 /* do_getsockname() Must return target values and target errnos. */
3504 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3505                                abi_ulong target_addrlen_addr)
3506 {
3507     socklen_t addrlen, ret_addrlen;
3508     void *addr;
3509     abi_long ret;
3510 
3511     if (get_user_u32(addrlen, target_addrlen_addr))
3512         return -TARGET_EFAULT;
3513 
3514     if ((int)addrlen < 0) {
3515         return -TARGET_EINVAL;
3516     }
3517 
3518     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3519         return -TARGET_EFAULT;
3520     }
3521 
3522     addr = alloca(addrlen);
3523 
3524     ret_addrlen = addrlen;
3525     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3526     if (!is_error(ret)) {
3527         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3528         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3529             ret = -TARGET_EFAULT;
3530         }
3531     }
3532     return ret;
3533 }
3534 
3535 /* do_socketpair() Must return target values and target errnos. */
3536 static abi_long do_socketpair(int domain, int type, int protocol,
3537                               abi_ulong target_tab_addr)
3538 {
3539     int tab[2];
3540     abi_long ret;
3541 
3542     target_to_host_sock_type(&type);
3543 
3544     ret = get_errno(socketpair(domain, type, protocol, tab));
3545     if (!is_error(ret)) {
3546         if (put_user_s32(tab[0], target_tab_addr)
3547             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3548             ret = -TARGET_EFAULT;
3549     }
3550     return ret;
3551 }
3552 
3553 /* do_sendto() Must return target values and target errnos. */
3554 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3555                           abi_ulong target_addr, socklen_t addrlen)
3556 {
3557     void *addr;
3558     void *host_msg;
3559     void *copy_msg = NULL;
3560     abi_long ret;
3561 
3562     if ((int)addrlen < 0) {
3563         return -TARGET_EINVAL;
3564     }
3565 
3566     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3567     if (!host_msg)
3568         return -TARGET_EFAULT;
3569     if (fd_trans_target_to_host_data(fd)) {
3570         copy_msg = host_msg;
3571         host_msg = g_malloc(len);
3572         memcpy(host_msg, copy_msg, len);
3573         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3574         if (ret < 0) {
3575             goto fail;
3576         }
3577     }
3578     if (target_addr) {
3579         addr = alloca(addrlen+1);
3580         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3581         if (ret) {
3582             goto fail;
3583         }
3584         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3585     } else {
3586         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3587     }
3588 fail:
3589     if (copy_msg) {
3590         g_free(host_msg);
3591         host_msg = copy_msg;
3592     }
3593     unlock_user(host_msg, msg, 0);
3594     return ret;
3595 }
3596 
3597 /* do_recvfrom() Must return target values and target errnos. */
3598 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3599                             abi_ulong target_addr,
3600                             abi_ulong target_addrlen)
3601 {
3602     socklen_t addrlen, ret_addrlen;
3603     void *addr;
3604     void *host_msg;
3605     abi_long ret;
3606 
3607     if (!msg) {
3608         host_msg = NULL;
3609     } else {
3610         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3611         if (!host_msg) {
3612             return -TARGET_EFAULT;
3613         }
3614     }
3615     if (target_addr) {
3616         if (get_user_u32(addrlen, target_addrlen)) {
3617             ret = -TARGET_EFAULT;
3618             goto fail;
3619         }
3620         if ((int)addrlen < 0) {
3621             ret = -TARGET_EINVAL;
3622             goto fail;
3623         }
3624         addr = alloca(addrlen);
3625         ret_addrlen = addrlen;
3626         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3627                                       addr, &ret_addrlen));
3628     } else {
3629         addr = NULL; /* To keep compiler quiet.  */
3630         addrlen = 0; /* To keep compiler quiet.  */
3631         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3632     }
3633     if (!is_error(ret)) {
3634         if (fd_trans_host_to_target_data(fd)) {
3635             abi_long trans;
3636             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3637             if (is_error(trans)) {
3638                 ret = trans;
3639                 goto fail;
3640             }
3641         }
3642         if (target_addr) {
3643             host_to_target_sockaddr(target_addr, addr,
3644                                     MIN(addrlen, ret_addrlen));
3645             if (put_user_u32(ret_addrlen, target_addrlen)) {
3646                 ret = -TARGET_EFAULT;
3647                 goto fail;
3648             }
3649         }
3650         unlock_user(host_msg, msg, len);
3651     } else {
3652 fail:
3653         unlock_user(host_msg, msg, 0);
3654     }
3655     return ret;
3656 }
3657 
3658 #ifdef TARGET_NR_socketcall
3659 /* do_socketcall() must return target values and target errnos. */
3660 static abi_long do_socketcall(int num, abi_ulong vptr)
3661 {
3662     static const unsigned nargs[] = { /* number of arguments per operation */
3663         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3664         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3665         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3666         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3667         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3668         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3669         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3670         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3671         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3672         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3673         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3674         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3675         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3676         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3677         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3678         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3679         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3680         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3681         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3682         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3683     };
3684     abi_long a[6]; /* max 6 args */
3685     unsigned i;
3686 
3687     /* check the range of the first argument num */
3688     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3689     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3690         return -TARGET_EINVAL;
3691     }
3692     /* ensure we have space for args */
3693     if (nargs[num] > ARRAY_SIZE(a)) {
3694         return -TARGET_EINVAL;
3695     }
3696     /* collect the arguments in a[] according to nargs[] */
3697     for (i = 0; i < nargs[num]; ++i) {
3698         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3699             return -TARGET_EFAULT;
3700         }
3701     }
3702     /* now when we have the args, invoke the appropriate underlying function */
3703     switch (num) {
3704     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3705         return do_socket(a[0], a[1], a[2]);
3706     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3707         return do_bind(a[0], a[1], a[2]);
3708     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3709         return do_connect(a[0], a[1], a[2]);
3710     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3711         return get_errno(listen(a[0], a[1]));
3712     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3713         return do_accept4(a[0], a[1], a[2], 0);
3714     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3715         return do_getsockname(a[0], a[1], a[2]);
3716     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3717         return do_getpeername(a[0], a[1], a[2]);
3718     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3719         return do_socketpair(a[0], a[1], a[2], a[3]);
3720     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3721         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3722     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3723         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3724     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3725         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3726     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3727         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3728     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3729         return get_errno(shutdown(a[0], a[1]));
3730     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3731         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3732     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3733         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3734     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3735         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3736     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3737         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3738     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3739         return do_accept4(a[0], a[1], a[2], a[3]);
3740     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3741         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3742     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3743         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3744     default:
3745         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3746         return -TARGET_EINVAL;
3747     }
3748 }
3749 #endif
3750 
3751 #ifndef TARGET_SEMID64_DS
3752 /* asm-generic version of this struct */
3753 struct target_semid64_ds
3754 {
3755   struct target_ipc_perm sem_perm;
3756   abi_ulong sem_otime;
3757 #if TARGET_ABI_BITS == 32
3758   abi_ulong __unused1;
3759 #endif
3760   abi_ulong sem_ctime;
3761 #if TARGET_ABI_BITS == 32
3762   abi_ulong __unused2;
3763 #endif
3764   abi_ulong sem_nsems;
3765   abi_ulong __unused3;
3766   abi_ulong __unused4;
3767 };
3768 #endif
3769 
3770 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3771                                                abi_ulong target_addr)
3772 {
3773     struct target_ipc_perm *target_ip;
3774     struct target_semid64_ds *target_sd;
3775 
3776     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3777         return -TARGET_EFAULT;
3778     target_ip = &(target_sd->sem_perm);
3779     host_ip->__key = tswap32(target_ip->__key);
3780     host_ip->uid = tswap32(target_ip->uid);
3781     host_ip->gid = tswap32(target_ip->gid);
3782     host_ip->cuid = tswap32(target_ip->cuid);
3783     host_ip->cgid = tswap32(target_ip->cgid);
3784 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3785     host_ip->mode = tswap32(target_ip->mode);
3786 #else
3787     host_ip->mode = tswap16(target_ip->mode);
3788 #endif
3789 #if defined(TARGET_PPC)
3790     host_ip->__seq = tswap32(target_ip->__seq);
3791 #else
3792     host_ip->__seq = tswap16(target_ip->__seq);
3793 #endif
3794     unlock_user_struct(target_sd, target_addr, 0);
3795     return 0;
3796 }
3797 
3798 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3799                                                struct ipc_perm *host_ip)
3800 {
3801     struct target_ipc_perm *target_ip;
3802     struct target_semid64_ds *target_sd;
3803 
3804     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3805         return -TARGET_EFAULT;
3806     target_ip = &(target_sd->sem_perm);
3807     target_ip->__key = tswap32(host_ip->__key);
3808     target_ip->uid = tswap32(host_ip->uid);
3809     target_ip->gid = tswap32(host_ip->gid);
3810     target_ip->cuid = tswap32(host_ip->cuid);
3811     target_ip->cgid = tswap32(host_ip->cgid);
3812 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3813     target_ip->mode = tswap32(host_ip->mode);
3814 #else
3815     target_ip->mode = tswap16(host_ip->mode);
3816 #endif
3817 #if defined(TARGET_PPC)
3818     target_ip->__seq = tswap32(host_ip->__seq);
3819 #else
3820     target_ip->__seq = tswap16(host_ip->__seq);
3821 #endif
3822     unlock_user_struct(target_sd, target_addr, 1);
3823     return 0;
3824 }
3825 
3826 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3827                                                abi_ulong target_addr)
3828 {
3829     struct target_semid64_ds *target_sd;
3830 
3831     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3832         return -TARGET_EFAULT;
3833     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3834         return -TARGET_EFAULT;
3835     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3836     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3837     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3838     unlock_user_struct(target_sd, target_addr, 0);
3839     return 0;
3840 }
3841 
3842 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3843                                                struct semid_ds *host_sd)
3844 {
3845     struct target_semid64_ds *target_sd;
3846 
3847     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3848         return -TARGET_EFAULT;
3849     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3850         return -TARGET_EFAULT;
3851     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3852     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3853     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3854     unlock_user_struct(target_sd, target_addr, 1);
3855     return 0;
3856 }
3857 
3858 struct target_seminfo {
3859     int semmap;
3860     int semmni;
3861     int semmns;
3862     int semmnu;
3863     int semmsl;
3864     int semopm;
3865     int semume;
3866     int semusz;
3867     int semvmx;
3868     int semaem;
3869 };
3870 
3871 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3872                                               struct seminfo *host_seminfo)
3873 {
3874     struct target_seminfo *target_seminfo;
3875     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3876         return -TARGET_EFAULT;
3877     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3878     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3879     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3880     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3881     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3882     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3883     __put_user(host_seminfo->semume, &target_seminfo->semume);
3884     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3885     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3886     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3887     unlock_user_struct(target_seminfo, target_addr, 1);
3888     return 0;
3889 }
3890 
3891 union semun {
3892     int val;
3893     struct semid_ds *buf;
3894     unsigned short *array;
3895     struct seminfo *__buf;
3896 };
3897 
3898 union target_semun {
3899     int val;
3900     abi_ulong buf;
3901     abi_ulong array;
3902     abi_ulong __buf;
3903 };
3904 
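/*
 * target_to_host_semarray() queries the semaphore set with IPC_STAT to learn
 * nsems, allocates *host_array to that size and fills it from guest memory;
 * host_to_target_semarray() copies the values back and frees the array.  The
 * two are used as a pair by the GETALL/SETALL cases of do_semctl() below.
 */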
3905 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3906                                                abi_ulong target_addr)
3907 {
3908     int nsems;
3909     unsigned short *array;
3910     union semun semun;
3911     struct semid_ds semid_ds;
3912     int i, ret;
3913 
3914     semun.buf = &semid_ds;
3915 
3916     ret = semctl(semid, 0, IPC_STAT, semun);
3917     if (ret == -1)
3918         return get_errno(ret);
3919 
3920     nsems = semid_ds.sem_nsems;
3921 
3922     *host_array = g_try_new(unsigned short, nsems);
3923     if (!*host_array) {
3924         return -TARGET_ENOMEM;
3925     }
3926     array = lock_user(VERIFY_READ, target_addr,
3927                       nsems*sizeof(unsigned short), 1);
3928     if (!array) {
3929         g_free(*host_array);
3930         return -TARGET_EFAULT;
3931     }
3932 
3933     for (i = 0; i < nsems; i++) {
3934         __get_user((*host_array)[i], &array[i]);
3935     }
3936     unlock_user(array, target_addr, 0);
3937 
3938     return 0;
3939 }
3940 
3941 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3942                                                unsigned short **host_array)
3943 {
3944     int nsems;
3945     unsigned short *array;
3946     union semun semun;
3947     struct semid_ds semid_ds;
3948     int i, ret;
3949 
3950     semun.buf = &semid_ds;
3951 
3952     ret = semctl(semid, 0, IPC_STAT, semun);
3953     if (ret == -1)
3954         return get_errno(ret);
3955 
3956     nsems = semid_ds.sem_nsems;
3957 
3958     array = lock_user(VERIFY_WRITE, target_addr,
3959                       nsems*sizeof(unsigned short), 0);
3960     if (!array)
3961         return -TARGET_EFAULT;
3962 
3963     for (i = 0; i < nsems; i++) {
3964         __put_user((*host_array)[i], &array[i]);
3965     }
3966     g_free(*host_array);
3967     unlock_user(array, target_addr, 1);
3968 
3969     return 0;
3970 }
3971 
3972 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3973                                  abi_ulong target_arg)
3974 {
3975     union target_semun target_su = { .buf = target_arg };
3976     union semun arg;
3977     struct semid_ds dsarg;
3978     unsigned short *array = NULL;
3979     struct seminfo seminfo;
3980     abi_long ret = -TARGET_EINVAL;
3981     abi_long err;
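    /*
     * 32-bit guests may OR the IPC_64 flag (0x100) into the command to
     * request the 64-bit structure layout; the host libc always uses that
     * layout, so strip the flag before dispatching.
     */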
3982     cmd &= 0xff;
3983 
3984     switch (cmd) {
3985     case GETVAL:
3986     case SETVAL:
3987         /* In 64 bit cross-endian situations, we will erroneously pick up
3988          * the wrong half of the union for the "val" element.  To rectify
3989          * this, the entire 8-byte structure is byteswapped, followed by
3990          * a swap of the 4 byte val field. In other cases, the data is
3991          * already in proper host byte order. */
3992         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3993             target_su.buf = tswapal(target_su.buf);
3994             arg.val = tswap32(target_su.val);
3995         } else {
3996             arg.val = target_su.val;
3997         }
3998         ret = get_errno(semctl(semid, semnum, cmd, arg));
3999         break;
4000     case GETALL:
4001     case SETALL:
4002         err = target_to_host_semarray(semid, &array, target_su.array);
4003         if (err)
4004             return err;
4005         arg.array = array;
4006         ret = get_errno(semctl(semid, semnum, cmd, arg));
4007         err = host_to_target_semarray(semid, target_su.array, &array);
4008         if (err)
4009             return err;
4010         break;
4011     case IPC_STAT:
4012     case IPC_SET:
4013     case SEM_STAT:
4014         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4015         if (err)
4016             return err;
4017         arg.buf = &dsarg;
4018         ret = get_errno(semctl(semid, semnum, cmd, arg));
4019         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4020         if (err)
4021             return err;
4022         break;
4023     case IPC_INFO:
4024     case SEM_INFO:
4025         arg.__buf = &seminfo;
4026         ret = get_errno(semctl(semid, semnum, cmd, arg));
4027         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4028         if (err)
4029             return err;
4030         break;
4031     case IPC_RMID:
4032     case GETPID:
4033     case GETNCNT:
4034     case GETZCNT:
4035         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4036         break;
4037     }
4038 
4039     return ret;
4040 }
4041 
4042 struct target_sembuf {
4043     unsigned short sem_num;
4044     short sem_op;
4045     short sem_flg;
4046 };
4047 
4048 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4049                                              abi_ulong target_addr,
4050                                              unsigned nsops)
4051 {
4052     struct target_sembuf *target_sembuf;
4053     int i;
4054 
4055     target_sembuf = lock_user(VERIFY_READ, target_addr,
4056                               nsops*sizeof(struct target_sembuf), 1);
4057     if (!target_sembuf)
4058         return -TARGET_EFAULT;
4059 
4060     for (i = 0; i < nsops; i++) {
4061         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4062         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4063         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4064     }
4065 
4066     unlock_user(target_sembuf, target_addr, 0);
4067 
4068     return 0;
4069 }
4070 
4071 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4072     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4073 
4074 /*
4075  * This macro is required to handle the s390 variants, which pass the
4076  * arguments in a different order than the default.
4077  */
4078 #ifdef __s390x__
4079 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4080   (__nsops), (__timeout), (__sops)
4081 #else
4082 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4083   (__nsops), 0, (__sops), (__timeout)
4084 #endif
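
/*
 * For example, the safe_ipc() call in do_semtimedop() below expands to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 * on s390x, and to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 * everywhere else.
 */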
4085 
4086 static inline abi_long do_semtimedop(int semid,
4087                                      abi_long ptr,
4088                                      unsigned nsops,
4089                                      abi_long timeout, bool time64)
4090 {
4091     struct sembuf *sops;
4092     struct timespec ts, *pts = NULL;
4093     abi_long ret;
4094 
4095     if (timeout) {
4096         pts = &ts;
4097         if (time64) {
4098             if (target_to_host_timespec64(pts, timeout)) {
4099                 return -TARGET_EFAULT;
4100             }
4101         } else {
4102             if (target_to_host_timespec(pts, timeout)) {
4103                 return -TARGET_EFAULT;
4104             }
4105         }
4106     }
4107 
4108     if (nsops > TARGET_SEMOPM) {
4109         return -TARGET_E2BIG;
4110     }
4111 
4112     sops = g_new(struct sembuf, nsops);
4113 
4114     if (target_to_host_sembuf(sops, ptr, nsops)) {
4115         g_free(sops);
4116         return -TARGET_EFAULT;
4117     }
4118 
4119     ret = -TARGET_ENOSYS;
4120 #ifdef __NR_semtimedop
4121     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4122 #endif
4123 #ifdef __NR_ipc
4124     if (ret == -TARGET_ENOSYS) {
4125         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4126                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4127     }
4128 #endif
4129     g_free(sops);
4130     return ret;
4131 }
4132 #endif
4133 
4134 struct target_msqid_ds
4135 {
4136     struct target_ipc_perm msg_perm;
4137     abi_ulong msg_stime;
4138 #if TARGET_ABI_BITS == 32
4139     abi_ulong __unused1;
4140 #endif
4141     abi_ulong msg_rtime;
4142 #if TARGET_ABI_BITS == 32
4143     abi_ulong __unused2;
4144 #endif
4145     abi_ulong msg_ctime;
4146 #if TARGET_ABI_BITS == 32
4147     abi_ulong __unused3;
4148 #endif
4149     abi_ulong __msg_cbytes;
4150     abi_ulong msg_qnum;
4151     abi_ulong msg_qbytes;
4152     abi_ulong msg_lspid;
4153     abi_ulong msg_lrpid;
4154     abi_ulong __unused4;
4155     abi_ulong __unused5;
4156 };
4157 
4158 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4159                                                abi_ulong target_addr)
4160 {
4161     struct target_msqid_ds *target_md;
4162 
4163     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4164         return -TARGET_EFAULT;
4165     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4166         return -TARGET_EFAULT;
4167     host_md->msg_stime = tswapal(target_md->msg_stime);
4168     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4169     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4170     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4171     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4172     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4173     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4174     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4175     unlock_user_struct(target_md, target_addr, 0);
4176     return 0;
4177 }
4178 
4179 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4180                                                struct msqid_ds *host_md)
4181 {
4182     struct target_msqid_ds *target_md;
4183 
4184     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4185         return -TARGET_EFAULT;
4186     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4187         return -TARGET_EFAULT;
4188     target_md->msg_stime = tswapal(host_md->msg_stime);
4189     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4190     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4191     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4192     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4193     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4194     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4195     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4196     unlock_user_struct(target_md, target_addr, 1);
4197     return 0;
4198 }
4199 
4200 struct target_msginfo {
4201     int msgpool;
4202     int msgmap;
4203     int msgmax;
4204     int msgmnb;
4205     int msgmni;
4206     int msgssz;
4207     int msgtql;
4208     unsigned short int msgseg;
4209 };
4210 
4211 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4212                                               struct msginfo *host_msginfo)
4213 {
4214     struct target_msginfo *target_msginfo;
4215     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4216         return -TARGET_EFAULT;
4217     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4218     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4219     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4220     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4221     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4222     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4223     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4224     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4225     unlock_user_struct(target_msginfo, target_addr, 1);
4226     return 0;
4227 }
4228 
4229 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4230 {
4231     struct msqid_ds dsarg;
4232     struct msginfo msginfo;
4233     abi_long ret = -TARGET_EINVAL;
4234 
4235     cmd &= 0xff;
4236 
4237     switch (cmd) {
4238     case IPC_STAT:
4239     case IPC_SET:
4240     case MSG_STAT:
4241         if (target_to_host_msqid_ds(&dsarg,ptr))
4242             return -TARGET_EFAULT;
4243         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4244         if (host_to_target_msqid_ds(ptr,&dsarg))
4245             return -TARGET_EFAULT;
4246         break;
4247     case IPC_RMID:
4248         ret = get_errno(msgctl(msgid, cmd, NULL));
4249         break;
4250     case IPC_INFO:
4251     case MSG_INFO:
4252         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4253         if (host_to_target_msginfo(ptr, &msginfo))
4254             return -TARGET_EFAULT;
4255         break;
4256     }
4257 
4258     return ret;
4259 }
4260 
4261 struct target_msgbuf {
4262     abi_long mtype;
4263     char mtext[1];
4264 };
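
/*
 * The guest mtext immediately follows an abi_long mtype, while the host
 * struct msgbuf starts with a native long, so the message cannot be copied
 * as a single block when the two sizes differ; do_msgsnd()/do_msgrcv()
 * therefore convert mtype separately and memcpy only the mtext payload.
 */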
4265 
4266 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4267                                  ssize_t msgsz, int msgflg)
4268 {
4269     struct target_msgbuf *target_mb;
4270     struct msgbuf *host_mb;
4271     abi_long ret = 0;
4272 
4273     if (msgsz < 0) {
4274         return -TARGET_EINVAL;
4275     }
4276 
4277     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4278         return -TARGET_EFAULT;
4279     host_mb = g_try_malloc(msgsz + sizeof(long));
4280     if (!host_mb) {
4281         unlock_user_struct(target_mb, msgp, 0);
4282         return -TARGET_ENOMEM;
4283     }
4284     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4285     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4286     ret = -TARGET_ENOSYS;
4287 #ifdef __NR_msgsnd
4288     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4289 #endif
4290 #ifdef __NR_ipc
4291     if (ret == -TARGET_ENOSYS) {
4292 #ifdef __s390x__
4293         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4294                                  host_mb));
4295 #else
4296         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4297                                  host_mb, 0));
4298 #endif
4299     }
4300 #endif
4301     g_free(host_mb);
4302     unlock_user_struct(target_mb, msgp, 0);
4303 
4304     return ret;
4305 }
4306 
4307 #ifdef __NR_ipc
4308 #if defined(__sparc__)
4309 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4310 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4311 #elif defined(__s390x__)
4312 /* The s390 sys_ipc variant has only five parameters.  */
4313 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4314     ((long int[]){(long int)__msgp, __msgtyp})
4315 #else
4316 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4317     ((long int[]){(long int)__msgp, __msgtyp}), 0
4318 #endif
4319 #endif
4320 
4321 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4322                                  ssize_t msgsz, abi_long msgtyp,
4323                                  int msgflg)
4324 {
4325     struct target_msgbuf *target_mb;
4326     char *target_mtext;
4327     struct msgbuf *host_mb;
4328     abi_long ret = 0;
4329 
4330     if (msgsz < 0) {
4331         return -TARGET_EINVAL;
4332     }
4333 
4334     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4335         return -TARGET_EFAULT;
4336 
4337     host_mb = g_try_malloc(msgsz + sizeof(long));
4338     if (!host_mb) {
4339         ret = -TARGET_ENOMEM;
4340         goto end;
4341     }
4342     ret = -TARGET_ENOSYS;
4343 #ifdef __NR_msgrcv
4344     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4345 #endif
4346 #ifdef __NR_ipc
4347     if (ret == -TARGET_ENOSYS) {
4348         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4349                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4350     }
4351 #endif
4352 
4353     if (ret > 0) {
4354         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4355         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4356         if (!target_mtext) {
4357             ret = -TARGET_EFAULT;
4358             goto end;
4359         }
4360         memcpy(target_mb->mtext, host_mb->mtext, ret);
4361         unlock_user(target_mtext, target_mtext_addr, ret);
4362     }
4363 
4364     target_mb->mtype = tswapal(host_mb->mtype);
4365 
4366 end:
4367     if (target_mb)
4368         unlock_user_struct(target_mb, msgp, 1);
4369     g_free(host_mb);
4370     return ret;
4371 }
4372 
4373 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4374                                                abi_ulong target_addr)
4375 {
4376     struct target_shmid_ds *target_sd;
4377 
4378     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4379         return -TARGET_EFAULT;
4380     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4381         return -TARGET_EFAULT;
4382     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4383     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4384     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4385     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4386     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4387     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4388     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4389     unlock_user_struct(target_sd, target_addr, 0);
4390     return 0;
4391 }
4392 
4393 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4394                                                struct shmid_ds *host_sd)
4395 {
4396     struct target_shmid_ds *target_sd;
4397 
4398     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4399         return -TARGET_EFAULT;
4400     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4401         return -TARGET_EFAULT;
4402     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4403     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4404     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4405     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4406     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4407     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4408     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4409     unlock_user_struct(target_sd, target_addr, 1);
4410     return 0;
4411 }
4412 
4413 struct  target_shminfo {
4414     abi_ulong shmmax;
4415     abi_ulong shmmin;
4416     abi_ulong shmmni;
4417     abi_ulong shmseg;
4418     abi_ulong shmall;
4419 };
4420 
4421 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4422                                               struct shminfo *host_shminfo)
4423 {
4424     struct target_shminfo *target_shminfo;
4425     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4426         return -TARGET_EFAULT;
4427     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4428     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4429     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4430     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4431     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4432     unlock_user_struct(target_shminfo, target_addr, 1);
4433     return 0;
4434 }
4435 
4436 struct target_shm_info {
4437     int used_ids;
4438     abi_ulong shm_tot;
4439     abi_ulong shm_rss;
4440     abi_ulong shm_swp;
4441     abi_ulong swap_attempts;
4442     abi_ulong swap_successes;
4443 };
4444 
4445 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4446                                                struct shm_info *host_shm_info)
4447 {
4448     struct target_shm_info *target_shm_info;
4449     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4450         return -TARGET_EFAULT;
4451     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4452     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4453     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4454     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4455     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4456     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4457     unlock_user_struct(target_shm_info, target_addr, 1);
4458     return 0;
4459 }
4460 
4461 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4462 {
4463     struct shmid_ds dsarg;
4464     struct shminfo shminfo;
4465     struct shm_info shm_info;
4466     abi_long ret = -TARGET_EINVAL;
4467 
4468     cmd &= 0xff;
4469 
4470     switch (cmd) {
4471     case IPC_STAT:
4472     case IPC_SET:
4473     case SHM_STAT:
4474         if (target_to_host_shmid_ds(&dsarg, buf))
4475             return -TARGET_EFAULT;
4476         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4477         if (host_to_target_shmid_ds(buf, &dsarg))
4478             return -TARGET_EFAULT;
4479         break;
4480     case IPC_INFO:
4481         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4482         if (host_to_target_shminfo(buf, &shminfo))
4483             return -TARGET_EFAULT;
4484         break;
4485     case SHM_INFO:
4486         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4487         if (host_to_target_shm_info(buf, &shm_info))
4488             return -TARGET_EFAULT;
4489         break;
4490     case IPC_RMID:
4491     case SHM_LOCK:
4492     case SHM_UNLOCK:
4493         ret = get_errno(shmctl(shmid, cmd, NULL));
4494         break;
4495     }
4496 
4497     return ret;
4498 }
4499 
4500 #ifdef TARGET_NR_ipc
4501 /* ??? This only works with linear mappings.  */
4502 /* do_ipc() must return target values and target errnos. */
4503 static abi_long do_ipc(CPUArchState *cpu_env,
4504                        unsigned int call, abi_long first,
4505                        abi_long second, abi_long third,
4506                        abi_long ptr, abi_long fifth)
4507 {
4508     int version;
4509     abi_long ret = 0;
4510 
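    /*
     * The ipc() multiplexer encodes an interface version in the upper 16
     * bits of the call number and the operation in the lower 16 bits; the
     * version selects between historical argument layouts (see the
     * IPCOP_msgrcv and IPCOP_shmat cases below).
     */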
4511     version = call >> 16;
4512     call &= 0xffff;
4513 
4514     switch (call) {
4515     case IPCOP_semop:
4516         ret = do_semtimedop(first, ptr, second, 0, false);
4517         break;
4518     case IPCOP_semtimedop:
4519     /*
4520      * The s390 sys_ipc variant has only five parameters instead of six
4521      * (as in the default variant); the only difference is the handling of
4522      * SEMTIMEDOP, where on s390 the third parameter is a pointer to a
4523      * struct timespec while the generic variant uses the fifth parameter.
4524      */
4525 #if defined(TARGET_S390X)
4526         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4527 #else
4528         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4529 #endif
4530         break;
4531 
4532     case IPCOP_semget:
4533         ret = get_errno(semget(first, second, third));
4534         break;
4535 
4536     case IPCOP_semctl: {
4537         /* The semun argument to semctl is passed by value, so dereference the
4538          * ptr argument. */
4539         abi_ulong atptr;
4540         get_user_ual(atptr, ptr);
4541         ret = do_semctl(first, second, third, atptr);
4542         break;
4543     }
4544 
4545     case IPCOP_msgget:
4546         ret = get_errno(msgget(first, second));
4547         break;
4548 
4549     case IPCOP_msgsnd:
4550         ret = do_msgsnd(first, ptr, second, third);
4551         break;
4552 
4553     case IPCOP_msgctl:
4554         ret = do_msgctl(first, second, ptr);
4555         break;
4556 
4557     case IPCOP_msgrcv:
4558         switch (version) {
4559         case 0:
4560             {
4561                 struct target_ipc_kludge {
4562                     abi_long msgp;
4563                     abi_long msgtyp;
4564                 } *tmp;
4565 
4566                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4567                     ret = -TARGET_EFAULT;
4568                     break;
4569                 }
4570 
4571                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4572 
4573                 unlock_user_struct(tmp, ptr, 0);
4574                 break;
4575             }
4576         default:
4577             ret = do_msgrcv(first, ptr, second, fifth, third);
4578         }
4579         break;
4580 
4581     case IPCOP_shmat:
4582         switch (version) {
4583         default:
4584         {
4585             abi_ulong raddr;
4586             raddr = target_shmat(cpu_env, first, ptr, second);
4587             if (is_error(raddr))
4588                 return get_errno(raddr);
4589             if (put_user_ual(raddr, third))
4590                 return -TARGET_EFAULT;
4591             break;
4592         }
4593         case 1:
4594             ret = -TARGET_EINVAL;
4595             break;
4596         }
4597         break;
4598     case IPCOP_shmdt:
4599         ret = target_shmdt(ptr);
4600         break;
4601 
4602     case IPCOP_shmget:
4603         /* IPC_* flag values are the same on all linux platforms */
4604         ret = get_errno(shmget(first, second, third));
4605         break;
4606 
4607         /* IPC_* and SHM_* command values are the same on all linux platforms */
4608     case IPCOP_shmctl:
4609         ret = do_shmctl(first, second, ptr);
4610         break;
4611     default:
4612         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4613                       call, version);
4614         ret = -TARGET_ENOSYS;
4615         break;
4616     }
4617     return ret;
4618 }
4619 #endif
4620 
4621 /* kernel structure types definitions */
4622 
4623 #define STRUCT(name, ...) STRUCT_ ## name,
4624 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4625 enum {
4626 #include "syscall_types.h"
4627 STRUCT_MAX
4628 };
4629 #undef STRUCT
4630 #undef STRUCT_SPECIAL
4631 
4632 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4633 #define STRUCT_SPECIAL(name)
4634 #include "syscall_types.h"
4635 #undef STRUCT
4636 #undef STRUCT_SPECIAL
4637 
4638 #define MAX_STRUCT_SIZE 4096
4639 
4640 #ifdef CONFIG_FIEMAP
4641 /* So fiemap access checks don't overflow on 32 bit systems.
4642  * This is very slightly smaller than the limit imposed by
4643  * the underlying kernel.
4644  */
4645 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4646                             / sizeof(struct fiemap_extent))
4647 
4648 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4649                                        int fd, int cmd, abi_long arg)
4650 {
4651     /* The parameter for this ioctl is a struct fiemap followed
4652      * by an array of struct fiemap_extent whose size is set
4653      * in fiemap->fm_extent_count. The array is filled in by the
4654      * ioctl.
4655      */
4656     int target_size_in, target_size_out;
4657     struct fiemap *fm;
4658     const argtype *arg_type = ie->arg_type;
4659     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4660     void *argptr, *p;
4661     abi_long ret;
4662     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4663     uint32_t outbufsz;
4664     int free_fm = 0;
4665 
4666     assert(arg_type[0] == TYPE_PTR);
4667     assert(ie->access == IOC_RW);
4668     arg_type++;
4669     target_size_in = thunk_type_size(arg_type, 0);
4670     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4671     if (!argptr) {
4672         return -TARGET_EFAULT;
4673     }
4674     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4675     unlock_user(argptr, arg, 0);
4676     fm = (struct fiemap *)buf_temp;
4677     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4678         return -TARGET_EINVAL;
4679     }
4680 
4681     outbufsz = sizeof (*fm) +
4682         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4683 
4684     if (outbufsz > MAX_STRUCT_SIZE) {
4685         /* We can't fit all the extents into the fixed size buffer.
4686          * Allocate one that is large enough and use it instead.
4687          */
4688         fm = g_try_malloc(outbufsz);
4689         if (!fm) {
4690             return -TARGET_ENOMEM;
4691         }
4692         memcpy(fm, buf_temp, sizeof(struct fiemap));
4693         free_fm = 1;
4694     }
4695     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4696     if (!is_error(ret)) {
4697         target_size_out = target_size_in;
4698         /* An extent_count of 0 means we were only counting the extents
4699          * so there are no structs to copy
4700          */
4701         if (fm->fm_extent_count != 0) {
4702             target_size_out += fm->fm_mapped_extents * extent_size;
4703         }
4704         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4705         if (!argptr) {
4706             ret = -TARGET_EFAULT;
4707         } else {
4708             /* Convert the struct fiemap */
4709             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4710             if (fm->fm_extent_count != 0) {
4711                 p = argptr + target_size_in;
4712                 /* ...and then all the struct fiemap_extents */
4713                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4714                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4715                                   THUNK_TARGET);
4716                     p += extent_size;
4717                 }
4718             }
4719             unlock_user(argptr, arg, target_size_out);
4720         }
4721     }
4722     if (free_fm) {
4723         g_free(fm);
4724     }
4725     return ret;
4726 }
4727 #endif
4728 
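/*
 * SIOCGIFCONF: struct ifconf carries a length and a pointer to an array
 * of struct ifreq.  Convert the header, size a host-side ifreq buffer
 * from the guest length, then convert each returned ifreq back to the
 * guest layout.
 */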
4729 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4730                                 int fd, int cmd, abi_long arg)
4731 {
4732     const argtype *arg_type = ie->arg_type;
4733     int target_size;
4734     void *argptr;
4735     int ret;
4736     struct ifconf *host_ifconf;
4737     uint32_t outbufsz;
4738     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4739     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4740     int target_ifreq_size;
4741     int nb_ifreq;
4742     int free_buf = 0;
4743     int i;
4744     int target_ifc_len;
4745     abi_long target_ifc_buf;
4746     int host_ifc_len;
4747     char *host_ifc_buf;
4748 
4749     assert(arg_type[0] == TYPE_PTR);
4750     assert(ie->access == IOC_RW);
4751 
4752     arg_type++;
4753     target_size = thunk_type_size(arg_type, 0);
4754 
4755     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4756     if (!argptr)
4757         return -TARGET_EFAULT;
4758     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4759     unlock_user(argptr, arg, 0);
4760 
4761     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4762     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4763     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4764 
4765     if (target_ifc_buf != 0) {
4766         target_ifc_len = host_ifconf->ifc_len;
4767         nb_ifreq = target_ifc_len / target_ifreq_size;
4768         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4769 
4770         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4771         if (outbufsz > MAX_STRUCT_SIZE) {
4772             /*
4773              * We can't fit all the ifreq entries into the fixed size buffer.
4774              * Allocate one that is large enough and use it instead.
4775              */
4776             host_ifconf = g_try_malloc(outbufsz);
4777             if (!host_ifconf) {
4778                 return -TARGET_ENOMEM;
4779             }
4780             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4781             free_buf = 1;
4782         }
4783         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4784 
4785         host_ifconf->ifc_len = host_ifc_len;
4786     } else {
4787         host_ifc_buf = NULL;
4788     }
4789     host_ifconf->ifc_buf = host_ifc_buf;
4790 
4791     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4792     if (!is_error(ret)) {
4793         /* convert host ifc_len to target ifc_len */
4794 
4795         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4796         target_ifc_len = nb_ifreq * target_ifreq_size;
4797         host_ifconf->ifc_len = target_ifc_len;
4798 
4799         /* restore target ifc_buf */
4800 
4801         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4802 
4803         /* copy struct ifconf to target user */
4804 
4805         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4806         if (!argptr)
4807             return -TARGET_EFAULT;
4808         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4809         unlock_user(argptr, arg, target_size);
4810 
4811         if (target_ifc_buf != 0) {
4812             /* copy ifreq[] to target user */
4813             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4814             for (i = 0; i < nb_ifreq ; i++) {
4815                 thunk_convert(argptr + i * target_ifreq_size,
4816                               host_ifc_buf + i * sizeof(struct ifreq),
4817                               ifreq_arg_type, THUNK_TARGET);
4818             }
4819             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4820         }
4821     }
4822 
4823     if (free_buf) {
4824         g_free(host_ifconf);
4825     }
4826 
4827     return ret;
4828 }
4829 
4830 #if defined(CONFIG_USBFS)
4831 #if HOST_LONG_BITS > 64
4832 #error USBDEVFS thunks do not support >64 bit hosts yet.
4833 #endif
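/*
 * Bookkeeping for asynchronous USB requests: each URB submitted by the
 * guest is wrapped in a live_urb that records the guest URB and buffer
 * addresses next to the host usbdevfs_urb actually passed to the kernel.
 */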
4834 struct live_urb {
4835     uint64_t target_urb_adr;
4836     uint64_t target_buf_adr;
4837     char *target_buf_ptr;
4838     struct usbdevfs_urb host_urb;
4839 };
4840 
4841 static GHashTable *usbdevfs_urb_hashtable(void)
4842 {
4843     static GHashTable *urb_hashtable;
4844 
4845     if (!urb_hashtable) {
4846         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4847     }
4848     return urb_hashtable;
4849 }
4850 
4851 static void urb_hashtable_insert(struct live_urb *urb)
4852 {
4853     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4854     g_hash_table_insert(urb_hashtable, urb, urb);
4855 }
4856 
4857 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4858 {
4859     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4860     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4861 }
4862 
4863 static void urb_hashtable_remove(struct live_urb *urb)
4864 {
4865     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4866     g_hash_table_remove(urb_hashtable, urb);
4867 }
4868 
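/*
 * USBDEVFS_REAPURB{,NDELAY}: the kernel hands back a pointer to the host
 * urb we submitted.  Recover the enclosing live_urb, release the guest
 * data buffer, convert the urb back to the guest, and store the guest
 * URB address through the pointer argument.
 */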
4869 static abi_long
4870 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4871                           int fd, int cmd, abi_long arg)
4872 {
4873     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4874     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4875     struct live_urb *lurb;
4876     void *argptr;
4877     uint64_t hurb;
4878     int target_size;
4879     uintptr_t target_urb_adr;
4880     abi_long ret;
4881 
4882     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4883 
4884     memset(buf_temp, 0, sizeof(uint64_t));
4885     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4886     if (is_error(ret)) {
4887         return ret;
4888     }
4889 
4890     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4891     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4892     if (!lurb->target_urb_adr) {
4893         return -TARGET_EFAULT;
4894     }
4895     urb_hashtable_remove(lurb);
4896     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4897         lurb->host_urb.buffer_length);
4898     lurb->target_buf_ptr = NULL;
4899 
4900     /* restore the guest buffer pointer */
4901     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4902 
4903     /* update the guest urb struct */
4904     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4905     if (!argptr) {
4906         g_free(lurb);
4907         return -TARGET_EFAULT;
4908     }
4909     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4910     unlock_user(argptr, lurb->target_urb_adr, target_size);
4911 
4912     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4913     /* write back the urb handle */
4914     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4915     if (!argptr) {
4916         g_free(lurb);
4917         return -TARGET_EFAULT;
4918     }
4919 
4920     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4921     target_urb_adr = lurb->target_urb_adr;
4922     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4923     unlock_user(argptr, arg, target_size);
4924 
4925     g_free(lurb);
4926     return ret;
4927 }
4928 
4929 static abi_long
4930 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4931                              uint8_t *buf_temp __attribute__((unused)),
4932                              int fd, int cmd, abi_long arg)
4933 {
4934     struct live_urb *lurb;
4935 
4936     /* map target address back to host URB with metadata. */
4937     lurb = urb_hashtable_lookup(arg);
4938     if (!lurb) {
4939         return -TARGET_EFAULT;
4940     }
4941     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4942 }
4943 
4944 static abi_long
4945 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4946                             int fd, int cmd, abi_long arg)
4947 {
4948     const argtype *arg_type = ie->arg_type;
4949     int target_size;
4950     abi_long ret;
4951     void *argptr;
4952     int rw_dir;
4953     struct live_urb *lurb;
4954 
4955     /*
4956      * Each submitted URB needs to map to a unique ID for the
4957      * kernel, and that unique ID needs to be a pointer to
4958      * host memory.  Hence, we need to malloc for each URB.
4959      * Isochronous transfers have a variable length struct.
4960      */
4961     arg_type++;
4962     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4963 
4964     /* construct host copy of urb and metadata */
4965     lurb = g_try_new0(struct live_urb, 1);
4966     if (!lurb) {
4967         return -TARGET_ENOMEM;
4968     }
4969 
4970     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4971     if (!argptr) {
4972         g_free(lurb);
4973         return -TARGET_EFAULT;
4974     }
4975     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4976     unlock_user(argptr, arg, 0);
4977 
4978     lurb->target_urb_adr = arg;
4979     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4980 
4981     /* buffer space used depends on endpoint type so lock the entire buffer */
4982     /* control type urbs should check the buffer contents for true direction */
4983     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4984     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4985         lurb->host_urb.buffer_length, 1);
4986     if (lurb->target_buf_ptr == NULL) {
4987         g_free(lurb);
4988         return -TARGET_EFAULT;
4989     }
4990 
4991     /* update buffer pointer in host copy */
4992     lurb->host_urb.buffer = lurb->target_buf_ptr;
4993 
4994     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4995     if (is_error(ret)) {
4996         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4997         g_free(lurb);
4998     } else {
4999         urb_hashtable_insert(lurb);
5000     }
5001 
5002     return ret;
5003 }
5004 #endif /* CONFIG_USBFS */
5005 
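/*
 * Device-mapper ioctls: a struct dm_ioctl header is followed by a
 * variable-size, command-specific payload starting at data_start, so the
 * payload must be converted by hand in both directions.
 */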
5006 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5007                             int cmd, abi_long arg)
5008 {
5009     void *argptr;
5010     struct dm_ioctl *host_dm;
5011     abi_long guest_data;
5012     uint32_t guest_data_size;
5013     int target_size;
5014     const argtype *arg_type = ie->arg_type;
5015     abi_long ret;
5016     void *big_buf = NULL;
5017     char *host_data;
5018 
5019     arg_type++;
5020     target_size = thunk_type_size(arg_type, 0);
5021     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5022     if (!argptr) {
5023         ret = -TARGET_EFAULT;
5024         goto out;
5025     }
5026     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5027     unlock_user(argptr, arg, 0);
5028 
5029     /* buf_temp is too small, so fetch things into a bigger buffer */
5030     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5031     memcpy(big_buf, buf_temp, target_size);
5032     buf_temp = big_buf;
5033     host_dm = big_buf;
5034 
5035     guest_data = arg + host_dm->data_start;
5036     if ((guest_data - arg) < 0) {
5037         ret = -TARGET_EINVAL;
5038         goto out;
5039     }
5040     guest_data_size = host_dm->data_size - host_dm->data_start;
5041     host_data = (char*)host_dm + host_dm->data_start;
5042 
5043     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5044     if (!argptr) {
5045         ret = -TARGET_EFAULT;
5046         goto out;
5047     }
5048 
5049     switch (ie->host_cmd) {
5050     case DM_REMOVE_ALL:
5051     case DM_LIST_DEVICES:
5052     case DM_DEV_CREATE:
5053     case DM_DEV_REMOVE:
5054     case DM_DEV_SUSPEND:
5055     case DM_DEV_STATUS:
5056     case DM_DEV_WAIT:
5057     case DM_TABLE_STATUS:
5058     case DM_TABLE_CLEAR:
5059     case DM_TABLE_DEPS:
5060     case DM_LIST_VERSIONS:
5061         /* no input data */
5062         break;
5063     case DM_DEV_RENAME:
5064     case DM_DEV_SET_GEOMETRY:
5065         /* data contains only strings */
5066         memcpy(host_data, argptr, guest_data_size);
5067         break;
5068     case DM_TARGET_MSG:
5069         memcpy(host_data, argptr, guest_data_size);
5070         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5071         break;
5072     case DM_TABLE_LOAD:
5073     {
5074         void *gspec = argptr;
5075         void *cur_data = host_data;
5076         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5077         int spec_size = thunk_type_size(dm_arg_type, 0);
5078         int i;
5079 
5080         for (i = 0; i < host_dm->target_count; i++) {
5081             struct dm_target_spec *spec = cur_data;
5082             uint32_t next;
5083             int slen;
5084 
5085             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5086             slen = strlen((char*)gspec + spec_size) + 1;
5087             next = spec->next;
5088             spec->next = sizeof(*spec) + slen;
5089             strcpy((char*)&spec[1], gspec + spec_size);
5090             gspec += next;
5091             cur_data += spec->next;
5092         }
5093         break;
5094     }
5095     default:
5096         ret = -TARGET_EINVAL;
5097         unlock_user(argptr, guest_data, 0);
5098         goto out;
5099     }
5100     unlock_user(argptr, guest_data, 0);
5101 
5102     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5103     if (!is_error(ret)) {
5104         guest_data = arg + host_dm->data_start;
5105         guest_data_size = host_dm->data_size - host_dm->data_start;
5106         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
5107         switch (ie->host_cmd) {
5108         case DM_REMOVE_ALL:
5109         case DM_DEV_CREATE:
5110         case DM_DEV_REMOVE:
5111         case DM_DEV_RENAME:
5112         case DM_DEV_SUSPEND:
5113         case DM_DEV_STATUS:
5114         case DM_TABLE_LOAD:
5115         case DM_TABLE_CLEAR:
5116         case DM_TARGET_MSG:
5117         case DM_DEV_SET_GEOMETRY:
5118             /* no return data */
5119             break;
5120         case DM_LIST_DEVICES:
5121         {
5122             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5123             uint32_t remaining_data = guest_data_size;
5124             void *cur_data = argptr;
5125             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5126             int nl_size = 12; /* can't use thunk_size due to alignment */
5127 
5128             while (1) {
5129                 uint32_t next = nl->next;
5130                 if (next) {
5131                     nl->next = nl_size + (strlen(nl->name) + 1);
5132                 }
5133                 if (remaining_data < nl->next) {
5134                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5135                     break;
5136                 }
5137                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5138                 strcpy(cur_data + nl_size, nl->name);
5139                 cur_data += nl->next;
5140                 remaining_data -= nl->next;
5141                 if (!next) {
5142                     break;
5143                 }
5144                 nl = (void*)nl + next;
5145             }
5146             break;
5147         }
5148         case DM_DEV_WAIT:
5149         case DM_TABLE_STATUS:
5150         {
5151             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5152             void *cur_data = argptr;
5153             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5154             int spec_size = thunk_type_size(dm_arg_type, 0);
5155             int i;
5156 
5157             for (i = 0; i < host_dm->target_count; i++) {
5158                 uint32_t next = spec->next;
5159                 int slen = strlen((char*)&spec[1]) + 1;
5160                 spec->next = (cur_data - argptr) + spec_size + slen;
5161                 if (guest_data_size < spec->next) {
5162                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5163                     break;
5164                 }
5165                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5166                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5167                 cur_data = argptr + spec->next;
5168                 spec = (void*)host_dm + host_dm->data_start + next;
5169             }
5170             break;
5171         }
5172         case DM_TABLE_DEPS:
5173         {
5174             void *hdata = (void*)host_dm + host_dm->data_start;
5175             int count = *(uint32_t*)hdata;
5176             uint64_t *hdev = hdata + 8;
5177             uint64_t *gdev = argptr + 8;
5178             int i;
5179 
5180             *(uint32_t*)argptr = tswap32(count);
5181             for (i = 0; i < count; i++) {
5182                 *gdev = tswap64(*hdev);
5183                 gdev++;
5184                 hdev++;
5185             }
5186             break;
5187         }
5188         case DM_LIST_VERSIONS:
5189         {
5190             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5191             uint32_t remaining_data = guest_data_size;
5192             void *cur_data = argptr;
5193             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5194             int vers_size = thunk_type_size(dm_arg_type, 0);
5195 
5196             while (1) {
5197                 uint32_t next = vers->next;
5198                 if (next) {
5199                     vers->next = vers_size + (strlen(vers->name) + 1);
5200                 }
5201                 if (remaining_data < vers->next) {
5202                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5203                     break;
5204                 }
5205                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5206                 strcpy(cur_data + vers_size, vers->name);
5207                 cur_data += vers->next;
5208                 remaining_data -= vers->next;
5209                 if (!next) {
5210                     break;
5211                 }
5212                 vers = (void*)vers + next;
5213             }
5214             break;
5215         }
5216         default:
5217             unlock_user(argptr, guest_data, 0);
5218             ret = -TARGET_EINVAL;
5219             goto out;
5220         }
5221         unlock_user(argptr, guest_data, guest_data_size);
5222 
5223         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5224         if (!argptr) {
5225             ret = -TARGET_EFAULT;
5226             goto out;
5227         }
5228         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5229         unlock_user(argptr, arg, target_size);
5230     }
5231 out:
5232     g_free(big_buf);
5233     return ret;
5234 }
5235 
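/*
 * BLKPG: struct blkpg_ioctl_arg embeds a pointer to a struct
 * blkpg_partition payload, which has to be fetched and converted
 * separately before reissuing the ioctl with a host-side copy.
 */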
5236 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5237                                int cmd, abi_long arg)
5238 {
5239     void *argptr;
5240     int target_size;
5241     const argtype *arg_type = ie->arg_type;
5242     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5243     abi_long ret;
5244 
5245     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5246     struct blkpg_partition host_part;
5247 
5248     /* Read and convert blkpg */
5249     arg_type++;
5250     target_size = thunk_type_size(arg_type, 0);
5251     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5252     if (!argptr) {
5253         ret = -TARGET_EFAULT;
5254         goto out;
5255     }
5256     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5257     unlock_user(argptr, arg, 0);
5258 
5259     switch (host_blkpg->op) {
5260     case BLKPG_ADD_PARTITION:
5261     case BLKPG_DEL_PARTITION:
5262         /* payload is struct blkpg_partition */
5263         break;
5264     default:
5265         /* Unknown opcode */
5266         ret = -TARGET_EINVAL;
5267         goto out;
5268     }
5269 
5270     /* Read and convert blkpg->data */
5271     arg = (abi_long)(uintptr_t)host_blkpg->data;
5272     target_size = thunk_type_size(part_arg_type, 0);
5273     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5274     if (!argptr) {
5275         ret = -TARGET_EFAULT;
5276         goto out;
5277     }
5278     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5279     unlock_user(argptr, arg, 0);
5280 
5281     /* Swizzle the data pointer to our local copy and call! */
5282     host_blkpg->data = &host_part;
5283     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5284 
5285 out:
5286     return ret;
5287 }
5288 
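/*
 * Routing table ioctls such as SIOCADDRT/SIOCDELRT: struct rtentry embeds
 * the rt_dev string pointer, so the structure is converted field by field
 * and the device name, if any, is locked from guest memory.
 */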
5289 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5290                                 int fd, int cmd, abi_long arg)
5291 {
5292     const argtype *arg_type = ie->arg_type;
5293     const StructEntry *se;
5294     const argtype *field_types;
5295     const int *dst_offsets, *src_offsets;
5296     int target_size;
5297     void *argptr;
5298     abi_ulong *target_rt_dev_ptr = NULL;
5299     unsigned long *host_rt_dev_ptr = NULL;
5300     abi_long ret;
5301     int i;
5302 
5303     assert(ie->access == IOC_W);
5304     assert(*arg_type == TYPE_PTR);
5305     arg_type++;
5306     assert(*arg_type == TYPE_STRUCT);
5307     target_size = thunk_type_size(arg_type, 0);
5308     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5309     if (!argptr) {
5310         return -TARGET_EFAULT;
5311     }
5312     arg_type++;
5313     assert(*arg_type == (int)STRUCT_rtentry);
5314     se = struct_entries + *arg_type++;
5315     assert(se->convert[0] == NULL);
5316     /* convert struct here to be able to catch rt_dev string */
5317     field_types = se->field_types;
5318     dst_offsets = se->field_offsets[THUNK_HOST];
5319     src_offsets = se->field_offsets[THUNK_TARGET];
5320     for (i = 0; i < se->nb_fields; i++) {
5321         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5322             assert(*field_types == TYPE_PTRVOID);
5323             target_rt_dev_ptr = argptr + src_offsets[i];
5324             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5325             if (*target_rt_dev_ptr != 0) {
5326                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5327                                                   tswapal(*target_rt_dev_ptr));
5328                 if (!*host_rt_dev_ptr) {
5329                     unlock_user(argptr, arg, 0);
5330                     return -TARGET_EFAULT;
5331                 }
5332             } else {
5333                 *host_rt_dev_ptr = 0;
5334             }
5335             field_types++;
5336             continue;
5337         }
5338         field_types = thunk_convert(buf_temp + dst_offsets[i],
5339                                     argptr + src_offsets[i],
5340                                     field_types, THUNK_HOST);
5341     }
5342     unlock_user(argptr, arg, 0);
5343 
5344     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5345 
5346     assert(host_rt_dev_ptr != NULL);
5347     assert(target_rt_dev_ptr != NULL);
5348     if (*host_rt_dev_ptr != 0) {
5349         unlock_user((void *)*host_rt_dev_ptr,
5350                     *target_rt_dev_ptr, 0);
5351     }
5352     return ret;
5353 }
5354 
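/* KDSIGACCEPT passes a signal number; map it to the host numbering first. */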
5355 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5356                                      int fd, int cmd, abi_long arg)
5357 {
5358     int sig = target_to_host_signal(arg);
5359     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5360 }
5361 
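/*
 * SIOCGSTAMP: fetch the timestamp with the host ioctl, then write it back
 * in either the old timeval layout or the 64-bit one, depending on which
 * variant of the ioctl the guest issued.
 */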
5362 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5363                                     int fd, int cmd, abi_long arg)
5364 {
5365     struct timeval tv;
5366     abi_long ret;
5367 
5368     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5369     if (is_error(ret)) {
5370         return ret;
5371     }
5372 
5373     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5374         if (copy_to_user_timeval(arg, &tv)) {
5375             return -TARGET_EFAULT;
5376         }
5377     } else {
5378         if (copy_to_user_timeval64(arg, &tv)) {
5379             return -TARGET_EFAULT;
5380         }
5381     }
5382 
5383     return ret;
5384 }
5385 
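/* As above, but for SIOCGSTAMPNS and struct timespec. */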
5386 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5387                                       int fd, int cmd, abi_long arg)
5388 {
5389     struct timespec ts;
5390     abi_long ret;
5391 
5392     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5393     if (is_error(ret)) {
5394         return ret;
5395     }
5396 
5397     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5398         if (host_to_target_timespec(arg, &ts)) {
5399             return -TARGET_EFAULT;
5400         }
5401     } else {
5402         if (host_to_target_timespec64(arg, &ts)) {
5403             return -TARGET_EFAULT;
5404         }
5405     }
5406 
5407     return ret;
5408 }
5409 
5410 #ifdef TIOCGPTPEER
5411 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5412                                      int fd, int cmd, abi_long arg)
5413 {
5414     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5415     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5416 }
5417 #endif
5418 
5419 #ifdef HAVE_DRM_H
5420 
5421 static void unlock_drm_version(struct drm_version *host_ver,
5422                                struct target_drm_version *target_ver,
5423                                bool copy)
5424 {
5425     unlock_user(host_ver->name, target_ver->name,
5426                                 copy ? host_ver->name_len : 0);
5427     unlock_user(host_ver->date, target_ver->date,
5428                                 copy ? host_ver->date_len : 0);
5429     unlock_user(host_ver->desc, target_ver->desc,
5430                                 copy ? host_ver->desc_len : 0);
5431 }
5432 
5433 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5434                                           struct target_drm_version *target_ver)
5435 {
5436     memset(host_ver, 0, sizeof(*host_ver));
5437 
5438     __get_user(host_ver->name_len, &target_ver->name_len);
5439     if (host_ver->name_len) {
5440         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5441                                    target_ver->name_len, 0);
5442         if (!host_ver->name) {
5443             return -EFAULT;
5444         }
5445     }
5446 
5447     __get_user(host_ver->date_len, &target_ver->date_len);
5448     if (host_ver->date_len) {
5449         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5450                                    target_ver->date_len, 0);
5451         if (!host_ver->date) {
5452             goto err;
5453         }
5454     }
5455 
5456     __get_user(host_ver->desc_len, &target_ver->desc_len);
5457     if (host_ver->desc_len) {
5458         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5459                                    target_ver->desc_len, 0);
5460         if (!host_ver->desc) {
5461             goto err;
5462         }
5463     }
5464 
5465     return 0;
5466 err:
5467     unlock_drm_version(host_ver, target_ver, false);
5468     return -EFAULT;
5469 }
5470 
5471 static inline void host_to_target_drmversion(
5472                                           struct target_drm_version *target_ver,
5473                                           struct drm_version *host_ver)
5474 {
5475     __put_user(host_ver->version_major, &target_ver->version_major);
5476     __put_user(host_ver->version_minor, &target_ver->version_minor);
5477     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5478     __put_user(host_ver->name_len, &target_ver->name_len);
5479     __put_user(host_ver->date_len, &target_ver->date_len);
5480     __put_user(host_ver->desc_len, &target_ver->desc_len);
5481     unlock_drm_version(host_ver, target_ver, true);
5482 }
5483 
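/*
 * DRM_IOCTL_VERSION: struct drm_version carries three (pointer, length)
 * string buffers that must be locked in guest memory so the kernel can
 * fill them in directly.
 */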
5484 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5485                              int fd, int cmd, abi_long arg)
5486 {
5487     struct drm_version *ver;
5488     struct target_drm_version *target_ver;
5489     abi_long ret;
5490 
5491     switch (ie->host_cmd) {
5492     case DRM_IOCTL_VERSION:
5493         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5494             return -TARGET_EFAULT;
5495         }
5496         ver = (struct drm_version *)buf_temp;
5497         ret = target_to_host_drmversion(ver, target_ver);
5498         if (!is_error(ret)) {
5499             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5500             if (is_error(ret)) {
5501                 unlock_drm_version(ver, target_ver, false);
5502             } else {
5503                 host_to_target_drmversion(target_ver, ver);
5504             }
5505         }
5506         unlock_user_struct(target_ver, arg, 0);
5507         return ret;
5508     }
5509     return -TARGET_ENOSYS;
5510 }
5511 
5512 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5513                                            struct drm_i915_getparam *gparam,
5514                                            int fd, abi_long arg)
5515 {
5516     abi_long ret;
5517     int value;
5518     struct target_drm_i915_getparam *target_gparam;
5519 
5520     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5521         return -TARGET_EFAULT;
5522     }
5523 
5524     __get_user(gparam->param, &target_gparam->param);
5525     gparam->value = &value;
5526     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5527     put_user_s32(value, target_gparam->value);
5528 
5529     unlock_user_struct(target_gparam, arg, 0);
5530     return ret;
5531 }
5532 
5533 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5534                                   int fd, int cmd, abi_long arg)
5535 {
5536     switch (ie->host_cmd) {
5537     case DRM_IOCTL_I915_GETPARAM:
5538         return do_ioctl_drm_i915_getparam(ie,
5539                                           (struct drm_i915_getparam *)buf_temp,
5540                                           fd, arg);
5541     default:
5542         return -TARGET_ENOSYS;
5543     }
5544 }
5545 
5546 #endif
5547 
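/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable-length
 * array of MAC addresses; copy the header and addresses from the guest,
 * bounding the total size by MAX_STRUCT_SIZE.
 */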
5548 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5549                                         int fd, int cmd, abi_long arg)
5550 {
5551     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5552     struct tun_filter *target_filter;
5553     char *target_addr;
5554 
5555     assert(ie->access == IOC_W);
5556 
5557     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5558     if (!target_filter) {
5559         return -TARGET_EFAULT;
5560     }
5561     filter->flags = tswap16(target_filter->flags);
5562     filter->count = tswap16(target_filter->count);
5563     unlock_user(target_filter, arg, 0);
5564 
5565     if (filter->count) {
5566         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5567             MAX_STRUCT_SIZE) {
5568             return -TARGET_EFAULT;
5569         }
5570 
5571         target_addr = lock_user(VERIFY_READ,
5572                                 arg + offsetof(struct tun_filter, addr),
5573                                 filter->count * ETH_ALEN, 1);
5574         if (!target_addr) {
5575             return -TARGET_EFAULT;
5576         }
5577         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5578         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5579     }
5580 
5581     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5582 }
5583 
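/*
 * Table of supported ioctls, generated from ioctls.h.  Each entry carries
 * the target and host command numbers, an access mode, an optional custom
 * handler and a description of the argument type.
 */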
5584 IOCTLEntry ioctl_entries[] = {
5585 #define IOCTL(cmd, access, ...) \
5586     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5587 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5588     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5589 #define IOCTL_IGNORE(cmd) \
5590     { TARGET_ ## cmd, 0, #cmd },
5591 #include "ioctls.h"
5592     { 0, 0, },
5593 };
5594 
5595 /* ??? Implement proper locking for ioctls.  */
5596 /* do_ioctl() must return target values and target errnos. */
5597 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5598 {
5599     const IOCTLEntry *ie;
5600     const argtype *arg_type;
5601     abi_long ret;
5602     uint8_t buf_temp[MAX_STRUCT_SIZE];
5603     int target_size;
5604     void *argptr;
5605 
5606     ie = ioctl_entries;
5607     for (;;) {
5608         if (ie->target_cmd == 0) {
5609             qemu_log_mask(
5610                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5611             return -TARGET_ENOTTY;
5612         }
5613         if (ie->target_cmd == cmd)
5614             break;
5615         ie++;
5616     }
5617     arg_type = ie->arg_type;
5618     if (ie->do_ioctl) {
5619         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5620     } else if (!ie->host_cmd) {
5621         /* Some architectures define BSD ioctls in their headers
5622            that are not implemented in Linux.  */
5623         return -TARGET_ENOTTY;
5624     }
5625 
5626     switch (arg_type[0]) {
5627     case TYPE_NULL:
5628         /* no argument */
5629         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5630         break;
5631     case TYPE_PTRVOID:
5632     case TYPE_INT:
5633     case TYPE_LONG:
5634     case TYPE_ULONG:
5635         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5636         break;
5637     case TYPE_PTR:
5638         arg_type++;
5639         target_size = thunk_type_size(arg_type, 0);
5640         switch (ie->access) {
5641         case IOC_R:
5642             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5643             if (!is_error(ret)) {
5644                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5645                 if (!argptr)
5646                     return -TARGET_EFAULT;
5647                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5648                 unlock_user(argptr, arg, target_size);
5649             }
5650             break;
5651         case IOC_W:
5652             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5653             if (!argptr)
5654                 return -TARGET_EFAULT;
5655             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5656             unlock_user(argptr, arg, 0);
5657             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5658             break;
5659         default:
5660         case IOC_RW:
5661             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5662             if (!argptr)
5663                 return -TARGET_EFAULT;
5664             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5665             unlock_user(argptr, arg, 0);
5666             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5667             if (!is_error(ret)) {
5668                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5669                 if (!argptr)
5670                     return -TARGET_EFAULT;
5671                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5672                 unlock_user(argptr, arg, target_size);
5673             }
5674             break;
5675         }
5676         break;
5677     default:
5678         qemu_log_mask(LOG_UNIMP,
5679                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5680                       (long)cmd, arg_type[0]);
5681         ret = -TARGET_ENOTTY;
5682         break;
5683     }
5684     return ret;
5685 }
5686 
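/*
 * Termios flag translation tables: each entry maps a (mask, bits) pair in
 * the target's flag encoding to the corresponding host (mask, bits) pair.
 */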
5687 static const bitmask_transtbl iflag_tbl[] = {
5688         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5689         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5690         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5691         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5692         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5693         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5694         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5695         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5696         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5697         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5698         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5699         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5700         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5701         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5702         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5703 };
5704 
5705 static const bitmask_transtbl oflag_tbl[] = {
5706     { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5707     { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5708     { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5709     { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5710     { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5711     { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5712     { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5713     { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5714     { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5715     { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5716     { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5717     { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5718     { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5719     { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5720     { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5721     { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5722     { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5723     { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5724     { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5725     { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5726     { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5727     { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5728     { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5729     { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5730 };
5731 
5732 static const bitmask_transtbl cflag_tbl[] = {
5733     { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5734     { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5735     { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5736     { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5737     { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5738     { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5739     { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5740     { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5741     { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5742     { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5743     { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5744     { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5745     { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5746     { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5747     { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5748     { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5749     { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5750     { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5751     { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5752     { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5753     { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5754     { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5755     { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5756     { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5757     { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5758     { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5759     { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5760     { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5761     { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5762     { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5763     { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5764 };
5765 
5766 static const bitmask_transtbl lflag_tbl[] = {
5767   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5768   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5769   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5770   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5771   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5772   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5773   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5774   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5775   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5776   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5777   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5778   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5779   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5780   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5781   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5782   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5783 };
5784 
5785 static void target_to_host_termios (void *dst, const void *src)
5786 {
5787     struct host_termios *host = dst;
5788     const struct target_termios *target = src;
5789 
5790     host->c_iflag =
5791         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5792     host->c_oflag =
5793         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5794     host->c_cflag =
5795         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5796     host->c_lflag =
5797         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5798     host->c_line = target->c_line;
5799 
5800     memset(host->c_cc, 0, sizeof(host->c_cc));
5801     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5802     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5803     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5804     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5805     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5806     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5807     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5808     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5809     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5810     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5811     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5812     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5813     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5814     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5815     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5816     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5817     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5818 }
5819 
5820 static void host_to_target_termios (void *dst, const void *src)
5821 {
5822     struct target_termios *target = dst;
5823     const struct host_termios *host = src;
5824 
5825     target->c_iflag =
5826         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5827     target->c_oflag =
5828         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5829     target->c_cflag =
5830         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5831     target->c_lflag =
5832         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5833     target->c_line = host->c_line;
5834 
5835     memset(target->c_cc, 0, sizeof(target->c_cc));
5836     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5837     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5838     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5839     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5840     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5841     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5842     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5843     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5844     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5845     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5846     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5847     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5848     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5849     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5850     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5851     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5852     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5853 }
5854 
5855 static const StructEntry struct_termios_def = {
5856     .convert = { host_to_target_termios, target_to_host_termios },
5857     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5858     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5859     .print = print_termios,
5860 };
5861 
5862 /* If the host does not provide these bits, they may be safely discarded. */
5863 #ifndef MAP_SYNC
5864 #define MAP_SYNC 0
5865 #endif
5866 #ifndef MAP_UNINITIALIZED
5867 #define MAP_UNINITIALIZED 0
5868 #endif
5869 
5870 static const bitmask_transtbl mmap_flags_tbl[] = {
5871     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5872     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5873       MAP_ANONYMOUS, MAP_ANONYMOUS },
5874     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5875       MAP_GROWSDOWN, MAP_GROWSDOWN },
5876     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5877       MAP_DENYWRITE, MAP_DENYWRITE },
5878     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5879       MAP_EXECUTABLE, MAP_EXECUTABLE },
5880     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5881     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5882       MAP_NORESERVE, MAP_NORESERVE },
5883     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5884     /* MAP_STACK had been ignored by the kernel for quite some time.
5885        Recognize it for the target insofar as we do not want to pass
5886        it through to the host.  */
5887     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5888     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5889     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5890     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5891       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5892     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5893       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5894 };
5895 
5896 /*
5897  * Arrange for legacy / undefined architecture specific flags to be
5898  * ignored by mmap handling code.
5899  */
5900 #ifndef TARGET_MAP_32BIT
5901 #define TARGET_MAP_32BIT 0
5902 #endif
5903 #ifndef TARGET_MAP_HUGE_2MB
5904 #define TARGET_MAP_HUGE_2MB 0
5905 #endif
5906 #ifndef TARGET_MAP_HUGE_1GB
5907 #define TARGET_MAP_HUGE_1GB 0
5908 #endif
5909 
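/*
 * Convert target mmap flags to host flags.  The mapping type bits
 * (private/shared/shared_validate) are checked explicitly; the remaining
 * flags are translated through mmap_flags_tbl.
 */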
5910 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5911                         int target_flags, int fd, off_t offset)
5912 {
5913     /*
5914      * The historical set of flags that all mmap types implicitly support.
5915      */
5916     enum {
5917         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5918                                | TARGET_MAP_PRIVATE
5919                                | TARGET_MAP_FIXED
5920                                | TARGET_MAP_ANONYMOUS
5921                                | TARGET_MAP_DENYWRITE
5922                                | TARGET_MAP_EXECUTABLE
5923                                | TARGET_MAP_UNINITIALIZED
5924                                | TARGET_MAP_GROWSDOWN
5925                                | TARGET_MAP_LOCKED
5926                                | TARGET_MAP_NORESERVE
5927                                | TARGET_MAP_POPULATE
5928                                | TARGET_MAP_NONBLOCK
5929                                | TARGET_MAP_STACK
5930                                | TARGET_MAP_HUGETLB
5931                                | TARGET_MAP_32BIT
5932                                | TARGET_MAP_HUGE_2MB
5933                                | TARGET_MAP_HUGE_1GB
5934     };
5935     int host_flags;
5936 
5937     switch (target_flags & TARGET_MAP_TYPE) {
5938     case TARGET_MAP_PRIVATE:
5939         host_flags = MAP_PRIVATE;
5940         break;
5941     case TARGET_MAP_SHARED:
5942         host_flags = MAP_SHARED;
5943         break;
5944     case TARGET_MAP_SHARED_VALIDATE:
5945         /*
5946          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5947          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5948          */
5949         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5950             return -TARGET_EOPNOTSUPP;
5951         }
5952         host_flags = MAP_SHARED_VALIDATE;
5953         if (target_flags & TARGET_MAP_SYNC) {
5954             host_flags |= MAP_SYNC;
5955         }
5956         break;
5957     default:
5958         return -TARGET_EINVAL;
5959     }
5960     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5961 
5962     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5963 }
5964 
5965 /*
5966  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5967  *       TARGET_I386 is also defined when TARGET_X86_64 is defined.
5968  */
5969 #if defined(TARGET_I386)
5970 
5971 /* NOTE: there is really one LDT for all the threads */
5972 static uint8_t *ldt_table;
5973 
5974 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5975 {
5976     int size;
5977     void *p;
5978 
5979     if (!ldt_table)
5980         return 0;
5981     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5982     if (size > bytecount)
5983         size = bytecount;
5984     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5985     if (!p)
5986         return -TARGET_EFAULT;
5987     /* ??? Should this be byteswapped?  */
5988     memcpy(p, ldt_table, size);
5989     unlock_user(p, ptr, size);
5990     return size;
5991 }
5992 
5993 /* XXX: add locking support */
5994 static abi_long write_ldt(CPUX86State *env,
5995                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5996 {
5997     struct target_modify_ldt_ldt_s ldt_info;
5998     struct target_modify_ldt_ldt_s *target_ldt_info;
5999     int seg_32bit, contents, read_exec_only, limit_in_pages;
6000     int seg_not_present, useable, lm;
6001     uint32_t *lp, entry_1, entry_2;
6002 
6003     if (bytecount != sizeof(ldt_info))
6004         return -TARGET_EINVAL;
6005     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6006         return -TARGET_EFAULT;
6007     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6008     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6009     ldt_info.limit = tswap32(target_ldt_info->limit);
6010     ldt_info.flags = tswap32(target_ldt_info->flags);
6011     unlock_user_struct(target_ldt_info, ptr, 0);
6012 
6013     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6014         return -TARGET_EINVAL;
6015     seg_32bit = ldt_info.flags & 1;
6016     contents = (ldt_info.flags >> 1) & 3;
6017     read_exec_only = (ldt_info.flags >> 3) & 1;
6018     limit_in_pages = (ldt_info.flags >> 4) & 1;
6019     seg_not_present = (ldt_info.flags >> 5) & 1;
6020     useable = (ldt_info.flags >> 6) & 1;
6021 #ifdef TARGET_ABI32
6022     lm = 0;
6023 #else
6024     lm = (ldt_info.flags >> 7) & 1;
6025 #endif
6026     if (contents == 3) {
6027         if (oldmode)
6028             return -TARGET_EINVAL;
6029         if (seg_not_present == 0)
6030             return -TARGET_EINVAL;
6031     }
6032     /* allocate the LDT */
6033     if (!ldt_table) {
6034         env->ldt.base = target_mmap(0,
6035                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6036                                     PROT_READ|PROT_WRITE,
6037                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6038         if (env->ldt.base == -1)
6039             return -TARGET_ENOMEM;
6040         memset(g2h_untagged(env->ldt.base), 0,
6041                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6042         env->ldt.limit = 0xffff;
6043         ldt_table = g2h_untagged(env->ldt.base);
6044     }
6045 
6046     /* NOTE: same code as Linux kernel */
6047     /* Allow LDTs to be cleared by the user. */
6048     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6049         if (oldmode ||
6050             (contents == 0             &&
6051              read_exec_only == 1       &&
6052              seg_32bit == 0            &&
6053              limit_in_pages == 0       &&
6054              seg_not_present == 1      &&
6055              useable == 0)) {
6056             entry_1 = 0;
6057             entry_2 = 0;
6058             goto install;
6059         }
6060     }
6061 
6062     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6063         (ldt_info.limit & 0x0ffff);
6064     entry_2 = (ldt_info.base_addr & 0xff000000) |
6065         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6066         (ldt_info.limit & 0xf0000) |
6067         ((read_exec_only ^ 1) << 9) |
6068         (contents << 10) |
6069         ((seg_not_present ^ 1) << 15) |
6070         (seg_32bit << 22) |
6071         (limit_in_pages << 23) |
6072         (lm << 21) |
6073         0x7000;
6074     if (!oldmode)
6075         entry_2 |= (useable << 20);
6076 
6077     /* Install the new entry ...  */
6078 install:
6079     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6080     lp[0] = tswap32(entry_1);
6081     lp[1] = tswap32(entry_2);
6082     return 0;
6083 }
6084 
6085 /* specific and weird i386 syscalls */
6086 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6087                               unsigned long bytecount)
6088 {
6089     abi_long ret;
6090 
6091     switch (func) {
6092     case 0:
6093         ret = read_ldt(ptr, bytecount);
6094         break;
6095     case 1:
6096         ret = write_ldt(env, ptr, bytecount, 1);
6097         break;
6098     case 0x11:
6099         ret = write_ldt(env, ptr, bytecount, 0);
6100         break;
6101     default:
6102         ret = -TARGET_ENOSYS;
6103         break;
6104     }
6105     return ret;
6106 }
6107 
6108 #if defined(TARGET_ABI32)
6109 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6110 {
6111     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6112     struct target_modify_ldt_ldt_s ldt_info;
6113     struct target_modify_ldt_ldt_s *target_ldt_info;
6114     int seg_32bit, contents, read_exec_only, limit_in_pages;
6115     int seg_not_present, useable, lm;
6116     uint32_t *lp, entry_1, entry_2;
6117     int i;
6118 
6119     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6120     if (!target_ldt_info)
6121         return -TARGET_EFAULT;
6122     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6123     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6124     ldt_info.limit = tswap32(target_ldt_info->limit);
6125     ldt_info.flags = tswap32(target_ldt_info->flags);
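    /*
     * As with set_thread_area(2), an entry_number of -1 asks us to pick a
     * free TLS slot in the GDT and report the chosen index back to the guest.
     */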
6126     if (ldt_info.entry_number == -1) {
6127         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6128             if (gdt_table[i] == 0) {
6129                 ldt_info.entry_number = i;
6130                 target_ldt_info->entry_number = tswap32(i);
6131                 break;
6132             }
6133         }
6134     }
6135     unlock_user_struct(target_ldt_info, ptr, 1);
6136 
6137     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6138         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6139            return -TARGET_EINVAL;
6140     seg_32bit = ldt_info.flags & 1;
6141     contents = (ldt_info.flags >> 1) & 3;
6142     read_exec_only = (ldt_info.flags >> 3) & 1;
6143     limit_in_pages = (ldt_info.flags >> 4) & 1;
6144     seg_not_present = (ldt_info.flags >> 5) & 1;
6145     useable = (ldt_info.flags >> 6) & 1;
6146 #ifdef TARGET_ABI32
6147     lm = 0;
6148 #else
6149     lm = (ldt_info.flags >> 7) & 1;
6150 #endif
6151 
6152     if (contents == 3) {
6153         if (seg_not_present == 0)
6154             return -TARGET_EINVAL;
6155     }
6156 
6157     /* NOTE: same code as Linux kernel */
6158     /* Allow LDTs to be cleared by the user. */
6159     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6160         if ((contents == 0             &&
6161              read_exec_only == 1       &&
6162              seg_32bit == 0            &&
6163              limit_in_pages == 0       &&
6164              seg_not_present == 1      &&
6165              useable == 0 )) {
6166             entry_1 = 0;
6167             entry_2 = 0;
6168             goto install;
6169         }
6170     }
6171 
6172     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6173         (ldt_info.limit & 0x0ffff);
6174     entry_2 = (ldt_info.base_addr & 0xff000000) |
6175         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6176         (ldt_info.limit & 0xf0000) |
6177         ((read_exec_only ^ 1) << 9) |
6178         (contents << 10) |
6179         ((seg_not_present ^ 1) << 15) |
6180         (seg_32bit << 22) |
6181         (limit_in_pages << 23) |
6182         (useable << 20) |
6183         (lm << 21) |
6184         0x7000;
6185 
6186     /* Install the new entry ...  */
6187 install:
6188     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6189     lp[0] = tswap32(entry_1);
6190     lp[1] = tswap32(entry_2);
6191     return 0;
6192 }
6193 
6194 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6195 {
6196     struct target_modify_ldt_ldt_s *target_ldt_info;
6197     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6198     uint32_t base_addr, limit, flags;
6199     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6200     int seg_not_present, useable, lm;
6201     uint32_t *lp, entry_1, entry_2;
6202 
6203     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6204     if (!target_ldt_info)
6205         return -TARGET_EFAULT;
6206     idx = tswap32(target_ldt_info->entry_number);
6207     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6208         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6209         unlock_user_struct(target_ldt_info, ptr, 1);
6210         return -TARGET_EINVAL;
6211     }
6212     lp = (uint32_t *)(gdt_table + idx);
6213     entry_1 = tswap32(lp[0]);
6214     entry_2 = tswap32(lp[1]);
6215 
6216     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6217     contents = (entry_2 >> 10) & 3;
6218     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6219     seg_32bit = (entry_2 >> 22) & 1;
6220     limit_in_pages = (entry_2 >> 23) & 1;
6221     useable = (entry_2 >> 20) & 1;
6222 #ifdef TARGET_ABI32
6223     lm = 0;
6224 #else
6225     lm = (entry_2 >> 21) & 1;
6226 #endif
6227     flags = (seg_32bit << 0) | (contents << 1) |
6228         (read_exec_only << 3) | (limit_in_pages << 4) |
6229         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6230     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6231     base_addr = (entry_1 >> 16) |
6232         (entry_2 & 0xff000000) |
6233         ((entry_2 & 0xff) << 16);
6234     target_ldt_info->base_addr = tswapal(base_addr);
6235     target_ldt_info->limit = tswap32(limit);
6236     target_ldt_info->flags = tswap32(flags);
6237     unlock_user_struct(target_ldt_info, ptr, 1);
6238     return 0;
6239 }
6240 
6241 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6242 {
6243     return -TARGET_ENOSYS;
6244 }
6245 #else
6246 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6247 {
6248     abi_long ret = 0;
6249     abi_ulong val;
6250     int idx;
6251 
6252     switch(code) {
6253     case TARGET_ARCH_SET_GS:
6254     case TARGET_ARCH_SET_FS:
6255         if (code == TARGET_ARCH_SET_GS)
6256             idx = R_GS;
6257         else
6258             idx = R_FS;
6259         cpu_x86_load_seg(env, idx, 0);
6260         env->segs[idx].base = addr;
6261         break;
6262     case TARGET_ARCH_GET_GS:
6263     case TARGET_ARCH_GET_FS:
6264         if (code == TARGET_ARCH_GET_GS)
6265             idx = R_GS;
6266         else
6267             idx = R_FS;
6268         val = env->segs[idx].base;
6269         if (put_user(val, addr, abi_ulong))
6270             ret = -TARGET_EFAULT;
6271         break;
6272     default:
6273         ret = -TARGET_EINVAL;
6274         break;
6275     }
6276     return ret;
6277 }
6278 #endif /* defined(TARGET_ABI32) */
6279 #endif /* defined(TARGET_I386) */
6280 
6281 /*
6282  * These constants are generic.  Supply any that are missing from the host.
6283  */
6284 #ifndef PR_SET_NAME
6285 # define PR_SET_NAME    15
6286 # define PR_GET_NAME    16
6287 #endif
6288 #ifndef PR_SET_FP_MODE
6289 # define PR_SET_FP_MODE 45
6290 # define PR_GET_FP_MODE 46
6291 # define PR_FP_MODE_FR   (1 << 0)
6292 # define PR_FP_MODE_FRE  (1 << 1)
6293 #endif
6294 #ifndef PR_SVE_SET_VL
6295 # define PR_SVE_SET_VL  50
6296 # define PR_SVE_GET_VL  51
6297 # define PR_SVE_VL_LEN_MASK  0xffff
6298 # define PR_SVE_VL_INHERIT   (1 << 17)
6299 #endif
6300 #ifndef PR_PAC_RESET_KEYS
6301 # define PR_PAC_RESET_KEYS  54
6302 # define PR_PAC_APIAKEY   (1 << 0)
6303 # define PR_PAC_APIBKEY   (1 << 1)
6304 # define PR_PAC_APDAKEY   (1 << 2)
6305 # define PR_PAC_APDBKEY   (1 << 3)
6306 # define PR_PAC_APGAKEY   (1 << 4)
6307 #endif
6308 #ifndef PR_SET_TAGGED_ADDR_CTRL
6309 # define PR_SET_TAGGED_ADDR_CTRL 55
6310 # define PR_GET_TAGGED_ADDR_CTRL 56
6311 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6312 #endif
6313 #ifndef PR_SET_IO_FLUSHER
6314 # define PR_SET_IO_FLUSHER 57
6315 # define PR_GET_IO_FLUSHER 58
6316 #endif
6317 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6318 # define PR_SET_SYSCALL_USER_DISPATCH 59
6319 #endif
6320 #ifndef PR_SME_SET_VL
6321 # define PR_SME_SET_VL  63
6322 # define PR_SME_GET_VL  64
6323 # define PR_SME_VL_LEN_MASK  0xffff
6324 # define PR_SME_VL_INHERIT   (1 << 17)
6325 #endif
6326 
6327 #include "target_prctl.h"
6328 
6329 static abi_long do_prctl_inval0(CPUArchState *env)
6330 {
6331     return -TARGET_EINVAL;
6332 }
6333 
6334 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6335 {
6336     return -TARGET_EINVAL;
6337 }
6338 
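/*
 * Each target provides its own handlers for these prctl options via
 * target_prctl.h; any option a target does not implement falls back to
 * the -TARGET_EINVAL stubs above.
 */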
6339 #ifndef do_prctl_get_fp_mode
6340 #define do_prctl_get_fp_mode do_prctl_inval0
6341 #endif
6342 #ifndef do_prctl_set_fp_mode
6343 #define do_prctl_set_fp_mode do_prctl_inval1
6344 #endif
6345 #ifndef do_prctl_sve_get_vl
6346 #define do_prctl_sve_get_vl do_prctl_inval0
6347 #endif
6348 #ifndef do_prctl_sve_set_vl
6349 #define do_prctl_sve_set_vl do_prctl_inval1
6350 #endif
6351 #ifndef do_prctl_reset_keys
6352 #define do_prctl_reset_keys do_prctl_inval1
6353 #endif
6354 #ifndef do_prctl_set_tagged_addr_ctrl
6355 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6356 #endif
6357 #ifndef do_prctl_get_tagged_addr_ctrl
6358 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6359 #endif
6360 #ifndef do_prctl_get_unalign
6361 #define do_prctl_get_unalign do_prctl_inval1
6362 #endif
6363 #ifndef do_prctl_set_unalign
6364 #define do_prctl_set_unalign do_prctl_inval1
6365 #endif
6366 #ifndef do_prctl_sme_get_vl
6367 #define do_prctl_sme_get_vl do_prctl_inval0
6368 #endif
6369 #ifndef do_prctl_sme_set_vl
6370 #define do_prctl_sme_set_vl do_prctl_inval1
6371 #endif
6372 
6373 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6374                          abi_long arg3, abi_long arg4, abi_long arg5)
6375 {
6376     abi_long ret;
6377 
6378     switch (option) {
6379     case PR_GET_PDEATHSIG:
6380         {
6381             int deathsig;
6382             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6383                                   arg3, arg4, arg5));
6384             if (!is_error(ret) &&
6385                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6386                 return -TARGET_EFAULT;
6387             }
6388             return ret;
6389         }
6390     case PR_SET_PDEATHSIG:
6391         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6392                                arg3, arg4, arg5));
6393     case PR_GET_NAME:
6394         {
6395             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6396             if (!name) {
6397                 return -TARGET_EFAULT;
6398             }
6399             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6400                                   arg3, arg4, arg5));
6401             unlock_user(name, arg2, 16);
6402             return ret;
6403         }
6404     case PR_SET_NAME:
6405         {
6406             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6407             if (!name) {
6408                 return -TARGET_EFAULT;
6409             }
6410             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6411                                   arg3, arg4, arg5));
6412             unlock_user(name, arg2, 0);
6413             return ret;
6414         }
6415     case PR_GET_FP_MODE:
6416         return do_prctl_get_fp_mode(env);
6417     case PR_SET_FP_MODE:
6418         return do_prctl_set_fp_mode(env, arg2);
6419     case PR_SVE_GET_VL:
6420         return do_prctl_sve_get_vl(env);
6421     case PR_SVE_SET_VL:
6422         return do_prctl_sve_set_vl(env, arg2);
6423     case PR_SME_GET_VL:
6424         return do_prctl_sme_get_vl(env);
6425     case PR_SME_SET_VL:
6426         return do_prctl_sme_set_vl(env, arg2);
6427     case PR_PAC_RESET_KEYS:
6428         if (arg3 || arg4 || arg5) {
6429             return -TARGET_EINVAL;
6430         }
6431         return do_prctl_reset_keys(env, arg2);
6432     case PR_SET_TAGGED_ADDR_CTRL:
6433         if (arg3 || arg4 || arg5) {
6434             return -TARGET_EINVAL;
6435         }
6436         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6437     case PR_GET_TAGGED_ADDR_CTRL:
6438         if (arg2 || arg3 || arg4 || arg5) {
6439             return -TARGET_EINVAL;
6440         }
6441         return do_prctl_get_tagged_addr_ctrl(env);
6442 
6443     case PR_GET_UNALIGN:
6444         return do_prctl_get_unalign(env, arg2);
6445     case PR_SET_UNALIGN:
6446         return do_prctl_set_unalign(env, arg2);
6447 
6448     case PR_CAP_AMBIENT:
6449     case PR_CAPBSET_READ:
6450     case PR_CAPBSET_DROP:
6451     case PR_GET_DUMPABLE:
6452     case PR_SET_DUMPABLE:
6453     case PR_GET_KEEPCAPS:
6454     case PR_SET_KEEPCAPS:
6455     case PR_GET_SECUREBITS:
6456     case PR_SET_SECUREBITS:
6457     case PR_GET_TIMING:
6458     case PR_SET_TIMING:
6459     case PR_GET_TIMERSLACK:
6460     case PR_SET_TIMERSLACK:
6461     case PR_MCE_KILL:
6462     case PR_MCE_KILL_GET:
6463     case PR_GET_NO_NEW_PRIVS:
6464     case PR_SET_NO_NEW_PRIVS:
6465     case PR_GET_IO_FLUSHER:
6466     case PR_SET_IO_FLUSHER:
6467     case PR_SET_CHILD_SUBREAPER:
6468     case PR_GET_SPECULATION_CTRL:
6469     case PR_SET_SPECULATION_CTRL:
6470         /* These options take no pointer arguments; pass them through. */
6471         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6472 
6473     case PR_GET_CHILD_SUBREAPER:
6474         {
6475             int val;
6476             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6477                                   arg3, arg4, arg5));
6478             if (!is_error(ret) && put_user_s32(val, arg2)) {
6479                 return -TARGET_EFAULT;
6480             }
6481             return ret;
6482         }
6483 
6484     case PR_GET_TID_ADDRESS:
6485         {
6486             TaskState *ts = get_task_state(env_cpu(env));
6487             return put_user_ual(ts->child_tidptr, arg2);
6488         }
6489 
6490     case PR_GET_FPEXC:
6491     case PR_SET_FPEXC:
6492         /* Was used for SPE on PowerPC. */
6493         return -TARGET_EINVAL;
6494 
6495     case PR_GET_ENDIAN:
6496     case PR_SET_ENDIAN:
6497     case PR_GET_FPEMU:
6498     case PR_SET_FPEMU:
6499     case PR_SET_MM:
6500     case PR_GET_SECCOMP:
6501     case PR_SET_SECCOMP:
6502     case PR_SET_SYSCALL_USER_DISPATCH:
6503     case PR_GET_THP_DISABLE:
6504     case PR_SET_THP_DISABLE:
6505     case PR_GET_TSC:
6506     case PR_SET_TSC:
6507         /* Disabled so the guest cannot turn off things that QEMU needs. */
6508         return -TARGET_EINVAL;
6509 
6510     default:
6511         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6512                       option);
6513         return -TARGET_EINVAL;
6514     }
6515 }
6516 
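/* Host stack size for the pthreads that back guest CLONE_VM threads. */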
6517 #define NEW_STACK_SIZE 0x40000
6518 
6519 
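/*
 * clone_lock serializes guest thread creation; new_thread_info is the
 * parent/child handoff block used while a new host thread starts up.
 */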
6520 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6521 typedef struct {
6522     CPUArchState *env;
6523     pthread_mutex_t mutex;
6524     pthread_cond_t cond;
6525     pthread_t thread;
6526     uint32_t tid;
6527     abi_ulong child_tidptr;
6528     abi_ulong parent_tidptr;
6529     sigset_t sigmask;
6530 } new_thread_info;
6531 
6532 static void *clone_func(void *arg)
6533 {
6534     new_thread_info *info = arg;
6535     CPUArchState *env;
6536     CPUState *cpu;
6537     TaskState *ts;
6538 
6539     rcu_register_thread();
6540     tcg_register_thread();
6541     env = info->env;
6542     cpu = env_cpu(env);
6543     thread_cpu = cpu;
6544     ts = get_task_state(cpu);
6545     info->tid = sys_gettid();
6546     task_settid(ts);
6547     if (info->child_tidptr)
6548         put_user_u32(info->tid, info->child_tidptr);
6549     if (info->parent_tidptr)
6550         put_user_u32(info->tid, info->parent_tidptr);
6551     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6552     /* Enable signals.  */
6553     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6554     /* Signal to the parent that we're ready.  */
6555     pthread_mutex_lock(&info->mutex);
6556     pthread_cond_broadcast(&info->cond);
6557     pthread_mutex_unlock(&info->mutex);
6558     /* Wait until the parent has finished initializing the tls state.  */
6559     pthread_mutex_lock(&clone_lock);
6560     pthread_mutex_unlock(&clone_lock);
6561     cpu_loop(env);
6562     /* never exits */
6563     return NULL;
6564 }
6565 
6566 /* do_fork() must return host values and target errnos (unlike most
6567    do_*() functions). */
6568 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6569                    abi_ulong parent_tidptr, target_ulong newtls,
6570                    abi_ulong child_tidptr)
6571 {
6572     CPUState *cpu = env_cpu(env);
6573     int ret;
6574     TaskState *ts;
6575     CPUState *new_cpu;
6576     CPUArchState *new_env;
6577     sigset_t sigmask;
6578 
6579     flags &= ~CLONE_IGNORED_FLAGS;
6580 
6581     /* Emulate vfork() with fork() */
6582     if (flags & CLONE_VFORK)
6583         flags &= ~(CLONE_VFORK | CLONE_VM);
6584 
6585     if (flags & CLONE_VM) {
6586         TaskState *parent_ts = get_task_state(cpu);
6587         new_thread_info info;
6588         pthread_attr_t attr;
6589 
6590         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6591             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6592             return -TARGET_EINVAL;
6593         }
6594 
6595         ts = g_new0(TaskState, 1);
6596         init_task_state(ts);
6597 
6598         /* Grab a mutex so that thread setup appears atomic.  */
6599         pthread_mutex_lock(&clone_lock);
6600 
6601         /*
6602          * If this is our first additional thread, we need to ensure we
6603          * generate code for parallel execution and flush old translations.
6604          * Do this now so that the copy gets CF_PARALLEL too.
6605          */
6606         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6607             tcg_cflags_set(cpu, CF_PARALLEL);
6608             tb_flush(cpu);
6609         }
6610 
6611         /* we create a new CPU instance. */
6612         new_env = cpu_copy(env);
6613         /* Init regs that differ from the parent.  */
6614         cpu_clone_regs_child(new_env, newsp, flags);
6615         cpu_clone_regs_parent(env, flags);
6616         new_cpu = env_cpu(new_env);
6617         new_cpu->opaque = ts;
6618         ts->bprm = parent_ts->bprm;
6619         ts->info = parent_ts->info;
6620         ts->signal_mask = parent_ts->signal_mask;
6621 
6622         if (flags & CLONE_CHILD_CLEARTID) {
6623             ts->child_tidptr = child_tidptr;
6624         }
6625 
6626         if (flags & CLONE_SETTLS) {
6627             cpu_set_tls (new_env, newtls);
6628         }
6629 
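        /*
         * Set up the handoff block: the parent holds info.mutex across
         * pthread_create() and then waits on info.cond, so clone_func()
         * can publish the child's TID before do_fork() returns it.
         */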
6630         memset(&info, 0, sizeof(info));
6631         pthread_mutex_init(&info.mutex, NULL);
6632         pthread_mutex_lock(&info.mutex);
6633         pthread_cond_init(&info.cond, NULL);
6634         info.env = new_env;
6635         if (flags & CLONE_CHILD_SETTID) {
6636             info.child_tidptr = child_tidptr;
6637         }
6638         if (flags & CLONE_PARENT_SETTID) {
6639             info.parent_tidptr = parent_tidptr;
6640         }
6641 
6642         ret = pthread_attr_init(&attr);
6643         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6644         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6645         /* It is not safe to deliver signals until the child has finished
6646            initializing, so temporarily block all signals.  */
6647         sigfillset(&sigmask);
6648         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6649         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6650 
6651         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6652         /* TODO: Free new CPU state if thread creation failed.  */
6653 
6654         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6655         pthread_attr_destroy(&attr);
6656         if (ret == 0) {
6657             /* Wait for the child to initialize.  */
6658             pthread_cond_wait(&info.cond, &info.mutex);
6659             ret = info.tid;
6660         } else {
6661             ret = -1;
6662         }
6663         pthread_mutex_unlock(&info.mutex);
6664         pthread_cond_destroy(&info.cond);
6665         pthread_mutex_destroy(&info.mutex);
6666         pthread_mutex_unlock(&clone_lock);
6667     } else {
6668         /* Without CLONE_VM we treat this as a plain fork. */
6669         if (flags & CLONE_INVALID_FORK_FLAGS) {
6670             return -TARGET_EINVAL;
6671         }
6672 
6673         /* We can't support custom termination signals */
6674         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6675             return -TARGET_EINVAL;
6676         }
6677 
6678 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6679         if (flags & CLONE_PIDFD) {
6680             return -TARGET_EINVAL;
6681         }
6682 #endif
6683 
6684         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID. */
6685         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6686             return -TARGET_EINVAL;
6687         }
6688 
6689         if (block_signals()) {
6690             return -QEMU_ERESTARTSYS;
6691         }
6692 
6693         fork_start();
6694         ret = fork();
6695         if (ret == 0) {
6696             /* Child Process.  */
6697             cpu_clone_regs_child(env, newsp, flags);
6698             fork_end(ret);
6699             /* There is a race condition here: the parent process could
6700                theoretically read the TID in the child process before the
6701                child has stored it.  Closing the race would require either
6702                ptrace (not implemented) or having *_tidptr point at a shared
6703                memory mapping.  We can't repeat the spinlock hack used above
6704                because the child process gets its own copy of the lock.  */
6705             if (flags & CLONE_CHILD_SETTID)
6706                 put_user_u32(sys_gettid(), child_tidptr);
6707             if (flags & CLONE_PARENT_SETTID)
6708                 put_user_u32(sys_gettid(), parent_tidptr);
6709             ts = get_task_state(cpu);
6710             if (flags & CLONE_SETTLS)
6711                 cpu_set_tls (env, newtls);
6712             if (flags & CLONE_CHILD_CLEARTID)
6713                 ts->child_tidptr = child_tidptr;
6714         } else {
6715             cpu_clone_regs_parent(env, flags);
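            /*
             * For CLONE_PIDFD the kernel stores a pidfd for the new child at
             * parent_tidptr; emulate that with pidfd_open() where available,
             * otherwise report fd 0.
             */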
6716             if (flags & CLONE_PIDFD) {
6717                 int pid_fd = 0;
6718 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6719                 int pid_child = ret;
6720                 pid_fd = pidfd_open(pid_child, 0);
6721                 if (pid_fd >= 0) {
6722                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6723                                                | FD_CLOEXEC);
6724                 } else {
6725                         pid_fd = 0;
6726                 }
6727 #endif
6728                 put_user_u32(pid_fd, parent_tidptr);
6729             }
6730             fork_end(ret);
6731         }
6732         g_assert(!cpu_in_exclusive_context(cpu));
6733     }
6734     return ret;
6735 }
6736 
6737 /* Warning: doesn't handle Linux-specific flags... */
6738 static int target_to_host_fcntl_cmd(int cmd)
6739 {
6740     int ret;
6741 
6742     switch(cmd) {
6743     case TARGET_F_DUPFD:
6744     case TARGET_F_GETFD:
6745     case TARGET_F_SETFD:
6746     case TARGET_F_GETFL:
6747     case TARGET_F_SETFL:
6748     case TARGET_F_OFD_GETLK:
6749     case TARGET_F_OFD_SETLK:
6750     case TARGET_F_OFD_SETLKW:
6751         ret = cmd;
6752         break;
6753     case TARGET_F_GETLK:
6754         ret = F_GETLK;
6755         break;
6756     case TARGET_F_SETLK:
6757         ret = F_SETLK;
6758         break;
6759     case TARGET_F_SETLKW:
6760         ret = F_SETLKW;
6761         break;
6762     case TARGET_F_GETOWN:
6763         ret = F_GETOWN;
6764         break;
6765     case TARGET_F_SETOWN:
6766         ret = F_SETOWN;
6767         break;
6768     case TARGET_F_GETSIG:
6769         ret = F_GETSIG;
6770         break;
6771     case TARGET_F_SETSIG:
6772         ret = F_SETSIG;
6773         break;
6774 #if TARGET_ABI_BITS == 32
6775     case TARGET_F_GETLK64:
6776         ret = F_GETLK;
6777         break;
6778     case TARGET_F_SETLK64:
6779         ret = F_SETLK;
6780         break;
6781     case TARGET_F_SETLKW64:
6782         ret = F_SETLKW;
6783         break;
6784 #endif
6785     case TARGET_F_SETLEASE:
6786         ret = F_SETLEASE;
6787         break;
6788     case TARGET_F_GETLEASE:
6789         ret = F_GETLEASE;
6790         break;
6791 #ifdef F_DUPFD_CLOEXEC
6792     case TARGET_F_DUPFD_CLOEXEC:
6793         ret = F_DUPFD_CLOEXEC;
6794         break;
6795 #endif
6796     case TARGET_F_NOTIFY:
6797         ret = F_NOTIFY;
6798         break;
6799 #ifdef F_GETOWN_EX
6800     case TARGET_F_GETOWN_EX:
6801         ret = F_GETOWN_EX;
6802         break;
6803 #endif
6804 #ifdef F_SETOWN_EX
6805     case TARGET_F_SETOWN_EX:
6806         ret = F_SETOWN_EX;
6807         break;
6808 #endif
6809 #ifdef F_SETPIPE_SZ
6810     case TARGET_F_SETPIPE_SZ:
6811         ret = F_SETPIPE_SZ;
6812         break;
6813     case TARGET_F_GETPIPE_SZ:
6814         ret = F_GETPIPE_SZ;
6815         break;
6816 #endif
6817 #ifdef F_ADD_SEALS
6818     case TARGET_F_ADD_SEALS:
6819         ret = F_ADD_SEALS;
6820         break;
6821     case TARGET_F_GET_SEALS:
6822         ret = F_GET_SEALS;
6823         break;
6824 #endif
6825     default:
6826         ret = -TARGET_EINVAL;
6827         break;
6828     }
6829 
6830 #if defined(__powerpc64__)
6831     /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13
6832      * and 14, which the kernel does not support. The glibc fcntl() wrapper
6833      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6834      * the syscall directly, adjust them to what the kernel expects.
6835      */
6836     if (ret >= F_GETLK && ret <= F_SETLKW) {
6837         ret -= F_GETLK - 5;
6838     }
6839 #endif
6840 
6841     return ret;
6842 }
6843 
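/*
 * FLOCK_TRANSTBL is expanded twice with different definitions of
 * TRANSTBL_CONVERT, generating both directions of the l_type translation
 * from a single list of lock types.
 */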
6844 #define FLOCK_TRANSTBL \
6845     switch (type) { \
6846     TRANSTBL_CONVERT(F_RDLCK); \
6847     TRANSTBL_CONVERT(F_WRLCK); \
6848     TRANSTBL_CONVERT(F_UNLCK); \
6849     }
6850 
6851 static int target_to_host_flock(int type)
6852 {
6853 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6854     FLOCK_TRANSTBL
6855 #undef  TRANSTBL_CONVERT
6856     return -TARGET_EINVAL;
6857 }
6858 
6859 static int host_to_target_flock(int type)
6860 {
6861 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6862     FLOCK_TRANSTBL
6863 #undef  TRANSTBL_CONVERT
6864     /* If we don't know how to convert the value coming from the host,
6865      * copy it to the target field as-is.
6866      */
6867     return type;
6868 }
6869 
6870 static inline abi_long copy_from_user_flock(struct flock *fl,
6871                                             abi_ulong target_flock_addr)
6872 {
6873     struct target_flock *target_fl;
6874     int l_type;
6875 
6876     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6877         return -TARGET_EFAULT;
6878     }
6879 
6880     __get_user(l_type, &target_fl->l_type);
6881     l_type = target_to_host_flock(l_type);
6882     if (l_type < 0) {
6883         return l_type;
6884     }
6885     fl->l_type = l_type;
6886     __get_user(fl->l_whence, &target_fl->l_whence);
6887     __get_user(fl->l_start, &target_fl->l_start);
6888     __get_user(fl->l_len, &target_fl->l_len);
6889     __get_user(fl->l_pid, &target_fl->l_pid);
6890     unlock_user_struct(target_fl, target_flock_addr, 0);
6891     return 0;
6892 }
6893 
6894 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6895                                           const struct flock *fl)
6896 {
6897     struct target_flock *target_fl;
6898     short l_type;
6899 
6900     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6901         return -TARGET_EFAULT;
6902     }
6903 
6904     l_type = host_to_target_flock(fl->l_type);
6905     __put_user(l_type, &target_fl->l_type);
6906     __put_user(fl->l_whence, &target_fl->l_whence);
6907     __put_user(fl->l_start, &target_fl->l_start);
6908     __put_user(fl->l_len, &target_fl->l_len);
6909     __put_user(fl->l_pid, &target_fl->l_pid);
6910     unlock_user_struct(target_fl, target_flock_addr, 1);
6911     return 0;
6912 }
6913 
6914 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6915 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6916 
6917 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6918 struct target_oabi_flock64 {
6919     abi_short l_type;
6920     abi_short l_whence;
6921     abi_llong l_start;
6922     abi_llong l_len;
6923     abi_int   l_pid;
6924 } QEMU_PACKED;
6925 
6926 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6927                                                    abi_ulong target_flock_addr)
6928 {
6929     struct target_oabi_flock64 *target_fl;
6930     int l_type;
6931 
6932     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6933         return -TARGET_EFAULT;
6934     }
6935 
6936     __get_user(l_type, &target_fl->l_type);
6937     l_type = target_to_host_flock(l_type);
6938     if (l_type < 0) {
6939         return l_type;
6940     }
6941     fl->l_type = l_type;
6942     __get_user(fl->l_whence, &target_fl->l_whence);
6943     __get_user(fl->l_start, &target_fl->l_start);
6944     __get_user(fl->l_len, &target_fl->l_len);
6945     __get_user(fl->l_pid, &target_fl->l_pid);
6946     unlock_user_struct(target_fl, target_flock_addr, 0);
6947     return 0;
6948 }
6949 
6950 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6951                                                  const struct flock *fl)
6952 {
6953     struct target_oabi_flock64 *target_fl;
6954     short l_type;
6955 
6956     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     l_type = host_to_target_flock(fl->l_type);
6961     __put_user(l_type, &target_fl->l_type);
6962     __put_user(fl->l_whence, &target_fl->l_whence);
6963     __put_user(fl->l_start, &target_fl->l_start);
6964     __put_user(fl->l_len, &target_fl->l_len);
6965     __put_user(fl->l_pid, &target_fl->l_pid);
6966     unlock_user_struct(target_fl, target_flock_addr, 1);
6967     return 0;
6968 }
6969 #endif
6970 
6971 static inline abi_long copy_from_user_flock64(struct flock *fl,
6972                                               abi_ulong target_flock_addr)
6973 {
6974     struct target_flock64 *target_fl;
6975     int l_type;
6976 
6977     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6978         return -TARGET_EFAULT;
6979     }
6980 
6981     __get_user(l_type, &target_fl->l_type);
6982     l_type = target_to_host_flock(l_type);
6983     if (l_type < 0) {
6984         return l_type;
6985     }
6986     fl->l_type = l_type;
6987     __get_user(fl->l_whence, &target_fl->l_whence);
6988     __get_user(fl->l_start, &target_fl->l_start);
6989     __get_user(fl->l_len, &target_fl->l_len);
6990     __get_user(fl->l_pid, &target_fl->l_pid);
6991     unlock_user_struct(target_fl, target_flock_addr, 0);
6992     return 0;
6993 }
6994 
6995 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6996                                             const struct flock *fl)
6997 {
6998     struct target_flock64 *target_fl;
6999     short l_type;
7000 
7001     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7002         return -TARGET_EFAULT;
7003     }
7004 
7005     l_type = host_to_target_flock(fl->l_type);
7006     __put_user(l_type, &target_fl->l_type);
7007     __put_user(fl->l_whence, &target_fl->l_whence);
7008     __put_user(fl->l_start, &target_fl->l_start);
7009     __put_user(fl->l_len, &target_fl->l_len);
7010     __put_user(fl->l_pid, &target_fl->l_pid);
7011     unlock_user_struct(target_fl, target_flock_addr, 1);
7012     return 0;
7013 }
7014 
7015 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7016 {
7017     struct flock fl;
7018 #ifdef F_GETOWN_EX
7019     struct f_owner_ex fox;
7020     struct target_f_owner_ex *target_fox;
7021 #endif
7022     abi_long ret;
7023     int host_cmd = target_to_host_fcntl_cmd(cmd);
7024 
7025     if (host_cmd == -TARGET_EINVAL)
7026         return host_cmd;
7027 
7028     switch(cmd) {
7029     case TARGET_F_GETLK:
7030         ret = copy_from_user_flock(&fl, arg);
7031         if (ret) {
7032             return ret;
7033         }
7034         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7035         if (ret == 0) {
7036             ret = copy_to_user_flock(arg, &fl);
7037         }
7038         break;
7039 
7040     case TARGET_F_SETLK:
7041     case TARGET_F_SETLKW:
7042         ret = copy_from_user_flock(&fl, arg);
7043         if (ret) {
7044             return ret;
7045         }
7046         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7047         break;
7048 
7049     case TARGET_F_GETLK64:
7050     case TARGET_F_OFD_GETLK:
7051         ret = copy_from_user_flock64(&fl, arg);
7052         if (ret) {
7053             return ret;
7054         }
7055         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7056         if (ret == 0) {
7057             ret = copy_to_user_flock64(arg, &fl);
7058         }
7059         break;
7060     case TARGET_F_SETLK64:
7061     case TARGET_F_SETLKW64:
7062     case TARGET_F_OFD_SETLK:
7063     case TARGET_F_OFD_SETLKW:
7064         ret = copy_from_user_flock64(&fl, arg);
7065         if (ret) {
7066             return ret;
7067         }
7068         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7069         break;
7070 
7071     case TARGET_F_GETFL:
7072         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7073         if (ret >= 0) {
7074             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7075             /* On 64-bit hosts, report O_LARGEFILE to 32-bit guests: */
7076             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7077                 ret |= TARGET_O_LARGEFILE;
7078             }
7079         }
7080         break;
7081 
7082     case TARGET_F_SETFL:
7083         ret = get_errno(safe_fcntl(fd, host_cmd,
7084                                    target_to_host_bitmask(arg,
7085                                                           fcntl_flags_tbl)));
7086         break;
7087 
7088 #ifdef F_GETOWN_EX
7089     case TARGET_F_GETOWN_EX:
7090         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7091         if (ret >= 0) {
7092             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7093                 return -TARGET_EFAULT;
7094             target_fox->type = tswap32(fox.type);
7095             target_fox->pid = tswap32(fox.pid);
7096             unlock_user_struct(target_fox, arg, 1);
7097         }
7098         break;
7099 #endif
7100 
7101 #ifdef F_SETOWN_EX
7102     case TARGET_F_SETOWN_EX:
7103         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7104             return -TARGET_EFAULT;
7105         fox.type = tswap32(target_fox->type);
7106         fox.pid = tswap32(target_fox->pid);
7107         unlock_user_struct(target_fox, arg, 0);
7108         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7109         break;
7110 #endif
7111 
7112     case TARGET_F_SETSIG:
7113         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7114         break;
7115 
7116     case TARGET_F_GETSIG:
7117         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7118         break;
7119 
7120     case TARGET_F_SETOWN:
7121     case TARGET_F_GETOWN:
7122     case TARGET_F_SETLEASE:
7123     case TARGET_F_GETLEASE:
7124     case TARGET_F_SETPIPE_SZ:
7125     case TARGET_F_GETPIPE_SZ:
7126     case TARGET_F_ADD_SEALS:
7127     case TARGET_F_GET_SEALS:
7128         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7129         break;
7130 
7131     default:
7132         ret = get_errno(safe_fcntl(fd, cmd, arg));
7133         break;
7134     }
7135     return ret;
7136 }
7137 
7138 #ifdef USE_UID16
7139 
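/*
 * Helpers for the legacy 16-bit UID/GID ABI: IDs that do not fit in 16 bits
 * are reported as the traditional overflow ID 65534, and the 16-bit -1
 * sentinel is widened back to -1 for the host.
 */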
7140 static inline int high2lowuid(int uid)
7141 {
7142     if (uid > 65535)
7143         return 65534;
7144     else
7145         return uid;
7146 }
7147 
7148 static inline int high2lowgid(int gid)
7149 {
7150     if (gid > 65535)
7151         return 65534;
7152     else
7153         return gid;
7154 }
7155 
7156 static inline int low2highuid(int uid)
7157 {
7158     if ((int16_t)uid == -1)
7159         return -1;
7160     else
7161         return uid;
7162 }
7163 
7164 static inline int low2highgid(int gid)
7165 {
7166     if ((int16_t)gid == -1)
7167         return -1;
7168     else
7169         return gid;
7170 }
7171 static inline int tswapid(int id)
7172 {
7173     return tswap16(id);
7174 }
7175 
7176 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7177 
7178 #else /* !USE_UID16 */
7179 static inline int high2lowuid(int uid)
7180 {
7181     return uid;
7182 }
7183 static inline int high2lowgid(int gid)
7184 {
7185     return gid;
7186 }
7187 static inline int low2highuid(int uid)
7188 {
7189     return uid;
7190 }
7191 static inline int low2highgid(int gid)
7192 {
7193     return gid;
7194 }
7195 static inline int tswapid(int id)
7196 {
7197     return tswap32(id);
7198 }
7199 
7200 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7201 
7202 #endif /* USE_UID16 */
7203 
7204 /* We must do direct syscalls for setting UID/GID, because we want to
7205  * implement the Linux system call semantics of "change only for this thread",
7206  * not the libc/POSIX semantics of "change for all threads in process".
7207  * (See http://ewontfix.com/17/ for more details.)
7208  * We use the 32-bit version of the syscalls if present; if it is not
7209  * then either the host architecture supports 32-bit UIDs natively with
7210  * the standard syscall, or the 16-bit UID is the best we can do.
7211  */
7212 #ifdef __NR_setuid32
7213 #define __NR_sys_setuid __NR_setuid32
7214 #else
7215 #define __NR_sys_setuid __NR_setuid
7216 #endif
7217 #ifdef __NR_setgid32
7218 #define __NR_sys_setgid __NR_setgid32
7219 #else
7220 #define __NR_sys_setgid __NR_setgid
7221 #endif
7222 #ifdef __NR_setresuid32
7223 #define __NR_sys_setresuid __NR_setresuid32
7224 #else
7225 #define __NR_sys_setresuid __NR_setresuid
7226 #endif
7227 #ifdef __NR_setresgid32
7228 #define __NR_sys_setresgid __NR_setresgid32
7229 #else
7230 #define __NR_sys_setresgid __NR_setresgid
7231 #endif
7232 #ifdef __NR_setgroups32
7233 #define __NR_sys_setgroups __NR_setgroups32
7234 #else
7235 #define __NR_sys_setgroups __NR_setgroups
7236 #endif
7237 #ifdef __NR_sys_setreuid32
7238 #define __NR_sys_setreuid __NR_setreuid32
7239 #else
7240 #define __NR_sys_setreuid __NR_setreuid
7241 #endif
7242 #ifdef __NR_sys_setregid32
7243 #define __NR_sys_setregid __NR_setregid32
7244 #else
7245 #define __NR_sys_setregid __NR_setregid
7246 #endif
7247 
7248 _syscall1(int, sys_setuid, uid_t, uid)
7249 _syscall1(int, sys_setgid, gid_t, gid)
7250 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7251 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7252 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7253 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7254 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7255 
7256 void syscall_init(void)
7257 {
7258     IOCTLEntry *ie;
7259     const argtype *arg_type;
7260     int size;
7261 
7262     thunk_init(STRUCT_MAX);
7263 
7264 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7265 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7266 #include "syscall_types.h"
7267 #undef STRUCT
7268 #undef STRUCT_SPECIAL
7269 
7270     /* We patch the ioctl size if necessary.  We rely on the fact that
7271        no ioctl has every bit of the size field set. */
7272     ie = ioctl_entries;
7273     while (ie->target_cmd != 0) {
7274         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7275             TARGET_IOC_SIZEMASK) {
7276             arg_type = ie->arg_type;
7277             if (arg_type[0] != TYPE_PTR) {
7278                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7279                         ie->target_cmd);
7280                 exit(1);
7281             }
7282             arg_type++;
7283             size = thunk_type_size(arg_type, 0);
7284             ie->target_cmd = (ie->target_cmd &
7285                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7286                 (size << TARGET_IOC_SIZESHIFT);
7287         }
7288 
7289         /* automatic consistency check if same arch */
7290 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7291     (defined(__x86_64__) && defined(TARGET_X86_64))
7292         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7293             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7294                     ie->name, ie->target_cmd, ie->host_cmd);
7295         }
7296 #endif
7297         ie++;
7298     }
7299 }
7300 
7301 #ifdef TARGET_NR_truncate64
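/*
 * On ABIs where a 64-bit syscall argument must sit in an aligned register
 * pair, the offset halves arrive in arg3/arg4 instead of arg2/arg3;
 * regpairs_aligned() detects this and target_offset64() reassembles them.
 */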
7302 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7303                                          abi_long arg2,
7304                                          abi_long arg3,
7305                                          abi_long arg4)
7306 {
7307     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7308         arg2 = arg3;
7309         arg3 = arg4;
7310     }
7311     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7312 }
7313 #endif
7314 
7315 #ifdef TARGET_NR_ftruncate64
7316 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7317                                           abi_long arg2,
7318                                           abi_long arg3,
7319                                           abi_long arg4)
7320 {
7321     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7322         arg2 = arg3;
7323         arg3 = arg4;
7324     }
7325     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7326 }
7327 #endif
7328 
7329 #if defined(TARGET_NR_timer_settime) || \
7330     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7331 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7332                                                  abi_ulong target_addr)
7333 {
7334     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7335                                 offsetof(struct target_itimerspec,
7336                                          it_interval)) ||
7337         target_to_host_timespec(&host_its->it_value, target_addr +
7338                                 offsetof(struct target_itimerspec,
7339                                          it_value))) {
7340         return -TARGET_EFAULT;
7341     }
7342 
7343     return 0;
7344 }
7345 #endif
7346 
7347 #if defined(TARGET_NR_timer_settime64) || \
7348     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7349 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7350                                                    abi_ulong target_addr)
7351 {
7352     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7353                                   offsetof(struct target__kernel_itimerspec,
7354                                            it_interval)) ||
7355         target_to_host_timespec64(&host_its->it_value, target_addr +
7356                                   offsetof(struct target__kernel_itimerspec,
7357                                            it_value))) {
7358         return -TARGET_EFAULT;
7359     }
7360 
7361     return 0;
7362 }
7363 #endif
7364 
7365 #if ((defined(TARGET_NR_timerfd_gettime) || \
7366       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7367       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7368 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7369                                                  struct itimerspec *host_its)
7370 {
7371     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7372                                                        it_interval),
7373                                 &host_its->it_interval) ||
7374         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7375                                                        it_value),
7376                                 &host_its->it_value)) {
7377         return -TARGET_EFAULT;
7378     }
7379     return 0;
7380 }
7381 #endif
7382 
7383 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7384       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7385       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7386 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7387                                                    struct itimerspec *host_its)
7388 {
7389     if (host_to_target_timespec64(target_addr +
7390                                   offsetof(struct target__kernel_itimerspec,
7391                                            it_interval),
7392                                   &host_its->it_interval) ||
7393         host_to_target_timespec64(target_addr +
7394                                   offsetof(struct target__kernel_itimerspec,
7395                                            it_value),
7396                                   &host_its->it_value)) {
7397         return -TARGET_EFAULT;
7398     }
7399     return 0;
7400 }
7401 #endif
7402 
7403 #if defined(TARGET_NR_adjtimex) || \
7404     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7405 static inline abi_long target_to_host_timex(struct timex *host_tx,
7406                                             abi_long target_addr)
7407 {
7408     struct target_timex *target_tx;
7409 
7410     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7411         return -TARGET_EFAULT;
7412     }
7413 
7414     __get_user(host_tx->modes, &target_tx->modes);
7415     __get_user(host_tx->offset, &target_tx->offset);
7416     __get_user(host_tx->freq, &target_tx->freq);
7417     __get_user(host_tx->maxerror, &target_tx->maxerror);
7418     __get_user(host_tx->esterror, &target_tx->esterror);
7419     __get_user(host_tx->status, &target_tx->status);
7420     __get_user(host_tx->constant, &target_tx->constant);
7421     __get_user(host_tx->precision, &target_tx->precision);
7422     __get_user(host_tx->tolerance, &target_tx->tolerance);
7423     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7424     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7425     __get_user(host_tx->tick, &target_tx->tick);
7426     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7427     __get_user(host_tx->jitter, &target_tx->jitter);
7428     __get_user(host_tx->shift, &target_tx->shift);
7429     __get_user(host_tx->stabil, &target_tx->stabil);
7430     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7431     __get_user(host_tx->calcnt, &target_tx->calcnt);
7432     __get_user(host_tx->errcnt, &target_tx->errcnt);
7433     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7434     __get_user(host_tx->tai, &target_tx->tai);
7435 
7436     unlock_user_struct(target_tx, target_addr, 0);
7437     return 0;
7438 }
7439 
7440 static inline abi_long host_to_target_timex(abi_long target_addr,
7441                                             struct timex *host_tx)
7442 {
7443     struct target_timex *target_tx;
7444 
7445     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7446         return -TARGET_EFAULT;
7447     }
7448 
7449     __put_user(host_tx->modes, &target_tx->modes);
7450     __put_user(host_tx->offset, &target_tx->offset);
7451     __put_user(host_tx->freq, &target_tx->freq);
7452     __put_user(host_tx->maxerror, &target_tx->maxerror);
7453     __put_user(host_tx->esterror, &target_tx->esterror);
7454     __put_user(host_tx->status, &target_tx->status);
7455     __put_user(host_tx->constant, &target_tx->constant);
7456     __put_user(host_tx->precision, &target_tx->precision);
7457     __put_user(host_tx->tolerance, &target_tx->tolerance);
7458     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7459     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7460     __put_user(host_tx->tick, &target_tx->tick);
7461     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7462     __put_user(host_tx->jitter, &target_tx->jitter);
7463     __put_user(host_tx->shift, &target_tx->shift);
7464     __put_user(host_tx->stabil, &target_tx->stabil);
7465     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7466     __put_user(host_tx->calcnt, &target_tx->calcnt);
7467     __put_user(host_tx->errcnt, &target_tx->errcnt);
7468     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7469     __put_user(host_tx->tai, &target_tx->tai);
7470 
7471     unlock_user_struct(target_tx, target_addr, 1);
7472     return 0;
7473 }
7474 #endif
7475 
7476 
7477 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7478 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7479                                               abi_long target_addr)
7480 {
7481     struct target__kernel_timex *target_tx;
7482 
7483     if (copy_from_user_timeval64(&host_tx->time, target_addr +
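    /*
     * The time member uses the 64-bit timeval layout, so convert it
     * separately via copy_from_user_timeval64() before copying the
     * remaining fields below.
     */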
7484                                  offsetof(struct target__kernel_timex,
7485                                           time))) {
7486         return -TARGET_EFAULT;
7487     }
7488 
7489     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7490         return -TARGET_EFAULT;
7491     }
7492 
7493     __get_user(host_tx->modes, &target_tx->modes);
7494     __get_user(host_tx->offset, &target_tx->offset);
7495     __get_user(host_tx->freq, &target_tx->freq);
7496     __get_user(host_tx->maxerror, &target_tx->maxerror);
7497     __get_user(host_tx->esterror, &target_tx->esterror);
7498     __get_user(host_tx->status, &target_tx->status);
7499     __get_user(host_tx->constant, &target_tx->constant);
7500     __get_user(host_tx->precision, &target_tx->precision);
7501     __get_user(host_tx->tolerance, &target_tx->tolerance);
7502     __get_user(host_tx->tick, &target_tx->tick);
7503     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7504     __get_user(host_tx->jitter, &target_tx->jitter);
7505     __get_user(host_tx->shift, &target_tx->shift);
7506     __get_user(host_tx->stabil, &target_tx->stabil);
7507     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7508     __get_user(host_tx->calcnt, &target_tx->calcnt);
7509     __get_user(host_tx->errcnt, &target_tx->errcnt);
7510     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7511     __get_user(host_tx->tai, &target_tx->tai);
7512 
7513     unlock_user_struct(target_tx, target_addr, 0);
7514     return 0;
7515 }
7516 
7517 static inline abi_long host_to_target_timex64(abi_long target_addr,
7518                                               struct timex *host_tx)
7519 {
7520     struct target__kernel_timex *target_tx;
7521 
7522     if (copy_to_user_timeval64(target_addr +
7523                                 offsetof(struct target__kernel_timex, time),
7524                                 &host_tx->time)) {
7525         return -TARGET_EFAULT;
7526     }
7527 
7528     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7529         return -TARGET_EFAULT;
7530     }
7531 
7532     __put_user(host_tx->modes, &target_tx->modes);
7533     __put_user(host_tx->offset, &target_tx->offset);
7534     __put_user(host_tx->freq, &target_tx->freq);
7535     __put_user(host_tx->maxerror, &target_tx->maxerror);
7536     __put_user(host_tx->esterror, &target_tx->esterror);
7537     __put_user(host_tx->status, &target_tx->status);
7538     __put_user(host_tx->constant, &target_tx->constant);
7539     __put_user(host_tx->precision, &target_tx->precision);
7540     __put_user(host_tx->tolerance, &target_tx->tolerance);
7541     __put_user(host_tx->tick, &target_tx->tick);
7542     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7543     __put_user(host_tx->jitter, &target_tx->jitter);
7544     __put_user(host_tx->shift, &target_tx->shift);
7545     __put_user(host_tx->stabil, &target_tx->stabil);
7546     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7547     __put_user(host_tx->calcnt, &target_tx->calcnt);
7548     __put_user(host_tx->errcnt, &target_tx->errcnt);
7549     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7550     __put_user(host_tx->tai, &target_tx->tai);
7551 
7552     unlock_user_struct(target_tx, target_addr, 1);
7553     return 0;
7554 }
7555 #endif
7556 
7557 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7558 #define sigev_notify_thread_id _sigev_un._tid
7559 #endif
7560 
7561 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7562                                                abi_ulong target_addr)
7563 {
7564     struct target_sigevent *target_sevp;
7565 
7566     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7567         return -TARGET_EFAULT;
7568     }
7569 
7570     /* This union is awkward on 64 bit systems because it has a 32 bit
7571      * integer and a pointer in it; we follow the conversion approach
7572      * used for handling sigval types in signal.c so the guest should get
7573      * the correct value back even if we did a 64 bit byteswap and it's
7574      * using the 32 bit integer.
7575      */
7576     host_sevp->sigev_value.sival_ptr =
7577         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7578     host_sevp->sigev_signo =
7579         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7580     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7581     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7582 
7583     unlock_user_struct(target_sevp, target_addr, 1);
7584     return 0;
7585 }
7586 
7587 #if defined(TARGET_NR_mlockall)
7588 static inline int target_to_host_mlockall_arg(int arg)
7589 {
7590     int result = 0;
7591 
7592     if (arg & TARGET_MCL_CURRENT) {
7593         result |= MCL_CURRENT;
7594     }
7595     if (arg & TARGET_MCL_FUTURE) {
7596         result |= MCL_FUTURE;
7597     }
7598 #ifdef MCL_ONFAULT
7599     if (arg & TARGET_MCL_ONFAULT) {
7600         result |= MCL_ONFAULT;
7601     }
7602 #endif
7603 
7604     return result;
7605 }
7606 #endif
7607 
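/*
 * Translate the MS_* bits individually since their values may differ
 * between target and host; any unrecognised bits are passed through
 * unchanged.
 */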
7608 static inline int target_to_host_msync_arg(abi_long arg)
7609 {
7610     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7611            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7612            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7613            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7614 }
7615 
7616 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7617      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7618      defined(TARGET_NR_newfstatat))
7619 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7620                                              abi_ulong target_addr,
7621                                              struct stat *host_st)
7622 {
7623 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7624     if (cpu_env->eabi) {
7625         struct target_eabi_stat64 *target_st;
7626 
7627         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7628             return -TARGET_EFAULT;
7629         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7630         __put_user(host_st->st_dev, &target_st->st_dev);
7631         __put_user(host_st->st_ino, &target_st->st_ino);
7632 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7633         __put_user(host_st->st_ino, &target_st->__st_ino);
7634 #endif
7635         __put_user(host_st->st_mode, &target_st->st_mode);
7636         __put_user(host_st->st_nlink, &target_st->st_nlink);
7637         __put_user(host_st->st_uid, &target_st->st_uid);
7638         __put_user(host_st->st_gid, &target_st->st_gid);
7639         __put_user(host_st->st_rdev, &target_st->st_rdev);
7640         __put_user(host_st->st_size, &target_st->st_size);
7641         __put_user(host_st->st_blksize, &target_st->st_blksize);
7642         __put_user(host_st->st_blocks, &target_st->st_blocks);
7643         __put_user(host_st->st_atime, &target_st->target_st_atime);
7644         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7645         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7646 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7647         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7648         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7649         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7650 #endif
7651         unlock_user_struct(target_st, target_addr, 1);
7652     } else
7653 #endif
7654     {
7655 #if defined(TARGET_HAS_STRUCT_STAT64)
7656         struct target_stat64 *target_st;
7657 #else
7658         struct target_stat *target_st;
7659 #endif
7660 
7661         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7662             return -TARGET_EFAULT;
7663         memset(target_st, 0, sizeof(*target_st));
7664         __put_user(host_st->st_dev, &target_st->st_dev);
7665         __put_user(host_st->st_ino, &target_st->st_ino);
7666 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7667         __put_user(host_st->st_ino, &target_st->__st_ino);
7668 #endif
7669         __put_user(host_st->st_mode, &target_st->st_mode);
7670         __put_user(host_st->st_nlink, &target_st->st_nlink);
7671         __put_user(host_st->st_uid, &target_st->st_uid);
7672         __put_user(host_st->st_gid, &target_st->st_gid);
7673         __put_user(host_st->st_rdev, &target_st->st_rdev);
7674         /* XXX: better use of kernel struct */
7675         __put_user(host_st->st_size, &target_st->st_size);
7676         __put_user(host_st->st_blksize, &target_st->st_blksize);
7677         __put_user(host_st->st_blocks, &target_st->st_blocks);
7678         __put_user(host_st->st_atime, &target_st->target_st_atime);
7679         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7680         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7681 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7682         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7683         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7684         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7685 #endif
7686         unlock_user_struct(target_st, target_addr, 1);
7687     }
7688 
7689     return 0;
7690 }
7691 #endif
7692 
7693 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7694 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7695                                             abi_ulong target_addr)
7696 {
7697     struct target_statx *target_stx;
7698 
7699     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7700         return -TARGET_EFAULT;
7701     }
7702     memset(target_stx, 0, sizeof(*target_stx));
7703 
7704     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7705     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7706     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7707     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7708     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7709     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7710     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7711     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7712     __put_user(host_stx->stx_size, &target_stx->stx_size);
7713     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7714     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7715     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7716     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7717     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7718     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7719     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7720     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7721     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7722     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7723     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7724     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7725     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7726     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7727 
7728     unlock_user_struct(target_stx, target_addr, 1);
7729 
7730     return 0;
7731 }
7732 #endif
7733 
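/*
 * Issue a raw host futex call.  On 64-bit hosts this is always __NR_futex;
 * on 32-bit hosts prefer __NR_futex_time64 when the libc timespec already
 * uses a 64-bit tv_sec, falling back to the old __NR_futex otherwise.
 */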
7734 static int do_sys_futex(int *uaddr, int op, int val,
7735                          const struct timespec *timeout, int *uaddr2,
7736                          int val3)
7737 {
7738 #if HOST_LONG_BITS == 64
7739 #if defined(__NR_futex)
7740     /* A 64-bit host has a 64-bit time_t and no _time64 syscall variant. */
7741     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7742 
7743 #endif
7744 #else /* HOST_LONG_BITS == 64 */
7745 #if defined(__NR_futex_time64)
7746     if (sizeof(timeout->tv_sec) == 8) {
7747         /* _time64 function on 32bit arch */
7748         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7749     }
7750 #endif
7751 #if defined(__NR_futex)
7752     /* old function on 32bit arch */
7753     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7754 #endif
7755 #endif /* HOST_LONG_BITS == 64 */
7756     g_assert_not_reached();
7757 }
7758 
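/*
 * As do_sys_futex(), but issued through the safe_syscall wrappers and with
 * the result converted via get_errno(); returns -TARGET_ENOSYS if no
 * suitable host syscall is available.
 */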
7759 static int do_safe_futex(int *uaddr, int op, int val,
7760                          const struct timespec *timeout, int *uaddr2,
7761                          int val3)
7762 {
7763 #if HOST_LONG_BITS == 64
7764 #if defined(__NR_futex)
7765     /* A 64-bit host has a 64-bit time_t and no _time64 syscall variant. */
7766     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7767 #endif
7768 #else /* HOST_LONG_BITS == 64 */
7769 #if defined(__NR_futex_time64)
7770     if (sizeof(timeout->tv_sec) == 8) {
7771         /* _time64 function on 32bit arch */
7772         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7773                                            val3));
7774     }
7775 #endif
7776 #if defined(__NR_futex)
7777     /* old function on 32bit arch */
7778     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7779 #endif
7780 #endif /* HOST_LONG_BITS == 64 */
7781     return -TARGET_ENOSYS;
7782 }
7783 
7784 /* ??? Using host futex calls even when target atomic operations
7785    are not really atomic probably breaks things.  However, implementing
7786    futexes locally would make futexes shared between multiple processes
7787    tricky.  In that case they would probably be useless anyway, because
7788    guest atomic operations would not work either.  */
7789 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7790 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7791                     int op, int val, target_ulong timeout,
7792                     target_ulong uaddr2, int val3)
7793 {
7794     struct timespec ts, *pts = NULL;
7795     void *haddr2 = NULL;
7796     int base_op;
7797 
7798     /* We assume FUTEX_* constants are the same on both host and target. */
7799 #ifdef FUTEX_CMD_MASK
7800     base_op = op & FUTEX_CMD_MASK;
7801 #else
7802     base_op = op;
7803 #endif
7804     switch (base_op) {
7805     case FUTEX_WAIT:
7806     case FUTEX_WAIT_BITSET:
7807         val = tswap32(val);
7808         break;
7809     case FUTEX_WAIT_REQUEUE_PI:
7810         val = tswap32(val);
7811         haddr2 = g2h(cpu, uaddr2);
7812         break;
7813     case FUTEX_LOCK_PI:
7814     case FUTEX_LOCK_PI2:
7815         break;
7816     case FUTEX_WAKE:
7817     case FUTEX_WAKE_BITSET:
7818     case FUTEX_TRYLOCK_PI:
7819     case FUTEX_UNLOCK_PI:
7820         timeout = 0;
7821         break;
7822     case FUTEX_FD:
7823         val = target_to_host_signal(val);
7824         timeout = 0;
7825         break;
7826     case FUTEX_CMP_REQUEUE:
7827     case FUTEX_CMP_REQUEUE_PI:
7828         val3 = tswap32(val3);
7829         /* fall through */
7830     case FUTEX_REQUEUE:
7831     case FUTEX_WAKE_OP:
7832         /*
7833          * For these, the 4th argument is not TIMEOUT, but VAL2.
7834          * But the prototype of do_safe_futex takes a pointer, so
7835          * insert casts to satisfy the compiler.  We do not need
7836          * to tswap VAL2 since it's not compared to guest memory.
7837          */
7838         pts = (struct timespec *)(uintptr_t)timeout;
7839         timeout = 0;
7840         haddr2 = g2h(cpu, uaddr2);
7841         break;
7842     default:
7843         return -TARGET_ENOSYS;
7844     }
7845     if (timeout) {
7846         pts = &ts;
7847         if (time64
7848             ? target_to_host_timespec64(pts, timeout)
7849             : target_to_host_timespec(pts, timeout)) {
7850             return -TARGET_EFAULT;
7851         }
7852     }
7853     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7854 }
7855 #endif
7856 
7857 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7858 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7859                                      abi_long handle, abi_long mount_id,
7860                                      abi_long flags)
7861 {
7862     struct file_handle *target_fh;
7863     struct file_handle *fh;
7864     int mid = 0;
7865     abi_long ret;
7866     char *name;
7867     unsigned int size, total_size;
7868 
7869     if (get_user_s32(size, handle)) {
7870         return -TARGET_EFAULT;
7871     }
7872 
7873     name = lock_user_string(pathname);
7874     if (!name) {
7875         return -TARGET_EFAULT;
7876     }
7877 
7878     total_size = sizeof(struct file_handle) + size;
7879     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7880     if (!target_fh) {
7881         unlock_user(name, pathname, 0);
7882         return -TARGET_EFAULT;
7883     }
7884 
7885     fh = g_malloc0(total_size);
7886     fh->handle_bytes = size;
7887 
7888     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7889     unlock_user(name, pathname, 0);
7890 
7891     /* man name_to_handle_at(2):
7892      * Other than the use of the handle_bytes field, the caller should treat
7893      * the file_handle structure as an opaque data type.
7894      */
7895 
7896     memcpy(target_fh, fh, total_size);
7897     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7898     target_fh->handle_type = tswap32(fh->handle_type);
7899     g_free(fh);
7900     unlock_user(target_fh, handle, total_size);
7901 
7902     if (put_user_s32(mid, mount_id)) {
7903         return -TARGET_EFAULT;
7904     }
7905 
7906     return ret;
7907 
7908 }
7909 #endif
7910 
7911 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7912 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7913                                      abi_long flags)
7914 {
7915     struct file_handle *target_fh;
7916     struct file_handle *fh;
7917     unsigned int size, total_size;
7918     abi_long ret;
7919 
7920     if (get_user_s32(size, handle)) {
7921         return -TARGET_EFAULT;
7922     }
7923 
7924     total_size = sizeof(struct file_handle) + size;
7925     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7926     if (!target_fh) {
7927         return -TARGET_EFAULT;
7928     }
7929 
7930     fh = g_memdup(target_fh, total_size);
7931     fh->handle_bytes = size;
7932     fh->handle_type = tswap32(target_fh->handle_type);
7933 
7934     ret = get_errno(open_by_handle_at(mount_fd, fh,
7935                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7936 
7937     g_free(fh);
7938 
7939     unlock_user(target_fh, handle, total_size);
7940 
7941     return ret;
7942 }
7943 #endif
7944 
7945 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7946 
7947 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7948 {
7949     int host_flags;
7950     target_sigset_t *target_mask;
7951     sigset_t host_mask;
7952     abi_long ret;
7953 
7954     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7955         return -TARGET_EINVAL;
7956     }
7957     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7958         return -TARGET_EFAULT;
7959     }
7960 
7961     target_to_host_sigset(&host_mask, target_mask);
7962 
7963     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7964 
7965     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7966     if (ret >= 0) {
7967         fd_trans_register(ret, &target_signalfd_trans);
7968     }
7969 
7970     unlock_user_struct(target_mask, mask, 0);
7971 
7972     return ret;
7973 }
7974 #endif
7975 
7976 /* Map host to target signal numbers for the wait family of syscalls.
7977    Assume all other status bits are the same.  */
7978 int host_to_target_waitstatus(int status)
7979 {
7980     if (WIFSIGNALED(status)) {
7981         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7982     }
7983     if (WIFSTOPPED(status)) {
7984         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7985                | (status & 0xff);
7986     }
7987     return status;
7988 }
7989 
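/*
 * Emulate /proc/self/cmdline: write the saved argv strings,
 * each including its trailing NUL, to fd.
 */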
7990 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7991 {
7992     CPUState *cpu = env_cpu(cpu_env);
7993     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7994     int i;
7995 
7996     for (i = 0; i < bprm->argc; i++) {
7997         size_t len = strlen(bprm->argv[i]) + 1;
7998 
7999         if (write(fd, bprm->argv[i], len) != len) {
8000             return -1;
8001         }
8002     }
8003 
8004     return 0;
8005 }
8006 
8007 struct open_self_maps_data {
8008     TaskState *ts;
8009     IntervalTreeRoot *host_maps;
8010     int fd;
8011     bool smaps;
8012 };
8013 
8014 /*
8015  * Subroutine to output one line of /proc/self/maps,
8016  * or one region of /proc/self/smaps.
8017  */
8018 
8019 #ifdef TARGET_HPPA
8020 # define test_stack(S, E, L)  (E == L)
8021 #else
8022 # define test_stack(S, E, L)  (S == L)
8023 #endif
8024 
8025 static void open_self_maps_4(const struct open_self_maps_data *d,
8026                              const MapInfo *mi, abi_ptr start,
8027                              abi_ptr end, unsigned flags)
8028 {
8029     const struct image_info *info = d->ts->info;
8030     const char *path = mi->path;
8031     uint64_t offset;
8032     int fd = d->fd;
8033     int count;
8034 
8035     if (test_stack(start, end, info->stack_limit)) {
8036         path = "[stack]";
8037     } else if (start == info->brk) {
8038         path = "[heap]";
8039     } else if (start == info->vdso) {
8040         path = "[vdso]";
8041 #ifdef TARGET_X86_64
8042     } else if (start == TARGET_VSYSCALL_PAGE) {
8043         path = "[vsyscall]";
8044 #endif
8045     }
8046 
8047     /* Except for the null device (MAP_ANON), adjust this fragment's offset. */
8048     offset = mi->offset;
8049     if (mi->dev) {
8050         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8051         offset += hstart - mi->itree.start;
8052     }
8053 
8054     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8055                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8056                     start, end,
8057                     (flags & PAGE_READ) ? 'r' : '-',
8058                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8059                     (flags & PAGE_EXEC) ? 'x' : '-',
8060                     mi->is_priv ? 'p' : 's',
8061                     offset, major(mi->dev), minor(mi->dev),
8062                     (uint64_t)mi->inode);
8063     if (path) {
8064         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8065     } else {
8066         dprintf(fd, "\n");
8067     }
8068 
8069     if (d->smaps) {
8070         unsigned long size = end - start;
8071         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8072         unsigned long size_kb = size >> 10;
8073 
8074         dprintf(fd, "Size:                  %lu kB\n"
8075                 "KernelPageSize:        %lu kB\n"
8076                 "MMUPageSize:           %lu kB\n"
8077                 "Rss:                   0 kB\n"
8078                 "Pss:                   0 kB\n"
8079                 "Pss_Dirty:             0 kB\n"
8080                 "Shared_Clean:          0 kB\n"
8081                 "Shared_Dirty:          0 kB\n"
8082                 "Private_Clean:         0 kB\n"
8083                 "Private_Dirty:         0 kB\n"
8084                 "Referenced:            0 kB\n"
8085                 "Anonymous:             %lu kB\n"
8086                 "LazyFree:              0 kB\n"
8087                 "AnonHugePages:         0 kB\n"
8088                 "ShmemPmdMapped:        0 kB\n"
8089                 "FilePmdMapped:         0 kB\n"
8090                 "Shared_Hugetlb:        0 kB\n"
8091                 "Private_Hugetlb:       0 kB\n"
8092                 "Swap:                  0 kB\n"
8093                 "SwapPss:               0 kB\n"
8094                 "Locked:                0 kB\n"
8095                 "THPeligible:    0\n"
8096                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8097                 size_kb, page_size_kb, page_size_kb,
8098                 (flags & PAGE_ANON ? size_kb : 0),
8099                 (flags & PAGE_READ) ? " rd" : "",
8100                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8101                 (flags & PAGE_EXEC) ? " ex" : "",
8102                 mi->is_priv ? "" : " sh",
8103                 (flags & PAGE_READ) ? " mr" : "",
8104                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8105                 (flags & PAGE_EXEC) ? " me" : "",
8106                 mi->is_priv ? "" : " ms");
8107     }
8108 }
8109 
8110 /*
8111  * Callback for walk_memory_regions, when read_self_maps() fails.
8112  * Proceed without the benefit of host /proc/self/maps cross-check.
8113  */
8114 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8115                             target_ulong guest_end, unsigned long flags)
8116 {
8117     static const MapInfo mi = { .is_priv = true };
8118 
8119     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8120     return 0;
8121 }
8122 
8123 /*
8124  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8125  */
8126 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8127                             target_ulong guest_end, unsigned long flags)
8128 {
8129     const struct open_self_maps_data *d = opaque;
8130     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8131     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8132 
8133 #ifdef TARGET_X86_64
8134     /*
8135      * Because of the extremely high position of the vsyscall page within
8136      * the guest virtual address space, it is not backed by host memory at
8137      * all.  Therefore the loop below would fail.  This is the only case
8138      * of a guest mapping that has no host backing memory.
8139      */
8140     if (guest_start == TARGET_VSYSCALL_PAGE) {
8141         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8142     }
8143 #endif
8144 
8145     while (1) {
8146         IntervalTreeNode *n =
8147             interval_tree_iter_first(d->host_maps, host_start, host_start);
8148         MapInfo *mi = container_of(n, MapInfo, itree);
8149         uintptr_t this_hlast = MIN(host_last, n->last);
8150         target_ulong this_gend = h2g(this_hlast) + 1;
8151 
8152         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8153 
8154         if (this_hlast == host_last) {
8155             return 0;
8156         }
8157         host_start = this_hlast + 1;
8158         guest_start = h2g(host_start);
8159     }
8160 }
8161 
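/*
 * Emulate /proc/self/maps (or smaps, when requested) for the guest,
 * cross-checking against the host's own mappings when they can be read.
 */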
8162 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8163 {
8164     struct open_self_maps_data d = {
8165         .ts = get_task_state(env_cpu(env)),
8166         .fd = fd,
8167         .smaps = smaps
8168     };
8169 
8170     mmap_lock();
8171     d.host_maps = read_self_maps();
8172     if (d.host_maps) {
8173         walk_memory_regions(&d, open_self_maps_2);
8174         free_self_maps(d.host_maps);
8175     } else {
8176         walk_memory_regions(&d, open_self_maps_3);
8177     }
8178     mmap_unlock();
8179     return 0;
8180 }
8181 
8182 static int open_self_maps(CPUArchState *cpu_env, int fd)
8183 {
8184     return open_self_maps_1(cpu_env, fd, false);
8185 }
8186 
8187 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8188 {
8189     return open_self_maps_1(cpu_env, fd, true);
8190 }
8191 
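/*
 * Emulate /proc/self/stat: only the pid, comm, state, ppid, num_threads,
 * starttime and start-of-stack fields carry real values; everything else
 * is reported as 0.
 */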
8192 static int open_self_stat(CPUArchState *cpu_env, int fd)
8193 {
8194     CPUState *cpu = env_cpu(cpu_env);
8195     TaskState *ts = get_task_state(cpu);
8196     g_autoptr(GString) buf = g_string_new(NULL);
8197     int i;
8198 
8199     for (i = 0; i < 44; i++) {
8200         if (i == 0) {
8201             /* pid */
8202             g_string_printf(buf, FMT_pid " ", getpid());
8203         } else if (i == 1) {
8204             /* app name */
8205             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8206             bin = bin ? bin + 1 : ts->bprm->argv[0];
8207             g_string_printf(buf, "(%.15s) ", bin);
8208         } else if (i == 2) {
8209             /* task state */
8210             g_string_assign(buf, "R "); /* we are running right now */
8211         } else if (i == 3) {
8212             /* ppid */
8213             g_string_printf(buf, FMT_pid " ", getppid());
8214         } else if (i == 19) {
8215             /* num_threads */
8216             int cpus = 0;
8217             WITH_RCU_READ_LOCK_GUARD() {
8218                 CPUState *cpu_iter;
8219                 CPU_FOREACH(cpu_iter) {
8220                     cpus++;
8221                 }
8222             }
8223             g_string_printf(buf, "%d ", cpus);
8224         } else if (i == 21) {
8225             /* starttime */
8226             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8227         } else if (i == 27) {
8228             /* stack bottom */
8229             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8230         } else {
8231             /* the remaining fields are not emulated; report them as 0 */
8232             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8233         }
8234 
8235         if (write(fd, buf->str, buf->len) != buf->len) {
8236             return -1;
8237         }
8238     }
8239 
8240     return 0;
8241 }
8242 
8243 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8244 {
8245     CPUState *cpu = env_cpu(cpu_env);
8246     TaskState *ts = get_task_state(cpu);
8247     abi_ulong auxv = ts->info->saved_auxv;
8248     abi_ulong len = ts->info->auxv_len;
8249     char *ptr;
8250 
8251     /*
8252      * The auxiliary vector is stored on the target process stack.
8253      * Read in the whole auxv vector and copy it to the file.
8254      */
8255     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8256     if (ptr != NULL) {
8257         while (len > 0) {
8258             ssize_t r;
8259             r = write(fd, ptr, len);
8260             if (r <= 0) {
8261                 break;
8262             }
8263             len -= r;
8264             ptr += r;
8265         }
8266         lseek(fd, 0, SEEK_SET);
8267         unlock_user(ptr, auxv, len);
8268     }
8269 
8270     return 0;
8271 }
8272 
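/*
 * Return 1 if filename refers to /proc/self/<entry> or /proc/<pid>/<entry>
 * for our own pid, 0 otherwise.
 */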
8273 static int is_proc_myself(const char *filename, const char *entry)
8274 {
8275     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8276         filename += strlen("/proc/");
8277         if (!strncmp(filename, "self/", strlen("self/"))) {
8278             filename += strlen("self/");
8279         } else if (*filename >= '1' && *filename <= '9') {
8280             char myself[80];
8281             snprintf(myself, sizeof(myself), "%d/", getpid());
8282             if (!strncmp(filename, myself, strlen(myself))) {
8283                 filename += strlen(myself);
8284             } else {
8285                 return 0;
8286             }
8287         } else {
8288             return 0;
8289         }
8290         if (!strcmp(filename, entry)) {
8291             return 1;
8292         }
8293     }
8294     return 0;
8295 }
8296 
8297 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8298                       const char *fmt, int code)
8299 {
8300     if (logfile) {
8301         CPUState *cs = env_cpu(env);
8302 
8303         fprintf(logfile, fmt, code);
8304         fprintf(logfile, "Failing executable: %s\n", exec_path);
8305         cpu_dump_state(cs, logfile, 0);
8306         open_self_maps(env, fileno(logfile));
8307     }
8308 }
8309 
8310 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8311 {
8312     /* dump to console */
8313     excp_dump_file(stderr, env, fmt, code);
8314 
8315     /* dump to log file */
8316     if (qemu_log_separate()) {
8317         FILE *logfile = qemu_log_trylock();
8318 
8319         excp_dump_file(logfile, env, fmt, code);
8320         qemu_log_unlock(logfile);
8321     }
8322 }
8323 
8324 #include "target_proc.h"
8325 
8326 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8327     defined(HAVE_ARCH_PROC_CPUINFO) || \
8328     defined(HAVE_ARCH_PROC_HARDWARE)
8329 static int is_proc(const char *filename, const char *entry)
8330 {
8331     return strcmp(filename, entry) == 0;
8332 }
8333 #endif
8334 
8335 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8336 static int open_net_route(CPUArchState *cpu_env, int fd)
8337 {
8338     FILE *fp;
8339     char *line = NULL;
8340     size_t len = 0;
8341     ssize_t read;
8342 
8343     fp = fopen("/proc/net/route", "r");
8344     if (fp == NULL) {
8345         return -1;
8346     }
8347 
8348     /* read header */
8349 
8350     read = getline(&line, &len, fp);
8351     dprintf(fd, "%s", line);
8352 
8353     /* read routes */
8354 
8355     while ((read = getline(&line, &len, fp)) != -1) {
8356         char iface[16];
8357         uint32_t dest, gw, mask;
8358         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8359         int fields;
8360 
8361         fields = sscanf(line,
8362                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8363                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8364                         &mask, &mtu, &window, &irtt);
8365         if (fields != 11) {
8366             continue;
8367         }
8368         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8369                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8370                 metric, tswap32(mask), mtu, window, irtt);
8371     }
8372 
8373     free(line);
8374     fclose(fp);
8375 
8376     return 0;
8377 }
8378 #endif
8379 
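/*
 * Intercept opens of emulated files: /proc/self/exe is redirected to
 * QEMU's exec_path, and other known /proc entries are synthesized into a
 * memfd or temporary file.  Returns -2 if the path is not handled here,
 * otherwise an open fd or -1 with errno set.
 */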
8380 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8381                               const char *fname, int flags, mode_t mode,
8382                               int openat2_resolve, bool safe)
8383 {
8384     g_autofree char *proc_name = NULL;
8385     const char *pathname;
8386     struct fake_open {
8387         const char *filename;
8388         int (*fill)(CPUArchState *cpu_env, int fd);
8389         int (*cmp)(const char *s1, const char *s2);
8390     };
8391     const struct fake_open *fake_open;
8392     static const struct fake_open fakes[] = {
8393         { "maps", open_self_maps, is_proc_myself },
8394         { "smaps", open_self_smaps, is_proc_myself },
8395         { "stat", open_self_stat, is_proc_myself },
8396         { "auxv", open_self_auxv, is_proc_myself },
8397         { "cmdline", open_self_cmdline, is_proc_myself },
8398 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8399         { "/proc/net/route", open_net_route, is_proc },
8400 #endif
8401 #if defined(HAVE_ARCH_PROC_CPUINFO)
8402         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8403 #endif
8404 #if defined(HAVE_ARCH_PROC_HARDWARE)
8405         { "/proc/hardware", open_hardware, is_proc },
8406 #endif
8407         { NULL, NULL, NULL }
8408     };
8409 
8410     /* if this is a file from the /proc/ filesystem, expand to the full name */
8411     proc_name = realpath(fname, NULL);
8412     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8413         pathname = proc_name;
8414     } else {
8415         pathname = fname;
8416     }
8417 
8418     if (is_proc_myself(pathname, "exe")) {
8419         /* Honor openat2 resolve flags */
8420         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8421             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8422             errno = ELOOP;
8423             return -1;
8424         }
8425         if (safe) {
8426             return safe_openat(dirfd, exec_path, flags, mode);
8427         } else {
8428             return openat(dirfd, exec_path, flags, mode);
8429         }
8430     }
8431 
8432     for (fake_open = fakes; fake_open->filename; fake_open++) {
8433         if (fake_open->cmp(pathname, fake_open->filename)) {
8434             break;
8435         }
8436     }
8437 
8438     if (fake_open->filename) {
8439         const char *tmpdir;
8440         char filename[PATH_MAX];
8441         int fd, r;
8442 
8443         fd = memfd_create("qemu-open", 0);
8444         if (fd < 0) {
8445             if (errno != ENOSYS) {
8446                 return fd;
8447             }
8448             /* create a temporary file to hold the generated contents */
8449             tmpdir = getenv("TMPDIR");
8450             if (!tmpdir)
8451                 tmpdir = "/tmp";
8452             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8453             fd = mkstemp(filename);
8454             if (fd < 0) {
8455                 return fd;
8456             }
8457             unlink(filename);
8458         }
8459 
8460         if ((r = fake_open->fill(cpu_env, fd))) {
8461             int e = errno;
8462             close(fd);
8463             errno = e;
8464             return r;
8465         }
8466         lseek(fd, 0, SEEK_SET);
8467 
8468         return fd;
8469     }
8470 
8471     return -2;
8472 }
8473 
8474 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8475                     int flags, mode_t mode, bool safe)
8476 {
8477     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8478     if (fd > -2) {
8479         return fd;
8480     }
8481 
8482     if (safe) {
8483         return safe_openat(dirfd, path(pathname), flags, mode);
8484     } else {
8485         return openat(dirfd, path(pathname), flags, mode);
8486     }
8487 }
8488 
8489 
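/*
 * Implement openat2(2): copy in the guest's struct open_how, translate its
 * fields to host values, divert emulated /proc paths through
 * maybe_do_fake_open(), and pass everything else to safe_openat2().
 */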
8490 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8491                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8492                       abi_ulong guest_size)
8493 {
8494     struct open_how_ver0 how = {0};
8495     char *pathname;
8496     int ret;
8497 
8498     if (guest_size < sizeof(struct target_open_how_ver0)) {
8499         return -TARGET_EINVAL;
8500     }
8501     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8502     if (ret) {
8503         if (ret == -TARGET_E2BIG) {
8504             qemu_log_mask(LOG_UNIMP,
8505                           "Unimplemented openat2 open_how size: "
8506                           TARGET_ABI_FMT_lu "\n", guest_size);
8507         }
8508         return ret;
8509     }
8510     pathname = lock_user_string(guest_pathname);
8511     if (!pathname) {
8512         return -TARGET_EFAULT;
8513     }
8514 
8515     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8516     how.mode = tswap64(how.mode);
8517     how.resolve = tswap64(how.resolve);
8518     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8519                                 how.resolve, true);
8520     if (fd > -2) {
8521         ret = get_errno(fd);
8522     } else {
8523         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8524                                      sizeof(struct open_how_ver0)));
8525     }
8526 
8527     fd_trans_unregister(ret);
8528     unlock_user(pathname, guest_pathname, 0);
8529     return ret;
8530 }
8531 
8532 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
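/*
 * readlink(2) helper: report QEMU's exec_path for the magic /proc/self/exe
 * entry and forward everything else to the host.
 */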
8533 {
8534     ssize_t ret;
8535 
8536     if (!pathname || !buf) {
8537         errno = EFAULT;
8538         return -1;
8539     }
8540 
8541     if (!bufsiz) {
8542         /* Short circuit this for the magic exe check. */
8543         errno = EINVAL;
8544         return -1;
8545     }
8546 
8547     if (is_proc_myself((const char *)pathname, "exe")) {
8548         /*
8549          * Don't worry about sign mismatch as earlier mapping
8550          * logic would have thrown a bad address error.
8551          */
8552         ret = MIN(strlen(exec_path), bufsiz);
8553         /* We cannot NUL terminate the string. */
8554         memcpy(buf, exec_path, ret);
8555     } else {
8556         ret = readlink(path(pathname), buf, bufsiz);
8557     }
8558 
8559     return ret;
8560 }
8561 
8562 static int do_execv(CPUArchState *cpu_env, int dirfd,
8563                     abi_long pathname, abi_long guest_argp,
8564                     abi_long guest_envp, int flags, bool is_execveat)
8565 {
8566     int ret;
8567     char **argp, **envp;
8568     int argc, envc;
8569     abi_ulong gp;
8570     abi_ulong addr;
8571     char **q;
8572     void *p;
8573 
8574     argc = 0;
8575 
8576     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8577         if (get_user_ual(addr, gp)) {
8578             return -TARGET_EFAULT;
8579         }
8580         if (!addr) {
8581             break;
8582         }
8583         argc++;
8584     }
8585     envc = 0;
8586     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8587         if (get_user_ual(addr, gp)) {
8588             return -TARGET_EFAULT;
8589         }
8590         if (!addr) {
8591             break;
8592         }
8593         envc++;
8594     }
8595 
8596     argp = g_new0(char *, argc + 1);
8597     envp = g_new0(char *, envc + 1);
8598 
8599     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8600         if (get_user_ual(addr, gp)) {
8601             goto execve_efault;
8602         }
8603         if (!addr) {
8604             break;
8605         }
8606         *q = lock_user_string(addr);
8607         if (!*q) {
8608             goto execve_efault;
8609         }
8610     }
8611     *q = NULL;
8612 
8613     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8614         if (get_user_ual(addr, gp)) {
8615             goto execve_efault;
8616         }
8617         if (!addr) {
8618             break;
8619         }
8620         *q = lock_user_string(addr);
8621         if (!*q) {
8622             goto execve_efault;
8623         }
8624     }
8625     *q = NULL;
8626 
8627     /*
8628      * Although execve() is not an interruptible syscall it is
8629      * a special case where we must use the safe_syscall wrapper:
8630      * if we allow a signal to happen before we make the host
8631      * syscall then we will 'lose' it, because at the point of
8632      * execve the process leaves QEMU's control. So we use the
8633      * safe syscall wrapper to ensure that we either take the
8634      * signal as a guest signal, or else it does not happen
8635      * before the execve completes and makes it the other
8636      * program's problem.
8637      */
8638     p = lock_user_string(pathname);
8639     if (!p) {
8640         goto execve_efault;
8641     }
8642 
8643     const char *exe = p;
8644     if (is_proc_myself(p, "exe")) {
8645         exe = exec_path;
8646     }
8647     ret = is_execveat
8648         ? safe_execveat(dirfd, exe, argp, envp, flags)
8649         : safe_execve(exe, argp, envp);
8650     ret = get_errno(ret);
8651 
8652     unlock_user(p, pathname, 0);
8653 
8654     goto execve_end;
8655 
8656 execve_efault:
8657     ret = -TARGET_EFAULT;
8658 
8659 execve_end:
8660     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8661         if (get_user_ual(addr, gp) || !addr) {
8662             break;
8663         }
8664         unlock_user(*q, addr, 0);
8665     }
8666     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8667         if (get_user_ual(addr, gp) || !addr) {
8668             break;
8669         }
8670         unlock_user(*q, addr, 0);
8671     }
8672 
8673     g_free(argp);
8674     g_free(envp);
8675     return ret;
8676 }
8677 
8678 #define TIMER_MAGIC 0x0caf0000
8679 #define TIMER_MAGIC_MASK 0xffff0000
8680 
8681 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8682 static target_timer_t get_timer_id(abi_long arg)
8683 {
8684     target_timer_t timerid = arg;
8685 
8686     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8687         return -TARGET_EINVAL;
8688     }
8689 
8690     timerid &= 0xffff;
8691 
8692     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8693         return -TARGET_EINVAL;
8694     }
8695 
8696     return timerid;
8697 }
8698 
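/*
 * Copy a CPU affinity bitmap from guest memory into a host "unsigned long"
 * array, translating between guest and host word sizes one bit at a time.
 */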
8699 static int target_to_host_cpu_mask(unsigned long *host_mask,
8700                                    size_t host_size,
8701                                    abi_ulong target_addr,
8702                                    size_t target_size)
8703 {
8704     unsigned target_bits = sizeof(abi_ulong) * 8;
8705     unsigned host_bits = sizeof(*host_mask) * 8;
8706     abi_ulong *target_mask;
8707     unsigned i, j;
8708 
8709     assert(host_size >= target_size);
8710 
8711     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8712     if (!target_mask) {
8713         return -TARGET_EFAULT;
8714     }
8715     memset(host_mask, 0, host_size);
8716 
8717     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8718         unsigned bit = i * target_bits;
8719         abi_ulong val;
8720 
8721         __get_user(val, &target_mask[i]);
8722         for (j = 0; j < target_bits; j++, bit++) {
8723             if (val & (1UL << j)) {
8724                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8725             }
8726         }
8727     }
8728 
8729     unlock_user(target_mask, target_addr, 0);
8730     return 0;
8731 }
8732 
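/*
 * The reverse of target_to_host_cpu_mask(): copy a host CPU affinity
 * bitmap back out to guest memory.
 */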
8733 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8734                                    size_t host_size,
8735                                    abi_ulong target_addr,
8736                                    size_t target_size)
8737 {
8738     unsigned target_bits = sizeof(abi_ulong) * 8;
8739     unsigned host_bits = sizeof(*host_mask) * 8;
8740     abi_ulong *target_mask;
8741     unsigned i, j;
8742 
8743     assert(host_size >= target_size);
8744 
8745     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8746     if (!target_mask) {
8747         return -TARGET_EFAULT;
8748     }
8749 
8750     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8751         unsigned bit = i * target_bits;
8752         abi_ulong val = 0;
8753 
8754         for (j = 0; j < target_bits; j++, bit++) {
8755             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8756                 val |= 1UL << j;
8757             }
8758         }
8759         __put_user(val, &target_mask[i]);
8760     }
8761 
8762     unlock_user(target_mask, target_addr, target_size);
8763     return 0;
8764 }
8765 
8766 #ifdef TARGET_NR_getdents
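/*
 * Emulate getdents(2): read host directory entries into a bounce buffer
 * and repack them one record at a time into the guest dirent layout,
 * rewinding the directory if the guest buffer fills up early.
 */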
8767 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8768 {
8769     g_autofree void *hdirp = NULL;
8770     void *tdirp;
8771     int hlen, hoff, toff;
8772     int hreclen, treclen;
8773     off_t prev_diroff = 0;
8774 
8775     hdirp = g_try_malloc(count);
8776     if (!hdirp) {
8777         return -TARGET_ENOMEM;
8778     }
8779 
8780 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8781     hlen = sys_getdents(dirfd, hdirp, count);
8782 #else
8783     hlen = sys_getdents64(dirfd, hdirp, count);
8784 #endif
8785 
8786     hlen = get_errno(hlen);
8787     if (is_error(hlen)) {
8788         return hlen;
8789     }
8790 
8791     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8792     if (!tdirp) {
8793         return -TARGET_EFAULT;
8794     }
8795 
8796     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8797 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8798         struct linux_dirent *hde = hdirp + hoff;
8799 #else
8800         struct linux_dirent64 *hde = hdirp + hoff;
8801 #endif
8802         struct target_dirent *tde = tdirp + toff;
8803         int namelen;
8804         uint8_t type;
8805 
8806         namelen = strlen(hde->d_name);
8807         hreclen = hde->d_reclen;
8808         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8809         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8810 
8811         if (toff + treclen > count) {
8812             /*
8813              * If the host struct is smaller than the target struct, or
8814              * requires less alignment and thus packs into less space,
8815              * then the host can return more entries than we can pass
8816              * on to the guest.
8817              */
8818             if (toff == 0) {
8819                 toff = -TARGET_EINVAL; /* result buffer is too small */
8820                 break;
8821             }
8822             /*
8823              * Return what we have, resetting the file pointer to the
8824              * location of the first record not returned.
8825              */
8826             lseek(dirfd, prev_diroff, SEEK_SET);
8827             break;
8828         }
8829 
8830         prev_diroff = hde->d_off;
8831         tde->d_ino = tswapal(hde->d_ino);
8832         tde->d_off = tswapal(hde->d_off);
8833         tde->d_reclen = tswap16(treclen);
8834         memcpy(tde->d_name, hde->d_name, namelen + 1);
8835 
8836         /*
8837          * The getdents type is in what was formerly a padding byte at the
8838          * end of the structure.
8839          */
8840 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8841         type = *((uint8_t *)hde + hreclen - 1);
8842 #else
8843         type = hde->d_type;
8844 #endif
8845         *((uint8_t *)tde + treclen - 1) = type;
8846     }
8847 
8848     unlock_user(tdirp, arg2, toff);
8849     return toff;
8850 }
8851 #endif /* TARGET_NR_getdents */
8852 
8853 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
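/*
 * As do_getdents(), but producing struct target_dirent64 records for
 * getdents64(2).
 */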
8854 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8855 {
8856     g_autofree void *hdirp = NULL;
8857     void *tdirp;
8858     int hlen, hoff, toff;
8859     int hreclen, treclen;
8860     off_t prev_diroff = 0;
8861 
8862     hdirp = g_try_malloc(count);
8863     if (!hdirp) {
8864         return -TARGET_ENOMEM;
8865     }
8866 
8867     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8868     if (is_error(hlen)) {
8869         return hlen;
8870     }
8871 
8872     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8873     if (!tdirp) {
8874         return -TARGET_EFAULT;
8875     }
8876 
8877     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8878         struct linux_dirent64 *hde = hdirp + hoff;
8879         struct target_dirent64 *tde = tdirp + toff;
8880         int namelen;
8881 
8882         namelen = strlen(hde->d_name) + 1;
8883         hreclen = hde->d_reclen;
8884         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8885         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8886 
8887         if (toff + treclen > count) {
8888             /*
8889              * If the host struct is smaller than the target struct, or
8890              * requires less alignment and thus packs into less space,
8891              * then the host can return more entries than we can pass
8892              * on to the guest.
8893              */
8894             if (toff == 0) {
8895                 toff = -TARGET_EINVAL; /* result buffer is too small */
8896                 break;
8897             }
8898             /*
8899              * Return what we have, resetting the file pointer to the
8900              * location of the first record not returned.
8901              */
8902             lseek(dirfd, prev_diroff, SEEK_SET);
8903             break;
8904         }
8905 
8906         prev_diroff = hde->d_off;
8907         tde->d_ino = tswap64(hde->d_ino);
8908         tde->d_off = tswap64(hde->d_off);
8909         tde->d_reclen = tswap16(treclen);
8910         tde->d_type = hde->d_type;
8911         memcpy(tde->d_name, hde->d_name, namelen);
8912     }
8913 
8914     unlock_user(tdirp, arg2, toff);
8915     return toff;
8916 }
8917 #endif /* TARGET_NR_getdents64 */
8918 
8919 #if defined(TARGET_NR_riscv_hwprobe)
8920 
8921 #define RISCV_HWPROBE_KEY_MVENDORID     0
8922 #define RISCV_HWPROBE_KEY_MARCHID       1
8923 #define RISCV_HWPROBE_KEY_MIMPID        2
8924 
8925 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8926 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8927 
8928 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8929 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8930 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8931 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8932 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8933 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8934 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8935 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8936 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8937 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8938 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8939 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8940 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8941 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8942 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8943 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8944 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8945 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8946 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8947 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8948 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8949 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8950 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8951 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8952 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8953 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8954 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8955 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8956 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8957 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8958 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8959 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8960 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8961 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8962 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8963 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8964 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8965 
8966 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8967 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8968 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8969 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8970 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8971 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8972 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8973 
8974 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8975 
8976 struct riscv_hwprobe {
8977     abi_llong  key;
8978     abi_ullong value;
8979 };
8980 
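/*
 * Fill each guest riscv_hwprobe key/value pair from the emulated CPU's
 * configuration; unrecognised keys have their key field set to -1.
 */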
8981 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8982                                     struct riscv_hwprobe *pair,
8983                                     size_t pair_count)
8984 {
8985     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8986 
8987     for (; pair_count > 0; pair_count--, pair++) {
8988         abi_llong key;
8989         abi_ullong value;
8990         __put_user(0, &pair->value);
8991         __get_user(key, &pair->key);
8992         switch (key) {
8993         case RISCV_HWPROBE_KEY_MVENDORID:
8994             __put_user(cfg->mvendorid, &pair->value);
8995             break;
8996         case RISCV_HWPROBE_KEY_MARCHID:
8997             __put_user(cfg->marchid, &pair->value);
8998             break;
8999         case RISCV_HWPROBE_KEY_MIMPID:
9000             __put_user(cfg->mimpid, &pair->value);
9001             break;
9002         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9003             value = riscv_has_ext(env, RVI) &&
9004                     riscv_has_ext(env, RVM) &&
9005                     riscv_has_ext(env, RVA) ?
9006                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9007             __put_user(value, &pair->value);
9008             break;
9009         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9010             value = riscv_has_ext(env, RVF) &&
9011                     riscv_has_ext(env, RVD) ?
9012                     RISCV_HWPROBE_IMA_FD : 0;
9013             value |= riscv_has_ext(env, RVC) ?
9014                      RISCV_HWPROBE_IMA_C : 0;
9015             value |= riscv_has_ext(env, RVV) ?
9016                      RISCV_HWPROBE_IMA_V : 0;
9017             value |= cfg->ext_zba ?
9018                      RISCV_HWPROBE_EXT_ZBA : 0;
9019             value |= cfg->ext_zbb ?
9020                      RISCV_HWPROBE_EXT_ZBB : 0;
9021             value |= cfg->ext_zbs ?
9022                      RISCV_HWPROBE_EXT_ZBS : 0;
9023             value |= cfg->ext_zicboz ?
9024                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9025             value |= cfg->ext_zbc ?
9026                      RISCV_HWPROBE_EXT_ZBC : 0;
9027             value |= cfg->ext_zbkb ?
9028                      RISCV_HWPROBE_EXT_ZBKB : 0;
9029             value |= cfg->ext_zbkc ?
9030                      RISCV_HWPROBE_EXT_ZBKC : 0;
9031             value |= cfg->ext_zbkx ?
9032                      RISCV_HWPROBE_EXT_ZBKX : 0;
9033             value |= cfg->ext_zknd ?
9034                      RISCV_HWPROBE_EXT_ZKND : 0;
9035             value |= cfg->ext_zkne ?
9036                      RISCV_HWPROBE_EXT_ZKNE : 0;
9037             value |= cfg->ext_zknh ?
9038                      RISCV_HWPROBE_EXT_ZKNH : 0;
9039             value |= cfg->ext_zksed ?
9040                      RISCV_HWPROBE_EXT_ZKSED : 0;
9041             value |= cfg->ext_zksh ?
9042                      RISCV_HWPROBE_EXT_ZKSH : 0;
9043             value |= cfg->ext_zkt ?
9044                      RISCV_HWPROBE_EXT_ZKT : 0;
9045             value |= cfg->ext_zvbb ?
9046                      RISCV_HWPROBE_EXT_ZVBB : 0;
9047             value |= cfg->ext_zvbc ?
9048                      RISCV_HWPROBE_EXT_ZVBC : 0;
9049             value |= cfg->ext_zvkb ?
9050                      RISCV_HWPROBE_EXT_ZVKB : 0;
9051             value |= cfg->ext_zvkg ?
9052                      RISCV_HWPROBE_EXT_ZVKG : 0;
9053             value |= cfg->ext_zvkned ?
9054                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9055             value |= cfg->ext_zvknha ?
9056                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9057             value |= cfg->ext_zvknhb ?
9058                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9059             value |= cfg->ext_zvksed ?
9060                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9061             value |= cfg->ext_zvksh ?
9062                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9063             value |= cfg->ext_zvkt ?
9064                      RISCV_HWPROBE_EXT_ZVKT : 0;
9065             value |= cfg->ext_zfh ?
9066                      RISCV_HWPROBE_EXT_ZFH : 0;
9067             value |= cfg->ext_zfhmin ?
9068                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9069             value |= cfg->ext_zihintntl ?
9070                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9071             value |= cfg->ext_zvfh ?
9072                      RISCV_HWPROBE_EXT_ZVFH : 0;
9073             value |= cfg->ext_zvfhmin ?
9074                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9075             value |= cfg->ext_zfa ?
9076                      RISCV_HWPROBE_EXT_ZFA : 0;
9077             value |= cfg->ext_ztso ?
9078                      RISCV_HWPROBE_EXT_ZTSO : 0;
9079             value |= cfg->ext_zacas ?
9080                      RISCV_HWPROBE_EXT_ZACAS : 0;
9081             value |= cfg->ext_zicond ?
9082                      RISCV_HWPROBE_EXT_ZICOND : 0;
9083             __put_user(value, &pair->value);
9084             break;
9085         case RISCV_HWPROBE_KEY_CPUPERF_0:
9086             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9087             break;
9088         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9089             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9090             __put_user(value, &pair->value);
9091             break;
9092         default:
9093             __put_user(-1, &pair->key);
9094             break;
9095         }
9096     }
9097 }
9098 
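/*
 * Validate the guest's cpu set for riscv_hwprobe: succeed only if at
 * least one CPU bit is set in the mask.
 */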
9099 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9100 {
9101     int ret, i, tmp;
9102     size_t host_mask_size, target_mask_size;
9103     unsigned long *host_mask;
9104 
9105     /*
9106      * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
9107      * arg3 contains the cpu count.
9108      */
9109     tmp = (8 * sizeof(abi_ulong));
9110     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9111     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9112                      ~(sizeof(*host_mask) - 1);
9113 
9114     host_mask = alloca(host_mask_size);
9115 
9116     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9117                                   arg4, target_mask_size);
9118     if (ret != 0) {
9119         return ret;
9120     }
9121 
9122     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9123         if (host_mask[i] != 0) {
9124             return 0;
9125         }
9126     }
9127     return -TARGET_EINVAL;
9128 }
9129 
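/*
 * Implement the riscv_hwprobe syscall: reject non-zero flags, validate any
 * supplied cpu set, then fill the guest's pair array in place.
 */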
9130 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9131                                  abi_long arg2, abi_long arg3,
9132                                  abi_long arg4, abi_long arg5)
9133 {
9134     int ret;
9135     struct riscv_hwprobe *host_pairs;
9136 
9137     /* flags must be 0 */
9138     if (arg5 != 0) {
9139         return -TARGET_EINVAL;
9140     }
9141 
9142     /* check cpu_set */
9143     if (arg3 != 0) {
9144         ret = cpu_set_valid(arg3, arg4);
9145         if (ret != 0) {
9146             return ret;
9147         }
9148     } else if (arg4 != 0) {
9149         return -TARGET_EINVAL;
9150     }
9151 
9152     /* no pairs */
9153     if (arg2 == 0) {
9154         return 0;
9155     }
9156 
9157     host_pairs = lock_user(VERIFY_WRITE, arg1,
9158                            sizeof(*host_pairs) * (size_t)arg2, 0);
9159     if (host_pairs == NULL) {
9160         return -TARGET_EFAULT;
9161     }
9162     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9163     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9164     return 0;
9165 }
9166 #endif /* TARGET_NR_riscv_hwprobe */
9167 
9168 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9169 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9170 #endif
9171 
9172 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9173 #define __NR_sys_open_tree __NR_open_tree
9174 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9175           unsigned int, __flags)
9176 #endif
9177 
9178 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9179 #define __NR_sys_move_mount __NR_move_mount
9180 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9181            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9182 #endif
9183 
9184 /* This is an internal helper for do_syscall so that it is easier
9185  * to have a single return point, which in turn allows actions such as
9186  * logging of syscall results to be performed.
9187  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9188  */
9189 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9190                             abi_long arg2, abi_long arg3, abi_long arg4,
9191                             abi_long arg5, abi_long arg6, abi_long arg7,
9192                             abi_long arg8)
9193 {
9194     CPUState *cpu = env_cpu(cpu_env);
9195     abi_long ret;
9196 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9197     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9198     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9199     || defined(TARGET_NR_statx)
9200     struct stat st;
9201 #endif
9202 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9203     || defined(TARGET_NR_fstatfs)
9204     struct statfs stfs;
9205 #endif
9206     void *p;
9207 
9208     switch(num) {
9209     case TARGET_NR_exit:
9210         /* In old applications this may be used to implement _exit(2).
9211            However, in threaded applications it is used for thread termination,
9212            and _exit_group is used for application termination.
9213            Do thread termination if we have more than one thread.  */
9214 
9215         if (block_signals()) {
9216             return -QEMU_ERESTARTSYS;
9217         }
9218 
9219         pthread_mutex_lock(&clone_lock);
9220 
9221         if (CPU_NEXT(first_cpu)) {
9222             TaskState *ts = get_task_state(cpu);
9223 
9224             if (ts->child_tidptr) {
9225                 put_user_u32(0, ts->child_tidptr);
9226                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9227                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9228             }
9229 
9230             object_unparent(OBJECT(cpu));
9231             object_unref(OBJECT(cpu));
9232             /*
9233              * At this point the CPU should be unrealized and removed
9234              * from cpu lists. We can clean up the rest of the thread
9235              * data without the lock held.
9236              */
9237 
9238             pthread_mutex_unlock(&clone_lock);
9239 
9240             thread_cpu = NULL;
9241             g_free(ts);
9242             rcu_unregister_thread();
9243             pthread_exit(NULL);
9244         }
9245 
9246         pthread_mutex_unlock(&clone_lock);
9247         preexit_cleanup(cpu_env, arg1);
9248         _exit(arg1);
9249         return 0; /* avoid warning */
9250     case TARGET_NR_read:
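    /*
     * read/write: a zero-length transfer with a NULL buffer is forwarded to
     * the host as-is, since lock_user() would wrongly fail it with EFAULT
     * even though the kernel accepts it.  For fds with a registered fd_trans
     * translator (e.g. netlink sockets), the payload is additionally
     * converted between host and target representations.
     */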
9251         if (arg2 == 0 && arg3 == 0) {
9252             return get_errno(safe_read(arg1, 0, 0));
9253         } else {
9254             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9255                 return -TARGET_EFAULT;
9256             ret = get_errno(safe_read(arg1, p, arg3));
9257             if (ret >= 0 &&
9258                 fd_trans_host_to_target_data(arg1)) {
9259                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9260             }
9261             unlock_user(p, arg2, ret);
9262         }
9263         return ret;
9264     case TARGET_NR_write:
9265         if (arg2 == 0 && arg3 == 0) {
9266             return get_errno(safe_write(arg1, 0, 0));
9267         }
9268         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9269             return -TARGET_EFAULT;
9270         if (fd_trans_target_to_host_data(arg1)) {
9271             void *copy = g_malloc(arg3);
9272             memcpy(copy, p, arg3);
9273             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9274             if (ret >= 0) {
9275                 ret = get_errno(safe_write(arg1, copy, ret));
9276             }
9277             g_free(copy);
9278         } else {
9279             ret = get_errno(safe_write(arg1, p, arg3));
9280         }
9281         unlock_user(p, arg2, 0);
9282         return ret;
9283 
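    /*
     * open/openat go through do_guest_openat(), which lets QEMU intercept
     * special guest paths (for instance files under /proc/self/) and emulate
     * them.  fd_trans_unregister() drops any stale data translator that might
     * still be attached to the fd number the host just handed back.
     */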
9284 #ifdef TARGET_NR_open
9285     case TARGET_NR_open:
9286         if (!(p = lock_user_string(arg1)))
9287             return -TARGET_EFAULT;
9288         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9289                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9290                                   arg3, true));
9291         fd_trans_unregister(ret);
9292         unlock_user(p, arg1, 0);
9293         return ret;
9294 #endif
9295     case TARGET_NR_openat:
9296         if (!(p = lock_user_string(arg2)))
9297             return -TARGET_EFAULT;
9298         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9299                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9300                                   arg4, true));
9301         fd_trans_unregister(ret);
9302         unlock_user(p, arg2, 0);
9303         return ret;
9304     case TARGET_NR_openat2:
9305         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9306         return ret;
9307 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9308     case TARGET_NR_name_to_handle_at:
9309         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9310         return ret;
9311 #endif
9312 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9313     case TARGET_NR_open_by_handle_at:
9314         ret = do_open_by_handle_at(arg1, arg2, arg3);
9315         fd_trans_unregister(ret);
9316         return ret;
9317 #endif
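/*
 * The pidfd_* calls are passed through to the host essentially unchanged;
 * only pidfd_send_signal needs its signal number and optional siginfo_t
 * converted from the target layout below.
 */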
9318 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9319     case TARGET_NR_pidfd_open:
9320         return get_errno(pidfd_open(arg1, arg2));
9321 #endif
9322 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9323     case TARGET_NR_pidfd_send_signal:
9324         {
9325             siginfo_t uinfo, *puinfo;
9326 
9327             if (arg3) {
9328                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9329                 if (!p) {
9330                     return -TARGET_EFAULT;
9331                 }
9332                 target_to_host_siginfo(&uinfo, p);
9333                 unlock_user(p, arg3, 0);
9334                 puinfo = &uinfo;
9335             } else {
9336                 puinfo = NULL;
9337             }
9338             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9339                                               puinfo, arg4));
9340         }
9341         return ret;
9342 #endif
9343 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9344     case TARGET_NR_pidfd_getfd:
9345         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9346 #endif
9347     case TARGET_NR_close:
9348         fd_trans_unregister(arg1);
9349         return get_errno(close(arg1));
9350 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9351     case TARGET_NR_close_range:
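        /*
         * Forward to the host, then drop the fd translators for every fd that
         * was actually closed.  CLOSE_RANGE_CLOEXEC only flags the fds
         * close-on-exec without closing them, so the translators are kept in
         * that case.
         */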
9352         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9353         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9354             abi_long fd, maxfd;
9355             maxfd = MIN(arg2, target_fd_max);
9356             for (fd = arg1; fd < maxfd; fd++) {
9357                 fd_trans_unregister(fd);
9358             }
9359         }
9360         return ret;
9361 #endif
9362 
9363     case TARGET_NR_brk:
9364         return do_brk(arg1);
9365 #ifdef TARGET_NR_fork
9366     case TARGET_NR_fork:
9367         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9368 #endif
9369 #ifdef TARGET_NR_waitpid
9370     case TARGET_NR_waitpid:
9371         {
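            /*
             * The status word is only copied back when the caller supplied a
             * pointer and a child was actually reaped (ret != 0);
             * host_to_target_waitstatus() fixes up the signal number encoded
             * inside it.
             */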
9372             int status;
9373             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9374             if (!is_error(ret) && arg2 && ret
9375                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9376                 return -TARGET_EFAULT;
9377         }
9378         return ret;
9379 #endif
9380 #ifdef TARGET_NR_waitid
9381     case TARGET_NR_waitid:
9382         {
9383             struct rusage ru;
9384             siginfo_t info;
9385 
9386             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9387                                         arg4, (arg5 ? &ru : NULL)));
9388             if (!is_error(ret)) {
9389                 if (arg3) {
9390                     p = lock_user(VERIFY_WRITE, arg3,
9391                                   sizeof(target_siginfo_t), 0);
9392                     if (!p) {
9393                         return -TARGET_EFAULT;
9394                     }
9395                     host_to_target_siginfo(p, &info);
9396                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9397                 }
9398                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9399                     return -TARGET_EFAULT;
9400                 }
9401             }
9402         }
9403         return ret;
9404 #endif
9405 #ifdef TARGET_NR_creat /* not on alpha */
9406     case TARGET_NR_creat:
9407         if (!(p = lock_user_string(arg1)))
9408             return -TARGET_EFAULT;
9409         ret = get_errno(creat(p, arg2));
9410         fd_trans_unregister(ret);
9411         unlock_user(p, arg1, 0);
9412         return ret;
9413 #endif
9414 #ifdef TARGET_NR_link
9415     case TARGET_NR_link:
9416         {
9417             void * p2;
9418             p = lock_user_string(arg1);
9419             p2 = lock_user_string(arg2);
9420             if (!p || !p2)
9421                 ret = -TARGET_EFAULT;
9422             else
9423                 ret = get_errno(link(p, p2));
9424             unlock_user(p2, arg2, 0);
9425             unlock_user(p, arg1, 0);
9426         }
9427         return ret;
9428 #endif
9429 #if defined(TARGET_NR_linkat)
9430     case TARGET_NR_linkat:
9431         {
9432             void * p2 = NULL;
9433             if (!arg2 || !arg4)
9434                 return -TARGET_EFAULT;
9435             p  = lock_user_string(arg2);
9436             p2 = lock_user_string(arg4);
9437             if (!p || !p2)
9438                 ret = -TARGET_EFAULT;
9439             else
9440                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9441             unlock_user(p, arg2, 0);
9442             unlock_user(p2, arg4, 0);
9443         }
9444         return ret;
9445 #endif
9446 #ifdef TARGET_NR_unlink
9447     case TARGET_NR_unlink:
9448         if (!(p = lock_user_string(arg1)))
9449             return -TARGET_EFAULT;
9450         ret = get_errno(unlink(p));
9451         unlock_user(p, arg1, 0);
9452         return ret;
9453 #endif
9454 #if defined(TARGET_NR_unlinkat)
9455     case TARGET_NR_unlinkat:
9456         if (!(p = lock_user_string(arg2)))
9457             return -TARGET_EFAULT;
9458         ret = get_errno(unlinkat(arg1, p, arg3));
9459         unlock_user(p, arg2, 0);
9460         return ret;
9461 #endif
9462     case TARGET_NR_execveat:
9463         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9464     case TARGET_NR_execve:
9465         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9466     case TARGET_NR_chdir:
9467         if (!(p = lock_user_string(arg1)))
9468             return -TARGET_EFAULT;
9469         ret = get_errno(chdir(p));
9470         unlock_user(p, arg1, 0);
9471         return ret;
9472 #ifdef TARGET_NR_time
9473     case TARGET_NR_time:
9474         {
9475             time_t host_time;
9476             ret = get_errno(time(&host_time));
9477             if (!is_error(ret)
9478                 && arg1
9479                 && put_user_sal(host_time, arg1))
9480                 return -TARGET_EFAULT;
9481         }
9482         return ret;
9483 #endif
9484 #ifdef TARGET_NR_mknod
9485     case TARGET_NR_mknod:
9486         if (!(p = lock_user_string(arg1)))
9487             return -TARGET_EFAULT;
9488         ret = get_errno(mknod(p, arg2, arg3));
9489         unlock_user(p, arg1, 0);
9490         return ret;
9491 #endif
9492 #if defined(TARGET_NR_mknodat)
9493     case TARGET_NR_mknodat:
9494         if (!(p = lock_user_string(arg2)))
9495             return -TARGET_EFAULT;
9496         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9497         unlock_user(p, arg2, 0);
9498         return ret;
9499 #endif
9500 #ifdef TARGET_NR_chmod
9501     case TARGET_NR_chmod:
9502         if (!(p = lock_user_string(arg1)))
9503             return -TARGET_EFAULT;
9504         ret = get_errno(chmod(p, arg2));
9505         unlock_user(p, arg1, 0);
9506         return ret;
9507 #endif
9508 #ifdef TARGET_NR_lseek
9509     case TARGET_NR_lseek:
9510         return get_errno(lseek(arg1, arg2, arg3));
9511 #endif
9512 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9513     /* Alpha specific */
9514     case TARGET_NR_getxpid:
9515         cpu_env->ir[IR_A4] = getppid();
9516         return get_errno(getpid());
9517 #endif
9518 #ifdef TARGET_NR_getpid
9519     case TARGET_NR_getpid:
9520         return get_errno(getpid());
9521 #endif
9522     case TARGET_NR_mount:
9523         {
9524             /* need to look at the data field */
9525             void *p2, *p3;
9526 
9527             if (arg1) {
9528                 p = lock_user_string(arg1);
9529                 if (!p) {
9530                     return -TARGET_EFAULT;
9531                 }
9532             } else {
9533                 p = NULL;
9534             }
9535 
9536             p2 = lock_user_string(arg2);
9537             if (!p2) {
9538                 if (arg1) {
9539                     unlock_user(p, arg1, 0);
9540                 }
9541                 return -TARGET_EFAULT;
9542             }
9543 
9544             if (arg3) {
9545                 p3 = lock_user_string(arg3);
9546                 if (!p3) {
9547                     if (arg1) {
9548                         unlock_user(p, arg1, 0);
9549                     }
9550                     unlock_user(p2, arg2, 0);
9551                     return -TARGET_EFAULT;
9552                 }
9553             } else {
9554                 p3 = NULL;
9555             }
9556 
9557             /* FIXME - arg5 should be locked, but it isn't clear how to
9558              * do that since it's not guaranteed to be a NULL-terminated
9559              * string.
9560              */
9561             if (!arg5) {
9562                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9563             } else {
9564                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9565             }
9566             ret = get_errno(ret);
9567 
9568             if (arg1) {
9569                 unlock_user(p, arg1, 0);
9570             }
9571             unlock_user(p2, arg2, 0);
9572             if (arg3) {
9573                 unlock_user(p3, arg3, 0);
9574             }
9575         }
9576         return ret;
9577 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9578 #if defined(TARGET_NR_umount)
9579     case TARGET_NR_umount:
9580 #endif
9581 #if defined(TARGET_NR_oldumount)
9582     case TARGET_NR_oldumount:
9583 #endif
9584         if (!(p = lock_user_string(arg1)))
9585             return -TARGET_EFAULT;
9586         ret = get_errno(umount(p));
9587         unlock_user(p, arg1, 0);
9588         return ret;
9589 #endif
9590 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9591     case TARGET_NR_move_mount:
9592         {
9593             void *p2, *p4;
9594 
9595             if (!arg2 || !arg4) {
9596                 return -TARGET_EFAULT;
9597             }
9598 
9599             p2 = lock_user_string(arg2);
9600             if (!p2) {
9601                 return -TARGET_EFAULT;
9602             }
9603 
9604             p4 = lock_user_string(arg4);
9605             if (!p4) {
9606                 unlock_user(p2, arg2, 0);
9607                 return -TARGET_EFAULT;
9608             }
9609             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9610 
9611             unlock_user(p2, arg2, 0);
9612             unlock_user(p4, arg4, 0);
9613 
9614             return ret;
9615         }
9616 #endif
9617 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9618     case TARGET_NR_open_tree:
9619         {
9620             void *p2;
9621             int host_flags;
9622 
9623             if (!arg2) {
9624                 return -TARGET_EFAULT;
9625             }
9626 
9627             p2 = lock_user_string(arg2);
9628             if (!p2) {
9629                 return -TARGET_EFAULT;
9630             }
9631 
9632             host_flags = arg3 & ~TARGET_O_CLOEXEC;
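            /*
             * Only the close-on-exec bit needs translating here; the other
             * open_tree()/AT_* flag bits used by this call have the same
             * values on target and host and are passed through unchanged.
             */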
9633             if (arg3 & TARGET_O_CLOEXEC) {
9634                 host_flags |= O_CLOEXEC;
9635             }
9636 
9637             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9638 
9639             unlock_user(p2, arg2, 0);
9640 
9641             return ret;
9642         }
9643 #endif
9644 #ifdef TARGET_NR_stime /* not on alpha */
9645     case TARGET_NR_stime:
9646         {
9647             struct timespec ts;
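            /*
             * Modern glibc no longer exports stime(), so the guest's time_t
             * is applied with clock_settime(CLOCK_REALTIME) and a zero
             * nanosecond component.
             */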
9648             ts.tv_nsec = 0;
9649             if (get_user_sal(ts.tv_sec, arg1)) {
9650                 return -TARGET_EFAULT;
9651             }
9652             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9653         }
9654 #endif
9655 #ifdef TARGET_NR_alarm /* not on alpha */
9656     case TARGET_NR_alarm:
9657         return alarm(arg1);
9658 #endif
9659 #ifdef TARGET_NR_pause /* not on alpha */
9660     case TARGET_NR_pause:
9661         if (!block_signals()) {
9662             sigsuspend(&get_task_state(cpu)->signal_mask);
9663         }
9664         return -TARGET_EINTR;
9665 #endif
9666 #ifdef TARGET_NR_utime
9667     case TARGET_NR_utime:
9668         {
9669             struct utimbuf tbuf, *host_tbuf;
9670             struct target_utimbuf *target_tbuf;
9671             if (arg2) {
9672                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9673                     return -TARGET_EFAULT;
9674                 tbuf.actime = tswapal(target_tbuf->actime);
9675                 tbuf.modtime = tswapal(target_tbuf->modtime);
9676                 unlock_user_struct(target_tbuf, arg2, 0);
9677                 host_tbuf = &tbuf;
9678             } else {
9679                 host_tbuf = NULL;
9680             }
9681             if (!(p = lock_user_string(arg1)))
9682                 return -TARGET_EFAULT;
9683             ret = get_errno(utime(p, host_tbuf));
9684             unlock_user(p, arg1, 0);
9685         }
9686         return ret;
9687 #endif
9688 #ifdef TARGET_NR_utimes
9689     case TARGET_NR_utimes:
9690         {
9691             struct timeval *tvp, tv[2];
9692             if (arg2) {
9693                 if (copy_from_user_timeval(&tv[0], arg2)
9694                     || copy_from_user_timeval(&tv[1],
9695                                               arg2 + sizeof(struct target_timeval)))
9696                     return -TARGET_EFAULT;
9697                 tvp = tv;
9698             } else {
9699                 tvp = NULL;
9700             }
9701             if (!(p = lock_user_string(arg1)))
9702                 return -TARGET_EFAULT;
9703             ret = get_errno(utimes(p, tvp));
9704             unlock_user(p, arg1, 0);
9705         }
9706         return ret;
9707 #endif
9708 #if defined(TARGET_NR_futimesat)
9709     case TARGET_NR_futimesat:
9710         {
9711             struct timeval *tvp, tv[2];
9712             if (arg3) {
9713                 if (copy_from_user_timeval(&tv[0], arg3)
9714                     || copy_from_user_timeval(&tv[1],
9715                                               arg3 + sizeof(struct target_timeval)))
9716                     return -TARGET_EFAULT;
9717                 tvp = tv;
9718             } else {
9719                 tvp = NULL;
9720             }
9721             if (!(p = lock_user_string(arg2))) {
9722                 return -TARGET_EFAULT;
9723             }
9724             ret = get_errno(futimesat(arg1, path(p), tvp));
9725             unlock_user(p, arg2, 0);
9726         }
9727         return ret;
9728 #endif
9729 #ifdef TARGET_NR_access
9730     case TARGET_NR_access:
9731         if (!(p = lock_user_string(arg1))) {
9732             return -TARGET_EFAULT;
9733         }
9734         ret = get_errno(access(path(p), arg2));
9735         unlock_user(p, arg1, 0);
9736         return ret;
9737 #endif
9738 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9739     case TARGET_NR_faccessat:
9740         if (!(p = lock_user_string(arg2))) {
9741             return -TARGET_EFAULT;
9742         }
9743         ret = get_errno(faccessat(arg1, p, arg3, 0));
9744         unlock_user(p, arg2, 0);
9745         return ret;
9746 #endif
9747 #if defined(TARGET_NR_faccessat2)
9748     case TARGET_NR_faccessat2:
9749         if (!(p = lock_user_string(arg2))) {
9750             return -TARGET_EFAULT;
9751         }
9752         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9753         unlock_user(p, arg2, 0);
9754         return ret;
9755 #endif
9756 #ifdef TARGET_NR_nice /* not on alpha */
9757     case TARGET_NR_nice:
9758         return get_errno(nice(arg1));
9759 #endif
9760     case TARGET_NR_sync:
9761         sync();
9762         return 0;
9763 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9764     case TARGET_NR_syncfs:
9765         return get_errno(syncfs(arg1));
9766 #endif
9767     case TARGET_NR_kill:
9768         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9769 #ifdef TARGET_NR_rename
9770     case TARGET_NR_rename:
9771         {
9772             void *p2;
9773             p = lock_user_string(arg1);
9774             p2 = lock_user_string(arg2);
9775             if (!p || !p2)
9776                 ret = -TARGET_EFAULT;
9777             else
9778                 ret = get_errno(rename(p, p2));
9779             unlock_user(p2, arg2, 0);
9780             unlock_user(p, arg1, 0);
9781         }
9782         return ret;
9783 #endif
9784 #if defined(TARGET_NR_renameat)
9785     case TARGET_NR_renameat:
9786         {
9787             void *p2;
9788             p  = lock_user_string(arg2);
9789             p2 = lock_user_string(arg4);
9790             if (!p || !p2)
9791                 ret = -TARGET_EFAULT;
9792             else
9793                 ret = get_errno(renameat(arg1, p, arg3, p2));
9794             unlock_user(p2, arg4, 0);
9795             unlock_user(p, arg2, 0);
9796         }
9797         return ret;
9798 #endif
9799 #if defined(TARGET_NR_renameat2)
9800     case TARGET_NR_renameat2:
9801         {
9802             void *p2;
9803             p  = lock_user_string(arg2);
9804             p2 = lock_user_string(arg4);
9805             if (!p || !p2) {
9806                 ret = -TARGET_EFAULT;
9807             } else {
9808                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9809             }
9810             unlock_user(p2, arg4, 0);
9811             unlock_user(p, arg2, 0);
9812         }
9813         return ret;
9814 #endif
9815 #ifdef TARGET_NR_mkdir
9816     case TARGET_NR_mkdir:
9817         if (!(p = lock_user_string(arg1)))
9818             return -TARGET_EFAULT;
9819         ret = get_errno(mkdir(p, arg2));
9820         unlock_user(p, arg1, 0);
9821         return ret;
9822 #endif
9823 #if defined(TARGET_NR_mkdirat)
9824     case TARGET_NR_mkdirat:
9825         if (!(p = lock_user_string(arg2)))
9826             return -TARGET_EFAULT;
9827         ret = get_errno(mkdirat(arg1, p, arg3));
9828         unlock_user(p, arg2, 0);
9829         return ret;
9830 #endif
9831 #ifdef TARGET_NR_rmdir
9832     case TARGET_NR_rmdir:
9833         if (!(p = lock_user_string(arg1)))
9834             return -TARGET_EFAULT;
9835         ret = get_errno(rmdir(p));
9836         unlock_user(p, arg1, 0);
9837         return ret;
9838 #endif
9839     case TARGET_NR_dup:
9840         ret = get_errno(dup(arg1));
9841         if (ret >= 0) {
9842             fd_trans_dup(arg1, ret);
9843         }
9844         return ret;
9845 #ifdef TARGET_NR_pipe
9846     case TARGET_NR_pipe:
9847         return do_pipe(cpu_env, arg1, 0, 0);
9848 #endif
9849 #ifdef TARGET_NR_pipe2
9850     case TARGET_NR_pipe2:
9851         return do_pipe(cpu_env, arg1,
9852                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9853 #endif
9854     case TARGET_NR_times:
9855         {
9856             struct target_tms *tmsp;
9857             struct tms tms;
9858             ret = get_errno(times(&tms));
9859             if (arg1) {
9860                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9861                 if (!tmsp)
9862                     return -TARGET_EFAULT;
9863                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9864                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9865                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9866                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9867             }
9868             if (!is_error(ret))
9869                 ret = host_to_target_clock_t(ret);
9870         }
9871         return ret;
9872     case TARGET_NR_acct:
9873         if (arg1 == 0) {
9874             ret = get_errno(acct(NULL));
9875         } else {
9876             if (!(p = lock_user_string(arg1))) {
9877                 return -TARGET_EFAULT;
9878             }
9879             ret = get_errno(acct(path(p)));
9880             unlock_user(p, arg1, 0);
9881         }
9882         return ret;
9883 #ifdef TARGET_NR_umount2
9884     case TARGET_NR_umount2:
9885         if (!(p = lock_user_string(arg1)))
9886             return -TARGET_EFAULT;
9887         ret = get_errno(umount2(p, arg2));
9888         unlock_user(p, arg1, 0);
9889         return ret;
9890 #endif
9891     case TARGET_NR_ioctl:
9892         return do_ioctl(arg1, arg2, arg3);
9893 #ifdef TARGET_NR_fcntl
9894     case TARGET_NR_fcntl:
9895         return do_fcntl(arg1, arg2, arg3);
9896 #endif
9897     case TARGET_NR_setpgid:
9898         return get_errno(setpgid(arg1, arg2));
9899     case TARGET_NR_umask:
9900         return get_errno(umask(arg1));
9901     case TARGET_NR_chroot:
9902         if (!(p = lock_user_string(arg1)))
9903             return -TARGET_EFAULT;
9904         ret = get_errno(chroot(p));
9905         unlock_user(p, arg1, 0);
9906         return ret;
9907 #ifdef TARGET_NR_dup2
9908     case TARGET_NR_dup2:
9909         ret = get_errno(dup2(arg1, arg2));
9910         if (ret >= 0) {
9911             fd_trans_dup(arg1, arg2);
9912         }
9913         return ret;
9914 #endif
9915 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9916     case TARGET_NR_dup3:
9917     {
9918         int host_flags;
9919 
9920         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9921             return -TARGET_EINVAL;
9922         }
9923         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9924         ret = get_errno(dup3(arg1, arg2, host_flags));
9925         if (ret >= 0) {
9926             fd_trans_dup(arg1, arg2);
9927         }
9928         return ret;
9929     }
9930 #endif
9931 #ifdef TARGET_NR_getppid /* not on alpha */
9932     case TARGET_NR_getppid:
9933         return get_errno(getppid());
9934 #endif
9935 #ifdef TARGET_NR_getpgrp
9936     case TARGET_NR_getpgrp:
9937         return get_errno(getpgrp());
9938 #endif
9939     case TARGET_NR_setsid:
9940         return get_errno(setsid());
9941 #ifdef TARGET_NR_sigaction
9942     case TARGET_NR_sigaction:
9943         {
9944 #if defined(TARGET_MIPS)
9945             struct target_sigaction act, oact, *pact, *old_act;
9946 
9947             if (arg2) {
9948                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9949                     return -TARGET_EFAULT;
9950                 act._sa_handler = old_act->_sa_handler;
9951                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9952                 act.sa_flags = old_act->sa_flags;
9953                 unlock_user_struct(old_act, arg2, 0);
9954                 pact = &act;
9955             } else {
9956                 pact = NULL;
9957             }
9958 
9959             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9960 
9961             if (!is_error(ret) && arg3) {
9962                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9963                     return -TARGET_EFAULT;
9964                 old_act->_sa_handler = oact._sa_handler;
9965                 old_act->sa_flags = oact.sa_flags;
9966                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9967                 old_act->sa_mask.sig[1] = 0;
9968                 old_act->sa_mask.sig[2] = 0;
9969                 old_act->sa_mask.sig[3] = 0;
9970                 unlock_user_struct(old_act, arg3, 1);
9971             }
9972 #else
9973             struct target_old_sigaction *old_act;
9974             struct target_sigaction act, oact, *pact;
9975             if (arg2) {
9976                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9977                     return -TARGET_EFAULT;
9978                 act._sa_handler = old_act->_sa_handler;
9979                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9980                 act.sa_flags = old_act->sa_flags;
9981 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9982                 act.sa_restorer = old_act->sa_restorer;
9983 #endif
9984                 unlock_user_struct(old_act, arg2, 0);
9985                 pact = &act;
9986             } else {
9987                 pact = NULL;
9988             }
9989             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9990             if (!is_error(ret) && arg3) {
9991                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9992                     return -TARGET_EFAULT;
9993                 old_act->_sa_handler = oact._sa_handler;
9994                 old_act->sa_mask = oact.sa_mask.sig[0];
9995                 old_act->sa_flags = oact.sa_flags;
9996 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9997                 old_act->sa_restorer = oact.sa_restorer;
9998 #endif
9999                 unlock_user_struct(old_act, arg3, 1);
10000             }
10001 #endif
10002         }
10003         return ret;
10004 #endif
10005     case TARGET_NR_rt_sigaction:
10006         {
10007             /*
10008              * For Alpha and SPARC this is a 5 argument syscall, with
10009              * a 'restorer' parameter which must be copied into the
10010              * sa_restorer field of the sigaction struct.
10011              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10012              * and arg5 is the sigsetsize.
10013              */
10014 #if defined(TARGET_ALPHA)
10015             target_ulong sigsetsize = arg4;
10016             target_ulong restorer = arg5;
10017 #elif defined(TARGET_SPARC)
10018             target_ulong restorer = arg4;
10019             target_ulong sigsetsize = arg5;
10020 #else
10021             target_ulong sigsetsize = arg4;
10022             target_ulong restorer = 0;
10023 #endif
10024             struct target_sigaction *act = NULL;
10025             struct target_sigaction *oact = NULL;
10026 
10027             if (sigsetsize != sizeof(target_sigset_t)) {
10028                 return -TARGET_EINVAL;
10029             }
10030             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10031                 return -TARGET_EFAULT;
10032             }
10033             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10034                 ret = -TARGET_EFAULT;
10035             } else {
10036                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10037                 if (oact) {
10038                     unlock_user_struct(oact, arg3, 1);
10039                 }
10040             }
10041             if (act) {
10042                 unlock_user_struct(act, arg2, 0);
10043             }
10044         }
10045         return ret;
10046 #ifdef TARGET_NR_sgetmask /* not on alpha */
10047     case TARGET_NR_sgetmask:
10048         {
10049             sigset_t cur_set;
10050             abi_ulong target_set;
10051             ret = do_sigprocmask(0, NULL, &cur_set);
10052             if (!ret) {
10053                 host_to_target_old_sigset(&target_set, &cur_set);
10054                 ret = target_set;
10055             }
10056         }
10057         return ret;
10058 #endif
10059 #ifdef TARGET_NR_ssetmask /* not on alpha */
10060     case TARGET_NR_ssetmask:
10061         {
10062             sigset_t set, oset;
10063             abi_ulong target_set = arg1;
10064             target_to_host_old_sigset(&set, &target_set);
10065             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10066             if (!ret) {
10067                 host_to_target_old_sigset(&target_set, &oset);
10068                 ret = target_set;
10069             }
10070         }
10071         return ret;
10072 #endif
10073 #ifdef TARGET_NR_sigprocmask
10074     case TARGET_NR_sigprocmask:
10075         {
10076 #if defined(TARGET_ALPHA)
10077             sigset_t set, oldset;
10078             abi_ulong mask;
10079             int how;
10080 
10081             switch (arg1) {
10082             case TARGET_SIG_BLOCK:
10083                 how = SIG_BLOCK;
10084                 break;
10085             case TARGET_SIG_UNBLOCK:
10086                 how = SIG_UNBLOCK;
10087                 break;
10088             case TARGET_SIG_SETMASK:
10089                 how = SIG_SETMASK;
10090                 break;
10091             default:
10092                 return -TARGET_EINVAL;
10093             }
10094             mask = arg2;
10095             target_to_host_old_sigset(&set, &mask);
10096 
10097             ret = do_sigprocmask(how, &set, &oldset);
10098             if (!is_error(ret)) {
10099                 host_to_target_old_sigset(&mask, &oldset);
10100                 ret = mask;
10101                 cpu_env->ir[IR_V0] = 0; /* force no error */
10102             }
10103 #else
10104             sigset_t set, oldset, *set_ptr;
10105             int how;
10106 
10107             if (arg2) {
10108                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10109                 if (!p) {
10110                     return -TARGET_EFAULT;
10111                 }
10112                 target_to_host_old_sigset(&set, p);
10113                 unlock_user(p, arg2, 0);
10114                 set_ptr = &set;
10115                 switch (arg1) {
10116                 case TARGET_SIG_BLOCK:
10117                     how = SIG_BLOCK;
10118                     break;
10119                 case TARGET_SIG_UNBLOCK:
10120                     how = SIG_UNBLOCK;
10121                     break;
10122                 case TARGET_SIG_SETMASK:
10123                     how = SIG_SETMASK;
10124                     break;
10125                 default:
10126                     return -TARGET_EINVAL;
10127                 }
10128             } else {
10129                 how = 0;
10130                 set_ptr = NULL;
10131             }
10132             ret = do_sigprocmask(how, set_ptr, &oldset);
10133             if (!is_error(ret) && arg3) {
10134                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10135                     return -TARGET_EFAULT;
10136                 host_to_target_old_sigset(p, &oldset);
10137                 unlock_user(p, arg3, sizeof(target_sigset_t));
10138             }
10139 #endif
10140         }
10141         return ret;
10142 #endif
10143     case TARGET_NR_rt_sigprocmask:
10144         {
10145             int how = arg1;
10146             sigset_t set, oldset, *set_ptr;
10147 
10148             if (arg4 != sizeof(target_sigset_t)) {
10149                 return -TARGET_EINVAL;
10150             }
10151 
10152             if (arg2) {
10153                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10154                 if (!p) {
10155                     return -TARGET_EFAULT;
10156                 }
10157                 target_to_host_sigset(&set, p);
10158                 unlock_user(p, arg2, 0);
10159                 set_ptr = &set;
10160                 switch(how) {
10161                 case TARGET_SIG_BLOCK:
10162                     how = SIG_BLOCK;
10163                     break;
10164                 case TARGET_SIG_UNBLOCK:
10165                     how = SIG_UNBLOCK;
10166                     break;
10167                 case TARGET_SIG_SETMASK:
10168                     how = SIG_SETMASK;
10169                     break;
10170                 default:
10171                     return -TARGET_EINVAL;
10172                 }
10173             } else {
10174                 how = 0;
10175                 set_ptr = NULL;
10176             }
10177             ret = do_sigprocmask(how, set_ptr, &oldset);
10178             if (!is_error(ret) && arg3) {
10179                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10180                     return -TARGET_EFAULT;
10181                 host_to_target_sigset(p, &oldset);
10182                 unlock_user(p, arg3, sizeof(target_sigset_t));
10183             }
10184         }
10185         return ret;
10186 #ifdef TARGET_NR_sigpending
10187     case TARGET_NR_sigpending:
10188         {
10189             sigset_t set;
10190             ret = get_errno(sigpending(&set));
10191             if (!is_error(ret)) {
10192                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10193                     return -TARGET_EFAULT;
10194                 host_to_target_old_sigset(p, &set);
10195                 unlock_user(p, arg1, sizeof(target_sigset_t));
10196             }
10197         }
10198         return ret;
10199 #endif
10200     case TARGET_NR_rt_sigpending:
10201         {
10202             sigset_t set;
10203 
10204             /* Yes, this check is >, not != like most. We follow the
10205              * kernel's logic here: it implements NR_sigpending through the
10206              * same code path, and in that case the old_sigset_t is smaller
10207              * in size.
10208              */
10209             if (arg2 > sizeof(target_sigset_t)) {
10210                 return -TARGET_EINVAL;
10211             }
10212 
10213             ret = get_errno(sigpending(&set));
10214             if (!is_error(ret)) {
10215                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10216                     return -TARGET_EFAULT;
10217                 host_to_target_sigset(p, &set);
10218                 unlock_user(p, arg1, sizeof(target_sigset_t));
10219             }
10220         }
10221         return ret;
10222 #ifdef TARGET_NR_sigsuspend
10223     case TARGET_NR_sigsuspend:
10224         {
10225             sigset_t *set;
10226 
10227 #if defined(TARGET_ALPHA)
10228             TaskState *ts = get_task_state(cpu);
10229             /* target_to_host_old_sigset will bswap back */
10230             abi_ulong mask = tswapal(arg1);
10231             set = &ts->sigsuspend_mask;
10232             target_to_host_old_sigset(set, &mask);
10233 #else
10234             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10235             if (ret != 0) {
10236                 return ret;
10237             }
10238 #endif
10239             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10240             finish_sigsuspend_mask(ret);
10241         }
10242         return ret;
10243 #endif
10244     case TARGET_NR_rt_sigsuspend:
10245         {
10246             sigset_t *set;
10247 
10248             ret = process_sigsuspend_mask(&set, arg1, arg2);
10249             if (ret != 0) {
10250                 return ret;
10251             }
10252             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10253             finish_sigsuspend_mask(ret);
10254         }
10255         return ret;
10256 #ifdef TARGET_NR_rt_sigtimedwait
10257     case TARGET_NR_rt_sigtimedwait:
10258         {
10259             sigset_t set;
10260             struct timespec uts, *puts;
10261             siginfo_t uinfo;
10262 
10263             if (arg4 != sizeof(target_sigset_t)) {
10264                 return -TARGET_EINVAL;
10265             }
10266 
10267             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10268                 return -TARGET_EFAULT;
10269             target_to_host_sigset(&set, p);
10270             unlock_user(p, arg1, 0);
10271             if (arg3) {
10272                 puts = &uts;
10273                 if (target_to_host_timespec(puts, arg3)) {
10274                     return -TARGET_EFAULT;
10275                 }
10276             } else {
10277                 puts = NULL;
10278             }
10279             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10280                                                  SIGSET_T_SIZE));
10281             if (!is_error(ret)) {
10282                 if (arg2) {
10283                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10284                                   0);
10285                     if (!p) {
10286                         return -TARGET_EFAULT;
10287                     }
10288                     host_to_target_siginfo(p, &uinfo);
10289                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10290                 }
10291                 ret = host_to_target_signal(ret);
10292             }
10293         }
10294         return ret;
10295 #endif
10296 #ifdef TARGET_NR_rt_sigtimedwait_time64
10297     case TARGET_NR_rt_sigtimedwait_time64:
10298         {
10299             sigset_t set;
10300             struct timespec uts, *puts;
10301             siginfo_t uinfo;
10302 
10303             if (arg4 != sizeof(target_sigset_t)) {
10304                 return -TARGET_EINVAL;
10305             }
10306 
10307             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10308             if (!p) {
10309                 return -TARGET_EFAULT;
10310             }
10311             target_to_host_sigset(&set, p);
10312             unlock_user(p, arg1, 0);
10313             if (arg3) {
10314                 puts = &uts;
10315                 if (target_to_host_timespec64(puts, arg3)) {
10316                     return -TARGET_EFAULT;
10317                 }
10318             } else {
10319                 puts = NULL;
10320             }
10321             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10322                                                  SIGSET_T_SIZE));
10323             if (!is_error(ret)) {
10324                 if (arg2) {
10325                     p = lock_user(VERIFY_WRITE, arg2,
10326                                   sizeof(target_siginfo_t), 0);
10327                     if (!p) {
10328                         return -TARGET_EFAULT;
10329                     }
10330                     host_to_target_siginfo(p, &uinfo);
10331                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10332                 }
10333                 ret = host_to_target_signal(ret);
10334             }
10335         }
10336         return ret;
10337 #endif
10338     case TARGET_NR_rt_sigqueueinfo:
10339         {
10340             siginfo_t uinfo;
10341 
10342             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10343             if (!p) {
10344                 return -TARGET_EFAULT;
10345             }
10346             target_to_host_siginfo(&uinfo, p);
10347             unlock_user(p, arg3, 0);
10348             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10349         }
10350         return ret;
10351     case TARGET_NR_rt_tgsigqueueinfo:
10352         {
10353             siginfo_t uinfo;
10354 
10355             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10356             if (!p) {
10357                 return -TARGET_EFAULT;
10358             }
10359             target_to_host_siginfo(&uinfo, p);
10360             unlock_user(p, arg4, 0);
10361             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10362         }
10363         return ret;
10364 #ifdef TARGET_NR_sigreturn
10365     case TARGET_NR_sigreturn:
10366         if (block_signals()) {
10367             return -QEMU_ERESTARTSYS;
10368         }
10369         return do_sigreturn(cpu_env);
10370 #endif
10371     case TARGET_NR_rt_sigreturn:
10372         if (block_signals()) {
10373             return -QEMU_ERESTARTSYS;
10374         }
10375         return do_rt_sigreturn(cpu_env);
10376     case TARGET_NR_sethostname:
10377         if (!(p = lock_user_string(arg1)))
10378             return -TARGET_EFAULT;
10379         ret = get_errno(sethostname(p, arg2));
10380         unlock_user(p, arg1, 0);
10381         return ret;
10382 #ifdef TARGET_NR_setrlimit
10383     case TARGET_NR_setrlimit:
10384         {
10385             int resource = target_to_host_resource(arg1);
10386             struct target_rlimit *target_rlim;
10387             struct rlimit rlim;
10388             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10389                 return -TARGET_EFAULT;
10390             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10391             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10392             unlock_user_struct(target_rlim, arg2, 0);
10393             /*
10394              * If we just passed through resource limit settings for memory then
10395              * they would also apply to QEMU's own allocations, and QEMU will
10396              * crash or hang or die if its allocations fail. Ideally we would
10397              * track the guest allocations in QEMU and apply the limits ourselves.
10398              * For now, just tell the guest the call succeeded but don't actually
10399              * limit anything.
10400              */
10401             if (resource != RLIMIT_AS &&
10402                 resource != RLIMIT_DATA &&
10403                 resource != RLIMIT_STACK) {
10404                 return get_errno(setrlimit(resource, &rlim));
10405             } else {
10406                 return 0;
10407             }
10408         }
10409 #endif
10410 #ifdef TARGET_NR_getrlimit
10411     case TARGET_NR_getrlimit:
10412         {
10413             int resource = target_to_host_resource(arg1);
10414             struct target_rlimit *target_rlim;
10415             struct rlimit rlim;
10416 
10417             ret = get_errno(getrlimit(resource, &rlim));
10418             if (!is_error(ret)) {
10419                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10420                     return -TARGET_EFAULT;
10421                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10422                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10423                 unlock_user_struct(target_rlim, arg2, 1);
10424             }
10425         }
10426         return ret;
10427 #endif
10428     case TARGET_NR_getrusage:
10429         {
10430             struct rusage rusage;
10431             ret = get_errno(getrusage(arg1, &rusage));
10432             if (!is_error(ret)) {
10433                 ret = host_to_target_rusage(arg2, &rusage);
10434             }
10435         }
10436         return ret;
10437 #if defined(TARGET_NR_gettimeofday)
10438     case TARGET_NR_gettimeofday:
10439         {
10440             struct timeval tv;
10441             struct timezone tz;
10442 
10443             ret = get_errno(gettimeofday(&tv, &tz));
10444             if (!is_error(ret)) {
10445                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10446                     return -TARGET_EFAULT;
10447                 }
10448                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10449                     return -TARGET_EFAULT;
10450                 }
10451             }
10452         }
10453         return ret;
10454 #endif
10455 #if defined(TARGET_NR_settimeofday)
10456     case TARGET_NR_settimeofday:
10457         {
10458             struct timeval tv, *ptv = NULL;
10459             struct timezone tz, *ptz = NULL;
10460 
10461             if (arg1) {
10462                 if (copy_from_user_timeval(&tv, arg1)) {
10463                     return -TARGET_EFAULT;
10464                 }
10465                 ptv = &tv;
10466             }
10467 
10468             if (arg2) {
10469                 if (copy_from_user_timezone(&tz, arg2)) {
10470                     return -TARGET_EFAULT;
10471                 }
10472                 ptz = &tz;
10473             }
10474 
10475             return get_errno(settimeofday(ptv, ptz));
10476         }
10477 #endif
10478 #if defined(TARGET_NR_select)
10479     case TARGET_NR_select:
10480 #if defined(TARGET_WANT_NI_OLD_SELECT)
10481         /* some architectures used to have old_select here
10482          * but now return ENOSYS for it.
10483          */
10484         ret = -TARGET_ENOSYS;
10485 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10486         ret = do_old_select(arg1);
10487 #else
10488         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10489 #endif
10490         return ret;
10491 #endif
10492 #ifdef TARGET_NR_pselect6
10493     case TARGET_NR_pselect6:
10494         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10495 #endif
10496 #ifdef TARGET_NR_pselect6_time64
10497     case TARGET_NR_pselect6_time64:
10498         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10499 #endif
10500 #ifdef TARGET_NR_symlink
10501     case TARGET_NR_symlink:
10502         {
10503             void *p2;
10504             p = lock_user_string(arg1);
10505             p2 = lock_user_string(arg2);
10506             if (!p || !p2)
10507                 ret = -TARGET_EFAULT;
10508             else
10509                 ret = get_errno(symlink(p, p2));
10510             unlock_user(p2, arg2, 0);
10511             unlock_user(p, arg1, 0);
10512         }
10513         return ret;
10514 #endif
10515 #if defined(TARGET_NR_symlinkat)
10516     case TARGET_NR_symlinkat:
10517         {
10518             void *p2;
10519             p  = lock_user_string(arg1);
10520             p2 = lock_user_string(arg3);
10521             if (!p || !p2)
10522                 ret = -TARGET_EFAULT;
10523             else
10524                 ret = get_errno(symlinkat(p, arg2, p2));
10525             unlock_user(p2, arg3, 0);
10526             unlock_user(p, arg1, 0);
10527         }
10528         return ret;
10529 #endif
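/*
 * readlink/readlinkat on the magic /proc/self/exe path (or /proc/<own pid>/exe)
 * are intercepted so the guest sees the path of the emulated binary rather
 * than that of QEMU itself; do_guest_readlink() applies the same
 * is_proc_myself() check for the plain readlink case below.
 */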
10530 #ifdef TARGET_NR_readlink
10531     case TARGET_NR_readlink:
10532         {
10533             void *p2;
10534             p = lock_user_string(arg1);
10535             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10536             ret = get_errno(do_guest_readlink(p, p2, arg3));
10537             unlock_user(p2, arg2, ret);
10538             unlock_user(p, arg1, 0);
10539         }
10540         return ret;
10541 #endif
10542 #if defined(TARGET_NR_readlinkat)
10543     case TARGET_NR_readlinkat:
10544         {
10545             void *p2;
10546             p  = lock_user_string(arg2);
10547             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10548             if (!p || !p2) {
10549                 ret = -TARGET_EFAULT;
10550             } else if (!arg4) {
10551                 /* Short circuit this for the magic exe check. */
10552                 ret = -TARGET_EINVAL;
10553             } else if (is_proc_myself((const char *)p, "exe")) {
10554                 /*
10555                  * Don't worry about sign mismatch as earlier mapping
10556                  * logic would have thrown a bad address error.
10557                  */
10558                 ret = MIN(strlen(exec_path), arg4);
10559                 /* We cannot NUL terminate the string. */
10560                 memcpy(p2, exec_path, ret);
10561             } else {
10562                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10563             }
10564             unlock_user(p2, arg3, ret);
10565             unlock_user(p, arg2, 0);
10566         }
10567         return ret;
10568 #endif
10569 #ifdef TARGET_NR_swapon
10570     case TARGET_NR_swapon:
10571         if (!(p = lock_user_string(arg1)))
10572             return -TARGET_EFAULT;
10573         ret = get_errno(swapon(p, arg2));
10574         unlock_user(p, arg1, 0);
10575         return ret;
10576 #endif
10577     case TARGET_NR_reboot:
10578         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10579             /* arg4 is only used with RESTART2 and must be ignored otherwise */
10580             p = lock_user_string(arg4);
10581             if (!p) {
10582                 return -TARGET_EFAULT;
10583             }
10584             ret = get_errno(reboot(arg1, arg2, arg3, p));
10585             unlock_user(p, arg4, 0);
10586         } else {
10587             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10588         }
10589         return ret;
10590 #ifdef TARGET_NR_mmap
10591     case TARGET_NR_mmap:
10592 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10593         {
10594             abi_ulong *v;
10595             abi_ulong v1, v2, v3, v4, v5, v6;
10596             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10597                 return -TARGET_EFAULT;
10598             v1 = tswapal(v[0]);
10599             v2 = tswapal(v[1]);
10600             v3 = tswapal(v[2]);
10601             v4 = tswapal(v[3]);
10602             v5 = tswapal(v[4]);
10603             v6 = tswapal(v[5]);
10604             unlock_user(v, arg1, 0);
10605             return do_mmap(v1, v2, v3, v4, v5, v6);
10606         }
10607 #else
10608         /* mmap pointers are always untagged */
10609         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10610 #endif
10611 #endif
10612 #ifdef TARGET_NR_mmap2
10613     case TARGET_NR_mmap2:
10614 #ifndef MMAP_SHIFT
10615 #define MMAP_SHIFT 12
10616 #endif
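        /*
         * mmap2 takes its file offset in units of 2^MMAP_SHIFT bytes
         * (4 KiB pages unless the target overrides it), so e.g. arg6 == 0x10
         * means a byte offset of 0x10 << 12 == 0x10000.
         */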
10617         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10618                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10619 #endif
10620     case TARGET_NR_munmap:
10621         arg1 = cpu_untagged_addr(cpu, arg1);
10622         return get_errno(target_munmap(arg1, arg2));
10623     case TARGET_NR_mprotect:
10624         arg1 = cpu_untagged_addr(cpu, arg1);
10625         {
10626             TaskState *ts = get_task_state(cpu);
10627             /* Special hack to detect libc making the stack executable.  */
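            /*
             * The guest stack is not necessarily a grows-down mapping on the
             * host, so the flag is stripped and the request is widened to
             * start at the guest stack limit, which approximates what
             * PROT_GROWSDOWN would have done natively.
             */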
10628             if ((arg3 & PROT_GROWSDOWN)
10629                 && arg1 >= ts->info->stack_limit
10630                 && arg1 <= ts->info->start_stack) {
10631                 arg3 &= ~PROT_GROWSDOWN;
10632                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10633                 arg1 = ts->info->stack_limit;
10634             }
10635         }
10636         return get_errno(target_mprotect(arg1, arg2, arg3));
10637 #ifdef TARGET_NR_mremap
10638     case TARGET_NR_mremap:
10639         arg1 = cpu_untagged_addr(cpu, arg1);
10640         /* mremap new_addr (arg5) is always untagged */
10641         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10642 #endif
10643         /* ??? msync/mlock/munlock are broken for softmmu.  */
10644 #ifdef TARGET_NR_msync
10645     case TARGET_NR_msync:
10646         return get_errno(msync(g2h(cpu, arg1), arg2,
10647                                target_to_host_msync_arg(arg3)));
10648 #endif
10649 #ifdef TARGET_NR_mlock
10650     case TARGET_NR_mlock:
10651         return get_errno(mlock(g2h(cpu, arg1), arg2));
10652 #endif
10653 #ifdef TARGET_NR_munlock
10654     case TARGET_NR_munlock:
10655         return get_errno(munlock(g2h(cpu, arg1), arg2));
10656 #endif
10657 #ifdef TARGET_NR_mlockall
10658     case TARGET_NR_mlockall:
10659         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10660 #endif
10661 #ifdef TARGET_NR_munlockall
10662     case TARGET_NR_munlockall:
10663         return get_errno(munlockall());
10664 #endif
10665 #ifdef TARGET_NR_truncate
10666     case TARGET_NR_truncate:
10667         if (!(p = lock_user_string(arg1)))
10668             return -TARGET_EFAULT;
10669         ret = get_errno(truncate(p, arg2));
10670         unlock_user(p, arg1, 0);
10671         return ret;
10672 #endif
10673 #ifdef TARGET_NR_ftruncate
10674     case TARGET_NR_ftruncate:
10675         return get_errno(ftruncate(arg1, arg2));
10676 #endif
10677     case TARGET_NR_fchmod:
10678         return get_errno(fchmod(arg1, arg2));
10679 #if defined(TARGET_NR_fchmodat)
10680     case TARGET_NR_fchmodat:
10681         if (!(p = lock_user_string(arg2)))
10682             return -TARGET_EFAULT;
10683         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10684         unlock_user(p, arg2, 0);
10685         return ret;
10686 #endif
10687     case TARGET_NR_getpriority:
10688         /* Note that negative values are valid for getpriority, so we must
10689            differentiate based on errno settings.  */
10690         errno = 0;
10691         ret = getpriority(arg1, arg2);
10692         if (ret == -1 && errno != 0) {
10693             return -host_to_target_errno(errno);
10694         }
10695 #ifdef TARGET_ALPHA
10696         /* Return value is the unbiased priority.  Signal no error.  */
10697         cpu_env->ir[IR_V0] = 0;
10698 #else
10699         /* Return value is a biased priority to avoid negative numbers.  */
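        /* e.g. a host nice value of -5 comes back to the guest as 20 - (-5) = 25. */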
10700         ret = 20 - ret;
10701 #endif
10702         return ret;
10703     case TARGET_NR_setpriority:
10704         return get_errno(setpriority(arg1, arg2, arg3));
10705 #ifdef TARGET_NR_statfs
10706     case TARGET_NR_statfs:
10707         if (!(p = lock_user_string(arg1))) {
10708             return -TARGET_EFAULT;
10709         }
10710         ret = get_errno(statfs(path(p), &stfs));
10711         unlock_user(p, arg1, 0);
10712     convert_statfs:
10713         if (!is_error(ret)) {
10714             struct target_statfs *target_stfs;
10715 
10716             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10717                 return -TARGET_EFAULT;
10718             __put_user(stfs.f_type, &target_stfs->f_type);
10719             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10720             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10721             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10722             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10723             __put_user(stfs.f_files, &target_stfs->f_files);
10724             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10725             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10726             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10727             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10728             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10729 #ifdef _STATFS_F_FLAGS
10730             __put_user(stfs.f_flags, &target_stfs->f_flags);
10731 #else
10732             __put_user(0, &target_stfs->f_flags);
10733 #endif
10734             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10735             unlock_user_struct(target_stfs, arg2, 1);
10736         }
10737         return ret;
10738 #endif
10739 #ifdef TARGET_NR_fstatfs
10740     case TARGET_NR_fstatfs:
10741         ret = get_errno(fstatfs(arg1, &stfs));
10742         goto convert_statfs;
10743 #endif
10744 #ifdef TARGET_NR_statfs64
10745     case TARGET_NR_statfs64:
10746         if (!(p = lock_user_string(arg1))) {
10747             return -TARGET_EFAULT;
10748         }
10749         ret = get_errno(statfs(path(p), &stfs));
10750         unlock_user(p, arg1, 0);
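    /* TARGET_NR_fstatfs64 joins here for the 64-bit statfs conversion. */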
10751     convert_statfs64:
10752         if (!is_error(ret)) {
10753             struct target_statfs64 *target_stfs;
10754 
10755             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10756                 return -TARGET_EFAULT;
10757             __put_user(stfs.f_type, &target_stfs->f_type);
10758             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10759             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10760             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10761             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10762             __put_user(stfs.f_files, &target_stfs->f_files);
10763             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10764             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10765             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10766             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10767             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10768 #ifdef _STATFS_F_FLAGS
10769             __put_user(stfs.f_flags, &target_stfs->f_flags);
10770 #else
10771             __put_user(0, &target_stfs->f_flags);
10772 #endif
10773             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10774             unlock_user_struct(target_stfs, arg3, 1);
10775         }
10776         return ret;
10777     case TARGET_NR_fstatfs64:
10778         ret = get_errno(fstatfs(arg1, &stfs));
10779         goto convert_statfs64;
10780 #endif
10781 #ifdef TARGET_NR_socketcall
10782     case TARGET_NR_socketcall:
10783         return do_socketcall(arg1, arg2);
10784 #endif
10785 #ifdef TARGET_NR_accept
10786     case TARGET_NR_accept:
10787         return do_accept4(arg1, arg2, arg3, 0);
10788 #endif
10789 #ifdef TARGET_NR_accept4
10790     case TARGET_NR_accept4:
10791         return do_accept4(arg1, arg2, arg3, arg4);
10792 #endif
10793 #ifdef TARGET_NR_bind
10794     case TARGET_NR_bind:
10795         return do_bind(arg1, arg2, arg3);
10796 #endif
10797 #ifdef TARGET_NR_connect
10798     case TARGET_NR_connect:
10799         return do_connect(arg1, arg2, arg3);
10800 #endif
10801 #ifdef TARGET_NR_getpeername
10802     case TARGET_NR_getpeername:
10803         return do_getpeername(arg1, arg2, arg3);
10804 #endif
10805 #ifdef TARGET_NR_getsockname
10806     case TARGET_NR_getsockname:
10807         return do_getsockname(arg1, arg2, arg3);
10808 #endif
10809 #ifdef TARGET_NR_getsockopt
10810     case TARGET_NR_getsockopt:
10811         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10812 #endif
10813 #ifdef TARGET_NR_listen
10814     case TARGET_NR_listen:
10815         return get_errno(listen(arg1, arg2));
10816 #endif
10817 #ifdef TARGET_NR_recv
10818     case TARGET_NR_recv:
10819         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10820 #endif
10821 #ifdef TARGET_NR_recvfrom
10822     case TARGET_NR_recvfrom:
10823         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10824 #endif
10825 #ifdef TARGET_NR_recvmsg
10826     case TARGET_NR_recvmsg:
10827         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10828 #endif
10829 #ifdef TARGET_NR_send
10830     case TARGET_NR_send:
10831         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10832 #endif
10833 #ifdef TARGET_NR_sendmsg
10834     case TARGET_NR_sendmsg:
10835         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10836 #endif
10837 #ifdef TARGET_NR_sendmmsg
10838     case TARGET_NR_sendmmsg:
10839         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10840 #endif
10841 #ifdef TARGET_NR_recvmmsg
10842     case TARGET_NR_recvmmsg:
10843         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10844 #endif
10845 #ifdef TARGET_NR_sendto
10846     case TARGET_NR_sendto:
10847         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10848 #endif
10849 #ifdef TARGET_NR_shutdown
10850     case TARGET_NR_shutdown:
10851         return get_errno(shutdown(arg1, arg2));
10852 #endif
10853 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10854     case TARGET_NR_getrandom:
10855         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10856         if (!p) {
10857             return -TARGET_EFAULT;
10858         }
10859         ret = get_errno(getrandom(p, arg2, arg3));
10860         unlock_user(p, arg1, ret);
10861         return ret;
10862 #endif
10863 #ifdef TARGET_NR_socket
10864     case TARGET_NR_socket:
10865         return do_socket(arg1, arg2, arg3);
10866 #endif
10867 #ifdef TARGET_NR_socketpair
10868     case TARGET_NR_socketpair:
10869         return do_socketpair(arg1, arg2, arg3, arg4);
10870 #endif
10871 #ifdef TARGET_NR_setsockopt
10872     case TARGET_NR_setsockopt:
10873         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10874 #endif
10875 #if defined(TARGET_NR_syslog)
10876     case TARGET_NR_syslog:
10877         {
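            /* syslog(2): arg1 = action, arg2 = buffer, arg3 = length. */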
10878             int len = arg3;
10879 
10880             switch (arg1) {
10881             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10882             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10883             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10884             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10885             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10886             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10887             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10888             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10889                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10890             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10891             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10892             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10893                 {
10894                     if (len < 0) {
10895                         return -TARGET_EINVAL;
10896                     }
10897                     if (len == 0) {
10898                         return 0;
10899                     }
10900                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10901                     if (!p) {
10902                         return -TARGET_EFAULT;
10903                     }
10904                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10905                     unlock_user(p, arg2, arg3);
10906                 }
10907                 return ret;
10908             default:
10909                 return -TARGET_EINVAL;
10910             }
10911         }
10912         break;
10913 #endif
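    /*
     * setitimer/getitimer: the target passes {it_interval, it_value} as a
     * pair of target timevals; convert to and from the host's struct
     * itimerval around the call.
     */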
10914     case TARGET_NR_setitimer:
10915         {
10916             struct itimerval value, ovalue, *pvalue;
10917 
10918             if (arg2) {
10919                 pvalue = &value;
10920                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10921                     || copy_from_user_timeval(&pvalue->it_value,
10922                                               arg2 + sizeof(struct target_timeval)))
10923                     return -TARGET_EFAULT;
10924             } else {
10925                 pvalue = NULL;
10926             }
10927             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10928             if (!is_error(ret) && arg3) {
10929                 if (copy_to_user_timeval(arg3,
10930                                          &ovalue.it_interval)
10931                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10932                                             &ovalue.it_value))
10933                     return -TARGET_EFAULT;
10934             }
10935         }
10936         return ret;
10937     case TARGET_NR_getitimer:
10938         {
10939             struct itimerval value;
10940 
10941             ret = get_errno(getitimer(arg1, &value));
10942             if (!is_error(ret) && arg2) {
10943                 if (copy_to_user_timeval(arg2,
10944                                          &value.it_interval)
10945                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10946                                             &value.it_value))
10947                     return -TARGET_EFAULT;
10948             }
10949         }
10950         return ret;
10951 #ifdef TARGET_NR_stat
10952     case TARGET_NR_stat:
10953         if (!(p = lock_user_string(arg1))) {
10954             return -TARGET_EFAULT;
10955         }
10956         ret = get_errno(stat(path(p), &st));
10957         unlock_user(p, arg1, 0);
10958         goto do_stat;
10959 #endif
10960 #ifdef TARGET_NR_lstat
10961     case TARGET_NR_lstat:
10962         if (!(p = lock_user_string(arg1))) {
10963             return -TARGET_EFAULT;
10964         }
10965         ret = get_errno(lstat(path(p), &st));
10966         unlock_user(p, arg1, 0);
10967         goto do_stat;
10968 #endif
10969 #ifdef TARGET_NR_fstat
10970     case TARGET_NR_fstat:
10971         {
10972             ret = get_errno(fstat(arg1, &st));
10973 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
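        /* stat and lstat jump here to share the conversion to the target's struct stat. */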
10974         do_stat:
10975 #endif
10976             if (!is_error(ret)) {
10977                 struct target_stat *target_st;
10978 
10979                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10980                     return -TARGET_EFAULT;
10981                 memset(target_st, 0, sizeof(*target_st));
10982                 __put_user(st.st_dev, &target_st->st_dev);
10983                 __put_user(st.st_ino, &target_st->st_ino);
10984                 __put_user(st.st_mode, &target_st->st_mode);
10985                 __put_user(st.st_uid, &target_st->st_uid);
10986                 __put_user(st.st_gid, &target_st->st_gid);
10987                 __put_user(st.st_nlink, &target_st->st_nlink);
10988                 __put_user(st.st_rdev, &target_st->st_rdev);
10989                 __put_user(st.st_size, &target_st->st_size);
10990                 __put_user(st.st_blksize, &target_st->st_blksize);
10991                 __put_user(st.st_blocks, &target_st->st_blocks);
10992                 __put_user(st.st_atime, &target_st->target_st_atime);
10993                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10994                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10995 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10996                 __put_user(st.st_atim.tv_nsec,
10997                            &target_st->target_st_atime_nsec);
10998                 __put_user(st.st_mtim.tv_nsec,
10999                            &target_st->target_st_mtime_nsec);
11000                 __put_user(st.st_ctim.tv_nsec,
11001                            &target_st->target_st_ctime_nsec);
11002 #endif
11003                 unlock_user_struct(target_st, arg2, 1);
11004             }
11005         }
11006         return ret;
11007 #endif
11008     case TARGET_NR_vhangup:
11009         return get_errno(vhangup());
11010 #ifdef TARGET_NR_syscall
11011     case TARGET_NR_syscall:
11012         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11013                           arg6, arg7, arg8, 0);
11014 #endif
11015 #if defined(TARGET_NR_wait4)
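    /*
     * wait4: on success the status word is copied back only when a child was
     * actually reaped (ret != 0), while the rusage buffer is converted
     * whenever the caller supplied one.
     */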
11016     case TARGET_NR_wait4:
11017         {
11018             int status;
11019             abi_long status_ptr = arg2;
11020             struct rusage rusage, *rusage_ptr;
11021             abi_ulong target_rusage = arg4;
11022             abi_long rusage_err;
11023             if (target_rusage)
11024                 rusage_ptr = &rusage;
11025             else
11026                 rusage_ptr = NULL;
11027             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11028             if (!is_error(ret)) {
11029                 if (status_ptr && ret) {
11030                     status = host_to_target_waitstatus(status);
11031                     if (put_user_s32(status, status_ptr))
11032                         return -TARGET_EFAULT;
11033                 }
11034                 if (target_rusage) {
11035                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11036                     if (rusage_err) {
11037                         ret = rusage_err;
11038                     }
11039                 }
11040             }
11041         }
11042         return ret;
11043 #endif
11044 #ifdef TARGET_NR_swapoff
11045     case TARGET_NR_swapoff:
11046         if (!(p = lock_user_string(arg1)))
11047             return -TARGET_EFAULT;
11048         ret = get_errno(swapoff(p));
11049         unlock_user(p, arg1, 0);
11050         return ret;
11051 #endif
11052     case TARGET_NR_sysinfo:
11053         {
11054             struct target_sysinfo *target_value;
11055             struct sysinfo value;
11056             ret = get_errno(sysinfo(&value));
11057             if (!is_error(ret) && arg1)
11058             {
11059                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11060                     return -TARGET_EFAULT;
11061                 __put_user(value.uptime, &target_value->uptime);
11062                 __put_user(value.loads[0], &target_value->loads[0]);
11063                 __put_user(value.loads[1], &target_value->loads[1]);
11064                 __put_user(value.loads[2], &target_value->loads[2]);
11065                 __put_user(value.totalram, &target_value->totalram);
11066                 __put_user(value.freeram, &target_value->freeram);
11067                 __put_user(value.sharedram, &target_value->sharedram);
11068                 __put_user(value.bufferram, &target_value->bufferram);
11069                 __put_user(value.totalswap, &target_value->totalswap);
11070                 __put_user(value.freeswap, &target_value->freeswap);
11071                 __put_user(value.procs, &target_value->procs);
11072                 __put_user(value.totalhigh, &target_value->totalhigh);
11073                 __put_user(value.freehigh, &target_value->freehigh);
11074                 __put_user(value.mem_unit, &target_value->mem_unit);
11075                 unlock_user_struct(target_value, arg1, 1);
11076             }
11077         }
11078         return ret;
11079 #ifdef TARGET_NR_ipc
11080     case TARGET_NR_ipc:
11081         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11082 #endif
11083 #ifdef TARGET_NR_semget
11084     case TARGET_NR_semget:
11085         return get_errno(semget(arg1, arg2, arg3));
11086 #endif
11087 #ifdef TARGET_NR_semop
11088     case TARGET_NR_semop:
11089         return do_semtimedop(arg1, arg2, arg3, 0, false);
11090 #endif
11091 #ifdef TARGET_NR_semtimedop
11092     case TARGET_NR_semtimedop:
11093         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11094 #endif
11095 #ifdef TARGET_NR_semtimedop_time64
11096     case TARGET_NR_semtimedop_time64:
11097         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11098 #endif
11099 #ifdef TARGET_NR_semctl
11100     case TARGET_NR_semctl:
11101         return do_semctl(arg1, arg2, arg3, arg4);
11102 #endif
11103 #ifdef TARGET_NR_msgctl
11104     case TARGET_NR_msgctl:
11105         return do_msgctl(arg1, arg2, arg3);
11106 #endif
11107 #ifdef TARGET_NR_msgget
11108     case TARGET_NR_msgget:
11109         return get_errno(msgget(arg1, arg2));
11110 #endif
11111 #ifdef TARGET_NR_msgrcv
11112     case TARGET_NR_msgrcv:
11113         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11114 #endif
11115 #ifdef TARGET_NR_msgsnd
11116     case TARGET_NR_msgsnd:
11117         return do_msgsnd(arg1, arg2, arg3, arg4);
11118 #endif
11119 #ifdef TARGET_NR_shmget
11120     case TARGET_NR_shmget:
11121         return get_errno(shmget(arg1, arg2, arg3));
11122 #endif
11123 #ifdef TARGET_NR_shmctl
11124     case TARGET_NR_shmctl:
11125         return do_shmctl(arg1, arg2, arg3);
11126 #endif
11127 #ifdef TARGET_NR_shmat
11128     case TARGET_NR_shmat:
11129         return target_shmat(cpu_env, arg1, arg2, arg3);
11130 #endif
11131 #ifdef TARGET_NR_shmdt
11132     case TARGET_NR_shmdt:
11133         return target_shmdt(arg1);
11134 #endif
11135     case TARGET_NR_fsync:
11136         return get_errno(fsync(arg1));
11137     case TARGET_NR_clone:
11138         /* Linux manages to have three different orderings for its
11139          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11140          * match the kernel's CONFIG_CLONE_* settings.
11141          * Microblaze is further special in that it uses a sixth
11142          * implicit argument to clone for the TLS pointer.
11143          */
11144 #if defined(TARGET_MICROBLAZE)
11145         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11146 #elif defined(TARGET_CLONE_BACKWARDS)
11147         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11148 #elif defined(TARGET_CLONE_BACKWARDS2)
11149         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11150 #else
11151         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11152 #endif
11153         return ret;
11154 #ifdef __NR_exit_group
11155         /* new thread calls */
11156     case TARGET_NR_exit_group:
11157         preexit_cleanup(cpu_env, arg1);
11158         return get_errno(exit_group(arg1));
11159 #endif
11160     case TARGET_NR_setdomainname:
11161         if (!(p = lock_user_string(arg1)))
11162             return -TARGET_EFAULT;
11163         ret = get_errno(setdomainname(p, arg2));
11164         unlock_user(p, arg1, 0);
11165         return ret;
11166     case TARGET_NR_uname:
11167         /* no need to transcode because we use the linux syscall */
11168         {
11169             struct new_utsname * buf;
11170 
11171             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11172                 return -TARGET_EFAULT;
11173             ret = get_errno(sys_uname(buf));
11174             if (!is_error(ret)) {
11175                 /* Overwrite the native machine name with whatever is being
11176                    emulated. */
11177                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11178                           sizeof(buf->machine));
11179                 /* Allow the user to override the reported release.  */
11180                 if (qemu_uname_release && *qemu_uname_release) {
11181                     g_strlcpy(buf->release, qemu_uname_release,
11182                               sizeof(buf->release));
11183                 }
11184             }
11185             unlock_user_struct(buf, arg1, 1);
11186         }
11187         return ret;
11188 #ifdef TARGET_I386
11189     case TARGET_NR_modify_ldt:
11190         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11191 #if !defined(TARGET_X86_64)
11192     case TARGET_NR_vm86:
11193         return do_vm86(cpu_env, arg1, arg2);
11194 #endif
11195 #endif
11196 #if defined(TARGET_NR_adjtimex)
11197     case TARGET_NR_adjtimex:
11198         {
11199             struct timex host_buf;
11200 
11201             if (target_to_host_timex(&host_buf, arg1) != 0) {
11202                 return -TARGET_EFAULT;
11203             }
11204             ret = get_errno(adjtimex(&host_buf));
11205             if (!is_error(ret)) {
11206                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11207                     return -TARGET_EFAULT;
11208                 }
11209             }
11210         }
11211         return ret;
11212 #endif
11213 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11214     case TARGET_NR_clock_adjtime:
11215         {
11216             struct timex htx;
11217 
11218             if (target_to_host_timex(&htx, arg2) != 0) {
11219                 return -TARGET_EFAULT;
11220             }
11221             ret = get_errno(clock_adjtime(arg1, &htx));
11222             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11223                 return -TARGET_EFAULT;
11224             }
11225         }
11226         return ret;
11227 #endif
11228 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11229     case TARGET_NR_clock_adjtime64:
11230         {
11231             struct timex htx;
11232 
11233             if (target_to_host_timex64(&htx, arg2) != 0) {
11234                 return -TARGET_EFAULT;
11235             }
11236             ret = get_errno(clock_adjtime(arg1, &htx));
11237             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11238                 return -TARGET_EFAULT;
11239             }
11240         }
11241         return ret;
11242 #endif
11243     case TARGET_NR_getpgid:
11244         return get_errno(getpgid(arg1));
11245     case TARGET_NR_fchdir:
11246         return get_errno(fchdir(arg1));
11247     case TARGET_NR_personality:
11248         return get_errno(personality(arg1));
11249 #ifdef TARGET_NR__llseek /* Not on alpha */
11250     case TARGET_NR__llseek:
11251         {
11252             int64_t res;
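            /*
             * Hosts without __NR_llseek (64-bit hosts) emulate it with lseek,
             * rebuilding the 64-bit offset from the hi/lo halves.
             */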
11253 #if !defined(__NR_llseek)
11254             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11255             if (res == -1) {
11256                 ret = get_errno(res);
11257             } else {
11258                 ret = 0;
11259             }
11260 #else
11261             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11262 #endif
11263             if ((ret == 0) && put_user_s64(res, arg4)) {
11264                 return -TARGET_EFAULT;
11265             }
11266         }
11267         return ret;
11268 #endif
11269 #ifdef TARGET_NR_getdents
11270     case TARGET_NR_getdents:
11271         return do_getdents(arg1, arg2, arg3);
11272 #endif /* TARGET_NR_getdents */
11273 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11274     case TARGET_NR_getdents64:
11275         return do_getdents64(arg1, arg2, arg3);
11276 #endif /* TARGET_NR_getdents64 */
11277 #if defined(TARGET_NR__newselect)
11278     case TARGET_NR__newselect:
11279         return do_select(arg1, arg2, arg3, arg4, arg5);
11280 #endif
11281 #ifdef TARGET_NR_poll
11282     case TARGET_NR_poll:
11283         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11284 #endif
11285 #ifdef TARGET_NR_ppoll
11286     case TARGET_NR_ppoll:
11287         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11288 #endif
11289 #ifdef TARGET_NR_ppoll_time64
11290     case TARGET_NR_ppoll_time64:
11291         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11292 #endif
11293     case TARGET_NR_flock:
11294         /* NOTE: the flock constant seems to be the same for every
11295            Linux platform */
11296         return get_errno(safe_flock(arg1, arg2));
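    /*
     * readv/writev: lock the guest iovec array, hand it to the host syscall,
     * then copy the data back to the guest for reads.
     */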
11297     case TARGET_NR_readv:
11298         {
11299             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11300             if (vec != NULL) {
11301                 ret = get_errno(safe_readv(arg1, vec, arg3));
11302                 unlock_iovec(vec, arg2, arg3, 1);
11303             } else {
11304                 ret = -host_to_target_errno(errno);
11305             }
11306         }
11307         return ret;
11308     case TARGET_NR_writev:
11309         {
11310             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11311             if (vec != NULL) {
11312                 ret = get_errno(safe_writev(arg1, vec, arg3));
11313                 unlock_iovec(vec, arg2, arg3, 0);
11314             } else {
11315                 ret = -host_to_target_errno(errno);
11316             }
11317         }
11318         return ret;
11319 #if defined(TARGET_NR_preadv)
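    /*
     * preadv/pwritev: the 64-bit offset arrives split across two registers;
     * target_to_host_low_high() reassembles the halves for the host syscall.
     */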
11320     case TARGET_NR_preadv:
11321         {
11322             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11323             if (vec != NULL) {
11324                 unsigned long low, high;
11325 
11326                 target_to_host_low_high(arg4, arg5, &low, &high);
11327                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11328                 unlock_iovec(vec, arg2, arg3, 1);
11329             } else {
11330                 ret = -host_to_target_errno(errno);
11331             }
11332         }
11333         return ret;
11334 #endif
11335 #if defined(TARGET_NR_pwritev)
11336     case TARGET_NR_pwritev:
11337         {
11338             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11339             if (vec != NULL) {
11340                 unsigned long low, high;
11341 
11342                 target_to_host_low_high(arg4, arg5, &low, &high);
11343                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11344                 unlock_iovec(vec, arg2, arg3, 0);
11345             } else {
11346                 ret = -host_to_target_errno(errno);
11347             }
11348         }
11349         return ret;
11350 #endif
11351     case TARGET_NR_getsid:
11352         return get_errno(getsid(arg1));
11353 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11354     case TARGET_NR_fdatasync:
11355         return get_errno(fdatasync(arg1));
11356 #endif
11357     case TARGET_NR_sched_getaffinity:
11358         {
11359             unsigned int mask_size;
11360             unsigned long *mask;
11361 
11362             /*
11363              * sched_getaffinity needs multiples of ulong, so we need to take
11364              * care of mismatches between target ulong and host ulong sizes.
11365              */
11366             if (arg2 & (sizeof(abi_ulong) - 1)) {
11367                 return -TARGET_EINVAL;
11368             }
11369             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11370 
11371             mask = alloca(mask_size);
11372             memset(mask, 0, mask_size);
11373             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11374 
11375             if (!is_error(ret)) {
11376                 if (ret > arg2) {
11377                     /* More data returned than the caller's buffer will fit.
11378                      * This only happens if sizeof(abi_long) < sizeof(long)
11379                      * and the caller passed us a buffer holding an odd number
11380                      * of abi_longs. If the host kernel is actually using the
11381                      * extra 4 bytes then fail EINVAL; otherwise we can just
11382                      * ignore them and only copy the interesting part.
11383                      */
11384                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11385                     if (numcpus > arg2 * 8) {
11386                         return -TARGET_EINVAL;
11387                     }
11388                     ret = arg2;
11389                 }
11390 
11391                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11392                     return -TARGET_EFAULT;
11393                 }
11394             }
11395         }
11396         return ret;
11397     case TARGET_NR_sched_setaffinity:
11398         {
11399             unsigned int mask_size;
11400             unsigned long *mask;
11401 
11402             /*
11403              * sched_setaffinity needs multiples of ulong, so we need to take
11404              * care of mismatches between target ulong and host ulong sizes.
11405              */
11406             if (arg2 & (sizeof(abi_ulong) - 1)) {
11407                 return -TARGET_EINVAL;
11408             }
11409             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11410             mask = alloca(mask_size);
11411 
11412             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11413             if (ret) {
11414                 return ret;
11415             }
11416 
11417             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11418         }
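    /* getcpu: the third (tcache) argument is obsolete and ignored by the kernel, so pass NULL. */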
11419     case TARGET_NR_getcpu:
11420         {
11421             unsigned cpuid, node;
11422             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11423                                        arg2 ? &node : NULL,
11424                                        NULL));
11425             if (is_error(ret)) {
11426                 return ret;
11427             }
11428             if (arg1 && put_user_u32(cpuid, arg1)) {
11429                 return -TARGET_EFAULT;
11430             }
11431             if (arg2 && put_user_u32(node, arg2)) {
11432                 return -TARGET_EFAULT;
11433             }
11434         }
11435         return ret;
11436     case TARGET_NR_sched_setparam:
11437         {
11438             struct target_sched_param *target_schp;
11439             struct sched_param schp;
11440 
11441             if (arg2 == 0) {
11442                 return -TARGET_EINVAL;
11443             }
11444             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11445                 return -TARGET_EFAULT;
11446             }
11447             schp.sched_priority = tswap32(target_schp->sched_priority);
11448             unlock_user_struct(target_schp, arg2, 0);
11449             return get_errno(sys_sched_setparam(arg1, &schp));
11450         }
11451     case TARGET_NR_sched_getparam:
11452         {
11453             struct target_sched_param *target_schp;
11454             struct sched_param schp;
11455 
11456             if (arg2 == 0) {
11457                 return -TARGET_EINVAL;
11458             }
11459             ret = get_errno(sys_sched_getparam(arg1, &schp));
11460             if (!is_error(ret)) {
11461                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11462                     return -TARGET_EFAULT;
11463                 }
11464                 target_schp->sched_priority = tswap32(schp.sched_priority);
11465                 unlock_user_struct(target_schp, arg2, 1);
11466             }
11467         }
11468         return ret;
11469     case TARGET_NR_sched_setscheduler:
11470         {
11471             struct target_sched_param *target_schp;
11472             struct sched_param schp;
11473             if (arg3 == 0) {
11474                 return -TARGET_EINVAL;
11475             }
11476             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11477                 return -TARGET_EFAULT;
11478             }
11479             schp.sched_priority = tswap32(target_schp->sched_priority);
11480             unlock_user_struct(target_schp, arg3, 0);
11481             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11482         }
11483     case TARGET_NR_sched_getscheduler:
11484         return get_errno(sys_sched_getscheduler(arg1));
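    /*
     * sched_getattr: clamp the caller's size to the host struct, convert the
     * result field by field, and copy the utilization-clamp fields only when
     * the kernel reported a struct large enough to contain them.
     */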
11485     case TARGET_NR_sched_getattr:
11486         {
11487             struct target_sched_attr *target_scha;
11488             struct sched_attr scha;
11489             if (arg2 == 0) {
11490                 return -TARGET_EINVAL;
11491             }
11492             if (arg3 > sizeof(scha)) {
11493                 arg3 = sizeof(scha);
11494             }
11495             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11496             if (!is_error(ret)) {
11497                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11498                 if (!target_scha) {
11499                     return -TARGET_EFAULT;
11500                 }
11501                 target_scha->size = tswap32(scha.size);
11502                 target_scha->sched_policy = tswap32(scha.sched_policy);
11503                 target_scha->sched_flags = tswap64(scha.sched_flags);
11504                 target_scha->sched_nice = tswap32(scha.sched_nice);
11505                 target_scha->sched_priority = tswap32(scha.sched_priority);
11506                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11507                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11508                 target_scha->sched_period = tswap64(scha.sched_period);
11509                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11510                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11511                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11512                 }
11513                 unlock_user(target_scha, arg2, arg3);
11514             }
11515             return ret;
11516         }
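    /*
     * sched_setattr: mirror the kernel's size handshake.  A zero size means
     * the original (pre-utilization-clamp) layout; a too-small size or
     * non-zero padding beyond it yields E2BIG with the supported size
     * written back to the guest.
     */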
11517     case TARGET_NR_sched_setattr:
11518         {
11519             struct target_sched_attr *target_scha;
11520             struct sched_attr scha;
11521             uint32_t size;
11522             int zeroed;
11523             if (arg2 == 0) {
11524                 return -TARGET_EINVAL;
11525             }
11526             if (get_user_u32(size, arg2)) {
11527                 return -TARGET_EFAULT;
11528             }
11529             if (!size) {
11530                 size = offsetof(struct target_sched_attr, sched_util_min);
11531             }
11532             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11533                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11534                     return -TARGET_EFAULT;
11535                 }
11536                 return -TARGET_E2BIG;
11537             }
11538 
11539             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11540             if (zeroed < 0) {
11541                 return zeroed;
11542             } else if (zeroed == 0) {
11543                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11544                     return -TARGET_EFAULT;
11545                 }
11546                 return -TARGET_E2BIG;
11547             }
11548             if (size > sizeof(struct target_sched_attr)) {
11549                 size = sizeof(struct target_sched_attr);
11550             }
11551 
11552             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11553             if (!target_scha) {
11554                 return -TARGET_EFAULT;
11555             }
11556             scha.size = size;
11557             scha.sched_policy = tswap32(target_scha->sched_policy);
11558             scha.sched_flags = tswap64(target_scha->sched_flags);
11559             scha.sched_nice = tswap32(target_scha->sched_nice);
11560             scha.sched_priority = tswap32(target_scha->sched_priority);
11561             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11562             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11563             scha.sched_period = tswap64(target_scha->sched_period);
11564             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11565                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11566                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11567             }
11568             unlock_user(target_scha, arg2, 0);
11569             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11570         }
11571     case TARGET_NR_sched_yield:
11572         return get_errno(sched_yield());
11573     case TARGET_NR_sched_get_priority_max:
11574         return get_errno(sched_get_priority_max(arg1));
11575     case TARGET_NR_sched_get_priority_min:
11576         return get_errno(sched_get_priority_min(arg1));
11577 #ifdef TARGET_NR_sched_rr_get_interval
11578     case TARGET_NR_sched_rr_get_interval:
11579         {
11580             struct timespec ts;
11581             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11582             if (!is_error(ret)) {
11583                 ret = host_to_target_timespec(arg2, &ts);
11584             }
11585         }
11586         return ret;
11587 #endif
11588 #ifdef TARGET_NR_sched_rr_get_interval_time64
11589     case TARGET_NR_sched_rr_get_interval_time64:
11590         {
11591             struct timespec ts;
11592             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11593             if (!is_error(ret)) {
11594                 ret = host_to_target_timespec64(arg2, &ts);
11595             }
11596         }
11597         return ret;
11598 #endif
11599 #if defined(TARGET_NR_nanosleep)
11600     case TARGET_NR_nanosleep:
11601         {
11602             struct timespec req, rem;
11603             target_to_host_timespec(&req, arg1);
11604             ret = get_errno(safe_nanosleep(&req, &rem));
11605             if (is_error(ret) && arg2) {
11606                 host_to_target_timespec(arg2, &rem);
11607             }
11608         }
11609         return ret;
11610 #endif
11611     case TARGET_NR_prctl:
11612         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11614 #ifdef TARGET_NR_arch_prctl
11615     case TARGET_NR_arch_prctl:
11616         return do_arch_prctl(cpu_env, arg1, arg2);
11617 #endif
11618 #ifdef TARGET_NR_pread64
11619     case TARGET_NR_pread64:
11620         if (regpairs_aligned(cpu_env, num)) {
11621             arg4 = arg5;
11622             arg5 = arg6;
11623         }
11624         if (arg2 == 0 && arg3 == 0) {
11625             /* Special-case NULL buffer and zero length, which should succeed */
11626             p = 0;
11627         } else {
11628             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11629             if (!p) {
11630                 return -TARGET_EFAULT;
11631             }
11632         }
11633         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11634         unlock_user(p, arg2, ret);
11635         return ret;
11636     case TARGET_NR_pwrite64:
11637         if (regpairs_aligned(cpu_env, num)) {
11638             arg4 = arg5;
11639             arg5 = arg6;
11640         }
11641         if (arg2 == 0 && arg3 == 0) {
11642             /* Special-case NULL buffer and zero length, which should succeed */
11643             p = 0;
11644         } else {
11645             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11646             if (!p) {
11647                 return -TARGET_EFAULT;
11648             }
11649         }
11650         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11651         unlock_user(p, arg2, 0);
11652         return ret;
11653 #endif
11654     case TARGET_NR_getcwd:
11655         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11656             return -TARGET_EFAULT;
11657         ret = get_errno(sys_getcwd1(p, arg2));
11658         unlock_user(p, arg1, ret);
11659         return ret;
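    /*
     * capget/capset share one conversion path: the header is always
     * converted, and capability versions 2 and up carry two 32-bit data
     * structs instead of one.
     */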
11660     case TARGET_NR_capget:
11661     case TARGET_NR_capset:
11662     {
11663         struct target_user_cap_header *target_header;
11664         struct target_user_cap_data *target_data = NULL;
11665         struct __user_cap_header_struct header;
11666         struct __user_cap_data_struct data[2];
11667         struct __user_cap_data_struct *dataptr = NULL;
11668         int i, target_datalen;
11669         int data_items = 1;
11670 
11671         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11672             return -TARGET_EFAULT;
11673         }
11674         header.version = tswap32(target_header->version);
11675         header.pid = tswap32(target_header->pid);
11676 
11677         if (header.version != _LINUX_CAPABILITY_VERSION) {
11678             /* Version 2 and up takes pointer to two user_data structs */
11679             /* Versions 2 and up take a pointer to two user_data structs */
11680         }
11681 
11682         target_datalen = sizeof(*target_data) * data_items;
11683 
11684         if (arg2) {
11685             if (num == TARGET_NR_capget) {
11686                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11687             } else {
11688                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11689             }
11690             if (!target_data) {
11691                 unlock_user_struct(target_header, arg1, 0);
11692                 return -TARGET_EFAULT;
11693             }
11694 
11695             if (num == TARGET_NR_capset) {
11696                 for (i = 0; i < data_items; i++) {
11697                     data[i].effective = tswap32(target_data[i].effective);
11698                     data[i].permitted = tswap32(target_data[i].permitted);
11699                     data[i].inheritable = tswap32(target_data[i].inheritable);
11700                 }
11701             }
11702 
11703             dataptr = data;
11704         }
11705 
11706         if (num == TARGET_NR_capget) {
11707             ret = get_errno(capget(&header, dataptr));
11708         } else {
11709             ret = get_errno(capset(&header, dataptr));
11710         }
11711 
11712         /* The kernel always updates version for both capget and capset */
11713         target_header->version = tswap32(header.version);
11714         unlock_user_struct(target_header, arg1, 1);
11715 
11716         if (arg2) {
11717             if (num == TARGET_NR_capget) {
11718                 for (i = 0; i < data_items; i++) {
11719                     target_data[i].effective = tswap32(data[i].effective);
11720                     target_data[i].permitted = tswap32(data[i].permitted);
11721                     target_data[i].inheritable = tswap32(data[i].inheritable);
11722                 }
11723                 unlock_user(target_data, arg2, target_datalen);
11724             } else {
11725                 unlock_user(target_data, arg2, 0);
11726             }
11727         }
11728         return ret;
11729     }
11730     case TARGET_NR_sigaltstack:
11731         return do_sigaltstack(arg1, arg2, cpu_env);
11732 
11733 #ifdef CONFIG_SENDFILE
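    /*
     * sendfile takes an abi_long offset, sendfile64 a full 64-bit one;
     * otherwise the two paths are identical.
     */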
11734 #ifdef TARGET_NR_sendfile
11735     case TARGET_NR_sendfile:
11736     {
11737         off_t *offp = NULL;
11738         off_t off;
11739         if (arg3) {
11740             ret = get_user_sal(off, arg3);
11741             if (is_error(ret)) {
11742                 return ret;
11743             }
11744             offp = &off;
11745         }
11746         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11747         if (!is_error(ret) && arg3) {
11748             abi_long ret2 = put_user_sal(off, arg3);
11749             if (is_error(ret2)) {
11750                 ret = ret2;
11751             }
11752         }
11753         return ret;
11754     }
11755 #endif
11756 #ifdef TARGET_NR_sendfile64
11757     case TARGET_NR_sendfile64:
11758     {
11759         off_t *offp = NULL;
11760         off_t off;
11761         if (arg3) {
11762             ret = get_user_s64(off, arg3);
11763             if (is_error(ret)) {
11764                 return ret;
11765             }
11766             offp = &off;
11767         }
11768         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11769         if (!is_error(ret) && arg3) {
11770             abi_long ret2 = put_user_s64(off, arg3);
11771             if (is_error(ret2)) {
11772                 ret = ret2;
11773             }
11774         }
11775         return ret;
11776     }
11777 #endif
11778 #endif
11779 #ifdef TARGET_NR_vfork
11780     case TARGET_NR_vfork:
11781         return get_errno(do_fork(cpu_env,
11782                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11783                          0, 0, 0, 0));
11784 #endif
11785 #ifdef TARGET_NR_ugetrlimit
11786     case TARGET_NR_ugetrlimit:
11787     {
11788         struct rlimit rlim;
11789         int resource = target_to_host_resource(arg1);
11790         ret = get_errno(getrlimit(resource, &rlim));
11791         if (!is_error(ret)) {
11792             struct target_rlimit *target_rlim;
11793             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11794                 return -TARGET_EFAULT;
11795             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11796             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11797             unlock_user_struct(target_rlim, arg2, 1);
11798         }
11799         return ret;
11800     }
11801 #endif
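    /*
     * truncate64/ftruncate64: the 64-bit length is passed as a register pair;
     * the target_truncate64()/target_ftruncate64() helpers deal with the
     * per-target register pairing.
     */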
11802 #ifdef TARGET_NR_truncate64
11803     case TARGET_NR_truncate64:
11804         if (!(p = lock_user_string(arg1)))
11805             return -TARGET_EFAULT;
11806         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11807         unlock_user(p, arg1, 0);
11808         return ret;
11809 #endif
11810 #ifdef TARGET_NR_ftruncate64
11811     case TARGET_NR_ftruncate64:
11812         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11813 #endif
11814 #ifdef TARGET_NR_stat64
11815     case TARGET_NR_stat64:
11816         if (!(p = lock_user_string(arg1))) {
11817             return -TARGET_EFAULT;
11818         }
11819         ret = get_errno(stat(path(p), &st));
11820         unlock_user(p, arg1, 0);
11821         if (!is_error(ret))
11822             ret = host_to_target_stat64(cpu_env, arg2, &st);
11823         return ret;
11824 #endif
11825 #ifdef TARGET_NR_lstat64
11826     case TARGET_NR_lstat64:
11827         if (!(p = lock_user_string(arg1))) {
11828             return -TARGET_EFAULT;
11829         }
11830         ret = get_errno(lstat(path(p), &st));
11831         unlock_user(p, arg1, 0);
11832         if (!is_error(ret))
11833             ret = host_to_target_stat64(cpu_env, arg2, &st);
11834         return ret;
11835 #endif
11836 #ifdef TARGET_NR_fstat64
11837     case TARGET_NR_fstat64:
11838         ret = get_errno(fstat(arg1, &st));
11839         if (!is_error(ret))
11840             ret = host_to_target_stat64(cpu_env, arg2, &st);
11841         return ret;
11842 #endif
11843 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11844 #ifdef TARGET_NR_fstatat64
11845     case TARGET_NR_fstatat64:
11846 #endif
11847 #ifdef TARGET_NR_newfstatat
11848     case TARGET_NR_newfstatat:
11849 #endif
11850         if (!(p = lock_user_string(arg2))) {
11851             return -TARGET_EFAULT;
11852         }
11853         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11854         unlock_user(p, arg2, 0);
11855         if (!is_error(ret))
11856             ret = host_to_target_stat64(cpu_env, arg3, &st);
11857         return ret;
11858 #endif
11859 #if defined(TARGET_NR_statx)
11860     case TARGET_NR_statx:
11861         {
11862             struct target_statx *target_stx;
11863             int dirfd = arg1;
11864             int flags = arg3;
11865 
11866             p = lock_user_string(arg2);
11867             if (p == NULL) {
11868                 return -TARGET_EFAULT;
11869             }
11870 #if defined(__NR_statx)
11871             {
11872                 /*
11873                  * It is assumed that struct statx is architecture independent.
11874                  */
11875                 struct target_statx host_stx;
11876                 int mask = arg4;
11877 
11878                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11879                 if (!is_error(ret)) {
11880                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11881                         unlock_user(p, arg2, 0);
11882                         return -TARGET_EFAULT;
11883                     }
11884                 }
11885 
11886                 if (ret != -TARGET_ENOSYS) {
11887                     unlock_user(p, arg2, 0);
11888                     return ret;
11889                 }
11890             }
11891 #endif
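            /*
             * No usable host statx() (or it returned ENOSYS): synthesize the
             * result from fstatat().
             */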
11892             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11893             unlock_user(p, arg2, 0);
11894 
11895             if (!is_error(ret)) {
11896                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11897                     return -TARGET_EFAULT;
11898                 }
11899                 memset(target_stx, 0, sizeof(*target_stx));
11900                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11901                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11902                 __put_user(st.st_ino, &target_stx->stx_ino);
11903                 __put_user(st.st_mode, &target_stx->stx_mode);
11904                 __put_user(st.st_uid, &target_stx->stx_uid);
11905                 __put_user(st.st_gid, &target_stx->stx_gid);
11906                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11907                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11908                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11909                 __put_user(st.st_size, &target_stx->stx_size);
11910                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11911                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11912                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11913                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11914                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11915                 unlock_user_struct(target_stx, arg5, 1);
11916             }
11917         }
11918         return ret;
11919 #endif
11920 #ifdef TARGET_NR_lchown
11921     case TARGET_NR_lchown:
11922         if (!(p = lock_user_string(arg1)))
11923             return -TARGET_EFAULT;
11924         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11925         unlock_user(p, arg1, 0);
11926         return ret;
11927 #endif
11928 #ifdef TARGET_NR_getuid
11929     case TARGET_NR_getuid:
11930         return get_errno(high2lowuid(getuid()));
11931 #endif
11932 #ifdef TARGET_NR_getgid
11933     case TARGET_NR_getgid:
11934         return get_errno(high2lowgid(getgid()));
11935 #endif
11936 #ifdef TARGET_NR_geteuid
11937     case TARGET_NR_geteuid:
11938         return get_errno(high2lowuid(geteuid()));
11939 #endif
11940 #ifdef TARGET_NR_getegid
11941     case TARGET_NR_getegid:
11942         return get_errno(high2lowgid(getegid()));
11943 #endif
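    /*
     * The plain (non-32-bit) uid/gid syscalls use the target's legacy,
     * possibly 16-bit, ids; the low2high*()/high2low*() helpers translate
     * to and from the host's full-width ids.
     */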
11944     case TARGET_NR_setreuid:
11945         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11946     case TARGET_NR_setregid:
11947         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11948     case TARGET_NR_getgroups:
11949         { /* the same code as for TARGET_NR_getgroups32 */
11950             int gidsetsize = arg1;
11951             target_id *target_grouplist;
11952             g_autofree gid_t *grouplist = NULL;
11953             int i;
11954 
11955             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11956                 return -TARGET_EINVAL;
11957             }
11958             if (gidsetsize > 0) {
11959                 grouplist = g_try_new(gid_t, gidsetsize);
11960                 if (!grouplist) {
11961                     return -TARGET_ENOMEM;
11962                 }
11963             }
11964             ret = get_errno(getgroups(gidsetsize, grouplist));
11965             if (!is_error(ret) && gidsetsize > 0) {
11966                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11967                                              gidsetsize * sizeof(target_id), 0);
11968                 if (!target_grouplist) {
11969                     return -TARGET_EFAULT;
11970                 }
11971                 for (i = 0; i < ret; i++) {
11972                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11973                 }
11974                 unlock_user(target_grouplist, arg2,
11975                             gidsetsize * sizeof(target_id));
11976             }
11977             return ret;
11978         }
11979     case TARGET_NR_setgroups:
11980         { /* the same code as for TARGET_NR_setgroups32 */
11981             int gidsetsize = arg1;
11982             target_id *target_grouplist;
11983             g_autofree gid_t *grouplist = NULL;
11984             int i;
11985 
11986             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11987                 return -TARGET_EINVAL;
11988             }
11989             if (gidsetsize > 0) {
11990                 grouplist = g_try_new(gid_t, gidsetsize);
11991                 if (!grouplist) {
11992                     return -TARGET_ENOMEM;
11993                 }
11994                 target_grouplist = lock_user(VERIFY_READ, arg2,
11995                                              gidsetsize * sizeof(target_id), 1);
11996                 if (!target_grouplist) {
11997                     return -TARGET_EFAULT;
11998                 }
11999                 for (i = 0; i < gidsetsize; i++) {
12000                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12001                 }
12002                 unlock_user(target_grouplist, arg2,
12003                             gidsetsize * sizeof(target_id));
12004             }
12005             return get_errno(sys_setgroups(gidsetsize, grouplist));
12006         }
12007     case TARGET_NR_fchown:
12008         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12009 #if defined(TARGET_NR_fchownat)
12010     case TARGET_NR_fchownat:
12011         if (!(p = lock_user_string(arg2)))
12012             return -TARGET_EFAULT;
12013         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12014                                  low2highgid(arg4), arg5));
12015         unlock_user(p, arg2, 0);
12016         return ret;
12017 #endif
12018 #ifdef TARGET_NR_setresuid
12019     case TARGET_NR_setresuid:
12020         return get_errno(sys_setresuid(low2highuid(arg1),
12021                                        low2highuid(arg2),
12022                                        low2highuid(arg3)));
12023 #endif
12024 #ifdef TARGET_NR_getresuid
12025     case TARGET_NR_getresuid:
12026         {
12027             uid_t ruid, euid, suid;
12028             ret = get_errno(getresuid(&ruid, &euid, &suid));
12029             if (!is_error(ret)) {
12030                 if (put_user_id(high2lowuid(ruid), arg1)
12031                     || put_user_id(high2lowuid(euid), arg2)
12032                     || put_user_id(high2lowuid(suid), arg3))
12033                     return -TARGET_EFAULT;
12034             }
12035         }
12036         return ret;
12037 #endif
12038 #ifdef TARGET_NR_setresgid
12039     case TARGET_NR_setresgid:
12040         return get_errno(sys_setresgid(low2highgid(arg1),
12041                                        low2highgid(arg2),
12042                                        low2highgid(arg3)));
12043 #endif
12044 #ifdef TARGET_NR_getresgid
12045     case TARGET_NR_getresgid:
12046         {
12047             gid_t rgid, egid, sgid;
12048             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12049             if (!is_error(ret)) {
12050                 if (put_user_id(high2lowgid(rgid), arg1)
12051                     || put_user_id(high2lowgid(egid), arg2)
12052                     || put_user_id(high2lowgid(sgid), arg3))
12053                     return -TARGET_EFAULT;
12054             }
12055         }
12056         return ret;
12057 #endif
12058 #ifdef TARGET_NR_chown
12059     case TARGET_NR_chown:
12060         if (!(p = lock_user_string(arg1)))
12061             return -TARGET_EFAULT;
12062         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12063         unlock_user(p, arg1, 0);
12064         return ret;
12065 #endif
12066     case TARGET_NR_setuid:
12067         return get_errno(sys_setuid(low2highuid(arg1)));
12068     case TARGET_NR_setgid:
12069         return get_errno(sys_setgid(low2highgid(arg1)));
12070     case TARGET_NR_setfsuid:
12071         return get_errno(setfsuid(arg1));
12072     case TARGET_NR_setfsgid:
12073         return get_errno(setfsgid(arg1));
12074 
12075 #ifdef TARGET_NR_lchown32
12076     case TARGET_NR_lchown32:
12077         if (!(p = lock_user_string(arg1)))
12078             return -TARGET_EFAULT;
12079         ret = get_errno(lchown(p, arg2, arg3));
12080         unlock_user(p, arg1, 0);
12081         return ret;
12082 #endif
12083 #ifdef TARGET_NR_getuid32
12084     case TARGET_NR_getuid32:
12085         return get_errno(getuid());
12086 #endif
12087 
12088 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12089     /* Alpha specific */
12090     case TARGET_NR_getxuid:
12091         {
12092             uid_t euid;
12093             euid = geteuid();
12094             cpu_env->ir[IR_A4] = euid;
12095         }
12096         return get_errno(getuid());
12097 #endif
12098 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12099     /* Alpha specific */
12100     case TARGET_NR_getxgid:
12101         {
12102             gid_t egid;
12103             egid = getegid();
12104             cpu_env->ir[IR_A4] = egid;
12105         }
12106         return get_errno(getgid());
12107 #endif
12108 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12109     /* Alpha specific */
12110     case TARGET_NR_osf_getsysinfo:
12111         ret = -TARGET_EOPNOTSUPP;
12112         switch (arg1) {
12113           case TARGET_GSI_IEEE_FP_CONTROL:
12114             {
12115                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12116                 uint64_t swcr = cpu_env->swcr;
12117 
12118                 swcr &= ~SWCR_STATUS_MASK;
12119                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12120 
12121                 if (put_user_u64(swcr, arg2))
12122                     return -TARGET_EFAULT;
12123                 ret = 0;
12124             }
12125             break;
12126 
12127           /* case GSI_IEEE_STATE_AT_SIGNAL:
12128              -- Not implemented in linux kernel.
12129              case GSI_UACPROC:
12130              -- Retrieves current unaligned access state; not much used.
12131              case GSI_PROC_TYPE:
12132              -- Retrieves implver information; surely not used.
12133              case GSI_GET_HWRPB:
12134              -- Grabs a copy of the HWRPB; surely not used.
12135           */
12136         }
12137         return ret;
12138 #endif
12139 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12140     /* Alpha specific */
12141     case TARGET_NR_osf_setsysinfo:
12142         ret = -TARGET_EOPNOTSUPP;
12143         switch (arg1) {
12144           case TARGET_SSI_IEEE_FP_CONTROL:
12145             {
12146                 uint64_t swcr, fpcr;
12147 
12148                 if (get_user_u64(swcr, arg2)) {
12149                     return -TARGET_EFAULT;
12150                 }
12151 
12152                 /*
12153                  * The kernel calls swcr_update_status to update the
12154                  * status bits from the fpcr at every point that it
12155                  * could be queried.  Therefore, we store the status
12156                  * bits only in FPCR.
12157                  */
12158                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12159 
12160                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12161                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12162                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12163                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12164                 ret = 0;
12165             }
12166             break;
12167 
12168           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12169             {
12170                 uint64_t exc, fpcr, fex;
12171 
12172                 if (get_user_u64(exc, arg2)) {
12173                     return -TARGET_EFAULT;
12174                 }
12175                 exc &= SWCR_STATUS_MASK;
12176                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12177 
12178                 /* Old exceptions are not signaled.  */
12179                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12180                 fex = exc & ~fex;
12181                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12182                 fex &= (cpu_env)->swcr;
12183 
12184                 /* Update the hardware fpcr.  */
12185                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12186                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12187 
12188                 if (fex) {
12189                     int si_code = TARGET_FPE_FLTUNK;
12190                     target_siginfo_t info;
12191 
12192                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12193                         si_code = TARGET_FPE_FLTUND;
12194                     }
12195                     if (fex & SWCR_TRAP_ENABLE_INE) {
12196                         si_code = TARGET_FPE_FLTRES;
12197                     }
12198                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12199                         si_code = TARGET_FPE_FLTUND;
12200                     }
12201                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12202                         si_code = TARGET_FPE_FLTOVF;
12203                     }
12204                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12205                         si_code = TARGET_FPE_FLTDIV;
12206                     }
12207                     if (fex & SWCR_TRAP_ENABLE_INV) {
12208                         si_code = TARGET_FPE_FLTINV;
12209                     }
12210 
12211                     info.si_signo = SIGFPE;
12212                     info.si_errno = 0;
12213                     info.si_code = si_code;
12214                     info._sifields._sigfault._addr = (cpu_env)->pc;
12215                     queue_signal(cpu_env, info.si_signo,
12216                                  QEMU_SI_FAULT, &info);
12217                 }
12218                 ret = 0;
12219             }
12220             break;
12221 
12222           /* case SSI_NVPAIRS:
12223              -- Used with SSIN_UACPROC to enable unaligned accesses.
12224              case SSI_IEEE_STATE_AT_SIGNAL:
12225              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12226              -- Not implemented in linux kernel
12227           */
12228         }
12229         return ret;
12230 #endif
12231 #ifdef TARGET_NR_osf_sigprocmask
12232     /* Alpha specific.  */
12233     case TARGET_NR_osf_sigprocmask:
12234         {
12235             abi_ulong mask;
12236             int how;
12237             sigset_t set, oldset;
12238 
12239             switch (arg1) {
12240             case TARGET_SIG_BLOCK:
12241                 how = SIG_BLOCK;
12242                 break;
12243             case TARGET_SIG_UNBLOCK:
12244                 how = SIG_UNBLOCK;
12245                 break;
12246             case TARGET_SIG_SETMASK:
12247                 how = SIG_SETMASK;
12248                 break;
12249             default:
12250                 return -TARGET_EINVAL;
12251             }
12252             mask = arg2;
12253             target_to_host_old_sigset(&set, &mask);
12254             ret = do_sigprocmask(how, &set, &oldset);
12255             if (!ret) {
12256                 host_to_target_old_sigset(&mask, &oldset);
12257                 ret = mask;
12258             }
12259         }
12260         return ret;
12261 #endif
12262 
12263 #ifdef TARGET_NR_getgid32
12264     case TARGET_NR_getgid32:
12265         return get_errno(getgid());
12266 #endif
12267 #ifdef TARGET_NR_geteuid32
12268     case TARGET_NR_geteuid32:
12269         return get_errno(geteuid());
12270 #endif
12271 #ifdef TARGET_NR_getegid32
12272     case TARGET_NR_getegid32:
12273         return get_errno(getegid());
12274 #endif
12275 #ifdef TARGET_NR_setreuid32
12276     case TARGET_NR_setreuid32:
12277         return get_errno(sys_setreuid(arg1, arg2));
12278 #endif
12279 #ifdef TARGET_NR_setregid32
12280     case TARGET_NR_setregid32:
12281         return get_errno(sys_setregid(arg1, arg2));
12282 #endif
12283 #ifdef TARGET_NR_getgroups32
12284     case TARGET_NR_getgroups32:
12285         { /* the same code as for TARGET_NR_getgroups */
12286             int gidsetsize = arg1;
12287             uint32_t *target_grouplist;
12288             g_autofree gid_t *grouplist = NULL;
12289             int i;
12290 
12291             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12292                 return -TARGET_EINVAL;
12293             }
12294             if (gidsetsize > 0) {
12295                 grouplist = g_try_new(gid_t, gidsetsize);
12296                 if (!grouplist) {
12297                     return -TARGET_ENOMEM;
12298                 }
12299             }
12300             ret = get_errno(getgroups(gidsetsize, grouplist));
12301             if (!is_error(ret) && gidsetsize > 0) {
12302                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12303                                              gidsetsize * 4, 0);
12304                 if (!target_grouplist) {
12305                     return -TARGET_EFAULT;
12306                 }
12307                 for (i = 0; i < ret; i++) {
12308                     target_grouplist[i] = tswap32(grouplist[i]);
12309                 }
12310                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12311             }
12312             return ret;
12313         }
12314 #endif
12315 #ifdef TARGET_NR_setgroups32
12316     case TARGET_NR_setgroups32:
12317         { /* the same code as for TARGET_NR_setgroups */
12318             int gidsetsize = arg1;
12319             uint32_t *target_grouplist;
12320             g_autofree gid_t *grouplist = NULL;
12321             int i;
12322 
12323             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12324                 return -TARGET_EINVAL;
12325             }
12326             if (gidsetsize > 0) {
12327                 grouplist = g_try_new(gid_t, gidsetsize);
12328                 if (!grouplist) {
12329                     return -TARGET_ENOMEM;
12330                 }
12331                 target_grouplist = lock_user(VERIFY_READ, arg2,
12332                                              gidsetsize * 4, 1);
12333                 if (!target_grouplist) {
12334                     return -TARGET_EFAULT;
12335                 }
12336                 for (i = 0; i < gidsetsize; i++) {
12337                     grouplist[i] = tswap32(target_grouplist[i]);
12338                 }
12339                 unlock_user(target_grouplist, arg2, 0);
12340             }
12341             return get_errno(sys_setgroups(gidsetsize, grouplist));
12342         }
12343 #endif
12344 #ifdef TARGET_NR_fchown32
12345     case TARGET_NR_fchown32:
12346         return get_errno(fchown(arg1, arg2, arg3));
12347 #endif
12348 #ifdef TARGET_NR_setresuid32
12349     case TARGET_NR_setresuid32:
12350         return get_errno(sys_setresuid(arg1, arg2, arg3));
12351 #endif
12352 #ifdef TARGET_NR_getresuid32
12353     case TARGET_NR_getresuid32:
12354         {
12355             uid_t ruid, euid, suid;
12356             ret = get_errno(getresuid(&ruid, &euid, &suid));
12357             if (!is_error(ret)) {
12358                 if (put_user_u32(ruid, arg1)
12359                     || put_user_u32(euid, arg2)
12360                     || put_user_u32(suid, arg3))
12361                     return -TARGET_EFAULT;
12362             }
12363         }
12364         return ret;
12365 #endif
12366 #ifdef TARGET_NR_setresgid32
12367     case TARGET_NR_setresgid32:
12368         return get_errno(sys_setresgid(arg1, arg2, arg3));
12369 #endif
12370 #ifdef TARGET_NR_getresgid32
12371     case TARGET_NR_getresgid32:
12372         {
12373             gid_t rgid, egid, sgid;
12374             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12375             if (!is_error(ret)) {
12376                 if (put_user_u32(rgid, arg1)
12377                     || put_user_u32(egid, arg2)
12378                     || put_user_u32(sgid, arg3))
12379                     return -TARGET_EFAULT;
12380             }
12381         }
12382         return ret;
12383 #endif
12384 #ifdef TARGET_NR_chown32
12385     case TARGET_NR_chown32:
12386         if (!(p = lock_user_string(arg1)))
12387             return -TARGET_EFAULT;
12388         ret = get_errno(chown(p, arg2, arg3));
12389         unlock_user(p, arg1, 0);
12390         return ret;
12391 #endif
12392 #ifdef TARGET_NR_setuid32
12393     case TARGET_NR_setuid32:
12394         return get_errno(sys_setuid(arg1));
12395 #endif
12396 #ifdef TARGET_NR_setgid32
12397     case TARGET_NR_setgid32:
12398         return get_errno(sys_setgid(arg1));
12399 #endif
12400 #ifdef TARGET_NR_setfsuid32
12401     case TARGET_NR_setfsuid32:
12402         return get_errno(setfsuid(arg1));
12403 #endif
12404 #ifdef TARGET_NR_setfsgid32
12405     case TARGET_NR_setfsgid32:
12406         return get_errno(setfsgid(arg1));
12407 #endif
12408 #ifdef TARGET_NR_mincore
12409     case TARGET_NR_mincore:
12410         {
12411             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12412             if (!a) {
12413                 return -TARGET_ENOMEM;
12414             }
12415             p = lock_user_string(arg3);
12416             if (!p) {
12417                 ret = -TARGET_EFAULT;
12418             } else {
12419                 ret = get_errno(mincore(a, arg2, p));
12420                 unlock_user(p, arg3, ret);
12421             }
12422             unlock_user(a, arg1, 0);
12423         }
12424         return ret;
12425 #endif
12426 #ifdef TARGET_NR_arm_fadvise64_64
12427     case TARGET_NR_arm_fadvise64_64:
12428         /* arm_fadvise64_64 looks like fadvise64_64 but
12429          * with different argument order: fd, advice, offset, len
12430          * rather than the usual fd, offset, len, advice.
12431          * Note that offset and len are both 64-bit so appear as
12432          * pairs of 32-bit registers.
12433          */
12434         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12435                             target_offset64(arg5, arg6), arg2);
12436         return -host_to_target_errno(ret);
12437 #endif
12438 
12439 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12440 
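    /*
     * On these 32-bit ABIs a 64-bit syscall argument arrives as a pair of
     * registers.  When regpairs_aligned() reports that the target inserts a
     * padding register to align such pairs, the useful arguments start one
     * register later, so the cases below pull each argument from the next
     * slot before recombining the halves with target_offset64().
     */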
12441 #ifdef TARGET_NR_fadvise64_64
12442     case TARGET_NR_fadvise64_64:
12443 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12444         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12445         ret = arg2;
12446         arg2 = arg3;
12447         arg3 = arg4;
12448         arg4 = arg5;
12449         arg5 = arg6;
12450         arg6 = ret;
12451 #else
12452         /* 6 args: fd, offset (high, low), len (high, low), advice */
12453         if (regpairs_aligned(cpu_env, num)) {
12454             /* offset is in (3,4), len in (5,6) and advice in 7 */
12455             arg2 = arg3;
12456             arg3 = arg4;
12457             arg4 = arg5;
12458             arg5 = arg6;
12459             arg6 = arg7;
12460         }
12461 #endif
12462         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12463                             target_offset64(arg4, arg5), arg6);
12464         return -host_to_target_errno(ret);
12465 #endif
12466 
12467 #ifdef TARGET_NR_fadvise64
12468     case TARGET_NR_fadvise64:
12469         /* 5 args: fd, offset (high, low), len, advice */
12470         if (regpairs_aligned(cpu_env, num)) {
12471             /* offset is in (3,4), len in 5 and advice in 6 */
12472             arg2 = arg3;
12473             arg3 = arg4;
12474             arg4 = arg5;
12475             arg5 = arg6;
12476         }
12477         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12478         return -host_to_target_errno(ret);
12479 #endif
12480 
12481 #else /* not a 32-bit ABI */
12482 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12483 #ifdef TARGET_NR_fadvise64_64
12484     case TARGET_NR_fadvise64_64:
12485 #endif
12486 #ifdef TARGET_NR_fadvise64
12487     case TARGET_NR_fadvise64:
12488 #endif
12489 #ifdef TARGET_S390X
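        /*
         * The s390x ABI numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7; remap
         * them to the host constants and turn the guest's 4/5 into
         * deliberately invalid advice values so the host does not
         * misinterpret them.
         */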
12490         switch (arg4) {
12491         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12492         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12493         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12494         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12495         default: break;
12496         }
12497 #endif
12498         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12499 #endif
12500 #endif /* end of 64-bit ABI fadvise handling */
12501 
12502 #ifdef TARGET_NR_madvise
12503     case TARGET_NR_madvise:
12504         return target_madvise(arg1, arg2, arg3);
12505 #endif
12506 #ifdef TARGET_NR_fcntl64
12507     case TARGET_NR_fcntl64:
12508     {
12509         int cmd;
12510         struct flock fl;
12511         from_flock64_fn *copyfrom = copy_from_user_flock64;
12512         to_flock64_fn *copyto = copy_to_user_flock64;
12513 
12514 #ifdef TARGET_ARM
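        /*
         * The ARM OABI lays out struct flock64 differently from EABI, so
         * old-ABI guests need the dedicated OABI copy helpers.
         */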
12515         if (!cpu_env->eabi) {
12516             copyfrom = copy_from_user_oabi_flock64;
12517             copyto = copy_to_user_oabi_flock64;
12518         }
12519 #endif
12520 
12521         cmd = target_to_host_fcntl_cmd(arg2);
12522         if (cmd == -TARGET_EINVAL) {
12523             return cmd;
12524         }
12525 
12526         switch (arg2) {
12527         case TARGET_F_GETLK64:
12528             ret = copyfrom(&fl, arg3);
12529             if (ret) {
12530                 break;
12531             }
12532             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12533             if (ret == 0) {
12534                 ret = copyto(arg3, &fl);
12535             }
12536             break;
12537 
12538         case TARGET_F_SETLK64:
12539         case TARGET_F_SETLKW64:
12540             ret = copyfrom(&fl, arg3);
12541             if (ret) {
12542                 break;
12543             }
12544             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12545             break;
12546         default:
12547             ret = do_fcntl(arg1, arg2, arg3);
12548             break;
12549         }
12550         return ret;
12551     }
12552 #endif
12553 #ifdef TARGET_NR_cacheflush
12554     case TARGET_NR_cacheflush:
12555         /* self-modifying code is handled automatically, so nothing needed */
12556         return 0;
12557 #endif
12558 #ifdef TARGET_NR_getpagesize
12559     case TARGET_NR_getpagesize:
12560         return TARGET_PAGE_SIZE;
12561 #endif
12562     case TARGET_NR_gettid:
12563         return get_errno(sys_gettid());
12564 #ifdef TARGET_NR_readahead
12565     case TARGET_NR_readahead:
12566 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12567         if (regpairs_aligned(cpu_env, num)) {
12568             arg2 = arg3;
12569             arg3 = arg4;
12570             arg4 = arg5;
12571         }
12572         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12573 #else
12574         ret = get_errno(readahead(arg1, arg2, arg3));
12575 #endif
12576         return ret;
12577 #endif
12578 #ifdef CONFIG_ATTR
12579 #ifdef TARGET_NR_setxattr
12580     case TARGET_NR_listxattr:
12581     case TARGET_NR_llistxattr:
12582     {
12583         void *b = 0;
12584         if (arg2) {
12585             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12586             if (!b) {
12587                 return -TARGET_EFAULT;
12588             }
12589         }
12590         p = lock_user_string(arg1);
12591         if (p) {
12592             if (num == TARGET_NR_listxattr) {
12593                 ret = get_errno(listxattr(p, b, arg3));
12594             } else {
12595                 ret = get_errno(llistxattr(p, b, arg3));
12596             }
12597         } else {
12598             ret = -TARGET_EFAULT;
12599         }
12600         unlock_user(p, arg1, 0);
12601         unlock_user(b, arg2, arg3);
12602         return ret;
12603     }
12604     case TARGET_NR_flistxattr:
12605     {
12606         void *b = 0;
12607         if (arg2) {
12608             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12609             if (!b) {
12610                 return -TARGET_EFAULT;
12611             }
12612         }
12613         ret = get_errno(flistxattr(arg1, b, arg3));
12614         unlock_user(b, arg2, arg3);
12615         return ret;
12616     }
12617     case TARGET_NR_setxattr:
12618     case TARGET_NR_lsetxattr:
12619         {
12620             void *n, *v = 0;
12621             if (arg3) {
12622                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12623                 if (!v) {
12624                     return -TARGET_EFAULT;
12625                 }
12626             }
12627             p = lock_user_string(arg1);
12628             n = lock_user_string(arg2);
12629             if (p && n) {
12630                 if (num == TARGET_NR_setxattr) {
12631                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12632                 } else {
12633                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12634                 }
12635             } else {
12636                 ret = -TARGET_EFAULT;
12637             }
12638             unlock_user(p, arg1, 0);
12639             unlock_user(n, arg2, 0);
12640             unlock_user(v, arg3, 0);
12641         }
12642         return ret;
12643     case TARGET_NR_fsetxattr:
12644         {
12645             void *n, *v = 0;
12646             if (arg3) {
12647                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12648                 if (!v) {
12649                     return -TARGET_EFAULT;
12650                 }
12651             }
12652             n = lock_user_string(arg2);
12653             if (n) {
12654                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12655             } else {
12656                 ret = -TARGET_EFAULT;
12657             }
12658             unlock_user(n, arg2, 0);
12659             unlock_user(v, arg3, 0);
12660         }
12661         return ret;
12662     case TARGET_NR_getxattr:
12663     case TARGET_NR_lgetxattr:
12664         {
12665             void *n, *v = 0;
12666             if (arg3) {
12667                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12668                 if (!v) {
12669                     return -TARGET_EFAULT;
12670                 }
12671             }
12672             p = lock_user_string(arg1);
12673             n = lock_user_string(arg2);
12674             if (p && n) {
12675                 if (num == TARGET_NR_getxattr) {
12676                     ret = get_errno(getxattr(p, n, v, arg4));
12677                 } else {
12678                     ret = get_errno(lgetxattr(p, n, v, arg4));
12679                 }
12680             } else {
12681                 ret = -TARGET_EFAULT;
12682             }
12683             unlock_user(p, arg1, 0);
12684             unlock_user(n, arg2, 0);
12685             unlock_user(v, arg3, arg4);
12686         }
12687         return ret;
12688     case TARGET_NR_fgetxattr:
12689         {
12690             void *n, *v = 0;
12691             if (arg3) {
12692                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12693                 if (!v) {
12694                     return -TARGET_EFAULT;
12695                 }
12696             }
12697             n = lock_user_string(arg2);
12698             if (n) {
12699                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12700             } else {
12701                 ret = -TARGET_EFAULT;
12702             }
12703             unlock_user(n, arg2, 0);
12704             unlock_user(v, arg3, arg4);
12705         }
12706         return ret;
12707     case TARGET_NR_removexattr:
12708     case TARGET_NR_lremovexattr:
12709         {
12710             void *n;
12711             p = lock_user_string(arg1);
12712             n = lock_user_string(arg2);
12713             if (p && n) {
12714                 if (num == TARGET_NR_removexattr) {
12715                     ret = get_errno(removexattr(p, n));
12716                 } else {
12717                     ret = get_errno(lremovexattr(p, n));
12718                 }
12719             } else {
12720                 ret = -TARGET_EFAULT;
12721             }
12722             unlock_user(p, arg1, 0);
12723             unlock_user(n, arg2, 0);
12724         }
12725         return ret;
12726     case TARGET_NR_fremovexattr:
12727         {
12728             void *n;
12729             n = lock_user_string(arg2);
12730             if (n) {
12731                 ret = get_errno(fremovexattr(arg1, n));
12732             } else {
12733                 ret = -TARGET_EFAULT;
12734             }
12735             unlock_user(n, arg2, 0);
12736         }
12737         return ret;
12738 #endif
12739 #endif /* CONFIG_ATTR */
12740 #ifdef TARGET_NR_set_thread_area
12741     case TARGET_NR_set_thread_area:
12742 #if defined(TARGET_MIPS)
12743         cpu_env->active_tc.CP0_UserLocal = arg1;
12744         return 0;
12745 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12746         return do_set_thread_area(cpu_env, arg1);
12747 #elif defined(TARGET_M68K)
12748         {
12749             TaskState *ts = get_task_state(cpu);
12750             ts->tp_value = arg1;
12751             return 0;
12752         }
12753 #else
12754         return -TARGET_ENOSYS;
12755 #endif
12756 #endif
12757 #ifdef TARGET_NR_get_thread_area
12758     case TARGET_NR_get_thread_area:
12759 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12760         return do_get_thread_area(cpu_env, arg1);
12761 #elif defined(TARGET_M68K)
12762         {
12763             TaskState *ts = get_task_state(cpu);
12764             return ts->tp_value;
12765         }
12766 #else
12767         return -TARGET_ENOSYS;
12768 #endif
12769 #endif
12770 #ifdef TARGET_NR_getdomainname
12771     case TARGET_NR_getdomainname:
12772         return -TARGET_ENOSYS;
12773 #endif
12774 
12775 #ifdef TARGET_NR_clock_settime
12776     case TARGET_NR_clock_settime:
12777     {
12778         struct timespec ts;
12779 
12780         ret = target_to_host_timespec(&ts, arg2);
12781         if (!is_error(ret)) {
12782             ret = get_errno(clock_settime(arg1, &ts));
12783         }
12784         return ret;
12785     }
12786 #endif
12787 #ifdef TARGET_NR_clock_settime64
12788     case TARGET_NR_clock_settime64:
12789     {
12790         struct timespec ts;
12791 
12792         ret = target_to_host_timespec64(&ts, arg2);
12793         if (!is_error(ret)) {
12794             ret = get_errno(clock_settime(arg1, &ts));
12795         }
12796         return ret;
12797     }
12798 #endif
12799 #ifdef TARGET_NR_clock_gettime
12800     case TARGET_NR_clock_gettime:
12801     {
12802         struct timespec ts;
12803         ret = get_errno(clock_gettime(arg1, &ts));
12804         if (!is_error(ret)) {
12805             ret = host_to_target_timespec(arg2, &ts);
12806         }
12807         return ret;
12808     }
12809 #endif
12810 #ifdef TARGET_NR_clock_gettime64
12811     case TARGET_NR_clock_gettime64:
12812     {
12813         struct timespec ts;
12814         ret = get_errno(clock_gettime(arg1, &ts));
12815         if (!is_error(ret)) {
12816             ret = host_to_target_timespec64(arg2, &ts);
12817         }
12818         return ret;
12819     }
12820 #endif
12821 #ifdef TARGET_NR_clock_getres
12822     case TARGET_NR_clock_getres:
12823     {
12824         struct timespec ts;
12825         ret = get_errno(clock_getres(arg1, &ts));
12826         if (!is_error(ret)) {
12827             host_to_target_timespec(arg2, &ts);
12828         }
12829         return ret;
12830     }
12831 #endif
12832 #ifdef TARGET_NR_clock_getres_time64
12833     case TARGET_NR_clock_getres_time64:
12834     {
12835         struct timespec ts;
12836         ret = get_errno(clock_getres(arg1, &ts));
12837         if (!is_error(ret)) {
12838             host_to_target_timespec64(arg2, &ts);
12839         }
12840         return ret;
12841     }
12842 #endif
12843 #ifdef TARGET_NR_clock_nanosleep
12844     case TARGET_NR_clock_nanosleep:
12845     {
12846         struct timespec ts;
12847         if (target_to_host_timespec(&ts, arg3)) {
12848             return -TARGET_EFAULT;
12849         }
12850         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12851                                              &ts, arg4 ? &ts : NULL));
12852         /*
12853          * If the call is interrupted by a signal handler, it fails with
12854          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12855          * the remaining unslept time is reported back in arg4.
12856          */
12857         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12858             host_to_target_timespec(arg4, &ts)) {
12859             return -TARGET_EFAULT;
12860         }
12861 
12862         return ret;
12863     }
12864 #endif
12865 #ifdef TARGET_NR_clock_nanosleep_time64
12866     case TARGET_NR_clock_nanosleep_time64:
12867     {
12868         struct timespec ts;
12869 
12870         if (target_to_host_timespec64(&ts, arg3)) {
12871             return -TARGET_EFAULT;
12872         }
12873 
12874         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12875                                              &ts, arg4 ? &ts : NULL));
12876 
12877         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12878             host_to_target_timespec64(arg4, &ts)) {
12879             return -TARGET_EFAULT;
12880         }
12881         return ret;
12882     }
12883 #endif
12884 
12885 #if defined(TARGET_NR_set_tid_address)
12886     case TARGET_NR_set_tid_address:
12887     {
12888         TaskState *ts = get_task_state(cpu);
12889         ts->child_tidptr = arg1;
12890         /* Do not call the host set_tid_address(); just return the tid. */
12891         return get_errno(sys_gettid());
12892     }
12893 #endif
12894 
12895     case TARGET_NR_tkill:
12896         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12897 
12898     case TARGET_NR_tgkill:
12899         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12900                          target_to_host_signal(arg3)));
12901 
12902 #ifdef TARGET_NR_set_robust_list
12903     case TARGET_NR_set_robust_list:
12904     case TARGET_NR_get_robust_list:
12905         /* The ABI for supporting robust futexes has userspace pass
12906          * the kernel a pointer to a linked list which is updated by
12907          * userspace after the syscall; the list is walked by the kernel
12908          * when the thread exits. Since the linked list in QEMU guest
12909          * memory isn't a valid linked list for the host and we have
12910          * no way to reliably intercept the thread-death event, we can't
12911          * support these. Silently return ENOSYS so that guest userspace
12912          * falls back to a non-robust futex implementation (which should
12913          * be OK except in the corner case of the guest crashing while
12914          * holding a mutex that is shared with another process via
12915          * shared memory).
12916          */
12917         return -TARGET_ENOSYS;
12918 #endif
12919 
12920 #if defined(TARGET_NR_utimensat)
12921     case TARGET_NR_utimensat:
12922         {
12923             struct timespec *tsp, ts[2];
12924             if (!arg3) {
12925                 tsp = NULL;
12926             } else {
12927                 if (target_to_host_timespec(ts, arg3)) {
12928                     return -TARGET_EFAULT;
12929                 }
12930                 if (target_to_host_timespec(ts + 1, arg3 +
12931                                             sizeof(struct target_timespec))) {
12932                     return -TARGET_EFAULT;
12933                 }
12934                 tsp = ts;
12935             }
12936             if (!arg2) {
12937                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12938             } else {
12939                 if (!(p = lock_user_string(arg2))) {
12940                     return -TARGET_EFAULT;
12941                 }
12942                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12943                 unlock_user(p, arg2, 0);
12944             }
12945         }
12946         return ret;
12947 #endif
12948 #ifdef TARGET_NR_utimensat_time64
12949     case TARGET_NR_utimensat_time64:
12950         {
12951             struct timespec *tsp, ts[2];
12952             if (!arg3) {
12953                 tsp = NULL;
12954             } else {
12955                 if (target_to_host_timespec64(ts, arg3)) {
12956                     return -TARGET_EFAULT;
12957                 }
12958                 if (target_to_host_timespec64(ts + 1, arg3 +
12959                                      sizeof(struct target__kernel_timespec))) {
12960                     return -TARGET_EFAULT;
12961                 }
12962                 tsp = ts;
12963             }
12964             if (!arg2) {
12965                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12966             } else {
12967                 p = lock_user_string(arg2);
12968                 if (!p) {
12969                     return -TARGET_EFAULT;
12970                 }
12971                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12972                 unlock_user(p, arg2, 0);
12973             }
12974         }
12975         return ret;
12976 #endif
12977 #ifdef TARGET_NR_futex
12978     case TARGET_NR_futex:
12979         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12980 #endif
12981 #ifdef TARGET_NR_futex_time64
12982     case TARGET_NR_futex_time64:
12983         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12984 #endif
12985 #ifdef CONFIG_INOTIFY
12986 #if defined(TARGET_NR_inotify_init)
12987     case TARGET_NR_inotify_init:
12988         ret = get_errno(inotify_init());
12989         if (ret >= 0) {
12990             fd_trans_register(ret, &target_inotify_trans);
12991         }
12992         return ret;
12993 #endif
12994 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12995     case TARGET_NR_inotify_init1:
12996         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12997                                           fcntl_flags_tbl)));
12998         if (ret >= 0) {
12999             fd_trans_register(ret, &target_inotify_trans);
13000         }
13001         return ret;
13002 #endif
13003 #if defined(TARGET_NR_inotify_add_watch)
13004     case TARGET_NR_inotify_add_watch:
13005         p = lock_user_string(arg2);
13006         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13007         unlock_user(p, arg2, 0);
13008         return ret;
13009 #endif
13010 #if defined(TARGET_NR_inotify_rm_watch)
13011     case TARGET_NR_inotify_rm_watch:
13012         return get_errno(inotify_rm_watch(arg1, arg2));
13013 #endif
13014 #endif
13015 
13016 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13017     case TARGET_NR_mq_open:
13018         {
13019             struct mq_attr posix_mq_attr;
13020             struct mq_attr *pposix_mq_attr;
13021             int host_flags;
13022 
13023             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13024             pposix_mq_attr = NULL;
13025             if (arg4) {
13026                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13027                     return -TARGET_EFAULT;
13028                 }
13029                 pposix_mq_attr = &posix_mq_attr;
13030             }
13031             p = lock_user_string(arg1 - 1);
13032             if (!p) {
13033                 return -TARGET_EFAULT;
13034             }
13035             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13036             unlock_user(p, arg1, 0);
13037         }
13038         return ret;
13039 
13040     case TARGET_NR_mq_unlink:
13041         p = lock_user_string(arg1 - 1);
13042         if (!p) {
13043             return -TARGET_EFAULT;
13044         }
13045         ret = get_errno(mq_unlink(p));
13046         unlock_user(p, arg1, 0);
13047         return ret;
13048 
13049 #ifdef TARGET_NR_mq_timedsend
13050     case TARGET_NR_mq_timedsend:
13051         {
13052             struct timespec ts;
13053 
13054             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13055             if (arg5 != 0) {
13056                 if (target_to_host_timespec(&ts, arg5)) {
13057                     return -TARGET_EFAULT;
13058                 }
13059                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13060                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13061                     return -TARGET_EFAULT;
13062                 }
13063             } else {
13064                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13065             }
13066             unlock_user(p, arg2, arg3);
13067         }
13068         return ret;
13069 #endif
13070 #ifdef TARGET_NR_mq_timedsend_time64
13071     case TARGET_NR_mq_timedsend_time64:
13072         {
13073             struct timespec ts;
13074 
13075             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13076             if (arg5 != 0) {
13077                 if (target_to_host_timespec64(&ts, arg5)) {
13078                     return -TARGET_EFAULT;
13079                 }
13080                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13081                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13082                     return -TARGET_EFAULT;
13083                 }
13084             } else {
13085                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13086             }
13087             unlock_user(p, arg2, arg3);
13088         }
13089         return ret;
13090 #endif
13091 
13092 #ifdef TARGET_NR_mq_timedreceive
13093     case TARGET_NR_mq_timedreceive:
13094         {
13095             struct timespec ts;
13096             unsigned int prio;
13097 
13098             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13099             if (arg5 != 0) {
13100                 if (target_to_host_timespec(&ts, arg5)) {
13101                     return -TARGET_EFAULT;
13102                 }
13103                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13104                                                      &prio, &ts));
13105                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13106                     return -TARGET_EFAULT;
13107                 }
13108             } else {
13109                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13110                                                      &prio, NULL));
13111             }
13112             unlock_user(p, arg2, arg3);
13113             if (arg4 != 0)
13114                 put_user_u32(prio, arg4);
13115         }
13116         return ret;
13117 #endif
13118 #ifdef TARGET_NR_mq_timedreceive_time64
13119     case TARGET_NR_mq_timedreceive_time64:
13120         {
13121             struct timespec ts;
13122             unsigned int prio;
13123 
13124             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13125             if (arg5 != 0) {
13126                 if (target_to_host_timespec64(&ts, arg5)) {
13127                     return -TARGET_EFAULT;
13128                 }
13129                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13130                                                      &prio, &ts));
13131                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13132                     return -TARGET_EFAULT;
13133                 }
13134             } else {
13135                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13136                                                      &prio, NULL));
13137             }
13138             unlock_user(p, arg2, arg3);
13139             if (arg4 != 0) {
13140                 put_user_u32(prio, arg4);
13141             }
13142         }
13143         return ret;
13144 #endif
13145 
13146     /* Not implemented for now... */
13147 /*     case TARGET_NR_mq_notify: */
13148 /*         break; */
13149 
13150     case TARGET_NR_mq_getsetattr:
13151         {
13152             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13153             ret = 0;
13154             if (arg2 != 0) {
13155                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13156                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13157                                            &posix_mq_attr_out));
13158             } else if (arg3 != 0) {
13159                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13160             }
13161             if (ret == 0 && arg3 != 0) {
13162                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13163             }
13164         }
13165         return ret;
13166 #endif
13167 
13168 #ifdef CONFIG_SPLICE
13169 #ifdef TARGET_NR_tee
13170     case TARGET_NR_tee:
13171         {
13172             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13173         }
13174         return ret;
13175 #endif
13176 #ifdef TARGET_NR_splice
13177     case TARGET_NR_splice:
13178         {
13179             loff_t loff_in, loff_out;
13180             loff_t *ploff_in = NULL, *ploff_out = NULL;
13181             if (arg2) {
13182                 if (get_user_u64(loff_in, arg2)) {
13183                     return -TARGET_EFAULT;
13184                 }
13185                 ploff_in = &loff_in;
13186             }
13187             if (arg4) {
13188                 if (get_user_u64(loff_out, arg4)) {
13189                     return -TARGET_EFAULT;
13190                 }
13191                 ploff_out = &loff_out;
13192             }
13193             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13194             if (arg2) {
13195                 if (put_user_u64(loff_in, arg2)) {
13196                     return -TARGET_EFAULT;
13197                 }
13198             }
13199             if (arg4) {
13200                 if (put_user_u64(loff_out, arg4)) {
13201                     return -TARGET_EFAULT;
13202                 }
13203             }
13204         }
13205         return ret;
13206 #endif
13207 #ifdef TARGET_NR_vmsplice
13208     case TARGET_NR_vmsplice:
13209         {
13210             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13211             if (vec != NULL) {
13212                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13213                 unlock_iovec(vec, arg2, arg3, 0);
13214             } else {
13215                 ret = -host_to_target_errno(errno);
13216             }
13217         }
13218         return ret;
13219 #endif
13220 #endif /* CONFIG_SPLICE */
13221 #ifdef CONFIG_EVENTFD
13222 #if defined(TARGET_NR_eventfd)
13223     case TARGET_NR_eventfd:
13224         ret = get_errno(eventfd(arg1, 0));
13225         if (ret >= 0) {
13226             fd_trans_register(ret, &target_eventfd_trans);
13227         }
13228         return ret;
13229 #endif
13230 #if defined(TARGET_NR_eventfd2)
13231     case TARGET_NR_eventfd2:
13232     {
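        /*
         * The O_NONBLOCK/O_CLOEXEC encodings can differ between target and
         * host, so translate those two bits explicitly; any other flag bits
         * (e.g. EFD_SEMAPHORE) are passed through unchanged.
         */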
13233         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13234         if (arg2 & TARGET_O_NONBLOCK) {
13235             host_flags |= O_NONBLOCK;
13236         }
13237         if (arg2 & TARGET_O_CLOEXEC) {
13238             host_flags |= O_CLOEXEC;
13239         }
13240         ret = get_errno(eventfd(arg1, host_flags));
13241         if (ret >= 0) {
13242             fd_trans_register(ret, &target_eventfd_trans);
13243         }
13244         return ret;
13245     }
13246 #endif
13247 #endif /* CONFIG_EVENTFD  */
13248 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13249     case TARGET_NR_fallocate:
13250 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13251         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13252                                   target_offset64(arg5, arg6)));
13253 #else
13254         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13255 #endif
13256         return ret;
13257 #endif
13258 #if defined(CONFIG_SYNC_FILE_RANGE)
13259 #if defined(TARGET_NR_sync_file_range)
13260     case TARGET_NR_sync_file_range:
13261 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13262 #if defined(TARGET_MIPS)
13263         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13264                                         target_offset64(arg5, arg6), arg7));
13265 #else
13266         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13267                                         target_offset64(arg4, arg5), arg6));
13268 #endif /* !TARGET_MIPS */
13269 #else
13270         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13271 #endif
13272         return ret;
13273 #endif
13274 #if defined(TARGET_NR_sync_file_range2) || \
13275     defined(TARGET_NR_arm_sync_file_range)
13276 #if defined(TARGET_NR_sync_file_range2)
13277     case TARGET_NR_sync_file_range2:
13278 #endif
13279 #if defined(TARGET_NR_arm_sync_file_range)
13280     case TARGET_NR_arm_sync_file_range:
13281 #endif
13282         /* This is like sync_file_range but the arguments are reordered */
13283 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13284         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13285                                         target_offset64(arg5, arg6), arg2));
13286 #else
13287         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13288 #endif
13289         return ret;
13290 #endif
13291 #endif
13292 #if defined(TARGET_NR_signalfd4)
13293     case TARGET_NR_signalfd4:
13294         return do_signalfd4(arg1, arg2, arg4);
13295 #endif
13296 #if defined(TARGET_NR_signalfd)
13297     case TARGET_NR_signalfd:
13298         return do_signalfd4(arg1, arg2, 0);
13299 #endif
13300 #if defined(CONFIG_EPOLL)
13301 #if defined(TARGET_NR_epoll_create)
13302     case TARGET_NR_epoll_create:
13303         return get_errno(epoll_create(arg1));
13304 #endif
13305 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13306     case TARGET_NR_epoll_create1:
13307         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13308 #endif
13309 #if defined(TARGET_NR_epoll_ctl)
13310     case TARGET_NR_epoll_ctl:
13311     {
13312         struct epoll_event ep;
13313         struct epoll_event *epp = 0;
13314         if (arg4) {
13315             if (arg2 != EPOLL_CTL_DEL) {
13316                 struct target_epoll_event *target_ep;
13317                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13318                     return -TARGET_EFAULT;
13319                 }
13320                 ep.events = tswap32(target_ep->events);
13321                 /*
13322                  * The epoll_data_t union is just opaque data to the kernel,
13323                  * so we transfer all 64 bits across and need not worry what
13324                  * actual data type it is.
13325                  */
13326                 ep.data.u64 = tswap64(target_ep->data.u64);
13327                 unlock_user_struct(target_ep, arg4, 0);
13328             }
13329             /*
13330              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13331              * non-null pointer, even though this argument is ignored.
13332              */
13334             epp = &ep;
13335         }
13336         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13337     }
13338 #endif
13339 
13340 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13341 #if defined(TARGET_NR_epoll_wait)
13342     case TARGET_NR_epoll_wait:
13343 #endif
13344 #if defined(TARGET_NR_epoll_pwait)
13345     case TARGET_NR_epoll_pwait:
13346 #endif
13347     {
13348         struct target_epoll_event *target_ep;
13349         struct epoll_event *ep;
13350         int epfd = arg1;
13351         int maxevents = arg3;
13352         int timeout = arg4;
13353 
13354         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13355             return -TARGET_EINVAL;
13356         }
13357 
13358         target_ep = lock_user(VERIFY_WRITE, arg2,
13359                               maxevents * sizeof(struct target_epoll_event), 1);
13360         if (!target_ep) {
13361             return -TARGET_EFAULT;
13362         }
13363 
13364         ep = g_try_new(struct epoll_event, maxevents);
13365         if (!ep) {
13366             unlock_user(target_ep, arg2, 0);
13367             return -TARGET_ENOMEM;
13368         }
13369 
13370         switch (num) {
13371 #if defined(TARGET_NR_epoll_pwait)
13372         case TARGET_NR_epoll_pwait:
13373         {
13374             sigset_t *set = NULL;
13375 
13376             if (arg5) {
13377                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13378                 if (ret != 0) {
13379                     break;
13380                 }
13381             }
13382 
13383             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13384                                              set, SIGSET_T_SIZE));
13385 
13386             if (set) {
13387                 finish_sigsuspend_mask(ret);
13388             }
13389             break;
13390         }
13391 #endif
13392 #if defined(TARGET_NR_epoll_wait)
13393         case TARGET_NR_epoll_wait:
13394             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13395                                              NULL, 0));
13396             break;
13397 #endif
13398         default:
13399             ret = -TARGET_ENOSYS;
13400         }
13401         if (!is_error(ret)) {
13402             int i;
13403             for (i = 0; i < ret; i++) {
13404                 target_ep[i].events = tswap32(ep[i].events);
13405                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13406             }
13407             unlock_user(target_ep, arg2,
13408                         ret * sizeof(struct target_epoll_event));
13409         } else {
13410             unlock_user(target_ep, arg2, 0);
13411         }
13412         g_free(ep);
13413         return ret;
13414     }
13415 #endif
13416 #endif
13417 #ifdef TARGET_NR_prlimit64
13418     case TARGET_NR_prlimit64:
13419     {
13420         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13421         struct target_rlimit64 *target_rnew, *target_rold;
13422         struct host_rlimit64 rnew, rold, *rnewp = 0;
13423         int resource = target_to_host_resource(arg2);
13424 
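        /*
         * New limits for RLIMIT_AS/DATA/STACK are deliberately not passed on
         * to the host: they would constrain QEMU's own allocations rather
         * than just the guest's.
         */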
13425         if (arg3 && (resource != RLIMIT_AS &&
13426                      resource != RLIMIT_DATA &&
13427                      resource != RLIMIT_STACK)) {
13428             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13429                 return -TARGET_EFAULT;
13430             }
13431             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13432             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13433             unlock_user_struct(target_rnew, arg3, 0);
13434             rnewp = &rnew;
13435         }
13436 
13437         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13438         if (!is_error(ret) && arg4) {
13439             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13440                 return -TARGET_EFAULT;
13441             }
13442             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13443             __put_user(rold.rlim_max, &target_rold->rlim_max);
13444             unlock_user_struct(target_rold, arg4, 1);
13445         }
13446         return ret;
13447     }
13448 #endif
13449 #ifdef TARGET_NR_gethostname
13450     case TARGET_NR_gethostname:
13451     {
13452         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13453         if (name) {
13454             ret = get_errno(gethostname(name, arg2));
13455             unlock_user(name, arg1, arg2);
13456         } else {
13457             ret = -TARGET_EFAULT;
13458         }
13459         return ret;
13460     }
13461 #endif
13462 #ifdef TARGET_NR_atomic_cmpxchg_32
13463     case TARGET_NR_atomic_cmpxchg_32:
13464     {
13465         /* should use start_exclusive from main.c */
13466         abi_ulong mem_value;
13467         if (get_user_u32(mem_value, arg6)) {
13468             target_siginfo_t info;
13469             info.si_signo = SIGSEGV;
13470             info.si_errno = 0;
13471             info.si_code = TARGET_SEGV_MAPERR;
13472             info._sifields._sigfault._addr = arg6;
13473             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13474             return 0xdeadbeef;
13476         }
13477         if (mem_value == arg2)
13478             put_user_u32(arg1, arg6);
13479         return mem_value;
13480     }
13481 #endif
13482 #ifdef TARGET_NR_atomic_barrier
13483     case TARGET_NR_atomic_barrier:
13484         /* Like the kernel implementation and the
13485            qemu arm barrier, no-op this? */
13486         return 0;
13487 #endif
13488 
13489 #ifdef TARGET_NR_timer_create
13490     case TARGET_NR_timer_create:
13491     {
13492         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
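        /*
         * Host timers live in the g_posix_timers[] slot array; the guest is
         * handed TIMER_MAGIC | slot_index as its timer_t, which
         * get_timer_id() validates and decodes for the other timer_*
         * syscalls.
         */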
13493 
13494         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13495 
13496         int clkid = arg1;
13497         int timer_index = next_free_host_timer();
13498 
13499         if (timer_index < 0) {
13500             ret = -TARGET_EAGAIN;
13501         } else {
13502             timer_t *phtimer = g_posix_timers + timer_index;
13503 
13504             if (arg2) {
13505                 phost_sevp = &host_sevp;
13506                 ret = target_to_host_sigevent(phost_sevp, arg2);
13507                 if (ret != 0) {
13508                     free_host_timer_slot(timer_index);
13509                     return ret;
13510                 }
13511             }
13512 
13513             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13514             if (ret) {
13515                 free_host_timer_slot(timer_index);
13516             } else {
13517                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13518                     timer_delete(*phtimer);
13519                     free_host_timer_slot(timer_index);
13520                     return -TARGET_EFAULT;
13521                 }
13522             }
13523         }
13524         return ret;
13525     }
13526 #endif
13527 
13528 #ifdef TARGET_NR_timer_settime
13529     case TARGET_NR_timer_settime:
13530     {
13531         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13532          * struct itimerspec * old_value */
13533         target_timer_t timerid = get_timer_id(arg1);
13534 
13535         if (timerid < 0) {
13536             ret = timerid;
13537         } else if (arg3 == 0) {
13538             ret = -TARGET_EINVAL;
13539         } else {
13540             timer_t htimer = g_posix_timers[timerid];
13541             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13542 
13543             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13544                 return -TARGET_EFAULT;
13545             }
13546             ret = get_errno(
13547                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13548             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13549                 return -TARGET_EFAULT;
13550             }
13551         }
13552         return ret;
13553     }
13554 #endif
13555 
13556 #ifdef TARGET_NR_timer_settime64
13557     case TARGET_NR_timer_settime64:
13558     {
13559         target_timer_t timerid = get_timer_id(arg1);
13560 
13561         if (timerid < 0) {
13562             ret = timerid;
13563         } else if (arg3 == 0) {
13564             ret = -TARGET_EINVAL;
13565         } else {
13566             timer_t htimer = g_posix_timers[timerid];
13567             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13568 
13569             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13570                 return -TARGET_EFAULT;
13571             }
13572             ret = get_errno(
13573                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13574             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13575                 return -TARGET_EFAULT;
13576             }
13577         }
13578         return ret;
13579     }
13580 #endif
13581 
13582 #ifdef TARGET_NR_timer_gettime
13583     case TARGET_NR_timer_gettime:
13584     {
13585         /* args: timer_t timerid, struct itimerspec *curr_value */
13586         target_timer_t timerid = get_timer_id(arg1);
13587 
13588         if (timerid < 0) {
13589             ret = timerid;
13590         } else if (!arg2) {
13591             ret = -TARGET_EFAULT;
13592         } else {
13593             timer_t htimer = g_posix_timers[timerid];
13594             struct itimerspec hspec;
13595             ret = get_errno(timer_gettime(htimer, &hspec));
13596 
13597             if (host_to_target_itimerspec(arg2, &hspec)) {
13598                 ret = -TARGET_EFAULT;
13599             }
13600         }
13601         return ret;
13602     }
13603 #endif
13604 
13605 #ifdef TARGET_NR_timer_gettime64
13606     case TARGET_NR_timer_gettime64:
13607     {
13608         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13609         target_timer_t timerid = get_timer_id(arg1);
13610 
13611         if (timerid < 0) {
13612             ret = timerid;
13613         } else if (!arg2) {
13614             ret = -TARGET_EFAULT;
13615         } else {
13616             timer_t htimer = g_posix_timers[timerid];
13617             struct itimerspec hspec;
13618             ret = get_errno(timer_gettime(htimer, &hspec));
13619 
13620             if (host_to_target_itimerspec64(arg2, &hspec)) {
13621                 ret = -TARGET_EFAULT;
13622             }
13623         }
13624         return ret;
13625     }
13626 #endif
13627 
13628 #ifdef TARGET_NR_timer_getoverrun
13629     case TARGET_NR_timer_getoverrun:
13630     {
13631         /* args: timer_t timerid */
13632         target_timer_t timerid = get_timer_id(arg1);
13633 
13634         if (timerid < 0) {
13635             ret = timerid;
13636         } else {
13637             timer_t htimer = g_posix_timers[timerid];
13638             ret = get_errno(timer_getoverrun(htimer));
13639         }
13640         return ret;
13641     }
13642 #endif
13643 
13644 #ifdef TARGET_NR_timer_delete
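    /*
     * Deletes the host timer backing the guest handle and releases its
     * slot in g_posix_timers for reuse.
     */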
13645     case TARGET_NR_timer_delete:
13646     {
13647         /* args: timer_t timerid */
13648         target_timer_t timerid = get_timer_id(arg1);
13649 
13650         if (timerid < 0) {
13651             ret = timerid;
13652         } else {
13653             timer_t htimer = g_posix_timers[timerid];
13654             ret = get_errno(timer_delete(htimer));
13655             free_host_timer_slot(timerid);
13656         }
13657         return ret;
13658     }
13659 #endif
13660 
13661 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
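    /*
     * On success the new fd gets a read translator registered so that the
     * 8-byte expiration count returned by read(2) is converted to the
     * guest's byte order.
     */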
13662     case TARGET_NR_timerfd_create:
13663         ret = get_errno(timerfd_create(arg1,
13664                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13665         if (ret >= 0) {
13666             fd_trans_register(ret, &target_timerfd_trans);
13667         }
13668         return ret;
13669 #endif
13670 
13671 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13672     case TARGET_NR_timerfd_gettime:
13673         {
13674             struct itimerspec its_curr;
13675 
13676             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13677 
13678             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13679                 return -TARGET_EFAULT;
13680             }
13681         }
13682         return ret;
13683 #endif
13684 
13685 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
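    /*
     * The *64 timerfd variants differ from the ones above only in the
     * guest-side itimerspec layout; the host call is identical.
     */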
13686     case TARGET_NR_timerfd_gettime64:
13687         {
13688             struct itimerspec its_curr;
13689 
13690             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13691 
13692             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13693                 return -TARGET_EFAULT;
13694             }
13695         }
13696         return ret;
13697 #endif
13698 
13699 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
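    /*
     * A NULL new_value from the guest is forwarded as-is, so the host
     * kernel's error for that case is returned unchanged.
     */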
13700     case TARGET_NR_timerfd_settime:
13701         {
13702             struct itimerspec its_new, its_old, *p_new;
13703 
13704             if (arg3) {
13705                 if (target_to_host_itimerspec(&its_new, arg3)) {
13706                     return -TARGET_EFAULT;
13707                 }
13708                 p_new = &its_new;
13709             } else {
13710                 p_new = NULL;
13711             }
13712 
13713             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13714 
13715             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13716                 return -TARGET_EFAULT;
13717             }
13718         }
13719         return ret;
13720 #endif
13721 
13722 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13723     case TARGET_NR_timerfd_settime64:
13724         {
13725             struct itimerspec its_new, its_old, *p_new;
13726 
13727             if (arg3) {
13728                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13729                     return -TARGET_EFAULT;
13730                 }
13731                 p_new = &its_new;
13732             } else {
13733                 p_new = NULL;
13734             }
13735 
13736             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13737 
13738             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13739                 return -TARGET_EFAULT;
13740             }
13741         }
13742         return ret;
13743 #endif
13744 
13745 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13746     case TARGET_NR_ioprio_get:
13747         return get_errno(ioprio_get(arg1, arg2));
13748 #endif
13749 
13750 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13751     case TARGET_NR_ioprio_set:
13752         return get_errno(ioprio_set(arg1, arg2, arg3));
13753 #endif
13754 
13755 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13756     case TARGET_NR_setns:
13757         return get_errno(setns(arg1, arg2));
13758 #endif
13759 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13760     case TARGET_NR_unshare:
13761         return get_errno(unshare(arg1));
13762 #endif
13763 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13764     case TARGET_NR_kcmp:
13765         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13766 #endif
13767 #ifdef TARGET_NR_swapcontext
13768     case TARGET_NR_swapcontext:
13769         /* PowerPC specific.  */
13770         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13771 #endif
13772 #ifdef TARGET_NR_memfd_create
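    /*
     * The guest-supplied name is locked into host memory for the call;
     * any stale fd translator left behind by a previously closed
     * descriptor with the same number is dropped.
     */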
13773     case TARGET_NR_memfd_create:
13774         p = lock_user_string(arg1);
13775         if (!p) {
13776             return -TARGET_EFAULT;
13777         }
13778         ret = get_errno(memfd_create(p, arg2));
13779         fd_trans_unregister(ret);
13780         unlock_user(p, arg1, 0);
13781         return ret;
13782 #endif
13783 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13784     case TARGET_NR_membarrier:
13785         return get_errno(membarrier(arg1, arg2));
13786 #endif
13787 
13788 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
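    /*
     * The optional input/output offsets live in guest memory: they are read
     * up front, the call is issued through the safe_copy_file_range()
     * wrapper, and the advanced offsets are written back only if some
     * bytes were copied.
     */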
13789     case TARGET_NR_copy_file_range:
13790         {
13791             loff_t inoff, outoff;
13792             loff_t *pinoff = NULL, *poutoff = NULL;
13793 
13794             if (arg2) {
13795                 if (get_user_u64(inoff, arg2)) {
13796                     return -TARGET_EFAULT;
13797                 }
13798                 pinoff = &inoff;
13799             }
13800             if (arg4) {
13801                 if (get_user_u64(outoff, arg4)) {
13802                     return -TARGET_EFAULT;
13803                 }
13804                 poutoff = &outoff;
13805             }
13806             /* Do not sign-extend the count parameter. */
13807             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13808                                                  (abi_ulong)arg5, arg6));
13809             if (!is_error(ret) && ret > 0) {
13810                 if (arg2) {
13811                     if (put_user_u64(inoff, arg2)) {
13812                         return -TARGET_EFAULT;
13813                     }
13814                 }
13815                 if (arg4) {
13816                     if (put_user_u64(outoff, arg4)) {
13817                         return -TARGET_EFAULT;
13818                     }
13819                 }
13820             }
13821         }
13822         return ret;
13823 #endif
13824 
13825 #if defined(TARGET_NR_pivot_root)
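    /*
     * Both path arguments are guest strings; if either cannot be locked
     * into host memory the call fails with -TARGET_EFAULT.
     */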
13826     case TARGET_NR_pivot_root:
13827         {
13828             void *p2;
13829             p = lock_user_string(arg1); /* new_root */
13830             p2 = lock_user_string(arg2); /* put_old */
13831             if (!p || !p2) {
13832                 ret = -TARGET_EFAULT;
13833             } else {
13834                 ret = get_errno(pivot_root(p, p2));
13835             }
13836             unlock_user(p2, arg2, 0);
13837             unlock_user(p, arg1, 0);
13838         }
13839         return ret;
13840 #endif
13841 
13842 #if defined(TARGET_NR_riscv_hwprobe)
13843     case TARGET_NR_riscv_hwprobe:
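        /* RISC-V specific.  */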
13844         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13845 #endif
13846 
13847     default:
13848         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13849         return -TARGET_ENOSYS;
13850     }
13851     return ret;
13852 }
13853 
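/*
 * Top-level guest syscall entry point: notifies plugin instrumentation,
 * optionally logs the call for -strace, dispatches to do_syscall1() and
 * records/logs the return value.
 */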
13854 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13855                     abi_long arg2, abi_long arg3, abi_long arg4,
13856                     abi_long arg5, abi_long arg6, abi_long arg7,
13857                     abi_long arg8)
13858 {
13859     CPUState *cpu = env_cpu(cpu_env);
13860     abi_long ret;
13861 
13862 #ifdef DEBUG_ERESTARTSYS
13863     /* Debug-only code for exercising the syscall-restart code paths
13864      * in the per-architecture cpu main loops: restart every syscall
13865      * the guest makes once before letting it through.
13866      */
13867     {
13868         static bool flag;
13869         flag = !flag;
13870         if (flag) {
13871             return -QEMU_ERESTARTSYS;
13872         }
13873     }
13874 #endif
13875 
13876     record_syscall_start(cpu, num, arg1,
13877                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13878 
13879     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13880         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13881     }
13882 
13883     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13884                       arg5, arg6, arg7, arg8);
13885 
13886     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13887         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13888                           arg3, arg4, arg5, arg6);
13889     }
13890 
13891     record_syscall_return(cpu, num, ret);
13892     return ret;
13893 }
13894