xref: /qemu/linux-user/syscall.c (revision 70ce076fa6dff60585c229a4b641b13e64bf03cf)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/translation-block.h"
30 #include <elf.h>
31 #include <endian.h>
32 #include <grp.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/mount.h>
37 #include <sys/file.h>
38 #include <sys/fsuid.h>
39 #include <sys/personality.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
42 #include <sys/swap.h>
43 #include <linux/capability.h>
44 #include <sched.h>
45 #include <sys/timex.h>
46 #include <sys/socket.h>
47 #include <linux/sockios.h>
48 #include <sys/un.h>
49 #include <sys/uio.h>
50 #include <poll.h>
51 #include <sys/times.h>
52 #include <sys/shm.h>
53 #include <sys/sem.h>
54 #include <sys/statfs.h>
55 #include <utime.h>
56 #include <sys/sysinfo.h>
57 #include <sys/signalfd.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
61 #include <netinet/udp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include <linux/icmpv6.h>
65 #include <linux/if_tun.h>
66 #include <linux/in6.h>
67 #include <linux/errqueue.h>
68 #include <linux/random.h>
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84 #ifdef HAVE_SYS_KCOV_H
85 #include <sys/kcov.h>
86 #endif
87 
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
94 
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #include <linux/fd.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #if defined(CONFIG_USBFS)
109 #include <linux/usbdevice_fs.h>
110 #include <linux/usb/ch9.h>
111 #endif
112 #include <linux/vt.h>
113 #include <linux/dm-ioctl.h>
114 #include <linux/reboot.h>
115 #include <linux/route.h>
116 #include <linux/filter.h>
117 #include <linux/blkpg.h>
118 #include <netpacket/packet.h>
119 #include <linux/netlink.h>
120 #include <linux/if_alg.h>
121 #include <linux/rtc.h>
122 #include <sound/asound.h>
123 #ifdef HAVE_BTRFS_H
124 #include <linux/btrfs.h>
125 #endif
126 #ifdef HAVE_DRM_H
127 #include <libdrm/drm.h>
128 #include <libdrm/i915_drm.h>
129 #endif
130 #include "linux_loop.h"
131 #include "uname.h"
132 
133 #include "qemu.h"
134 #include "user-internals.h"
135 #include "strace.h"
136 #include "signal-common.h"
137 #include "loader.h"
138 #include "user-mmap.h"
139 #include "user/page-protection.h"
140 #include "user/safe-syscall.h"
141 #include "user/signal.h"
142 #include "qemu/guest-random.h"
143 #include "qemu/selfmap.h"
144 #include "user/syscall-trace.h"
145 #include "special-errno.h"
146 #include "qapi/error.h"
147 #include "fd-trans.h"
148 #include "user/cpu_loop.h"
149 
150 #ifndef CLONE_IO
151 #define CLONE_IO                0x80000000      /* Clone io context */
152 #endif
153 
154 /* We can't directly call the host clone syscall, because this will
155  * badly confuse libc (breaking mutexes, for example). So we must
156  * divide clone flags into:
157  *  * flag combinations that look like pthread_create()
158  *  * flag combinations that look like fork()
159  *  * flags we can implement within QEMU itself
160  *  * flags we can't support and will return an error for
161  */
162 /* For thread creation, all these flags must be present; for
163  * fork, none must be present.
164  */
165 #define CLONE_THREAD_FLAGS                              \
166     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
167      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
168 
169 /* These flags are ignored:
170  * CLONE_DETACHED is now ignored by the kernel;
171  * CLONE_IO is just an optimisation hint to the I/O scheduler
172  */
173 #define CLONE_IGNORED_FLAGS                     \
174     (CLONE_DETACHED | CLONE_IO)
175 
176 #ifndef CLONE_PIDFD
177 # define CLONE_PIDFD 0x00001000
178 #endif
179 
180 /* Flags for fork which we can implement within QEMU itself */
181 #define CLONE_OPTIONAL_FORK_FLAGS               \
182     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
183      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
184 
185 /* Flags for thread creation which we can implement within QEMU itself */
186 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
187     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
188      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
189 
190 #define CLONE_INVALID_FORK_FLAGS                                        \
191     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
192 
193 #define CLONE_INVALID_THREAD_FLAGS                                      \
194     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
195        CLONE_IGNORED_FLAGS))
196 
197 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
198  * have almost all been allocated. We cannot support any of
199  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
200  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
201  * The checks against the invalid thread masks above will catch these.
202  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
203  */
204 
205 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
206  * once. This exercises the codepaths for restart.
207  */
208 //#define DEBUG_ERESTARTSYS
209 
210 //#include <linux/msdos_fs.h>
211 #define VFAT_IOCTL_READDIR_BOTH \
212     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
213 #define VFAT_IOCTL_READDIR_SHORT \
214     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
215 
216 #undef _syscall0
217 #undef _syscall1
218 #undef _syscall2
219 #undef _syscall3
220 #undef _syscall4
221 #undef _syscall5
222 #undef _syscall6
223 
224 #define _syscall0(type,name)		\
225 static type name (void)			\
226 {					\
227 	return syscall(__NR_##name);	\
228 }
229 
230 #define _syscall1(type,name,type1,arg1)		\
231 static type name (type1 arg1)			\
232 {						\
233 	return syscall(__NR_##name, arg1);	\
234 }
235 
236 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
237 static type name (type1 arg1,type2 arg2)		\
238 {							\
239 	return syscall(__NR_##name, arg1, arg2);	\
240 }
241 
242 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
243 static type name (type1 arg1,type2 arg2,type3 arg3)		\
244 {								\
245 	return syscall(__NR_##name, arg1, arg2, arg3);		\
246 }
247 
248 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
249 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
250 {										\
251 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
252 }
253 
254 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
255 		  type5,arg5)							\
256 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
259 }
260 
261 
262 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
263 		  type5,arg5,type6,arg6)					\
264 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
265                   type6 arg6)							\
266 {										\
267 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
268 }
269 
270 
271 #define __NR_sys_uname __NR_uname
272 #define __NR_sys_getcwd1 __NR_getcwd
273 #define __NR_sys_getdents __NR_getdents
274 #define __NR_sys_getdents64 __NR_getdents64
275 #define __NR_sys_getpriority __NR_getpriority
276 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
277 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
278 #define __NR_sys_syslog __NR_syslog
279 #if defined(__NR_futex)
280 # define __NR_sys_futex __NR_futex
281 #endif
282 #if defined(__NR_futex_time64)
283 # define __NR_sys_futex_time64 __NR_futex_time64
284 #endif
285 #define __NR_sys_statx __NR_statx
286 
287 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
288 #define __NR__llseek __NR_lseek
289 #endif
290 
291 /* Newer kernel ports have llseek() instead of _llseek() */
292 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
293 #define TARGET_NR__llseek TARGET_NR_llseek
294 #endif
295 
296 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
297 #ifndef TARGET_O_NONBLOCK_MASK
298 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
299 #endif
300 
301 #define __NR_sys_gettid __NR_gettid
302 _syscall0(int, sys_gettid)
303 
304 /* For the 64-bit guest on 32-bit host case we must emulate
305  * getdents using getdents64, because otherwise the host
306  * might hand us back more dirent records than we can fit
307  * into the guest buffer after structure format conversion.
308  * Otherwise we emulate getdents with getdents if the host has it.
309  */
310 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
311 #define EMULATE_GETDENTS_WITH_GETDENTS
312 #endif
313 
314 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
315 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
316 #endif
317 #if (defined(TARGET_NR_getdents) && \
318       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
319     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
320 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
321 #endif
322 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
323 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
324           loff_t *, res, unsigned int, wh);
325 #endif
326 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
327 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
328           siginfo_t *, uinfo)
329 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
330 #ifdef __NR_exit_group
331 _syscall1(int,exit_group,int,error_code)
332 #endif
333 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
334 #define __NR_sys_close_range __NR_close_range
335 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
336 #ifndef CLOSE_RANGE_CLOEXEC
337 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
338 #endif
339 #endif
340 #if defined(__NR_futex)
341 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
342           const struct timespec *,timeout,int *,uaddr2,int,val3)
343 #endif
344 #if defined(__NR_futex_time64)
345 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
346           const struct timespec *,timeout,int *,uaddr2,int,val3)
347 #endif
348 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
349 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
350 #endif
351 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
352 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
353                              unsigned int, flags);
354 #endif
355 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
356 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
357 #endif
358 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
359 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
362 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
363           unsigned long *, user_mask_ptr);
364 /* sched_attr is not defined in glibc < 2.41 */
365 #ifndef SCHED_ATTR_SIZE_VER0
366 struct sched_attr {
367     uint32_t size;
368     uint32_t sched_policy;
369     uint64_t sched_flags;
370     int32_t sched_nice;
371     uint32_t sched_priority;
372     uint64_t sched_runtime;
373     uint64_t sched_deadline;
374     uint64_t sched_period;
375     uint32_t sched_util_min;
376     uint32_t sched_util_max;
377 };
378 #endif
379 #define __NR_sys_sched_getattr __NR_sched_getattr
380 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
381           unsigned int, size, unsigned int, flags);
382 #define __NR_sys_sched_setattr __NR_sched_setattr
383 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
384           unsigned int, flags);
385 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
386 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
387 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
388 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
389           const struct sched_param *, param);
390 #define __NR_sys_sched_getparam __NR_sched_getparam
391 _syscall2(int, sys_sched_getparam, pid_t, pid,
392           struct sched_param *, param);
393 #define __NR_sys_sched_setparam __NR_sched_setparam
394 _syscall2(int, sys_sched_setparam, pid_t, pid,
395           const struct sched_param *, param);
396 #define __NR_sys_getcpu __NR_getcpu
397 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
398 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
399           void *, arg);
400 _syscall2(int, capget, struct __user_cap_header_struct *, header,
401           struct __user_cap_data_struct *, data);
402 _syscall2(int, capset, struct __user_cap_header_struct *, header,
403           struct __user_cap_data_struct *, data);
404 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
405 _syscall2(int, ioprio_get, int, which, int, who)
406 #endif
407 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
408 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
409 #endif
410 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
411 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
412 #endif
413 
414 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
415 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
416           unsigned long, idx1, unsigned long, idx2)
417 #endif
418 
419 /*
420  * It is assumed that struct statx is architecture independent.
421  */
422 #if defined(TARGET_NR_statx) && defined(__NR_statx)
423 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
424           unsigned int, mask, struct target_statx *, statxbuf)
425 #endif
426 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
427 _syscall2(int, membarrier, int, cmd, int, flags)
428 #endif
429 
430 static const bitmask_transtbl fcntl_flags_tbl[] = {
431   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
432   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
433   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
434   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
435   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
436   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
437   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
438   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
439   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
440   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
441   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
442   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
443   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
444 #if defined(O_DIRECT)
445   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
446 #endif
447 #if defined(O_NOATIME)
448   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
449 #endif
450 #if defined(O_CLOEXEC)
451   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
452 #endif
453 #if defined(O_PATH)
454   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
455 #endif
456 #if defined(O_TMPFILE)
457   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
458 #endif
459   /* Don't terminate the list prematurely on 64-bit host+guest.  */
460 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
461   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
462 #endif
463 };
464 
465 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
466 
467 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
468 #if defined(__NR_utimensat)
469 #define __NR_sys_utimensat __NR_utimensat
470 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
471           const struct timespec *,tsp,int,flags)
472 #else
473 static int sys_utimensat(int dirfd, const char *pathname,
474                          const struct timespec times[2], int flags)
475 {
476     errno = ENOSYS;
477     return -1;
478 }
479 #endif
480 #endif /* TARGET_NR_utimensat */
481 
482 #ifdef TARGET_NR_renameat2
483 #if defined(__NR_renameat2)
484 #define __NR_sys_renameat2 __NR_renameat2
485 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
486           const char *, new, unsigned int, flags)
487 #else
488 static int sys_renameat2(int oldfd, const char *old,
489                          int newfd, const char *new, int flags)
490 {
491     if (flags == 0) {
492         return renameat(oldfd, old, newfd, new);
493     }
494     errno = ENOSYS;
495     return -1;
496 }
497 #endif
498 #endif /* TARGET_NR_renameat2 */
499 
500 #ifdef CONFIG_INOTIFY
501 #include <sys/inotify.h>
502 #else
503 /* Userspace can usually survive runtime without inotify */
504 #undef TARGET_NR_inotify_init
505 #undef TARGET_NR_inotify_init1
506 #undef TARGET_NR_inotify_add_watch
507 #undef TARGET_NR_inotify_rm_watch
508 #endif /* CONFIG_INOTIFY  */
509 
510 #if defined(TARGET_NR_prlimit64)
511 #ifndef __NR_prlimit64
512 # define __NR_prlimit64 -1
513 #endif
514 #define __NR_sys_prlimit64 __NR_prlimit64
515 /* The glibc rlimit structure may not be that used by the underlying syscall */
516 struct host_rlimit64 {
517     uint64_t rlim_cur;
518     uint64_t rlim_max;
519 };
520 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
521           const struct host_rlimit64 *, new_limit,
522           struct host_rlimit64 *, old_limit)
523 #endif
524 
525 
526 #if defined(TARGET_NR_timer_create)
527 /* Maximum of 32 active POSIX timers allowed at any one time. */
528 #define GUEST_TIMER_MAX 32
529 static timer_t g_posix_timers[GUEST_TIMER_MAX];
530 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
531 
532 static inline int next_free_host_timer(void)
533 {
534     int k;
535     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
536         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
537             return k;
538         }
539     }
540     return -1;
541 }
542 
543 static inline void free_host_timer_slot(int id)
544 {
545     qatomic_store_release(g_posix_timer_allocated + id, 0);
546 }
547 #endif
548 
549 static inline int host_to_target_errno(int host_errno)
550 {
551     switch (host_errno) {
552 #define E(X)  case X: return TARGET_##X;
553 #include "errnos.c.inc"
554 #undef E
555     default:
556         return host_errno;
557     }
558 }
559 
560 static inline int target_to_host_errno(int target_errno)
561 {
562     switch (target_errno) {
563 #define E(X)  case TARGET_##X: return X;
564 #include "errnos.c.inc"
565 #undef E
566     default:
567         return target_errno;
568     }
569 }
570 
571 abi_long get_errno(abi_long ret)
572 {
573     if (ret == -1)
574         return -host_to_target_errno(errno);
575     else
576         return ret;
577 }
578 
579 const char *target_strerror(int err)
580 {
581     if (err == QEMU_ERESTARTSYS) {
582         return "To be restarted";
583     }
584     if (err == QEMU_ESIGRETURN) {
585         return "Successful exit from sigreturn";
586     }
587 
588     return strerror(target_to_host_errno(err));
589 }
590 
591 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
592 {
593     int i;
594     uint8_t b;
595     if (usize <= ksize) {
596         return 1;
597     }
598     for (i = ksize; i < usize; i++) {
599         if (get_user_u8(b, addr + i)) {
600             return -TARGET_EFAULT;
601         }
602         if (b != 0) {
603             return 0;
604         }
605     }
606     return 1;
607 }
608 
609 /*
610  * Copies a target struct to a host struct, in a way that guarantees
611  * backwards-compatibility for struct syscall arguments.
612  *
613  * Similar to kernels uaccess.h:copy_struct_from_user()
614  */
615 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
616 {
617     size_t size = MIN(ksize, usize);
618     size_t rest = MAX(ksize, usize) - size;
619 
620     /* Deal with trailing bytes. */
621     if (usize < ksize) {
622         memset(dst + size, 0, rest);
623     } else if (usize > ksize) {
624         int ret = check_zeroed_user(src, ksize, usize);
625         if (ret <= 0) {
626             return ret ?: -TARGET_E2BIG;
627         }
628     }
629     /* Copy the interoperable parts of the struct. */
630     if (copy_from_user(dst, src, size)) {
631         return -TARGET_EFAULT;
632     }
633     return 0;
634 }
635 
636 #define safe_syscall0(type, name) \
637 static type safe_##name(void) \
638 { \
639     return safe_syscall(__NR_##name); \
640 }
641 
642 #define safe_syscall1(type, name, type1, arg1) \
643 static type safe_##name(type1 arg1) \
644 { \
645     return safe_syscall(__NR_##name, arg1); \
646 }
647 
648 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
649 static type safe_##name(type1 arg1, type2 arg2) \
650 { \
651     return safe_syscall(__NR_##name, arg1, arg2); \
652 }
653 
654 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
655 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
656 { \
657     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
658 }
659 
660 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
661     type4, arg4) \
662 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
663 { \
664     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
665 }
666 
667 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
668     type4, arg4, type5, arg5) \
669 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
670     type5 arg5) \
671 { \
672     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
673 }
674 
675 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
676     type4, arg4, type5, arg5, type6, arg6) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678     type5 arg5, type6 arg6) \
679 { \
680     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
681 }
682 
683 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
684 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
685 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
686               int, flags, mode_t, mode)
687 
688 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
689               const struct open_how_ver0 *, how, size_t, size)
690 
691 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
692 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
693               struct rusage *, rusage)
694 #endif
695 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
696               int, options, struct rusage *, rusage)
697 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
698 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
699               char **, argv, char **, envp, int, flags)
700 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
701     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
702 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
703               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
704 #endif
705 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
706 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
707               struct timespec *, tsp, const sigset_t *, sigmask,
708               size_t, sigsetsize)
709 #endif
710 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
711               int, maxevents, int, timeout, const sigset_t *, sigmask,
712               size_t, sigsetsize)
713 #if defined(__NR_futex)
714 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
715               const struct timespec *,timeout,int *,uaddr2,int,val3)
716 #endif
717 #if defined(__NR_futex_time64)
718 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
719               const struct timespec *,timeout,int *,uaddr2,int,val3)
720 #endif
721 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
722 safe_syscall2(int, kill, pid_t, pid, int, sig)
723 safe_syscall2(int, tkill, int, tid, int, sig)
724 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
725 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
726 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
727 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
728               unsigned long, pos_l, unsigned long, pos_h)
729 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
732               socklen_t, addrlen)
733 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
734               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
735 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
736               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
737 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
738 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
739 safe_syscall2(int, flock, int, fd, int, operation)
740 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
741 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
742               const struct timespec *, uts, size_t, sigsetsize)
743 #endif
744 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
745               int, flags)
746 #if defined(TARGET_NR_nanosleep)
747 safe_syscall2(int, nanosleep, const struct timespec *, req,
748               struct timespec *, rem)
749 #endif
750 #if defined(TARGET_NR_clock_nanosleep) || \
751     defined(TARGET_NR_clock_nanosleep_time64)
752 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
753               const struct timespec *, req, struct timespec *, rem)
754 #endif
755 #ifdef __NR_ipc
756 #ifdef __s390x__
757 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
758               void *, ptr)
759 #else
760 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
761               void *, ptr, long, fifth)
762 #endif
763 #endif
764 #ifdef __NR_msgsnd
765 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
766               int, flags)
767 #endif
768 #ifdef __NR_msgrcv
769 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
770               long, msgtype, int, flags)
771 #endif
772 #ifdef __NR_semtimedop
773 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
774               unsigned, nsops, const struct timespec *, timeout)
775 #endif
776 #if defined(TARGET_NR_mq_timedsend) || \
777     defined(TARGET_NR_mq_timedsend_time64)
778 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
779               size_t, len, unsigned, prio, const struct timespec *, timeout)
780 #endif
781 #if defined(TARGET_NR_mq_timedreceive) || \
782     defined(TARGET_NR_mq_timedreceive_time64)
783 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
784               size_t, len, unsigned *, prio, const struct timespec *, timeout)
785 #endif
786 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
787 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
788               int, outfd, loff_t *, poutoff, size_t, length,
789               unsigned int, flags)
790 #endif
791 
792 /* We do ioctl like this rather than via safe_syscall3 to preserve the
793  * "third argument might be integer or pointer or not present" behaviour of
794  * the libc function.
795  */
796 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
797 /* Similarly for fcntl. Since we always build with LFS enabled,
798  * we should be using the 64-bit structures automatically.
799  */
800 #ifdef __NR_fcntl64
801 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
802 #else
803 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
804 #endif
805 
806 static inline int host_to_target_sock_type(int host_type)
807 {
808     int target_type;
809 
810     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
811     case SOCK_DGRAM:
812         target_type = TARGET_SOCK_DGRAM;
813         break;
814     case SOCK_STREAM:
815         target_type = TARGET_SOCK_STREAM;
816         break;
817     default:
818         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
819         break;
820     }
821 
822 #if defined(SOCK_CLOEXEC)
823     if (host_type & SOCK_CLOEXEC) {
824         target_type |= TARGET_SOCK_CLOEXEC;
825     }
826 #endif
827 
828 #if defined(SOCK_NONBLOCK)
829     if (host_type & SOCK_NONBLOCK) {
830         target_type |= TARGET_SOCK_NONBLOCK;
831     }
832 #endif
833 
834     return target_type;
835 }
836 
837 static abi_ulong target_brk, initial_target_brk;
838 
839 void target_set_brk(abi_ulong new_brk)
840 {
841     target_brk = TARGET_PAGE_ALIGN(new_brk);
842     initial_target_brk = target_brk;
843 }
844 
845 /* do_brk() must return target values and target errnos. */
846 abi_long do_brk(abi_ulong brk_val)
847 {
848     abi_long mapped_addr;
849     abi_ulong new_brk;
850     abi_ulong old_brk;
851 
852     /* brk pointers are always untagged */
853 
854     /* do not allow to shrink below initial brk value */
855     if (brk_val < initial_target_brk) {
856         return target_brk;
857     }
858 
859     new_brk = TARGET_PAGE_ALIGN(brk_val);
860     old_brk = TARGET_PAGE_ALIGN(target_brk);
861 
862     /* new and old target_brk might be on the same page */
863     if (new_brk == old_brk) {
864         target_brk = brk_val;
865         return target_brk;
866     }
867 
868     /* Release heap if necessary */
869     if (new_brk < old_brk) {
870         target_munmap(new_brk, old_brk - new_brk);
871 
872         target_brk = brk_val;
873         return target_brk;
874     }
875 
876     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
877                               PROT_READ | PROT_WRITE,
878                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
879                               -1, 0);
880 
881     if (mapped_addr == old_brk) {
882         target_brk = brk_val;
883         return target_brk;
884     }
885 
886 #if defined(TARGET_ALPHA)
887     /* We (partially) emulate OSF/1 on Alpha, which requires we
888        return a proper errno, not an unchanged brk value.  */
889     return -TARGET_ENOMEM;
890 #endif
891     /* For everything else, return the previous break. */
892     return target_brk;
893 }
894 
895 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
896     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
897 static inline abi_long copy_from_user_fdset(fd_set *fds,
898                                             abi_ulong target_fds_addr,
899                                             int n)
900 {
901     int i, nw, j, k;
902     abi_ulong b, *target_fds;
903 
904     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
905     if (!(target_fds = lock_user(VERIFY_READ,
906                                  target_fds_addr,
907                                  sizeof(abi_ulong) * nw,
908                                  1)))
909         return -TARGET_EFAULT;
910 
911     FD_ZERO(fds);
912     k = 0;
913     for (i = 0; i < nw; i++) {
914         /* grab the abi_ulong */
915         __get_user(b, &target_fds[i]);
916         for (j = 0; j < TARGET_ABI_BITS; j++) {
917             /* check the bit inside the abi_ulong */
918             if ((b >> j) & 1)
919                 FD_SET(k, fds);
920             k++;
921         }
922     }
923 
924     unlock_user(target_fds, target_fds_addr, 0);
925 
926     return 0;
927 }
928 
929 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
930                                                  abi_ulong target_fds_addr,
931                                                  int n)
932 {
933     if (target_fds_addr) {
934         if (copy_from_user_fdset(fds, target_fds_addr, n))
935             return -TARGET_EFAULT;
936         *fds_ptr = fds;
937     } else {
938         *fds_ptr = NULL;
939     }
940     return 0;
941 }
942 
943 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
944                                           const fd_set *fds,
945                                           int n)
946 {
947     int i, nw, j, k;
948     abi_long v;
949     abi_ulong *target_fds;
950 
951     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
952     if (!(target_fds = lock_user(VERIFY_WRITE,
953                                  target_fds_addr,
954                                  sizeof(abi_ulong) * nw,
955                                  0)))
956         return -TARGET_EFAULT;
957 
958     k = 0;
959     for (i = 0; i < nw; i++) {
960         v = 0;
961         for (j = 0; j < TARGET_ABI_BITS; j++) {
962             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
963             k++;
964         }
965         __put_user(v, &target_fds[i]);
966     }
967 
968     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
969 
970     return 0;
971 }
972 #endif
973 
974 #if defined(__alpha__)
975 #define HOST_HZ 1024
976 #else
977 #define HOST_HZ 100
978 #endif
979 
980 static inline abi_long host_to_target_clock_t(long ticks)
981 {
982 #if HOST_HZ == TARGET_HZ
983     return ticks;
984 #else
985     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
986 #endif
987 }
988 
989 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
990                                              const struct rusage *rusage)
991 {
992     struct target_rusage *target_rusage;
993 
994     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
995         return -TARGET_EFAULT;
996     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
997     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
998     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
999     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1000     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1001     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1002     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1003     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1004     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1005     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1006     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1007     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1008     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1009     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1010     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1011     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1012     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1013     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1014     unlock_user_struct(target_rusage, target_addr, 1);
1015 
1016     return 0;
1017 }
1018 
1019 #ifdef TARGET_NR_setrlimit
1020 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1021 {
1022     abi_ulong target_rlim_swap;
1023     rlim_t result;
1024 
1025     target_rlim_swap = tswapal(target_rlim);
1026     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1027         return RLIM_INFINITY;
1028 
1029     result = target_rlim_swap;
1030     if (target_rlim_swap != (rlim_t)result)
1031         return RLIM_INFINITY;
1032 
1033     return result;
1034 }
1035 #endif
1036 
1037 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1038 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1039 {
1040     abi_ulong target_rlim_swap;
1041     abi_ulong result;
1042 
1043     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1044         target_rlim_swap = TARGET_RLIM_INFINITY;
1045     else
1046         target_rlim_swap = rlim;
1047     result = tswapal(target_rlim_swap);
1048 
1049     return result;
1050 }
1051 #endif
1052 
1053 static inline int target_to_host_resource(int code)
1054 {
1055     switch (code) {
1056     case TARGET_RLIMIT_AS:
1057         return RLIMIT_AS;
1058     case TARGET_RLIMIT_CORE:
1059         return RLIMIT_CORE;
1060     case TARGET_RLIMIT_CPU:
1061         return RLIMIT_CPU;
1062     case TARGET_RLIMIT_DATA:
1063         return RLIMIT_DATA;
1064     case TARGET_RLIMIT_FSIZE:
1065         return RLIMIT_FSIZE;
1066     case TARGET_RLIMIT_LOCKS:
1067         return RLIMIT_LOCKS;
1068     case TARGET_RLIMIT_MEMLOCK:
1069         return RLIMIT_MEMLOCK;
1070     case TARGET_RLIMIT_MSGQUEUE:
1071         return RLIMIT_MSGQUEUE;
1072     case TARGET_RLIMIT_NICE:
1073         return RLIMIT_NICE;
1074     case TARGET_RLIMIT_NOFILE:
1075         return RLIMIT_NOFILE;
1076     case TARGET_RLIMIT_NPROC:
1077         return RLIMIT_NPROC;
1078     case TARGET_RLIMIT_RSS:
1079         return RLIMIT_RSS;
1080     case TARGET_RLIMIT_RTPRIO:
1081         return RLIMIT_RTPRIO;
1082 #ifdef RLIMIT_RTTIME
1083     case TARGET_RLIMIT_RTTIME:
1084         return RLIMIT_RTTIME;
1085 #endif
1086     case TARGET_RLIMIT_SIGPENDING:
1087         return RLIMIT_SIGPENDING;
1088     case TARGET_RLIMIT_STACK:
1089         return RLIMIT_STACK;
1090     default:
1091         return code;
1092     }
1093 }
1094 
1095 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1096                                               abi_ulong target_tv_addr)
1097 {
1098     struct target_timeval *target_tv;
1099 
1100     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1101         return -TARGET_EFAULT;
1102     }
1103 
1104     __get_user(tv->tv_sec, &target_tv->tv_sec);
1105     __get_user(tv->tv_usec, &target_tv->tv_usec);
1106 
1107     unlock_user_struct(target_tv, target_tv_addr, 0);
1108 
1109     return 0;
1110 }
1111 
1112 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1113                                             const struct timeval *tv)
1114 {
1115     struct target_timeval *target_tv;
1116 
1117     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1118         return -TARGET_EFAULT;
1119     }
1120 
1121     __put_user(tv->tv_sec, &target_tv->tv_sec);
1122     __put_user(tv->tv_usec, &target_tv->tv_usec);
1123 
1124     unlock_user_struct(target_tv, target_tv_addr, 1);
1125 
1126     return 0;
1127 }
1128 
1129 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1130 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1131                                                 abi_ulong target_tv_addr)
1132 {
1133     struct target__kernel_sock_timeval *target_tv;
1134 
1135     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1136         return -TARGET_EFAULT;
1137     }
1138 
1139     __get_user(tv->tv_sec, &target_tv->tv_sec);
1140     __get_user(tv->tv_usec, &target_tv->tv_usec);
1141 
1142     unlock_user_struct(target_tv, target_tv_addr, 0);
1143 
1144     return 0;
1145 }
1146 #endif
1147 
1148 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1149                                               const struct timeval *tv)
1150 {
1151     struct target__kernel_sock_timeval *target_tv;
1152 
1153     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1154         return -TARGET_EFAULT;
1155     }
1156 
1157     __put_user(tv->tv_sec, &target_tv->tv_sec);
1158     __put_user(tv->tv_usec, &target_tv->tv_usec);
1159 
1160     unlock_user_struct(target_tv, target_tv_addr, 1);
1161 
1162     return 0;
1163 }
1164 
1165 #if defined(TARGET_NR_futex) || \
1166     defined(TARGET_NR_rt_sigtimedwait) || \
1167     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1168     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1169     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1170     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1171     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1172     defined(TARGET_NR_timer_settime) || \
1173     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1174 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1175                                                abi_ulong target_addr)
1176 {
1177     struct target_timespec *target_ts;
1178 
1179     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1180         return -TARGET_EFAULT;
1181     }
1182     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1183     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1184     unlock_user_struct(target_ts, target_addr, 0);
1185     return 0;
1186 }
1187 #endif
1188 
1189 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1190     defined(TARGET_NR_timer_settime64) || \
1191     defined(TARGET_NR_mq_timedsend_time64) || \
1192     defined(TARGET_NR_mq_timedreceive_time64) || \
1193     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1194     defined(TARGET_NR_clock_nanosleep_time64) || \
1195     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1196     defined(TARGET_NR_utimensat) || \
1197     defined(TARGET_NR_utimensat_time64) || \
1198     defined(TARGET_NR_semtimedop_time64) || \
1199     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1200 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1201                                                  abi_ulong target_addr)
1202 {
1203     struct target__kernel_timespec *target_ts;
1204 
1205     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1206         return -TARGET_EFAULT;
1207     }
1208     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1209     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1210     /* in 32bit mode, this drops the padding */
1211     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1212     unlock_user_struct(target_ts, target_addr, 0);
1213     return 0;
1214 }
1215 #endif
1216 
1217 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1218                                                struct timespec *host_ts)
1219 {
1220     struct target_timespec *target_ts;
1221 
1222     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1223         return -TARGET_EFAULT;
1224     }
1225     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1226     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1227     unlock_user_struct(target_ts, target_addr, 1);
1228     return 0;
1229 }
1230 
1231 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1232                                                  struct timespec *host_ts)
1233 {
1234     struct target__kernel_timespec *target_ts;
1235 
1236     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1237         return -TARGET_EFAULT;
1238     }
1239     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1240     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1241     unlock_user_struct(target_ts, target_addr, 1);
1242     return 0;
1243 }
1244 
1245 #if defined(TARGET_NR_gettimeofday)
1246 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1247                                              struct timezone *tz)
1248 {
1249     struct target_timezone *target_tz;
1250 
1251     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1252         return -TARGET_EFAULT;
1253     }
1254 
1255     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1256     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1257 
1258     unlock_user_struct(target_tz, target_tz_addr, 1);
1259 
1260     return 0;
1261 }
1262 #endif
1263 
1264 #if defined(TARGET_NR_settimeofday)
1265 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1266                                                abi_ulong target_tz_addr)
1267 {
1268     struct target_timezone *target_tz;
1269 
1270     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1271         return -TARGET_EFAULT;
1272     }
1273 
1274     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1275     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1276 
1277     unlock_user_struct(target_tz, target_tz_addr, 0);
1278 
1279     return 0;
1280 }
1281 #endif
1282 
1283 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1284 #include <mqueue.h>
1285 
1286 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1287                                               abi_ulong target_mq_attr_addr)
1288 {
1289     struct target_mq_attr *target_mq_attr;
1290 
1291     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1292                           target_mq_attr_addr, 1))
1293         return -TARGET_EFAULT;
1294 
1295     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1296     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1297     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1298     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1299 
1300     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1301 
1302     return 0;
1303 }
1304 
1305 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1306                                             const struct mq_attr *attr)
1307 {
1308     struct target_mq_attr *target_mq_attr;
1309 
1310     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1311                           target_mq_attr_addr, 0))
1312         return -TARGET_EFAULT;
1313 
1314     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1315     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1316     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1317     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1318 
1319     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1320 
1321     return 0;
1322 }
1323 #endif
1324 
1325 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1326 /* do_select() must return target values and target errnos. */
1327 static abi_long do_select(int n,
1328                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1329                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1330 {
1331     fd_set rfds, wfds, efds;
1332     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1333     struct timeval tv;
1334     struct timespec ts, *ts_ptr;
1335     abi_long ret;
1336 
1337     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1338     if (ret) {
1339         return ret;
1340     }
1341     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1342     if (ret) {
1343         return ret;
1344     }
1345     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1346     if (ret) {
1347         return ret;
1348     }
1349 
1350     if (target_tv_addr) {
1351         if (copy_from_user_timeval(&tv, target_tv_addr))
1352             return -TARGET_EFAULT;
1353         ts.tv_sec = tv.tv_sec;
1354         ts.tv_nsec = tv.tv_usec * 1000;
1355         ts_ptr = &ts;
1356     } else {
1357         ts_ptr = NULL;
1358     }
1359 
1360     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1361                                   ts_ptr, NULL));
1362 
1363     if (!is_error(ret)) {
1364         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1365             return -TARGET_EFAULT;
1366         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1367             return -TARGET_EFAULT;
1368         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1369             return -TARGET_EFAULT;
1370 
1371         if (target_tv_addr) {
1372             tv.tv_sec = ts.tv_sec;
1373             tv.tv_usec = ts.tv_nsec / 1000;
1374             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1375                 return -TARGET_EFAULT;
1376             }
1377         }
1378     }
1379 
1380     return ret;
1381 }
1382 
1383 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1384 static abi_long do_old_select(abi_ulong arg1)
1385 {
1386     struct target_sel_arg_struct *sel;
1387     abi_ulong inp, outp, exp, tvp;
1388     long nsel;
1389 
1390     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1391         return -TARGET_EFAULT;
1392     }
1393 
1394     nsel = tswapal(sel->n);
1395     inp = tswapal(sel->inp);
1396     outp = tswapal(sel->outp);
1397     exp = tswapal(sel->exp);
1398     tvp = tswapal(sel->tvp);
1399 
1400     unlock_user_struct(sel, arg1, 0);
1401 
1402     return do_select(nsel, inp, outp, exp, tvp);
1403 }
1404 #endif
1405 #endif
1406 
1407 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1408 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1409                             abi_long arg4, abi_long arg5, abi_long arg6,
1410                             bool time64)
1411 {
1412     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1413     fd_set rfds, wfds, efds;
1414     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1415     struct timespec ts, *ts_ptr;
1416     abi_long ret;
1417 
1418     /*
1419      * The 6th arg is actually two args smashed together,
1420      * so we cannot use the C library.
1421      */
1422     struct {
1423         sigset_t *set;
1424         size_t size;
1425     } sig, *sig_ptr;
1426 
1427     abi_ulong arg_sigset, arg_sigsize, *arg7;
1428 
1429     n = arg1;
1430     rfd_addr = arg2;
1431     wfd_addr = arg3;
1432     efd_addr = arg4;
1433     ts_addr = arg5;
1434 
1435     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1436     if (ret) {
1437         return ret;
1438     }
1439     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1440     if (ret) {
1441         return ret;
1442     }
1443     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1444     if (ret) {
1445         return ret;
1446     }
1447 
1448     /*
1449      * This takes a timespec, and not a timeval, so we cannot
1450      * use the do_select() helper ...
1451      */
1452     if (ts_addr) {
1453         if (time64) {
1454             if (target_to_host_timespec64(&ts, ts_addr)) {
1455                 return -TARGET_EFAULT;
1456             }
1457         } else {
1458             if (target_to_host_timespec(&ts, ts_addr)) {
1459                 return -TARGET_EFAULT;
1460             }
1461         }
1462             ts_ptr = &ts;
1463     } else {
1464         ts_ptr = NULL;
1465     }
1466 
1467     /* Extract the two packed args for the sigset */
1468     sig_ptr = NULL;
1469     if (arg6) {
1470         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1471         if (!arg7) {
1472             return -TARGET_EFAULT;
1473         }
1474         arg_sigset = tswapal(arg7[0]);
1475         arg_sigsize = tswapal(arg7[1]);
1476         unlock_user(arg7, arg6, 0);
1477 
1478         if (arg_sigset) {
1479             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1480             if (ret != 0) {
1481                 return ret;
1482             }
1483             sig_ptr = &sig;
1484             sig.size = SIGSET_T_SIZE;
1485         }
1486     }
1487 
1488     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1489                                   ts_ptr, sig_ptr));
1490 
1491     if (sig_ptr) {
1492         finish_sigsuspend_mask(ret);
1493     }
1494 
1495     if (!is_error(ret)) {
1496         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1497             return -TARGET_EFAULT;
1498         }
1499         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1500             return -TARGET_EFAULT;
1501         }
1502         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1503             return -TARGET_EFAULT;
1504         }
1505         if (time64) {
1506             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1507                 return -TARGET_EFAULT;
1508             }
1509         } else {
1510             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1511                 return -TARGET_EFAULT;
1512             }
1513         }
1514     }
1515     return ret;
1516 }
1517 #endif
1518 
1519 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1520     defined(TARGET_NR_ppoll_time64)
1521 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1522                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1523 {
1524     struct target_pollfd *target_pfd;
1525     unsigned int nfds = arg2;
1526     struct pollfd *pfd;
1527     unsigned int i;
1528     abi_long ret;
1529 
1530     pfd = NULL;
1531     target_pfd = NULL;
1532     if (nfds) {
1533         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1534             return -TARGET_EINVAL;
1535         }
1536         target_pfd = lock_user(VERIFY_WRITE, arg1,
1537                                sizeof(struct target_pollfd) * nfds, 1);
1538         if (!target_pfd) {
1539             return -TARGET_EFAULT;
1540         }
1541 
1542         pfd = alloca(sizeof(struct pollfd) * nfds);
1543         for (i = 0; i < nfds; i++) {
1544             pfd[i].fd = tswap32(target_pfd[i].fd);
1545             pfd[i].events = tswap16(target_pfd[i].events);
1546         }
1547     }
1548     if (ppoll) {
1549         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1550         sigset_t *set = NULL;
1551 
1552         if (arg3) {
1553             if (time64) {
1554                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             } else {
1559                 if (target_to_host_timespec(timeout_ts, arg3)) {
1560                     unlock_user(target_pfd, arg1, 0);
1561                     return -TARGET_EFAULT;
1562                 }
1563             }
1564         } else {
1565             timeout_ts = NULL;
1566         }
1567 
1568         if (arg4) {
1569             ret = process_sigsuspend_mask(&set, arg4, arg5);
1570             if (ret != 0) {
1571                 unlock_user(target_pfd, arg1, 0);
1572                 return ret;
1573             }
1574         }
1575 
1576         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1577                                    set, SIGSET_T_SIZE));
1578 
1579         if (set) {
1580             finish_sigsuspend_mask(ret);
1581         }
1582         if (!is_error(ret) && arg3) {
1583             if (time64) {
1584                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1585                     return -TARGET_EFAULT;
1586                 }
1587             } else {
1588                 if (host_to_target_timespec(arg3, timeout_ts)) {
1589                     return -TARGET_EFAULT;
1590                 }
1591             }
1592         }
1593     } else {
1594           struct timespec ts, *pts;
1595 
1596           if (arg3 >= 0) {
1597               /* Convert ms to secs, ns */
1598               ts.tv_sec = arg3 / 1000;
1599               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1600               pts = &ts;
1601           } else {
1602               /* -ve poll() timeout means "infinite" */
1603               pts = NULL;
1604           }
1605           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1606     }
1607 
1608     if (!is_error(ret)) {
1609         for (i = 0; i < nfds; i++) {
1610             target_pfd[i].revents = tswap16(pfd[i].revents);
1611         }
1612     }
1613     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1614     return ret;
1615 }
1616 #endif
1617 
1618 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1619                         int flags, int is_pipe2)
1620 {
1621     int host_pipe[2];
1622     abi_long ret;
1623     ret = pipe2(host_pipe, flags);
1624 
1625     if (is_error(ret))
1626         return get_errno(ret);
1627 
1628     /* Several targets have special calling conventions for the original
1629        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1630     if (!is_pipe2) {
1631 #if defined(TARGET_ALPHA)
1632         cpu_env->ir[IR_A4] = host_pipe[1];
1633         return host_pipe[0];
1634 #elif defined(TARGET_MIPS)
1635         cpu_env->active_tc.gpr[3] = host_pipe[1];
1636         return host_pipe[0];
1637 #elif defined(TARGET_SH4)
1638         cpu_env->gregs[1] = host_pipe[1];
1639         return host_pipe[0];
1640 #elif defined(TARGET_SPARC)
1641         cpu_env->regwptr[1] = host_pipe[1];
1642         return host_pipe[0];
1643 #endif
1644     }
1645 
1646     if (put_user_s32(host_pipe[0], pipedes)
1647         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1648         return -TARGET_EFAULT;
1649     return get_errno(ret);
1650 }
1651 
1652 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1653                                                abi_ulong target_addr,
1654                                                socklen_t len)
1655 {
1656     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1657     sa_family_t sa_family;
1658     struct target_sockaddr *target_saddr;
1659 
1660     if (fd_trans_target_to_host_addr(fd)) {
1661         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1662     }
1663 
1664     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1665     if (!target_saddr)
1666         return -TARGET_EFAULT;
1667 
1668     sa_family = tswap16(target_saddr->sa_family);
1669 
1670     /* Oops. The caller might send a incomplete sun_path; sun_path
1671      * must be terminated by \0 (see the manual page), but
1672      * unfortunately it is quite common to specify sockaddr_un
1673      * length as "strlen(x->sun_path)" while it should be
1674      * "strlen(...) + 1". We'll fix that here if needed.
1675      * Linux kernel has a similar feature.
1676      */
1677 
1678     if (sa_family == AF_UNIX) {
1679         if (len < unix_maxlen && len > 0) {
1680             char *cp = (char*)target_saddr;
1681 
1682             if ( cp[len-1] && !cp[len] )
1683                 len++;
1684         }
1685         if (len > unix_maxlen)
1686             len = unix_maxlen;
1687     }
1688 
1689     memcpy(addr, target_saddr, len);
1690     addr->sa_family = sa_family;
1691     if (sa_family == AF_NETLINK) {
1692         struct sockaddr_nl *nladdr;
1693 
1694         nladdr = (struct sockaddr_nl *)addr;
1695         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1696         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1697     } else if (sa_family == AF_PACKET) {
1698 	struct target_sockaddr_ll *lladdr;
1699 
1700 	lladdr = (struct target_sockaddr_ll *)addr;
1701 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1702 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1703     } else if (sa_family == AF_INET6) {
1704         struct sockaddr_in6 *in6addr;
1705 
1706         in6addr = (struct sockaddr_in6 *)addr;
1707         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1708     }
1709     unlock_user(target_saddr, target_addr, 0);
1710 
1711     return 0;
1712 }
1713 
1714 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1715                                                struct sockaddr *addr,
1716                                                socklen_t len)
1717 {
1718     struct target_sockaddr *target_saddr;
1719 
1720     if (len == 0) {
1721         return 0;
1722     }
1723     assert(addr);
1724 
1725     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1726     if (!target_saddr)
1727         return -TARGET_EFAULT;
1728     memcpy(target_saddr, addr, len);
1729     if (len >= offsetof(struct target_sockaddr, sa_family) +
1730         sizeof(target_saddr->sa_family)) {
1731         target_saddr->sa_family = tswap16(addr->sa_family);
1732     }
1733     if (addr->sa_family == AF_NETLINK &&
1734         len >= sizeof(struct target_sockaddr_nl)) {
1735         struct target_sockaddr_nl *target_nl =
1736                (struct target_sockaddr_nl *)target_saddr;
1737         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1738         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1739     } else if (addr->sa_family == AF_PACKET) {
1740         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1741         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1742         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1743     } else if (addr->sa_family == AF_INET6 &&
1744                len >= sizeof(struct target_sockaddr_in6)) {
1745         struct target_sockaddr_in6 *target_in6 =
1746                (struct target_sockaddr_in6 *)target_saddr;
1747         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1748     }
1749     unlock_user(target_saddr, target_addr, len);
1750 
1751     return 0;
1752 }
1753 
1754 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1755                                            struct target_msghdr *target_msgh)
1756 {
1757     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1758     abi_long msg_controllen;
1759     abi_ulong target_cmsg_addr;
1760     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1761     socklen_t space = 0;
1762 
1763     msg_controllen = tswapal(target_msgh->msg_controllen);
1764     if (msg_controllen < sizeof (struct target_cmsghdr))
1765         goto the_end;
1766     target_cmsg_addr = tswapal(target_msgh->msg_control);
1767     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1768     target_cmsg_start = target_cmsg;
1769     if (!target_cmsg)
1770         return -TARGET_EFAULT;
1771 
1772     while (cmsg && target_cmsg) {
1773         void *data = CMSG_DATA(cmsg);
1774         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1775 
1776         int len = tswapal(target_cmsg->cmsg_len)
1777             - sizeof(struct target_cmsghdr);
1778 
1779         space += CMSG_SPACE(len);
1780         if (space > msgh->msg_controllen) {
1781             space -= CMSG_SPACE(len);
1782             /* This is a QEMU bug, since we allocated the payload
1783              * area ourselves (unlike overflow in host-to-target
1784              * conversion, which is just the guest giving us a buffer
1785              * that's too small). It can't happen for the payload types
1786              * we currently support; if it becomes an issue in future
1787              * we would need to improve our allocation strategy to
1788              * something more intelligent than "twice the size of the
1789              * target buffer we're reading from".
1790              */
1791             qemu_log_mask(LOG_UNIMP,
1792                           ("Unsupported ancillary data %d/%d: "
1793                            "unhandled msg size\n"),
1794                           tswap32(target_cmsg->cmsg_level),
1795                           tswap32(target_cmsg->cmsg_type));
1796             break;
1797         }
1798 
1799         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1800             cmsg->cmsg_level = SOL_SOCKET;
1801         } else {
1802             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1803         }
1804         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1805         cmsg->cmsg_len = CMSG_LEN(len);
1806 
1807         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1808             int *fd = (int *)data;
1809             int *target_fd = (int *)target_data;
1810             int i, numfds = len / sizeof(int);
1811 
1812             for (i = 0; i < numfds; i++) {
1813                 __get_user(fd[i], target_fd + i);
1814             }
1815         } else if (cmsg->cmsg_level == SOL_SOCKET
1816                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1817             struct ucred *cred = (struct ucred *)data;
1818             struct target_ucred *target_cred =
1819                 (struct target_ucred *)target_data;
1820 
1821             __get_user(cred->pid, &target_cred->pid);
1822             __get_user(cred->uid, &target_cred->uid);
1823             __get_user(cred->gid, &target_cred->gid);
1824         } else if (cmsg->cmsg_level == SOL_ALG) {
1825             uint32_t *dst = (uint32_t *)data;
1826 
1827             memcpy(dst, target_data, len);
1828             /* fix endianness of first 32-bit word */
1829             if (len >= sizeof(uint32_t)) {
1830                 *dst = tswap32(*dst);
1831             }
1832         } else {
1833             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1834                           cmsg->cmsg_level, cmsg->cmsg_type);
1835             memcpy(data, target_data, len);
1836         }
1837 
1838         cmsg = CMSG_NXTHDR(msgh, cmsg);
1839         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1840                                          target_cmsg_start);
1841     }
1842     unlock_user(target_cmsg, target_cmsg_addr, 0);
1843  the_end:
1844     msgh->msg_controllen = space;
1845     return 0;
1846 }
1847 
1848 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1849                                            struct msghdr *msgh)
1850 {
1851     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1852     abi_long msg_controllen;
1853     abi_ulong target_cmsg_addr;
1854     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1855     socklen_t space = 0;
1856 
1857     msg_controllen = tswapal(target_msgh->msg_controllen);
1858     if (msg_controllen < sizeof (struct target_cmsghdr))
1859         goto the_end;
1860     target_cmsg_addr = tswapal(target_msgh->msg_control);
1861     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1862     target_cmsg_start = target_cmsg;
1863     if (!target_cmsg)
1864         return -TARGET_EFAULT;
1865 
1866     while (cmsg && target_cmsg) {
1867         void *data = CMSG_DATA(cmsg);
1868         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1869 
1870         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1871         int tgt_len, tgt_space;
1872 
1873         /* We never copy a half-header but may copy half-data;
1874          * this is Linux's behaviour in put_cmsg(). Note that
1875          * truncation here is a guest problem (which we report
1876          * to the guest via the CTRUNC bit), unlike truncation
1877          * in target_to_host_cmsg, which is a QEMU bug.
1878          */
1879         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1880             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1881             break;
1882         }
1883 
1884         if (cmsg->cmsg_level == SOL_SOCKET) {
1885             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1886         } else {
1887             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1888         }
1889         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1890 
1891         /* Payload types which need a different size of payload on
1892          * the target must adjust tgt_len here.
1893          */
1894         tgt_len = len;
1895         switch (cmsg->cmsg_level) {
1896         case SOL_SOCKET:
1897             switch (cmsg->cmsg_type) {
1898             case SO_TIMESTAMP:
1899                 tgt_len = sizeof(struct target_timeval);
1900                 break;
1901             default:
1902                 break;
1903             }
1904             break;
1905         default:
1906             break;
1907         }
1908 
1909         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1910             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1911             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1912         }
1913 
1914         /* We must now copy-and-convert len bytes of payload
1915          * into tgt_len bytes of destination space. Bear in mind
1916          * that in both source and destination we may be dealing
1917          * with a truncated value!
1918          */
1919         switch (cmsg->cmsg_level) {
1920         case SOL_SOCKET:
1921             switch (cmsg->cmsg_type) {
1922             case SCM_RIGHTS:
1923             {
1924                 int *fd = (int *)data;
1925                 int *target_fd = (int *)target_data;
1926                 int i, numfds = tgt_len / sizeof(int);
1927 
1928                 for (i = 0; i < numfds; i++) {
1929                     __put_user(fd[i], target_fd + i);
1930                 }
1931                 break;
1932             }
1933             case SO_TIMESTAMP:
1934             {
1935                 struct timeval *tv = (struct timeval *)data;
1936                 struct target_timeval *target_tv =
1937                     (struct target_timeval *)target_data;
1938 
1939                 if (len != sizeof(struct timeval) ||
1940                     tgt_len != sizeof(struct target_timeval)) {
1941                     goto unimplemented;
1942                 }
1943 
1944                 /* copy struct timeval to target */
1945                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1946                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1947                 break;
1948             }
1949             case SCM_CREDENTIALS:
1950             {
1951                 struct ucred *cred = (struct ucred *)data;
1952                 struct target_ucred *target_cred =
1953                     (struct target_ucred *)target_data;
1954 
1955                 __put_user(cred->pid, &target_cred->pid);
1956                 __put_user(cred->uid, &target_cred->uid);
1957                 __put_user(cred->gid, &target_cred->gid);
1958                 break;
1959             }
1960             default:
1961                 goto unimplemented;
1962             }
1963             break;
1964 
1965         case SOL_IP:
1966             switch (cmsg->cmsg_type) {
1967             case IP_TTL:
1968             {
1969                 uint32_t *v = (uint32_t *)data;
1970                 uint32_t *t_int = (uint32_t *)target_data;
1971 
1972                 if (len != sizeof(uint32_t) ||
1973                     tgt_len != sizeof(uint32_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(*v, t_int);
1977                 break;
1978             }
1979             case IP_RECVERR:
1980             {
1981                 struct errhdr_t {
1982                    struct sock_extended_err ee;
1983                    struct sockaddr_in offender;
1984                 };
1985                 struct errhdr_t *errh = (struct errhdr_t *)data;
1986                 struct errhdr_t *target_errh =
1987                     (struct errhdr_t *)target_data;
1988 
1989                 if (len != sizeof(struct errhdr_t) ||
1990                     tgt_len != sizeof(struct errhdr_t)) {
1991                     goto unimplemented;
1992                 }
1993                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1994                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1995                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1996                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1997                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1998                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1999                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2000                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2001                     (void *) &errh->offender, sizeof(errh->offender));
2002                 break;
2003             }
2004             case IP_PKTINFO:
2005             {
2006                 struct in_pktinfo *pkti = data;
2007                 struct target_in_pktinfo *target_pi = target_data;
2008 
2009                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2010                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2011                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2012                 break;
2013             }
2014             default:
2015                 goto unimplemented;
2016             }
2017             break;
2018 
2019         case SOL_IPV6:
2020             switch (cmsg->cmsg_type) {
2021             case IPV6_HOPLIMIT:
2022             {
2023                 uint32_t *v = (uint32_t *)data;
2024                 uint32_t *t_int = (uint32_t *)target_data;
2025 
2026                 if (len != sizeof(uint32_t) ||
2027                     tgt_len != sizeof(uint32_t)) {
2028                     goto unimplemented;
2029                 }
2030                 __put_user(*v, t_int);
2031                 break;
2032             }
2033             case IPV6_RECVERR:
2034             {
2035                 struct errhdr6_t {
2036                    struct sock_extended_err ee;
2037                    struct sockaddr_in6 offender;
2038                 };
2039                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2040                 struct errhdr6_t *target_errh =
2041                     (struct errhdr6_t *)target_data;
2042 
2043                 if (len != sizeof(struct errhdr6_t) ||
2044                     tgt_len != sizeof(struct errhdr6_t)) {
2045                     goto unimplemented;
2046                 }
2047                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2048                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2049                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2050                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2051                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2052                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2053                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2054                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2055                     (void *) &errh->offender, sizeof(errh->offender));
2056                 break;
2057             }
2058             default:
2059                 goto unimplemented;
2060             }
2061             break;
2062 
2063         default:
2064         unimplemented:
2065             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2066                           cmsg->cmsg_level, cmsg->cmsg_type);
2067             memcpy(target_data, data, MIN(len, tgt_len));
2068             if (tgt_len > len) {
2069                 memset(target_data + len, 0, tgt_len - len);
2070             }
2071         }
2072 
2073         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2074         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2075         if (msg_controllen < tgt_space) {
2076             tgt_space = msg_controllen;
2077         }
2078         msg_controllen -= tgt_space;
2079         space += tgt_space;
2080         cmsg = CMSG_NXTHDR(msgh, cmsg);
2081         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2082                                          target_cmsg_start);
2083     }
2084     unlock_user(target_cmsg, target_cmsg_addr, space);
2085  the_end:
2086     target_msgh->msg_controllen = tswapal(space);
2087     return 0;
2088 }
2089 
2090 /* do_setsockopt() Must return target values and target errnos. */
2091 static abi_long do_setsockopt(int sockfd, int level, int optname,
2092                               abi_ulong optval_addr, socklen_t optlen)
2093 {
2094     abi_long ret;
2095     int val;
2096 
2097     switch(level) {
2098     case SOL_TCP:
2099     case SOL_UDP:
2100         /* TCP and UDP options all take an 'int' value.  */
2101         if (optlen < sizeof(uint32_t))
2102             return -TARGET_EINVAL;
2103 
2104         if (get_user_u32(val, optval_addr))
2105             return -TARGET_EFAULT;
2106         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2107         break;
2108     case SOL_IP:
2109         switch(optname) {
2110         case IP_TOS:
2111         case IP_TTL:
2112         case IP_HDRINCL:
2113         case IP_ROUTER_ALERT:
2114         case IP_RECVOPTS:
2115         case IP_RETOPTS:
2116         case IP_PKTINFO:
2117         case IP_MTU_DISCOVER:
2118         case IP_RECVERR:
2119         case IP_RECVTTL:
2120         case IP_RECVTOS:
2121 #ifdef IP_FREEBIND
2122         case IP_FREEBIND:
2123 #endif
2124         case IP_MULTICAST_TTL:
2125         case IP_MULTICAST_LOOP:
2126             val = 0;
2127             if (optlen >= sizeof(uint32_t)) {
2128                 if (get_user_u32(val, optval_addr))
2129                     return -TARGET_EFAULT;
2130             } else if (optlen >= 1) {
2131                 if (get_user_u8(val, optval_addr))
2132                     return -TARGET_EFAULT;
2133             }
2134             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2135             break;
2136         case IP_MULTICAST_IF:
2137         case IP_ADD_MEMBERSHIP:
2138         case IP_DROP_MEMBERSHIP:
2139         {
2140             struct ip_mreqn ip_mreq;
2141             struct target_ip_mreqn *target_smreqn;
2142             int min_size;
2143 
2144             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2145                               sizeof(struct target_ip_mreq));
2146 
2147             if (optname == IP_MULTICAST_IF) {
2148                 min_size = sizeof(struct in_addr);
2149             } else {
2150                 min_size = sizeof(struct target_ip_mreq);
2151             }
2152             if (optlen < min_size ||
2153                 optlen > sizeof (struct target_ip_mreqn)) {
2154                 return -TARGET_EINVAL;
2155             }
2156 
2157             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2158             if (!target_smreqn) {
2159                 return -TARGET_EFAULT;
2160             }
2161             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2162             if (optlen >= sizeof(struct target_ip_mreq)) {
2163                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2164                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2165                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2166                     optlen = sizeof(struct ip_mreqn);
2167                 }
2168             }
2169             unlock_user(target_smreqn, optval_addr, 0);
2170             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2171             break;
2172         }
2173         case IP_BLOCK_SOURCE:
2174         case IP_UNBLOCK_SOURCE:
2175         case IP_ADD_SOURCE_MEMBERSHIP:
2176         case IP_DROP_SOURCE_MEMBERSHIP:
2177         {
2178             struct ip_mreq_source *ip_mreq_source;
2179 
2180             if (optlen != sizeof (struct target_ip_mreq_source))
2181                 return -TARGET_EINVAL;
2182 
2183             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2184             if (!ip_mreq_source) {
2185                 return -TARGET_EFAULT;
2186             }
2187             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2188             unlock_user (ip_mreq_source, optval_addr, 0);
2189             break;
2190         }
2191         default:
2192             goto unimplemented;
2193         }
2194         break;
2195     case SOL_IPV6:
2196         switch (optname) {
2197         case IPV6_MTU_DISCOVER:
2198         case IPV6_MTU:
2199         case IPV6_V6ONLY:
2200         case IPV6_RECVPKTINFO:
2201         case IPV6_UNICAST_HOPS:
2202         case IPV6_MULTICAST_HOPS:
2203         case IPV6_MULTICAST_LOOP:
2204         case IPV6_RECVERR:
2205         case IPV6_RECVHOPLIMIT:
2206         case IPV6_2292HOPLIMIT:
2207         case IPV6_CHECKSUM:
2208         case IPV6_ADDRFORM:
2209         case IPV6_2292PKTINFO:
2210         case IPV6_RECVTCLASS:
2211         case IPV6_RECVRTHDR:
2212         case IPV6_2292RTHDR:
2213         case IPV6_RECVHOPOPTS:
2214         case IPV6_2292HOPOPTS:
2215         case IPV6_RECVDSTOPTS:
2216         case IPV6_2292DSTOPTS:
2217         case IPV6_TCLASS:
2218         case IPV6_ADDR_PREFERENCES:
2219 #ifdef IPV6_RECVPATHMTU
2220         case IPV6_RECVPATHMTU:
2221 #endif
2222 #ifdef IPV6_TRANSPARENT
2223         case IPV6_TRANSPARENT:
2224 #endif
2225 #ifdef IPV6_FREEBIND
2226         case IPV6_FREEBIND:
2227 #endif
2228 #ifdef IPV6_RECVORIGDSTADDR
2229         case IPV6_RECVORIGDSTADDR:
2230 #endif
2231             val = 0;
2232             if (optlen < sizeof(uint32_t)) {
2233                 return -TARGET_EINVAL;
2234             }
2235             if (get_user_u32(val, optval_addr)) {
2236                 return -TARGET_EFAULT;
2237             }
2238             ret = get_errno(setsockopt(sockfd, level, optname,
2239                                        &val, sizeof(val)));
2240             break;
2241         case IPV6_PKTINFO:
2242         {
2243             struct in6_pktinfo pki;
2244 
2245             if (optlen < sizeof(pki)) {
2246                 return -TARGET_EINVAL;
2247             }
2248 
2249             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2250                 return -TARGET_EFAULT;
2251             }
2252 
2253             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2254 
2255             ret = get_errno(setsockopt(sockfd, level, optname,
2256                                        &pki, sizeof(pki)));
2257             break;
2258         }
2259         case IPV6_ADD_MEMBERSHIP:
2260         case IPV6_DROP_MEMBERSHIP:
2261         {
2262             struct ipv6_mreq ipv6mreq;
2263 
2264             if (optlen < sizeof(ipv6mreq)) {
2265                 return -TARGET_EINVAL;
2266             }
2267 
2268             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2269                 return -TARGET_EFAULT;
2270             }
2271 
2272             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2273 
2274             ret = get_errno(setsockopt(sockfd, level, optname,
2275                                        &ipv6mreq, sizeof(ipv6mreq)));
2276             break;
2277         }
2278         default:
2279             goto unimplemented;
2280         }
2281         break;
2282     case SOL_ICMPV6:
2283         switch (optname) {
2284         case ICMPV6_FILTER:
2285         {
2286             struct icmp6_filter icmp6f;
2287 
2288             if (optlen > sizeof(icmp6f)) {
2289                 optlen = sizeof(icmp6f);
2290             }
2291 
2292             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2293                 return -TARGET_EFAULT;
2294             }
2295 
2296             for (val = 0; val < 8; val++) {
2297                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2298             }
2299 
2300             ret = get_errno(setsockopt(sockfd, level, optname,
2301                                        &icmp6f, optlen));
2302             break;
2303         }
2304         default:
2305             goto unimplemented;
2306         }
2307         break;
2308     case SOL_RAW:
2309         switch (optname) {
2310         case ICMP_FILTER:
2311         case IPV6_CHECKSUM:
2312             /* those take an u32 value */
2313             if (optlen < sizeof(uint32_t)) {
2314                 return -TARGET_EINVAL;
2315             }
2316 
2317             if (get_user_u32(val, optval_addr)) {
2318                 return -TARGET_EFAULT;
2319             }
2320             ret = get_errno(setsockopt(sockfd, level, optname,
2321                                        &val, sizeof(val)));
2322             break;
2323 
2324         default:
2325             goto unimplemented;
2326         }
2327         break;
2328 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2329     case SOL_ALG:
2330         switch (optname) {
2331         case ALG_SET_KEY:
2332         {
2333             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2334             if (!alg_key) {
2335                 return -TARGET_EFAULT;
2336             }
2337             ret = get_errno(setsockopt(sockfd, level, optname,
2338                                        alg_key, optlen));
2339             unlock_user(alg_key, optval_addr, optlen);
2340             break;
2341         }
2342         case ALG_SET_AEAD_AUTHSIZE:
2343         {
2344             ret = get_errno(setsockopt(sockfd, level, optname,
2345                                        NULL, optlen));
2346             break;
2347         }
2348         default:
2349             goto unimplemented;
2350         }
2351         break;
2352 #endif
2353     case TARGET_SOL_SOCKET:
2354         switch (optname) {
2355         case TARGET_SO_RCVTIMEO:
2356         case TARGET_SO_SNDTIMEO:
2357         {
2358                 struct timeval tv;
2359 
2360                 if (optlen != sizeof(struct target_timeval)) {
2361                     return -TARGET_EINVAL;
2362                 }
2363 
2364                 if (copy_from_user_timeval(&tv, optval_addr)) {
2365                     return -TARGET_EFAULT;
2366                 }
2367 
2368                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2369                                 optname == TARGET_SO_RCVTIMEO ?
2370                                     SO_RCVTIMEO : SO_SNDTIMEO,
2371                                 &tv, sizeof(tv)));
2372                 return ret;
2373         }
2374         case TARGET_SO_ATTACH_FILTER:
2375         {
2376                 struct target_sock_fprog *tfprog;
2377                 struct target_sock_filter *tfilter;
2378                 struct sock_fprog fprog;
2379                 struct sock_filter *filter;
2380                 int i;
2381 
2382                 if (optlen != sizeof(*tfprog)) {
2383                     return -TARGET_EINVAL;
2384                 }
2385                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2386                     return -TARGET_EFAULT;
2387                 }
2388                 if (!lock_user_struct(VERIFY_READ, tfilter,
2389                                       tswapal(tfprog->filter), 0)) {
2390                     unlock_user_struct(tfprog, optval_addr, 1);
2391                     return -TARGET_EFAULT;
2392                 }
2393 
2394                 fprog.len = tswap16(tfprog->len);
2395                 filter = g_try_new(struct sock_filter, fprog.len);
2396                 if (filter == NULL) {
2397                     unlock_user_struct(tfilter, tfprog->filter, 1);
2398                     unlock_user_struct(tfprog, optval_addr, 1);
2399                     return -TARGET_ENOMEM;
2400                 }
2401                 for (i = 0; i < fprog.len; i++) {
2402                     filter[i].code = tswap16(tfilter[i].code);
2403                     filter[i].jt = tfilter[i].jt;
2404                     filter[i].jf = tfilter[i].jf;
2405                     filter[i].k = tswap32(tfilter[i].k);
2406                 }
2407                 fprog.filter = filter;
2408 
2409                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2410                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2411                 g_free(filter);
2412 
2413                 unlock_user_struct(tfilter, tfprog->filter, 1);
2414                 unlock_user_struct(tfprog, optval_addr, 1);
2415                 return ret;
2416         }
2417 	case TARGET_SO_BINDTODEVICE:
2418 	{
2419 		char *dev_ifname, *addr_ifname;
2420 
2421 		if (optlen > IFNAMSIZ - 1) {
2422 		    optlen = IFNAMSIZ - 1;
2423 		}
2424 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2425 		if (!dev_ifname) {
2426 		    return -TARGET_EFAULT;
2427 		}
2428 		optname = SO_BINDTODEVICE;
2429 		addr_ifname = alloca(IFNAMSIZ);
2430 		memcpy(addr_ifname, dev_ifname, optlen);
2431 		addr_ifname[optlen] = 0;
2432 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2433                                            addr_ifname, optlen));
2434 		unlock_user (dev_ifname, optval_addr, 0);
2435 		return ret;
2436 	}
2437         case TARGET_SO_LINGER:
2438         {
2439                 struct linger lg;
2440                 struct target_linger *tlg;
2441 
2442                 if (optlen != sizeof(struct target_linger)) {
2443                     return -TARGET_EINVAL;
2444                 }
2445                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2446                     return -TARGET_EFAULT;
2447                 }
2448                 __get_user(lg.l_onoff, &tlg->l_onoff);
2449                 __get_user(lg.l_linger, &tlg->l_linger);
2450                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2451                                 &lg, sizeof(lg)));
2452                 unlock_user_struct(tlg, optval_addr, 0);
2453                 return ret;
2454         }
2455             /* Options with 'int' argument.  */
2456         case TARGET_SO_DEBUG:
2457 		optname = SO_DEBUG;
2458 		break;
2459         case TARGET_SO_REUSEADDR:
2460 		optname = SO_REUSEADDR;
2461 		break;
2462 #ifdef SO_REUSEPORT
2463         case TARGET_SO_REUSEPORT:
2464                 optname = SO_REUSEPORT;
2465                 break;
2466 #endif
2467         case TARGET_SO_TYPE:
2468 		optname = SO_TYPE;
2469 		break;
2470         case TARGET_SO_ERROR:
2471 		optname = SO_ERROR;
2472 		break;
2473         case TARGET_SO_DONTROUTE:
2474 		optname = SO_DONTROUTE;
2475 		break;
2476         case TARGET_SO_BROADCAST:
2477 		optname = SO_BROADCAST;
2478 		break;
2479         case TARGET_SO_SNDBUF:
2480 		optname = SO_SNDBUF;
2481 		break;
2482         case TARGET_SO_SNDBUFFORCE:
2483                 optname = SO_SNDBUFFORCE;
2484                 break;
2485         case TARGET_SO_RCVBUF:
2486 		optname = SO_RCVBUF;
2487 		break;
2488         case TARGET_SO_RCVBUFFORCE:
2489                 optname = SO_RCVBUFFORCE;
2490                 break;
2491         case TARGET_SO_KEEPALIVE:
2492 		optname = SO_KEEPALIVE;
2493 		break;
2494         case TARGET_SO_OOBINLINE:
2495 		optname = SO_OOBINLINE;
2496 		break;
2497         case TARGET_SO_NO_CHECK:
2498 		optname = SO_NO_CHECK;
2499 		break;
2500         case TARGET_SO_PRIORITY:
2501 		optname = SO_PRIORITY;
2502 		break;
2503 #ifdef SO_BSDCOMPAT
2504         case TARGET_SO_BSDCOMPAT:
2505 		optname = SO_BSDCOMPAT;
2506 		break;
2507 #endif
2508         case TARGET_SO_PASSCRED:
2509 		optname = SO_PASSCRED;
2510 		break;
2511         case TARGET_SO_PASSSEC:
2512                 optname = SO_PASSSEC;
2513                 break;
2514         case TARGET_SO_TIMESTAMP:
2515 		optname = SO_TIMESTAMP;
2516 		break;
2517         case TARGET_SO_RCVLOWAT:
2518 		optname = SO_RCVLOWAT;
2519 		break;
2520         default:
2521             goto unimplemented;
2522         }
2523 	if (optlen < sizeof(uint32_t))
2524             return -TARGET_EINVAL;
2525 
2526 	if (get_user_u32(val, optval_addr))
2527             return -TARGET_EFAULT;
2528 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2529         break;
2530 #ifdef SOL_NETLINK
2531     case SOL_NETLINK:
2532         switch (optname) {
2533         case NETLINK_PKTINFO:
2534         case NETLINK_ADD_MEMBERSHIP:
2535         case NETLINK_DROP_MEMBERSHIP:
2536         case NETLINK_BROADCAST_ERROR:
2537         case NETLINK_NO_ENOBUFS:
2538 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2539         case NETLINK_LISTEN_ALL_NSID:
2540         case NETLINK_CAP_ACK:
2541 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2542 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2543         case NETLINK_EXT_ACK:
2544 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2545 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2546         case NETLINK_GET_STRICT_CHK:
2547 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2548             break;
2549         default:
2550             goto unimplemented;
2551         }
2552         val = 0;
2553         if (optlen < sizeof(uint32_t)) {
2554             return -TARGET_EINVAL;
2555         }
2556         if (get_user_u32(val, optval_addr)) {
2557             return -TARGET_EFAULT;
2558         }
2559         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2560                                    sizeof(val)));
2561         break;
2562 #endif /* SOL_NETLINK */
2563     default:
2564     unimplemented:
2565         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2566                       level, optname);
2567         ret = -TARGET_ENOPROTOOPT;
2568     }
2569     return ret;
2570 }
2571 
2572 /* do_getsockopt() Must return target values and target errnos. */
2573 static abi_long do_getsockopt(int sockfd, int level, int optname,
2574                               abi_ulong optval_addr, abi_ulong optlen)
2575 {
2576     abi_long ret;
2577     int len, val;
2578     socklen_t lv;
2579 
2580     switch(level) {
2581     case TARGET_SOL_SOCKET:
2582         level = SOL_SOCKET;
2583         switch (optname) {
2584         /* These don't just return a single integer */
2585         case TARGET_SO_PEERNAME:
2586             goto unimplemented;
2587         case TARGET_SO_RCVTIMEO: {
2588             struct timeval tv;
2589             socklen_t tvlen;
2590 
2591             optname = SO_RCVTIMEO;
2592 
2593 get_timeout:
2594             if (get_user_u32(len, optlen)) {
2595                 return -TARGET_EFAULT;
2596             }
2597             if (len < 0) {
2598                 return -TARGET_EINVAL;
2599             }
2600 
2601             tvlen = sizeof(tv);
2602             ret = get_errno(getsockopt(sockfd, level, optname,
2603                                        &tv, &tvlen));
2604             if (ret < 0) {
2605                 return ret;
2606             }
2607             if (len > sizeof(struct target_timeval)) {
2608                 len = sizeof(struct target_timeval);
2609             }
2610             if (copy_to_user_timeval(optval_addr, &tv)) {
2611                 return -TARGET_EFAULT;
2612             }
2613             if (put_user_u32(len, optlen)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             break;
2617         }
2618         case TARGET_SO_SNDTIMEO:
2619             optname = SO_SNDTIMEO;
2620             goto get_timeout;
2621         case TARGET_SO_PEERCRED: {
2622             struct ucred cr;
2623             socklen_t crlen;
2624             struct target_ucred *tcr;
2625 
2626             if (get_user_u32(len, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             if (len < 0) {
2630                 return -TARGET_EINVAL;
2631             }
2632 
2633             crlen = sizeof(cr);
2634             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2635                                        &cr, &crlen));
2636             if (ret < 0) {
2637                 return ret;
2638             }
2639             if (len > crlen) {
2640                 len = crlen;
2641             }
2642             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2643                 return -TARGET_EFAULT;
2644             }
2645             __put_user(cr.pid, &tcr->pid);
2646             __put_user(cr.uid, &tcr->uid);
2647             __put_user(cr.gid, &tcr->gid);
2648             unlock_user_struct(tcr, optval_addr, 1);
2649             if (put_user_u32(len, optlen)) {
2650                 return -TARGET_EFAULT;
2651             }
2652             break;
2653         }
2654         case TARGET_SO_PEERSEC: {
2655             char *name;
2656 
2657             if (get_user_u32(len, optlen)) {
2658                 return -TARGET_EFAULT;
2659             }
2660             if (len < 0) {
2661                 return -TARGET_EINVAL;
2662             }
2663             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2664             if (!name) {
2665                 return -TARGET_EFAULT;
2666             }
2667             lv = len;
2668             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2669                                        name, &lv));
2670             if (put_user_u32(lv, optlen)) {
2671                 ret = -TARGET_EFAULT;
2672             }
2673             unlock_user(name, optval_addr, lv);
2674             break;
2675         }
2676         case TARGET_SO_LINGER:
2677         {
2678             struct linger lg;
2679             socklen_t lglen;
2680             struct target_linger *tlg;
2681 
2682             if (get_user_u32(len, optlen)) {
2683                 return -TARGET_EFAULT;
2684             }
2685             if (len < 0) {
2686                 return -TARGET_EINVAL;
2687             }
2688 
2689             lglen = sizeof(lg);
2690             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2691                                        &lg, &lglen));
2692             if (ret < 0) {
2693                 return ret;
2694             }
2695             if (len > lglen) {
2696                 len = lglen;
2697             }
2698             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2699                 return -TARGET_EFAULT;
2700             }
2701             __put_user(lg.l_onoff, &tlg->l_onoff);
2702             __put_user(lg.l_linger, &tlg->l_linger);
2703             unlock_user_struct(tlg, optval_addr, 1);
2704             if (put_user_u32(len, optlen)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             break;
2708         }
2709         /* Options with 'int' argument.  */
2710         case TARGET_SO_DEBUG:
2711             optname = SO_DEBUG;
2712             goto int_case;
2713         case TARGET_SO_REUSEADDR:
2714             optname = SO_REUSEADDR;
2715             goto int_case;
2716 #ifdef SO_REUSEPORT
2717         case TARGET_SO_REUSEPORT:
2718             optname = SO_REUSEPORT;
2719             goto int_case;
2720 #endif
2721         case TARGET_SO_TYPE:
2722             optname = SO_TYPE;
2723             goto int_case;
2724         case TARGET_SO_ERROR:
2725             optname = SO_ERROR;
2726             goto int_case;
2727         case TARGET_SO_DONTROUTE:
2728             optname = SO_DONTROUTE;
2729             goto int_case;
2730         case TARGET_SO_BROADCAST:
2731             optname = SO_BROADCAST;
2732             goto int_case;
2733         case TARGET_SO_SNDBUF:
2734             optname = SO_SNDBUF;
2735             goto int_case;
2736         case TARGET_SO_RCVBUF:
2737             optname = SO_RCVBUF;
2738             goto int_case;
2739         case TARGET_SO_KEEPALIVE:
2740             optname = SO_KEEPALIVE;
2741             goto int_case;
2742         case TARGET_SO_OOBINLINE:
2743             optname = SO_OOBINLINE;
2744             goto int_case;
2745         case TARGET_SO_NO_CHECK:
2746             optname = SO_NO_CHECK;
2747             goto int_case;
2748         case TARGET_SO_PRIORITY:
2749             optname = SO_PRIORITY;
2750             goto int_case;
2751 #ifdef SO_BSDCOMPAT
2752         case TARGET_SO_BSDCOMPAT:
2753             optname = SO_BSDCOMPAT;
2754             goto int_case;
2755 #endif
2756         case TARGET_SO_PASSCRED:
2757             optname = SO_PASSCRED;
2758             goto int_case;
2759         case TARGET_SO_TIMESTAMP:
2760             optname = SO_TIMESTAMP;
2761             goto int_case;
2762         case TARGET_SO_RCVLOWAT:
2763             optname = SO_RCVLOWAT;
2764             goto int_case;
2765         case TARGET_SO_ACCEPTCONN:
2766             optname = SO_ACCEPTCONN;
2767             goto int_case;
2768         case TARGET_SO_PROTOCOL:
2769             optname = SO_PROTOCOL;
2770             goto int_case;
2771         case TARGET_SO_DOMAIN:
2772             optname = SO_DOMAIN;
2773             goto int_case;
2774         default:
2775             goto int_case;
2776         }
2777         break;
2778     case SOL_TCP:
2779     case SOL_UDP:
2780         /* TCP and UDP options all take an 'int' value.  */
2781     int_case:
2782         if (get_user_u32(len, optlen))
2783             return -TARGET_EFAULT;
2784         if (len < 0)
2785             return -TARGET_EINVAL;
2786         lv = sizeof(lv);
2787         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2788         if (ret < 0)
2789             return ret;
2790         switch (optname) {
2791         case SO_TYPE:
2792             val = host_to_target_sock_type(val);
2793             break;
2794         case SO_ERROR:
2795             val = host_to_target_errno(val);
2796             break;
2797         }
2798         if (len > lv)
2799             len = lv;
2800         if (len == 4) {
2801             if (put_user_u32(val, optval_addr))
2802                 return -TARGET_EFAULT;
2803         } else {
2804             if (put_user_u8(val, optval_addr))
2805                 return -TARGET_EFAULT;
2806         }
2807         if (put_user_u32(len, optlen))
2808             return -TARGET_EFAULT;
2809         break;
2810     case SOL_IP:
2811         switch(optname) {
2812         case IP_TOS:
2813         case IP_TTL:
2814         case IP_HDRINCL:
2815         case IP_ROUTER_ALERT:
2816         case IP_RECVOPTS:
2817         case IP_RETOPTS:
2818         case IP_PKTINFO:
2819         case IP_MTU_DISCOVER:
2820         case IP_RECVERR:
2821         case IP_RECVTOS:
2822 #ifdef IP_FREEBIND
2823         case IP_FREEBIND:
2824 #endif
2825         case IP_MULTICAST_TTL:
2826         case IP_MULTICAST_LOOP:
2827             if (get_user_u32(len, optlen))
2828                 return -TARGET_EFAULT;
2829             if (len < 0)
2830                 return -TARGET_EINVAL;
2831             lv = sizeof(lv);
2832             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2833             if (ret < 0)
2834                 return ret;
2835             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2836                 len = 1;
2837                 if (put_user_u32(len, optlen)
2838                     || put_user_u8(val, optval_addr))
2839                     return -TARGET_EFAULT;
2840             } else {
2841                 if (len > sizeof(int))
2842                     len = sizeof(int);
2843                 if (put_user_u32(len, optlen)
2844                     || put_user_u32(val, optval_addr))
2845                     return -TARGET_EFAULT;
2846             }
2847             break;
2848         default:
2849             ret = -TARGET_ENOPROTOOPT;
2850             break;
2851         }
2852         break;
2853     case SOL_IPV6:
2854         switch (optname) {
2855         case IPV6_MTU_DISCOVER:
2856         case IPV6_MTU:
2857         case IPV6_V6ONLY:
2858         case IPV6_RECVPKTINFO:
2859         case IPV6_UNICAST_HOPS:
2860         case IPV6_MULTICAST_HOPS:
2861         case IPV6_MULTICAST_LOOP:
2862         case IPV6_RECVERR:
2863         case IPV6_RECVHOPLIMIT:
2864         case IPV6_2292HOPLIMIT:
2865         case IPV6_CHECKSUM:
2866         case IPV6_ADDRFORM:
2867         case IPV6_2292PKTINFO:
2868         case IPV6_RECVTCLASS:
2869         case IPV6_RECVRTHDR:
2870         case IPV6_2292RTHDR:
2871         case IPV6_RECVHOPOPTS:
2872         case IPV6_2292HOPOPTS:
2873         case IPV6_RECVDSTOPTS:
2874         case IPV6_2292DSTOPTS:
2875         case IPV6_TCLASS:
2876         case IPV6_ADDR_PREFERENCES:
2877 #ifdef IPV6_RECVPATHMTU
2878         case IPV6_RECVPATHMTU:
2879 #endif
2880 #ifdef IPV6_TRANSPARENT
2881         case IPV6_TRANSPARENT:
2882 #endif
2883 #ifdef IPV6_FREEBIND
2884         case IPV6_FREEBIND:
2885 #endif
2886 #ifdef IPV6_RECVORIGDSTADDR
2887         case IPV6_RECVORIGDSTADDR:
2888 #endif
2889             if (get_user_u32(len, optlen))
2890                 return -TARGET_EFAULT;
2891             if (len < 0)
2892                 return -TARGET_EINVAL;
2893             lv = sizeof(lv);
2894             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2895             if (ret < 0)
2896                 return ret;
2897             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2898                 len = 1;
2899                 if (put_user_u32(len, optlen)
2900                     || put_user_u8(val, optval_addr))
2901                     return -TARGET_EFAULT;
2902             } else {
2903                 if (len > sizeof(int))
2904                     len = sizeof(int);
2905                 if (put_user_u32(len, optlen)
2906                     || put_user_u32(val, optval_addr))
2907                     return -TARGET_EFAULT;
2908             }
2909             break;
2910         default:
2911             ret = -TARGET_ENOPROTOOPT;
2912             break;
2913         }
2914         break;
2915 #ifdef SOL_NETLINK
2916     case SOL_NETLINK:
2917         switch (optname) {
2918         case NETLINK_PKTINFO:
2919         case NETLINK_BROADCAST_ERROR:
2920         case NETLINK_NO_ENOBUFS:
2921 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2922         case NETLINK_LISTEN_ALL_NSID:
2923         case NETLINK_CAP_ACK:
2924 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2926         case NETLINK_EXT_ACK:
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2928 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2929         case NETLINK_GET_STRICT_CHK:
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2931             if (get_user_u32(len, optlen)) {
2932                 return -TARGET_EFAULT;
2933             }
2934             if (len != sizeof(val)) {
2935                 return -TARGET_EINVAL;
2936             }
2937             lv = len;
2938             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2939             if (ret < 0) {
2940                 return ret;
2941             }
2942             if (put_user_u32(lv, optlen)
2943                 || put_user_u32(val, optval_addr)) {
2944                 return -TARGET_EFAULT;
2945             }
2946             break;
2947 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2948         case NETLINK_LIST_MEMBERSHIPS:
2949         {
2950             uint32_t *results;
2951             int i;
2952             if (get_user_u32(len, optlen)) {
2953                 return -TARGET_EFAULT;
2954             }
2955             if (len < 0) {
2956                 return -TARGET_EINVAL;
2957             }
2958             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2959             if (!results && len > 0) {
2960                 return -TARGET_EFAULT;
2961             }
2962             lv = len;
2963             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2964             if (ret < 0) {
2965                 unlock_user(results, optval_addr, 0);
2966                 return ret;
2967             }
2968             /* swap host endianness to target endianness. */
2969             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2970                 results[i] = tswap32(results[i]);
2971             }
2972             if (put_user_u32(lv, optlen)) {
2973                 return -TARGET_EFAULT;
2974             }
2975             unlock_user(results, optval_addr, 0);
2976             break;
2977         }
2978 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2979         default:
2980             goto unimplemented;
2981         }
2982         break;
2983 #endif /* SOL_NETLINK */
2984     default:
2985     unimplemented:
2986         qemu_log_mask(LOG_UNIMP,
2987                       "getsockopt level=%d optname=%d not yet supported\n",
2988                       level, optname);
2989         ret = -TARGET_EOPNOTSUPP;
2990         break;
2991     }
2992     return ret;
2993 }
2994 
2995 /* Convert target low/high pair representing file offset into the host
2996  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2997  * as the kernel doesn't handle them either.
2998  */
2999 static void target_to_host_low_high(abi_ulong tlow,
3000                                     abi_ulong thigh,
3001                                     unsigned long *hlow,
3002                                     unsigned long *hhigh)
3003 {
3004     uint64_t off = tlow |
3005         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3006         TARGET_LONG_BITS / 2;
3007 
3008     *hlow = off;
3009     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3010 }
3011 
3012 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3013                                 abi_ulong count, int copy)
3014 {
3015     struct target_iovec *target_vec;
3016     struct iovec *vec;
3017     abi_ulong total_len, max_len;
3018     int i;
3019     int err = 0;
3020     bool bad_address = false;
3021 
3022     if (count == 0) {
3023         errno = 0;
3024         return NULL;
3025     }
3026     if (count > IOV_MAX) {
3027         errno = EINVAL;
3028         return NULL;
3029     }
3030 
3031     vec = g_try_new0(struct iovec, count);
3032     if (vec == NULL) {
3033         errno = ENOMEM;
3034         return NULL;
3035     }
3036 
3037     target_vec = lock_user(VERIFY_READ, target_addr,
3038                            count * sizeof(struct target_iovec), 1);
3039     if (target_vec == NULL) {
3040         err = EFAULT;
3041         goto fail2;
3042     }
3043 
3044     /* ??? If host page size > target page size, this will result in a
3045        value larger than what we can actually support.  */
3046     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3047     total_len = 0;
3048 
3049     for (i = 0; i < count; i++) {
3050         abi_ulong base = tswapal(target_vec[i].iov_base);
3051         abi_long len = tswapal(target_vec[i].iov_len);
3052 
3053         if (len < 0) {
3054             err = EINVAL;
3055             goto fail;
3056         } else if (len == 0) {
3057             /* Zero length pointer is ignored.  */
3058             vec[i].iov_base = 0;
3059         } else {
3060             vec[i].iov_base = lock_user(type, base, len, copy);
3061             /* If the first buffer pointer is bad, this is a fault.  But
3062              * subsequent bad buffers will result in a partial write; this
3063              * is realized by filling the vector with null pointers and
3064              * zero lengths. */
3065             if (!vec[i].iov_base) {
3066                 if (i == 0) {
3067                     err = EFAULT;
3068                     goto fail;
3069                 } else {
3070                     bad_address = true;
3071                 }
3072             }
3073             if (bad_address) {
3074                 len = 0;
3075             }
3076             if (len > max_len - total_len) {
3077                 len = max_len - total_len;
3078             }
3079         }
3080         vec[i].iov_len = len;
3081         total_len += len;
3082     }
3083 
3084     unlock_user(target_vec, target_addr, 0);
3085     return vec;
3086 
3087  fail:
3088     while (--i >= 0) {
3089         if (tswapal(target_vec[i].iov_len) > 0) {
3090             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3091         }
3092     }
3093     unlock_user(target_vec, target_addr, 0);
3094  fail2:
3095     g_free(vec);
3096     errno = err;
3097     return NULL;
3098 }
3099 
3100 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3101                          abi_ulong count, int copy)
3102 {
3103     struct target_iovec *target_vec;
3104     int i;
3105 
3106     target_vec = lock_user(VERIFY_READ, target_addr,
3107                            count * sizeof(struct target_iovec), 1);
3108     if (target_vec) {
3109         for (i = 0; i < count; i++) {
3110             abi_ulong base = tswapal(target_vec[i].iov_base);
3111             abi_long len = tswapal(target_vec[i].iov_len);
3112             if (len < 0) {
3113                 break;
3114             }
3115             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3116         }
3117         unlock_user(target_vec, target_addr, 0);
3118     }
3119 
3120     g_free(vec);
3121 }
3122 
3123 static inline int target_to_host_sock_type(int *type)
3124 {
3125     int host_type = 0;
3126     int target_type = *type;
3127 
3128     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3129     case TARGET_SOCK_DGRAM:
3130         host_type = SOCK_DGRAM;
3131         break;
3132     case TARGET_SOCK_STREAM:
3133         host_type = SOCK_STREAM;
3134         break;
3135     default:
3136         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3137         break;
3138     }
3139     if (target_type & TARGET_SOCK_CLOEXEC) {
3140 #if defined(SOCK_CLOEXEC)
3141         host_type |= SOCK_CLOEXEC;
3142 #else
3143         return -TARGET_EINVAL;
3144 #endif
3145     }
3146     if (target_type & TARGET_SOCK_NONBLOCK) {
3147 #if defined(SOCK_NONBLOCK)
3148         host_type |= SOCK_NONBLOCK;
3149 #elif !defined(O_NONBLOCK)
3150         return -TARGET_EINVAL;
3151 #endif
3152     }
3153     *type = host_type;
3154     return 0;
3155 }
3156 
3157 /* Try to emulate socket type flags after socket creation.  */
3158 static int sock_flags_fixup(int fd, int target_type)
3159 {
3160 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3161     if (target_type & TARGET_SOCK_NONBLOCK) {
3162         int flags = fcntl(fd, F_GETFL);
3163         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3164             close(fd);
3165             return -TARGET_EINVAL;
3166         }
3167     }
3168 #endif
3169     return fd;
3170 }
3171 
3172 /* do_socket() Must return target values and target errnos. */
3173 static abi_long do_socket(int domain, int type, int protocol)
3174 {
3175     int target_type = type;
3176     int ret;
3177 
3178     ret = target_to_host_sock_type(&type);
3179     if (ret) {
3180         return ret;
3181     }
3182 
3183     if (domain == PF_NETLINK && !(
3184 #ifdef CONFIG_RTNETLINK
3185          protocol == NETLINK_ROUTE ||
3186 #endif
3187          protocol == NETLINK_KOBJECT_UEVENT ||
3188          protocol == NETLINK_AUDIT)) {
3189         return -TARGET_EPROTONOSUPPORT;
3190     }
3191 
3192     if (domain == AF_PACKET ||
3193         (domain == AF_INET && type == SOCK_PACKET)) {
3194         protocol = tswap16(protocol);
3195     }
3196 
3197     ret = get_errno(socket(domain, type, protocol));
3198     if (ret >= 0) {
3199         ret = sock_flags_fixup(ret, target_type);
3200         if (type == SOCK_PACKET) {
3201             /* Manage an obsolete case :
3202              * if socket type is SOCK_PACKET, bind by name
3203              */
3204             fd_trans_register(ret, &target_packet_trans);
3205         } else if (domain == PF_NETLINK) {
3206             switch (protocol) {
3207 #ifdef CONFIG_RTNETLINK
3208             case NETLINK_ROUTE:
3209                 fd_trans_register(ret, &target_netlink_route_trans);
3210                 break;
3211 #endif
3212             case NETLINK_KOBJECT_UEVENT:
3213                 /* nothing to do: messages are strings */
3214                 break;
3215             case NETLINK_AUDIT:
3216                 fd_trans_register(ret, &target_netlink_audit_trans);
3217                 break;
3218             default:
3219                 g_assert_not_reached();
3220             }
3221         }
3222     }
3223     return ret;
3224 }
3225 
3226 /* do_bind() Must return target values and target errnos. */
3227 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3228                         socklen_t addrlen)
3229 {
3230     void *addr;
3231     abi_long ret;
3232 
3233     if ((int)addrlen < 0) {
3234         return -TARGET_EINVAL;
3235     }
3236 
3237     addr = alloca(addrlen+1);
3238 
3239     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3240     if (ret)
3241         return ret;
3242 
3243     return get_errno(bind(sockfd, addr, addrlen));
3244 }
3245 
3246 /* do_connect() Must return target values and target errnos. */
3247 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3248                            socklen_t addrlen)
3249 {
3250     void *addr;
3251     abi_long ret;
3252 
3253     if ((int)addrlen < 0) {
3254         return -TARGET_EINVAL;
3255     }
3256 
3257     addr = alloca(addrlen+1);
3258 
3259     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3260     if (ret)
3261         return ret;
3262 
3263     return get_errno(safe_connect(sockfd, addr, addrlen));
3264 }
3265 
3266 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3267 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3268                                       int flags, int send)
3269 {
3270     abi_long ret, len;
3271     struct msghdr msg;
3272     abi_ulong count;
3273     struct iovec *vec;
3274     abi_ulong target_vec;
3275 
3276     if (msgp->msg_name) {
3277         msg.msg_namelen = tswap32(msgp->msg_namelen);
3278         msg.msg_name = alloca(msg.msg_namelen+1);
3279         ret = target_to_host_sockaddr(fd, msg.msg_name,
3280                                       tswapal(msgp->msg_name),
3281                                       msg.msg_namelen);
3282         if (ret == -TARGET_EFAULT) {
3283             /* For connected sockets msg_name and msg_namelen must
3284              * be ignored, so returning EFAULT immediately is wrong.
3285              * Instead, pass a bad msg_name to the host kernel, and
3286              * let it decide whether to return EFAULT or not.
3287              */
3288             msg.msg_name = (void *)-1;
3289         } else if (ret) {
3290             goto out2;
3291         }
3292     } else {
3293         msg.msg_name = NULL;
3294         msg.msg_namelen = 0;
3295     }
3296     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3297     msg.msg_control = alloca(msg.msg_controllen);
3298     memset(msg.msg_control, 0, msg.msg_controllen);
3299 
3300     msg.msg_flags = tswap32(msgp->msg_flags);
3301 
3302     count = tswapal(msgp->msg_iovlen);
3303     target_vec = tswapal(msgp->msg_iov);
3304 
3305     if (count > IOV_MAX) {
3306         /* sendrcvmsg returns a different errno for this condition than
3307          * readv/writev, so we must catch it here before lock_iovec() does.
3308          */
3309         ret = -TARGET_EMSGSIZE;
3310         goto out2;
3311     }
3312 
3313     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3314                      target_vec, count, send);
3315     if (vec == NULL) {
3316         ret = -host_to_target_errno(errno);
3317         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3318         if (!send || ret) {
3319             goto out2;
3320         }
3321     }
3322     msg.msg_iovlen = count;
3323     msg.msg_iov = vec;
3324 
3325     if (send) {
3326         if (fd_trans_target_to_host_data(fd)) {
3327             void *host_msg;
3328 
3329             host_msg = g_malloc(msg.msg_iov->iov_len);
3330             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3331             ret = fd_trans_target_to_host_data(fd)(host_msg,
3332                                                    msg.msg_iov->iov_len);
3333             if (ret >= 0) {
3334                 msg.msg_iov->iov_base = host_msg;
3335                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3336             }
3337             g_free(host_msg);
3338         } else {
3339             ret = target_to_host_cmsg(&msg, msgp);
3340             if (ret == 0) {
3341                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3342             }
3343         }
3344     } else {
3345         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3346         if (!is_error(ret)) {
3347             len = ret;
3348             if (fd_trans_host_to_target_data(fd)) {
3349                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3350                                                MIN(msg.msg_iov->iov_len, len));
3351             }
3352             if (!is_error(ret)) {
3353                 ret = host_to_target_cmsg(msgp, &msg);
3354             }
3355             if (!is_error(ret)) {
3356                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3357                 msgp->msg_flags = tswap32(msg.msg_flags);
3358                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3359                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3360                                     msg.msg_name, msg.msg_namelen);
3361                     if (ret) {
3362                         goto out;
3363                     }
3364                 }
3365 
3366                 ret = len;
3367             }
3368         }
3369     }
3370 
3371 out:
3372     if (vec) {
3373         unlock_iovec(vec, target_vec, count, !send);
3374     }
3375 out2:
3376     return ret;
3377 }
3378 
3379 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3380                                int flags, int send)
3381 {
3382     abi_long ret;
3383     struct target_msghdr *msgp;
3384 
3385     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3386                           msgp,
3387                           target_msg,
3388                           send ? 1 : 0)) {
3389         return -TARGET_EFAULT;
3390     }
3391     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3392     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3393     return ret;
3394 }
3395 
3396 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3397  * so it might not have this *mmsg-specific flag either.
3398  */
3399 #ifndef MSG_WAITFORONE
3400 #define MSG_WAITFORONE 0x10000
3401 #endif
3402 
3403 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3404                                 unsigned int vlen, unsigned int flags,
3405                                 int send)
3406 {
3407     struct target_mmsghdr *mmsgp;
3408     abi_long ret = 0;
3409     int i;
3410 
3411     if (vlen > UIO_MAXIOV) {
3412         vlen = UIO_MAXIOV;
3413     }
3414 
3415     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3416     if (!mmsgp) {
3417         return -TARGET_EFAULT;
3418     }
3419 
3420     for (i = 0; i < vlen; i++) {
3421         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3422         if (is_error(ret)) {
3423             break;
3424         }
3425         mmsgp[i].msg_len = tswap32(ret);
3426         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3427         if (flags & MSG_WAITFORONE) {
3428             flags |= MSG_DONTWAIT;
3429         }
3430     }
3431 
3432     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3433 
3434     /* Return number of datagrams sent if we sent any at all;
3435      * otherwise return the error.
3436      */
3437     if (i) {
3438         return i;
3439     }
3440     return ret;
3441 }
3442 
3443 /* do_accept4() Must return target values and target errnos. */
3444 static abi_long do_accept4(int fd, abi_ulong target_addr,
3445                            abi_ulong target_addrlen_addr, int flags)
3446 {
3447     socklen_t addrlen, ret_addrlen;
3448     void *addr;
3449     abi_long ret;
3450     int host_flags;
3451 
3452     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3453         return -TARGET_EINVAL;
3454     }
3455 
3456     host_flags = 0;
3457     if (flags & TARGET_SOCK_NONBLOCK) {
3458         host_flags |= SOCK_NONBLOCK;
3459     }
3460     if (flags & TARGET_SOCK_CLOEXEC) {
3461         host_flags |= SOCK_CLOEXEC;
3462     }
3463 
3464     if (target_addr == 0) {
3465         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3466     }
3467 
3468     /* linux returns EFAULT if addrlen pointer is invalid */
3469     if (get_user_u32(addrlen, target_addrlen_addr))
3470         return -TARGET_EFAULT;
3471 
3472     if ((int)addrlen < 0) {
3473         return -TARGET_EINVAL;
3474     }
3475 
3476     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3477         return -TARGET_EFAULT;
3478     }
3479 
3480     addr = alloca(addrlen);
3481 
3482     ret_addrlen = addrlen;
3483     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3484     if (!is_error(ret)) {
3485         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3486         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3487             ret = -TARGET_EFAULT;
3488         }
3489     }
3490     return ret;
3491 }
3492 
3493 /* do_getpeername() Must return target values and target errnos. */
3494 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3495                                abi_ulong target_addrlen_addr)
3496 {
3497     socklen_t addrlen, ret_addrlen;
3498     void *addr;
3499     abi_long ret;
3500 
3501     if (get_user_u32(addrlen, target_addrlen_addr))
3502         return -TARGET_EFAULT;
3503 
3504     if ((int)addrlen < 0) {
3505         return -TARGET_EINVAL;
3506     }
3507 
3508     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3509         return -TARGET_EFAULT;
3510     }
3511 
3512     addr = alloca(addrlen);
3513 
3514     ret_addrlen = addrlen;
3515     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3516     if (!is_error(ret)) {
3517         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3518         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3519             ret = -TARGET_EFAULT;
3520         }
3521     }
3522     return ret;
3523 }
3524 
3525 /* do_getsockname() Must return target values and target errnos. */
3526 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3527                                abi_ulong target_addrlen_addr)
3528 {
3529     socklen_t addrlen, ret_addrlen;
3530     void *addr;
3531     abi_long ret;
3532 
3533     if (get_user_u32(addrlen, target_addrlen_addr))
3534         return -TARGET_EFAULT;
3535 
3536     if ((int)addrlen < 0) {
3537         return -TARGET_EINVAL;
3538     }
3539 
3540     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3541         return -TARGET_EFAULT;
3542     }
3543 
3544     addr = alloca(addrlen);
3545 
3546     ret_addrlen = addrlen;
3547     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3548     if (!is_error(ret)) {
3549         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3550         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3551             ret = -TARGET_EFAULT;
3552         }
3553     }
3554     return ret;
3555 }
3556 
3557 /* do_socketpair() Must return target values and target errnos. */
3558 static abi_long do_socketpair(int domain, int type, int protocol,
3559                               abi_ulong target_tab_addr)
3560 {
3561     int tab[2];
3562     abi_long ret;
3563 
3564     target_to_host_sock_type(&type);
3565 
3566     ret = get_errno(socketpair(domain, type, protocol, tab));
3567     if (!is_error(ret)) {
3568         if (put_user_s32(tab[0], target_tab_addr)
3569             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3570             ret = -TARGET_EFAULT;
3571     }
3572     return ret;
3573 }
3574 
3575 /* do_sendto() Must return target values and target errnos. */
3576 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3577                           abi_ulong target_addr, socklen_t addrlen)
3578 {
3579     void *addr;
3580     void *host_msg;
3581     void *copy_msg = NULL;
3582     abi_long ret;
3583 
3584     if ((int)addrlen < 0) {
3585         return -TARGET_EINVAL;
3586     }
3587 
3588     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3589     if (!host_msg)
3590         return -TARGET_EFAULT;
3591     if (fd_trans_target_to_host_data(fd)) {
3592         copy_msg = host_msg;
3593         host_msg = g_malloc(len);
3594         memcpy(host_msg, copy_msg, len);
3595         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3596         if (ret < 0) {
3597             goto fail;
3598         }
3599     }
3600     if (target_addr) {
3601         addr = alloca(addrlen+1);
3602         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3603         if (ret) {
3604             goto fail;
3605         }
3606         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3607     } else {
3608         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3609     }
3610 fail:
3611     if (copy_msg) {
3612         g_free(host_msg);
3613         host_msg = copy_msg;
3614     }
3615     unlock_user(host_msg, msg, 0);
3616     return ret;
3617 }
3618 
3619 /* do_recvfrom() Must return target values and target errnos. */
3620 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3621                             abi_ulong target_addr,
3622                             abi_ulong target_addrlen)
3623 {
3624     socklen_t addrlen, ret_addrlen;
3625     void *addr;
3626     void *host_msg;
3627     abi_long ret;
3628 
3629     if (!msg) {
3630         host_msg = NULL;
3631     } else {
3632         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3633         if (!host_msg) {
3634             return -TARGET_EFAULT;
3635         }
3636     }
3637     if (target_addr) {
3638         if (get_user_u32(addrlen, target_addrlen)) {
3639             ret = -TARGET_EFAULT;
3640             goto fail;
3641         }
3642         if ((int)addrlen < 0) {
3643             ret = -TARGET_EINVAL;
3644             goto fail;
3645         }
3646         addr = alloca(addrlen);
3647         ret_addrlen = addrlen;
3648         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3649                                       addr, &ret_addrlen));
3650     } else {
3651         addr = NULL; /* To keep compiler quiet.  */
3652         addrlen = 0; /* To keep compiler quiet.  */
3653         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3654     }
3655     if (!is_error(ret)) {
3656         if (fd_trans_host_to_target_data(fd)) {
3657             abi_long trans;
3658             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3659             if (is_error(trans)) {
3660                 ret = trans;
3661                 goto fail;
3662             }
3663         }
3664         if (target_addr) {
3665             host_to_target_sockaddr(target_addr, addr,
3666                                     MIN(addrlen, ret_addrlen));
3667             if (put_user_u32(ret_addrlen, target_addrlen)) {
3668                 ret = -TARGET_EFAULT;
3669                 goto fail;
3670             }
3671         }
3672         unlock_user(host_msg, msg, len);
3673     } else {
3674 fail:
3675         unlock_user(host_msg, msg, 0);
3676     }
3677     return ret;
3678 }
3679 
3680 #ifdef TARGET_NR_socketcall
3681 /* do_socketcall() must return target values and target errnos. */
3682 static abi_long do_socketcall(int num, abi_ulong vptr)
3683 {
3684     static const unsigned nargs[] = { /* number of arguments per operation */
3685         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3686         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3687         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3688         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3689         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3690         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3691         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3692         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3693         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3694         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3695         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3696         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3697         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3698         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3699         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3700         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3701         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3702         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3703         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3704         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3705     };
3706     abi_long a[6]; /* max 6 args */
3707     unsigned i;
3708 
3709     /* check the range of the first argument num */
3710     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3711     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3712         return -TARGET_EINVAL;
3713     }
3714     /* ensure we have space for args */
3715     if (nargs[num] > ARRAY_SIZE(a)) {
3716         return -TARGET_EINVAL;
3717     }
3718     /* collect the arguments in a[] according to nargs[] */
3719     for (i = 0; i < nargs[num]; ++i) {
3720         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3721             return -TARGET_EFAULT;
3722         }
3723     }
3724     /* now when we have the args, invoke the appropriate underlying function */
3725     switch (num) {
3726     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3727         return do_socket(a[0], a[1], a[2]);
3728     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3729         return do_bind(a[0], a[1], a[2]);
3730     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3731         return do_connect(a[0], a[1], a[2]);
3732     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3733         return get_errno(listen(a[0], a[1]));
3734     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3735         return do_accept4(a[0], a[1], a[2], 0);
3736     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3737         return do_getsockname(a[0], a[1], a[2]);
3738     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3739         return do_getpeername(a[0], a[1], a[2]);
3740     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3741         return do_socketpair(a[0], a[1], a[2], a[3]);
3742     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3743         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3744     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3745         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3746     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3747         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3748     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3749         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3750     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3751         return get_errno(shutdown(a[0], a[1]));
3752     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3753         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3754     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3755         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3756     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3757         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3758     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3759         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3760     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3761         return do_accept4(a[0], a[1], a[2], a[3]);
3762     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3763         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3764     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3765         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3766     default:
3767         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3768         return -TARGET_EINVAL;
3769     }
3770 }
3771 #endif
3772 
3773 #ifndef TARGET_SEMID64_DS
3774 /* asm-generic version of this struct */
3775 struct target_semid64_ds
3776 {
3777   struct target_ipc_perm sem_perm;
3778   abi_ulong sem_otime;
3779 #if TARGET_ABI_BITS == 32
3780   abi_ulong __unused1;
3781 #endif
3782   abi_ulong sem_ctime;
3783 #if TARGET_ABI_BITS == 32
3784   abi_ulong __unused2;
3785 #endif
3786   abi_ulong sem_nsems;
3787   abi_ulong __unused3;
3788   abi_ulong __unused4;
3789 };
3790 #endif
3791 
3792 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3793                                                abi_ulong target_addr)
3794 {
3795     struct target_ipc_perm *target_ip;
3796     struct target_semid64_ds *target_sd;
3797 
3798     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3799         return -TARGET_EFAULT;
3800     target_ip = &(target_sd->sem_perm);
3801     host_ip->__key = tswap32(target_ip->__key);
3802     host_ip->uid = tswap32(target_ip->uid);
3803     host_ip->gid = tswap32(target_ip->gid);
3804     host_ip->cuid = tswap32(target_ip->cuid);
3805     host_ip->cgid = tswap32(target_ip->cgid);
3806 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3807     host_ip->mode = tswap32(target_ip->mode);
3808 #else
3809     host_ip->mode = tswap16(target_ip->mode);
3810 #endif
3811 #if defined(TARGET_PPC)
3812     host_ip->__seq = tswap32(target_ip->__seq);
3813 #else
3814     host_ip->__seq = tswap16(target_ip->__seq);
3815 #endif
3816     unlock_user_struct(target_sd, target_addr, 0);
3817     return 0;
3818 }
3819 
3820 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3821                                                struct ipc_perm *host_ip)
3822 {
3823     struct target_ipc_perm *target_ip;
3824     struct target_semid64_ds *target_sd;
3825 
3826     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3827         return -TARGET_EFAULT;
3828     target_ip = &(target_sd->sem_perm);
3829     target_ip->__key = tswap32(host_ip->__key);
3830     target_ip->uid = tswap32(host_ip->uid);
3831     target_ip->gid = tswap32(host_ip->gid);
3832     target_ip->cuid = tswap32(host_ip->cuid);
3833     target_ip->cgid = tswap32(host_ip->cgid);
3834 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3835     target_ip->mode = tswap32(host_ip->mode);
3836 #else
3837     target_ip->mode = tswap16(host_ip->mode);
3838 #endif
3839 #if defined(TARGET_PPC)
3840     target_ip->__seq = tswap32(host_ip->__seq);
3841 #else
3842     target_ip->__seq = tswap16(host_ip->__seq);
3843 #endif
3844     unlock_user_struct(target_sd, target_addr, 1);
3845     return 0;
3846 }
3847 
3848 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3849                                                abi_ulong target_addr)
3850 {
3851     struct target_semid64_ds *target_sd;
3852 
3853     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3854         return -TARGET_EFAULT;
3855     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3856         return -TARGET_EFAULT;
3857     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3858     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3859     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3860     unlock_user_struct(target_sd, target_addr, 0);
3861     return 0;
3862 }
3863 
3864 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3865                                                struct semid_ds *host_sd)
3866 {
3867     struct target_semid64_ds *target_sd;
3868 
3869     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3870         return -TARGET_EFAULT;
3871     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3872         return -TARGET_EFAULT;
3873     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3874     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3875     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3876     unlock_user_struct(target_sd, target_addr, 1);
3877     return 0;
3878 }
3879 
3880 struct target_seminfo {
3881     int semmap;
3882     int semmni;
3883     int semmns;
3884     int semmnu;
3885     int semmsl;
3886     int semopm;
3887     int semume;
3888     int semusz;
3889     int semvmx;
3890     int semaem;
3891 };
3892 
3893 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3894                                               struct seminfo *host_seminfo)
3895 {
3896     struct target_seminfo *target_seminfo;
3897     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3898         return -TARGET_EFAULT;
3899     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3900     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3901     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3902     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3903     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3904     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3905     __put_user(host_seminfo->semume, &target_seminfo->semume);
3906     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3907     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3908     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3909     unlock_user_struct(target_seminfo, target_addr, 1);
3910     return 0;
3911 }
3912 
3913 union semun {
3914 	int val;
3915 	struct semid_ds *buf;
3916 	unsigned short *array;
3917 	struct seminfo *__buf;
3918 };
3919 
3920 union target_semun {
3921 	int val;
3922 	abi_ulong buf;
3923 	abi_ulong array;
3924 	abi_ulong __buf;
3925 };
3926 
3927 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3928                                                abi_ulong target_addr)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     *host_array = g_try_new(unsigned short, nsems);
3945     if (!*host_array) {
3946         return -TARGET_ENOMEM;
3947     }
3948     array = lock_user(VERIFY_READ, target_addr,
3949                       nsems*sizeof(unsigned short), 1);
3950     if (!array) {
3951         g_free(*host_array);
3952         return -TARGET_EFAULT;
3953     }
3954 
3955     for(i=0; i<nsems; i++) {
3956         __get_user((*host_array)[i], &array[i]);
3957     }
3958     unlock_user(array, target_addr, 0);
3959 
3960     return 0;
3961 }
3962 
3963 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3964                                                unsigned short **host_array)
3965 {
3966     int nsems;
3967     unsigned short *array;
3968     union semun semun;
3969     struct semid_ds semid_ds;
3970     int i, ret;
3971 
3972     semun.buf = &semid_ds;
3973 
3974     ret = semctl(semid, 0, IPC_STAT, semun);
3975     if (ret == -1)
3976         return get_errno(ret);
3977 
3978     nsems = semid_ds.sem_nsems;
3979 
3980     array = lock_user(VERIFY_WRITE, target_addr,
3981                       nsems*sizeof(unsigned short), 0);
3982     if (!array)
3983         return -TARGET_EFAULT;
3984 
3985     for(i=0; i<nsems; i++) {
3986         __put_user((*host_array)[i], &array[i]);
3987     }
3988     g_free(*host_array);
3989     unlock_user(array, target_addr, 1);
3990 
3991     return 0;
3992 }
3993 
3994 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3995                                  abi_ulong target_arg)
3996 {
3997     union target_semun target_su = { .buf = target_arg };
3998     union semun arg;
3999     struct semid_ds dsarg;
4000     unsigned short *array = NULL;
4001     struct seminfo seminfo;
4002     abi_long ret = -TARGET_EINVAL;
4003     abi_long err;
4004     cmd &= 0xff;
4005 
4006     switch( cmd ) {
4007 	case GETVAL:
4008 	case SETVAL:
4009             /* In 64 bit cross-endian situations, we will erroneously pick up
4010              * the wrong half of the union for the "val" element.  To rectify
4011              * this, the entire 8-byte structure is byteswapped, followed by
4012 	     * a swap of the 4 byte val field. In other cases, the data is
4013 	     * already in proper host byte order. */
4014 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4015 		target_su.buf = tswapal(target_su.buf);
4016 		arg.val = tswap32(target_su.val);
4017 	    } else {
4018 		arg.val = target_su.val;
4019 	    }
4020             ret = get_errno(semctl(semid, semnum, cmd, arg));
4021             break;
4022 	case GETALL:
4023 	case SETALL:
4024             err = target_to_host_semarray(semid, &array, target_su.array);
4025             if (err)
4026                 return err;
4027             arg.array = array;
4028             ret = get_errno(semctl(semid, semnum, cmd, arg));
4029             err = host_to_target_semarray(semid, target_su.array, &array);
4030             if (err)
4031                 return err;
4032             break;
4033 	case IPC_STAT:
4034 	case IPC_SET:
4035 	case SEM_STAT:
4036             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4037             if (err)
4038                 return err;
4039             arg.buf = &dsarg;
4040             ret = get_errno(semctl(semid, semnum, cmd, arg));
4041             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4042             if (err)
4043                 return err;
4044             break;
4045 	case IPC_INFO:
4046 	case SEM_INFO:
4047             arg.__buf = &seminfo;
4048             ret = get_errno(semctl(semid, semnum, cmd, arg));
4049             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4050             if (err)
4051                 return err;
4052             break;
4053 	case IPC_RMID:
4054 	case GETPID:
4055 	case GETNCNT:
4056 	case GETZCNT:
4057             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4058             break;
4059     }
4060 
4061     return ret;
4062 }
4063 
4064 struct target_sembuf {
4065     unsigned short sem_num;
4066     short sem_op;
4067     short sem_flg;
4068 };
4069 
4070 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4071                                              abi_ulong target_addr,
4072                                              unsigned nsops)
4073 {
4074     struct target_sembuf *target_sembuf;
4075     int i;
4076 
4077     target_sembuf = lock_user(VERIFY_READ, target_addr,
4078                               nsops*sizeof(struct target_sembuf), 1);
4079     if (!target_sembuf)
4080         return -TARGET_EFAULT;
4081 
4082     for(i=0; i<nsops; i++) {
4083         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4084         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4085         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4086     }
4087 
4088     unlock_user(target_sembuf, target_addr, 0);
4089 
4090     return 0;
4091 }
4092 
4093 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4094     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4095 
4096 /*
4097  * This macro is required to handle the s390 variants, which passes the
4098  * arguments in a different order than default.
4099  */
4100 #ifdef __s390x__
4101 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4102   (__nsops), (__timeout), (__sops)
4103 #else
4104 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4105   (__nsops), 0, (__sops), (__timeout)
4106 #endif
4107 
4108 static inline abi_long do_semtimedop(int semid,
4109                                      abi_long ptr,
4110                                      unsigned nsops,
4111                                      abi_long timeout, bool time64)
4112 {
4113     struct sembuf *sops;
4114     struct timespec ts, *pts = NULL;
4115     abi_long ret;
4116 
4117     if (timeout) {
4118         pts = &ts;
4119         if (time64) {
4120             if (target_to_host_timespec64(pts, timeout)) {
4121                 return -TARGET_EFAULT;
4122             }
4123         } else {
4124             if (target_to_host_timespec(pts, timeout)) {
4125                 return -TARGET_EFAULT;
4126             }
4127         }
4128     }
4129 
4130     if (nsops > TARGET_SEMOPM) {
4131         return -TARGET_E2BIG;
4132     }
4133 
4134     sops = g_new(struct sembuf, nsops);
4135 
4136     if (target_to_host_sembuf(sops, ptr, nsops)) {
4137         g_free(sops);
4138         return -TARGET_EFAULT;
4139     }
4140 
4141     ret = -TARGET_ENOSYS;
4142 #ifdef __NR_semtimedop
4143     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4144 #endif
4145 #ifdef __NR_ipc
4146     if (ret == -TARGET_ENOSYS) {
4147         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4148                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4149     }
4150 #endif
4151     g_free(sops);
4152     return ret;
4153 }
4154 #endif
4155 
4156 struct target_msqid_ds
4157 {
4158     struct target_ipc_perm msg_perm;
4159     abi_ulong msg_stime;
4160 #if TARGET_ABI_BITS == 32
4161     abi_ulong __unused1;
4162 #endif
4163     abi_ulong msg_rtime;
4164 #if TARGET_ABI_BITS == 32
4165     abi_ulong __unused2;
4166 #endif
4167     abi_ulong msg_ctime;
4168 #if TARGET_ABI_BITS == 32
4169     abi_ulong __unused3;
4170 #endif
4171     abi_ulong __msg_cbytes;
4172     abi_ulong msg_qnum;
4173     abi_ulong msg_qbytes;
4174     abi_ulong msg_lspid;
4175     abi_ulong msg_lrpid;
4176     abi_ulong __unused4;
4177     abi_ulong __unused5;
4178 };
4179 
4180 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4181                                                abi_ulong target_addr)
4182 {
4183     struct target_msqid_ds *target_md;
4184 
4185     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4186         return -TARGET_EFAULT;
4187     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4188         return -TARGET_EFAULT;
4189     host_md->msg_stime = tswapal(target_md->msg_stime);
4190     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4191     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4192     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4193     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4194     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4195     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4196     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4197     unlock_user_struct(target_md, target_addr, 0);
4198     return 0;
4199 }
4200 
4201 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4202                                                struct msqid_ds *host_md)
4203 {
4204     struct target_msqid_ds *target_md;
4205 
4206     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4207         return -TARGET_EFAULT;
4208     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4209         return -TARGET_EFAULT;
4210     target_md->msg_stime = tswapal(host_md->msg_stime);
4211     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4212     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4213     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4214     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4215     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4216     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4217     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4218     unlock_user_struct(target_md, target_addr, 1);
4219     return 0;
4220 }
4221 
4222 struct target_msginfo {
4223     int msgpool;
4224     int msgmap;
4225     int msgmax;
4226     int msgmnb;
4227     int msgmni;
4228     int msgssz;
4229     int msgtql;
4230     unsigned short int msgseg;
4231 };
4232 
4233 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4234                                               struct msginfo *host_msginfo)
4235 {
4236     struct target_msginfo *target_msginfo;
4237     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4238         return -TARGET_EFAULT;
4239     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4240     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4241     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4242     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4243     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4244     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4245     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4246     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4247     unlock_user_struct(target_msginfo, target_addr, 1);
4248     return 0;
4249 }
4250 
4251 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4252 {
4253     struct msqid_ds dsarg;
4254     struct msginfo msginfo;
4255     abi_long ret = -TARGET_EINVAL;
4256 
4257     cmd &= 0xff;
4258 
4259     switch (cmd) {
4260     case IPC_STAT:
4261     case IPC_SET:
4262     case MSG_STAT:
4263         if (target_to_host_msqid_ds(&dsarg,ptr))
4264             return -TARGET_EFAULT;
4265         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4266         if (host_to_target_msqid_ds(ptr,&dsarg))
4267             return -TARGET_EFAULT;
4268         break;
4269     case IPC_RMID:
4270         ret = get_errno(msgctl(msgid, cmd, NULL));
4271         break;
4272     case IPC_INFO:
4273     case MSG_INFO:
4274         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4275         if (host_to_target_msginfo(ptr, &msginfo))
4276             return -TARGET_EFAULT;
4277         break;
4278     }
4279 
4280     return ret;
4281 }
4282 
4283 struct target_msgbuf {
4284     abi_long mtype;
4285     char	mtext[1];
4286 };
4287 
4288 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4289                                  ssize_t msgsz, int msgflg)
4290 {
4291     struct target_msgbuf *target_mb;
4292     struct msgbuf *host_mb;
4293     abi_long ret = 0;
4294 
4295     if (msgsz < 0) {
4296         return -TARGET_EINVAL;
4297     }
4298 
4299     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4300         return -TARGET_EFAULT;
4301     host_mb = g_try_malloc(msgsz + sizeof(long));
4302     if (!host_mb) {
4303         unlock_user_struct(target_mb, msgp, 0);
4304         return -TARGET_ENOMEM;
4305     }
4306     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4307     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4308     ret = -TARGET_ENOSYS;
4309 #ifdef __NR_msgsnd
4310     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4311 #endif
4312 #ifdef __NR_ipc
4313     if (ret == -TARGET_ENOSYS) {
4314 #ifdef __s390x__
4315         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4316                                  host_mb));
4317 #else
4318         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4319                                  host_mb, 0));
4320 #endif
4321     }
4322 #endif
4323     g_free(host_mb);
4324     unlock_user_struct(target_mb, msgp, 0);
4325 
4326     return ret;
4327 }
4328 
4329 #ifdef __NR_ipc
4330 #if defined(__sparc__)
4331 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4332 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4333 #elif defined(__s390x__)
4334 /* The s390 sys_ipc variant has only five parameters.  */
4335 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4336     ((long int[]){(long int)__msgp, __msgtyp})
4337 #else
4338 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4339     ((long int[]){(long int)__msgp, __msgtyp}), 0
4340 #endif
4341 #endif
4342 
4343 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4344                                  ssize_t msgsz, abi_long msgtyp,
4345                                  int msgflg)
4346 {
4347     struct target_msgbuf *target_mb;
4348     char *target_mtext;
4349     struct msgbuf *host_mb;
4350     abi_long ret = 0;
4351 
4352     if (msgsz < 0) {
4353         return -TARGET_EINVAL;
4354     }
4355 
4356     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4357         return -TARGET_EFAULT;
4358 
4359     host_mb = g_try_malloc(msgsz + sizeof(long));
4360     if (!host_mb) {
4361         ret = -TARGET_ENOMEM;
4362         goto end;
4363     }
4364     ret = -TARGET_ENOSYS;
4365 #ifdef __NR_msgrcv
4366     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4367 #endif
4368 #ifdef __NR_ipc
4369     if (ret == -TARGET_ENOSYS) {
4370         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4371                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4372     }
4373 #endif
4374 
4375     if (ret > 0) {
4376         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4377         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4378         if (!target_mtext) {
4379             ret = -TARGET_EFAULT;
4380             goto end;
4381         }
4382         memcpy(target_mb->mtext, host_mb->mtext, ret);
4383         unlock_user(target_mtext, target_mtext_addr, ret);
4384     }
4385 
4386     target_mb->mtype = tswapal(host_mb->mtype);
4387 
4388 end:
4389     if (target_mb)
4390         unlock_user_struct(target_mb, msgp, 1);
4391     g_free(host_mb);
4392     return ret;
4393 }
4394 
4395 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4396                                                abi_ulong target_addr)
4397 {
4398     struct target_shmid_ds *target_sd;
4399 
4400     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4401         return -TARGET_EFAULT;
4402     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4403         return -TARGET_EFAULT;
4404     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4405     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4406     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4407     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4408     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4409     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4410     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4411     unlock_user_struct(target_sd, target_addr, 0);
4412     return 0;
4413 }
4414 
4415 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4416                                                struct shmid_ds *host_sd)
4417 {
4418     struct target_shmid_ds *target_sd;
4419 
4420     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4421         return -TARGET_EFAULT;
4422     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4423         return -TARGET_EFAULT;
4424     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4425     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4426     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4427     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4428     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4429     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4430     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4431     unlock_user_struct(target_sd, target_addr, 1);
4432     return 0;
4433 }
4434 
4435 struct  target_shminfo {
4436     abi_ulong shmmax;
4437     abi_ulong shmmin;
4438     abi_ulong shmmni;
4439     abi_ulong shmseg;
4440     abi_ulong shmall;
4441 };
4442 
4443 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4444                                               struct shminfo *host_shminfo)
4445 {
4446     struct target_shminfo *target_shminfo;
4447     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4448         return -TARGET_EFAULT;
4449     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4450     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4451     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4452     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4453     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4454     unlock_user_struct(target_shminfo, target_addr, 1);
4455     return 0;
4456 }
4457 
4458 struct target_shm_info {
4459     int used_ids;
4460     abi_ulong shm_tot;
4461     abi_ulong shm_rss;
4462     abi_ulong shm_swp;
4463     abi_ulong swap_attempts;
4464     abi_ulong swap_successes;
4465 };
4466 
4467 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4468                                                struct shm_info *host_shm_info)
4469 {
4470     struct target_shm_info *target_shm_info;
4471     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4472         return -TARGET_EFAULT;
4473     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4474     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4475     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4476     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4477     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4478     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4479     unlock_user_struct(target_shm_info, target_addr, 1);
4480     return 0;
4481 }
4482 
4483 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4484 {
4485     struct shmid_ds dsarg;
4486     struct shminfo shminfo;
4487     struct shm_info shm_info;
4488     abi_long ret = -TARGET_EINVAL;
4489 
4490     cmd &= 0xff;
4491 
4492     switch(cmd) {
4493     case IPC_STAT:
4494     case IPC_SET:
4495     case SHM_STAT:
4496         if (target_to_host_shmid_ds(&dsarg, buf))
4497             return -TARGET_EFAULT;
4498         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4499         if (host_to_target_shmid_ds(buf, &dsarg))
4500             return -TARGET_EFAULT;
4501         break;
4502     case IPC_INFO:
4503         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4504         if (host_to_target_shminfo(buf, &shminfo))
4505             return -TARGET_EFAULT;
4506         break;
4507     case SHM_INFO:
4508         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4509         if (host_to_target_shm_info(buf, &shm_info))
4510             return -TARGET_EFAULT;
4511         break;
4512     case IPC_RMID:
4513     case SHM_LOCK:
4514     case SHM_UNLOCK:
4515         ret = get_errno(shmctl(shmid, cmd, NULL));
4516         break;
4517     }
4518 
4519     return ret;
4520 }
4521 
4522 #ifdef TARGET_NR_ipc
4523 /* ??? This only works with linear mappings.  */
4524 /* do_ipc() must return target values and target errnos. */
4525 static abi_long do_ipc(CPUArchState *cpu_env,
4526                        unsigned int call, abi_long first,
4527                        abi_long second, abi_long third,
4528                        abi_long ptr, abi_long fifth)
4529 {
4530     int version;
4531     abi_long ret = 0;
4532 
4533     version = call >> 16;
4534     call &= 0xffff;
4535 
4536     switch (call) {
4537     case IPCOP_semop:
4538         ret = do_semtimedop(first, ptr, second, 0, false);
4539         break;
4540     case IPCOP_semtimedop:
4541     /*
4542      * The s390 sys_ipc variant has only five parameters instead of six
4543      * (as for default variant) and the only difference is the handling of
4544      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4545      * to a struct timespec where the generic variant uses fifth parameter.
4546      */
4547 #if defined(TARGET_S390X)
4548         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4549 #else
4550         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4551 #endif
4552         break;
4553 
4554     case IPCOP_semget:
4555         ret = get_errno(semget(first, second, third));
4556         break;
4557 
4558     case IPCOP_semctl: {
4559         /* The semun argument to semctl is passed by value, so dereference the
4560          * ptr argument. */
4561         abi_ulong atptr;
4562         get_user_ual(atptr, ptr);
4563         ret = do_semctl(first, second, third, atptr);
4564         break;
4565     }
4566 
4567     case IPCOP_msgget:
4568         ret = get_errno(msgget(first, second));
4569         break;
4570 
4571     case IPCOP_msgsnd:
4572         ret = do_msgsnd(first, ptr, second, third);
4573         break;
4574 
4575     case IPCOP_msgctl:
4576         ret = do_msgctl(first, second, ptr);
4577         break;
4578 
4579     case IPCOP_msgrcv:
4580         switch (version) {
4581         case 0:
4582             {
4583                 struct target_ipc_kludge {
4584                     abi_long msgp;
4585                     abi_long msgtyp;
4586                 } *tmp;
4587 
4588                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4589                     ret = -TARGET_EFAULT;
4590                     break;
4591                 }
4592 
4593                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4594 
4595                 unlock_user_struct(tmp, ptr, 0);
4596                 break;
4597             }
4598         default:
4599             ret = do_msgrcv(first, ptr, second, fifth, third);
4600         }
4601         break;
4602 
4603     case IPCOP_shmat:
4604         switch (version) {
4605         default:
4606         {
4607             abi_ulong raddr;
4608             raddr = target_shmat(cpu_env, first, ptr, second);
4609             if (is_error(raddr))
4610                 return get_errno(raddr);
4611             if (put_user_ual(raddr, third))
4612                 return -TARGET_EFAULT;
4613             break;
4614         }
4615         case 1:
4616             ret = -TARGET_EINVAL;
4617             break;
4618         }
4619 	break;
4620     case IPCOP_shmdt:
4621         ret = target_shmdt(ptr);
4622 	break;
4623 
4624     case IPCOP_shmget:
4625 	/* IPC_* flag values are the same on all linux platforms */
4626 	ret = get_errno(shmget(first, second, third));
4627 	break;
4628 
4629 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4630     case IPCOP_shmctl:
4631         ret = do_shmctl(first, second, ptr);
4632         break;
4633     default:
4634         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4635                       call, version);
4636 	ret = -TARGET_ENOSYS;
4637 	break;
4638     }
4639     return ret;
4640 }
4641 #endif
4642 
4643 /* kernel structure types definitions */
4644 
4645 #define STRUCT(name, ...) STRUCT_ ## name,
4646 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4647 enum {
4648 #include "syscall_types.h"
4649 STRUCT_MAX
4650 };
4651 #undef STRUCT
4652 #undef STRUCT_SPECIAL
4653 
4654 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4655 #define STRUCT_SPECIAL(name)
4656 #include "syscall_types.h"
4657 #undef STRUCT
4658 #undef STRUCT_SPECIAL
4659 
4660 #define MAX_STRUCT_SIZE 4096
4661 
4662 #ifdef CONFIG_FIEMAP
4663 /* So fiemap access checks don't overflow on 32 bit systems.
4664  * This is very slightly smaller than the limit imposed by
4665  * the underlying kernel.
4666  */
4667 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4668                             / sizeof(struct fiemap_extent))
4669 
4670 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4671                                        int fd, int cmd, abi_long arg)
4672 {
4673     /* The parameter for this ioctl is a struct fiemap followed
4674      * by an array of struct fiemap_extent whose size is set
4675      * in fiemap->fm_extent_count. The array is filled in by the
4676      * ioctl.
4677      */
4678     int target_size_in, target_size_out;
4679     struct fiemap *fm;
4680     const argtype *arg_type = ie->arg_type;
4681     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4682     void *argptr, *p;
4683     abi_long ret;
4684     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4685     uint32_t outbufsz;
4686     int free_fm = 0;
4687 
4688     assert(arg_type[0] == TYPE_PTR);
4689     assert(ie->access == IOC_RW);
4690     arg_type++;
4691     target_size_in = thunk_type_size(arg_type, 0);
4692     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4693     if (!argptr) {
4694         return -TARGET_EFAULT;
4695     }
4696     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4697     unlock_user(argptr, arg, 0);
4698     fm = (struct fiemap *)buf_temp;
4699     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4700         return -TARGET_EINVAL;
4701     }
4702 
4703     outbufsz = sizeof (*fm) +
4704         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4705 
4706     if (outbufsz > MAX_STRUCT_SIZE) {
4707         /* We can't fit all the extents into the fixed size buffer.
4708          * Allocate one that is large enough and use it instead.
4709          */
4710         fm = g_try_malloc(outbufsz);
4711         if (!fm) {
4712             return -TARGET_ENOMEM;
4713         }
4714         memcpy(fm, buf_temp, sizeof(struct fiemap));
4715         free_fm = 1;
4716     }
4717     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4718     if (!is_error(ret)) {
4719         target_size_out = target_size_in;
4720         /* An extent_count of 0 means we were only counting the extents
4721          * so there are no structs to copy
4722          */
4723         if (fm->fm_extent_count != 0) {
4724             target_size_out += fm->fm_mapped_extents * extent_size;
4725         }
4726         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4727         if (!argptr) {
4728             ret = -TARGET_EFAULT;
4729         } else {
4730             /* Convert the struct fiemap */
4731             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4732             if (fm->fm_extent_count != 0) {
4733                 p = argptr + target_size_in;
4734                 /* ...and then all the struct fiemap_extents */
4735                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4736                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4737                                   THUNK_TARGET);
4738                     p += extent_size;
4739                 }
4740             }
4741             unlock_user(argptr, arg, target_size_out);
4742         }
4743     }
4744     if (free_fm) {
4745         g_free(fm);
4746     }
4747     return ret;
4748 }
4749 #endif
4750 
4751 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4752                                 int fd, int cmd, abi_long arg)
4753 {
4754     const argtype *arg_type = ie->arg_type;
4755     int target_size;
4756     void *argptr;
4757     int ret;
4758     struct ifconf *host_ifconf;
4759     uint32_t outbufsz;
4760     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4761     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4762     int target_ifreq_size;
4763     int nb_ifreq;
4764     int free_buf = 0;
4765     int i;
4766     int target_ifc_len;
4767     abi_long target_ifc_buf;
4768     int host_ifc_len;
4769     char *host_ifc_buf;
4770 
4771     assert(arg_type[0] == TYPE_PTR);
4772     assert(ie->access == IOC_RW);
4773 
4774     arg_type++;
4775     target_size = thunk_type_size(arg_type, 0);
4776 
4777     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4778     if (!argptr)
4779         return -TARGET_EFAULT;
4780     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4781     unlock_user(argptr, arg, 0);
4782 
4783     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4784     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4785     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4786 
4787     if (target_ifc_buf != 0) {
4788         target_ifc_len = host_ifconf->ifc_len;
4789         nb_ifreq = target_ifc_len / target_ifreq_size;
4790         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4791 
4792         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4793         if (outbufsz > MAX_STRUCT_SIZE) {
4794             /*
4795              * We can't fit all the extents into the fixed size buffer.
4796              * Allocate one that is large enough and use it instead.
4797              */
4798             host_ifconf = g_try_malloc(outbufsz);
4799             if (!host_ifconf) {
4800                 return -TARGET_ENOMEM;
4801             }
4802             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4803             free_buf = 1;
4804         }
4805         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4806 
4807         host_ifconf->ifc_len = host_ifc_len;
4808     } else {
4809       host_ifc_buf = NULL;
4810     }
4811     host_ifconf->ifc_buf = host_ifc_buf;
4812 
4813     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4814     if (!is_error(ret)) {
4815 	/* convert host ifc_len to target ifc_len */
4816 
4817         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4818         target_ifc_len = nb_ifreq * target_ifreq_size;
4819         host_ifconf->ifc_len = target_ifc_len;
4820 
4821 	/* restore target ifc_buf */
4822 
4823         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4824 
4825 	/* copy struct ifconf to target user */
4826 
4827         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4828         if (!argptr)
4829             return -TARGET_EFAULT;
4830         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4831         unlock_user(argptr, arg, target_size);
4832 
4833         if (target_ifc_buf != 0) {
4834             /* copy ifreq[] to target user */
4835             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4836             for (i = 0; i < nb_ifreq ; i++) {
4837                 thunk_convert(argptr + i * target_ifreq_size,
4838                               host_ifc_buf + i * sizeof(struct ifreq),
4839                               ifreq_arg_type, THUNK_TARGET);
4840             }
4841             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4842         }
4843     }
4844 
4845     if (free_buf) {
4846         g_free(host_ifconf);
4847     }
4848 
4849     return ret;
4850 }
4851 
4852 #if defined(CONFIG_USBFS)
4853 #if HOST_LONG_BITS > 64
4854 #error USBDEVFS thunks do not support >64 bit hosts yet.
4855 #endif
4856 struct live_urb {
4857     uint64_t target_urb_adr;
4858     uint64_t target_buf_adr;
4859     char *target_buf_ptr;
4860     struct usbdevfs_urb host_urb;
4861 };
4862 
4863 static GHashTable *usbdevfs_urb_hashtable(void)
4864 {
4865     static GHashTable *urb_hashtable;
4866 
4867     if (!urb_hashtable) {
4868         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4869     }
4870     return urb_hashtable;
4871 }
4872 
4873 static void urb_hashtable_insert(struct live_urb *urb)
4874 {
4875     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4876     g_hash_table_insert(urb_hashtable, urb, urb);
4877 }
4878 
4879 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4880 {
4881     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4882     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4883 }
4884 
4885 static void urb_hashtable_remove(struct live_urb *urb)
4886 {
4887     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4888     g_hash_table_remove(urb_hashtable, urb);
4889 }
4890 
4891 static abi_long
4892 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4893                           int fd, int cmd, abi_long arg)
4894 {
4895     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4896     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4897     struct live_urb *lurb;
4898     void *argptr;
4899     uint64_t hurb;
4900     int target_size;
4901     uintptr_t target_urb_adr;
4902     abi_long ret;
4903 
4904     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4905 
4906     memset(buf_temp, 0, sizeof(uint64_t));
4907     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4908     if (is_error(ret)) {
4909         return ret;
4910     }
4911 
4912     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4913     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4914     if (!lurb->target_urb_adr) {
4915         return -TARGET_EFAULT;
4916     }
4917     urb_hashtable_remove(lurb);
4918     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4919         lurb->host_urb.buffer_length);
4920     lurb->target_buf_ptr = NULL;
4921 
4922     /* restore the guest buffer pointer */
4923     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4924 
4925     /* update the guest urb struct */
4926     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4927     if (!argptr) {
4928         g_free(lurb);
4929         return -TARGET_EFAULT;
4930     }
4931     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4932     unlock_user(argptr, lurb->target_urb_adr, target_size);
4933 
4934     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4935     /* write back the urb handle */
4936     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4937     if (!argptr) {
4938         g_free(lurb);
4939         return -TARGET_EFAULT;
4940     }
4941 
4942     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4943     target_urb_adr = lurb->target_urb_adr;
4944     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4945     unlock_user(argptr, arg, target_size);
4946 
4947     g_free(lurb);
4948     return ret;
4949 }
4950 
4951 static abi_long
4952 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4953                              uint8_t *buf_temp __attribute__((unused)),
4954                              int fd, int cmd, abi_long arg)
4955 {
4956     struct live_urb *lurb;
4957 
4958     /* map target address back to host URB with metadata. */
4959     lurb = urb_hashtable_lookup(arg);
4960     if (!lurb) {
4961         return -TARGET_EFAULT;
4962     }
4963     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4964 }
4965 
4966 static abi_long
4967 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4968                             int fd, int cmd, abi_long arg)
4969 {
4970     const argtype *arg_type = ie->arg_type;
4971     int target_size;
4972     abi_long ret;
4973     void *argptr;
4974     int rw_dir;
4975     struct live_urb *lurb;
4976 
4977     /*
4978      * each submitted URB needs to map to a unique ID for the
4979      * kernel, and that unique ID needs to be a pointer to
4980      * host memory.  hence, we need to malloc for each URB.
4981      * isochronous transfers have a variable length struct.
4982      */
4983     arg_type++;
4984     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4985 
4986     /* construct host copy of urb and metadata */
4987     lurb = g_try_new0(struct live_urb, 1);
4988     if (!lurb) {
4989         return -TARGET_ENOMEM;
4990     }
4991 
4992     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4993     if (!argptr) {
4994         g_free(lurb);
4995         return -TARGET_EFAULT;
4996     }
4997     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4998     unlock_user(argptr, arg, 0);
4999 
5000     lurb->target_urb_adr = arg;
5001     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5002 
5003     /* buffer space used depends on endpoint type so lock the entire buffer */
5004     /* control type urbs should check the buffer contents for true direction */
5005     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5006     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5007         lurb->host_urb.buffer_length, 1);
5008     if (lurb->target_buf_ptr == NULL) {
5009         g_free(lurb);
5010         return -TARGET_EFAULT;
5011     }
5012 
5013     /* update buffer pointer in host copy */
5014     lurb->host_urb.buffer = lurb->target_buf_ptr;
5015 
5016     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5017     if (is_error(ret)) {
5018         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5019         g_free(lurb);
5020     } else {
5021         urb_hashtable_insert(lurb);
5022     }
5023 
5024     return ret;
5025 }
5026 #endif /* CONFIG_USBFS */
5027 
5028 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5029                             int cmd, abi_long arg)
5030 {
5031     void *argptr;
5032     struct dm_ioctl *host_dm;
5033     abi_long guest_data;
5034     uint32_t guest_data_size;
5035     int target_size;
5036     const argtype *arg_type = ie->arg_type;
5037     abi_long ret;
5038     void *big_buf = NULL;
5039     char *host_data;
5040 
5041     arg_type++;
5042     target_size = thunk_type_size(arg_type, 0);
5043     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5044     if (!argptr) {
5045         ret = -TARGET_EFAULT;
5046         goto out;
5047     }
5048     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5049     unlock_user(argptr, arg, 0);
5050 
5051     /* buf_temp is too small, so fetch things into a bigger buffer */
5052     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5053     memcpy(big_buf, buf_temp, target_size);
5054     buf_temp = big_buf;
5055     host_dm = big_buf;
5056 
5057     guest_data = arg + host_dm->data_start;
5058     if ((guest_data - arg) < 0) {
5059         ret = -TARGET_EINVAL;
5060         goto out;
5061     }
5062     guest_data_size = host_dm->data_size - host_dm->data_start;
5063     host_data = (char*)host_dm + host_dm->data_start;
5064 
5065     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5066     if (!argptr) {
5067         ret = -TARGET_EFAULT;
5068         goto out;
5069     }
5070 
5071     switch (ie->host_cmd) {
5072     case DM_REMOVE_ALL:
5073     case DM_LIST_DEVICES:
5074     case DM_DEV_CREATE:
5075     case DM_DEV_REMOVE:
5076     case DM_DEV_SUSPEND:
5077     case DM_DEV_STATUS:
5078     case DM_DEV_WAIT:
5079     case DM_TABLE_STATUS:
5080     case DM_TABLE_CLEAR:
5081     case DM_TABLE_DEPS:
5082     case DM_LIST_VERSIONS:
5083         /* no input data */
5084         break;
5085     case DM_DEV_RENAME:
5086     case DM_DEV_SET_GEOMETRY:
5087         /* data contains only strings */
5088         memcpy(host_data, argptr, guest_data_size);
5089         break;
5090     case DM_TARGET_MSG:
5091         memcpy(host_data, argptr, guest_data_size);
5092         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5093         break;
5094     case DM_TABLE_LOAD:
5095     {
5096         void *gspec = argptr;
5097         void *cur_data = host_data;
5098         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5099         int spec_size = thunk_type_size(dm_arg_type, 0);
5100         int i;
5101 
5102         for (i = 0; i < host_dm->target_count; i++) {
5103             struct dm_target_spec *spec = cur_data;
5104             uint32_t next;
5105             int slen;
5106 
5107             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5108             slen = strlen((char*)gspec + spec_size) + 1;
5109             next = spec->next;
5110             spec->next = sizeof(*spec) + slen;
5111             strcpy((char*)&spec[1], gspec + spec_size);
5112             gspec += next;
5113             cur_data += spec->next;
5114         }
5115         break;
5116     }
5117     default:
5118         ret = -TARGET_EINVAL;
5119         unlock_user(argptr, guest_data, 0);
5120         goto out;
5121     }
5122     unlock_user(argptr, guest_data, 0);
5123 
5124     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5125     if (!is_error(ret)) {
5126         guest_data = arg + host_dm->data_start;
5127         guest_data_size = host_dm->data_size - host_dm->data_start;
5128         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5129         switch (ie->host_cmd) {
5130         case DM_REMOVE_ALL:
5131         case DM_DEV_CREATE:
5132         case DM_DEV_REMOVE:
5133         case DM_DEV_RENAME:
5134         case DM_DEV_SUSPEND:
5135         case DM_DEV_STATUS:
5136         case DM_TABLE_LOAD:
5137         case DM_TABLE_CLEAR:
5138         case DM_TARGET_MSG:
5139         case DM_DEV_SET_GEOMETRY:
5140             /* no return data */
5141             break;
5142         case DM_LIST_DEVICES:
5143         {
5144             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5145             uint32_t remaining_data = guest_data_size;
5146             void *cur_data = argptr;
5147             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5148             int nl_size = 12; /* can't use thunk_size due to alignment */
5149 
5150             while (1) {
5151                 uint32_t next = nl->next;
5152                 if (next) {
5153                     nl->next = nl_size + (strlen(nl->name) + 1);
5154                 }
5155                 if (remaining_data < nl->next) {
5156                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5157                     break;
5158                 }
5159                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5160                 strcpy(cur_data + nl_size, nl->name);
5161                 cur_data += nl->next;
5162                 remaining_data -= nl->next;
5163                 if (!next) {
5164                     break;
5165                 }
5166                 nl = (void*)nl + next;
5167             }
5168             break;
5169         }
5170         case DM_DEV_WAIT:
5171         case DM_TABLE_STATUS:
5172         {
5173             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5174             void *cur_data = argptr;
5175             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5176             int spec_size = thunk_type_size(dm_arg_type, 0);
5177             int i;
5178 
5179             for (i = 0; i < host_dm->target_count; i++) {
5180                 uint32_t next = spec->next;
5181                 int slen = strlen((char*)&spec[1]) + 1;
5182                 spec->next = (cur_data - argptr) + spec_size + slen;
5183                 if (guest_data_size < spec->next) {
5184                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5185                     break;
5186                 }
5187                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5188                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5189                 cur_data = argptr + spec->next;
5190                 spec = (void*)host_dm + host_dm->data_start + next;
5191             }
5192             break;
5193         }
5194         case DM_TABLE_DEPS:
5195         {
5196             void *hdata = (void*)host_dm + host_dm->data_start;
5197             int count = *(uint32_t*)hdata;
5198             uint64_t *hdev = hdata + 8;
5199             uint64_t *gdev = argptr + 8;
5200             int i;
5201 
5202             *(uint32_t*)argptr = tswap32(count);
5203             for (i = 0; i < count; i++) {
5204                 *gdev = tswap64(*hdev);
5205                 gdev++;
5206                 hdev++;
5207             }
5208             break;
5209         }
5210         case DM_LIST_VERSIONS:
5211         {
5212             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5213             uint32_t remaining_data = guest_data_size;
5214             void *cur_data = argptr;
5215             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5216             int vers_size = thunk_type_size(dm_arg_type, 0);
5217 
5218             while (1) {
5219                 uint32_t next = vers->next;
5220                 if (next) {
5221                     vers->next = vers_size + (strlen(vers->name) + 1);
5222                 }
5223                 if (remaining_data < vers->next) {
5224                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5225                     break;
5226                 }
5227                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5228                 strcpy(cur_data + vers_size, vers->name);
5229                 cur_data += vers->next;
5230                 remaining_data -= vers->next;
5231                 if (!next) {
5232                     break;
5233                 }
5234                 vers = (void*)vers + next;
5235             }
5236             break;
5237         }
5238         default:
5239             unlock_user(argptr, guest_data, 0);
5240             ret = -TARGET_EINVAL;
5241             goto out;
5242         }
5243         unlock_user(argptr, guest_data, guest_data_size);
5244 
5245         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5246         if (!argptr) {
5247             ret = -TARGET_EFAULT;
5248             goto out;
5249         }
5250         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5251         unlock_user(argptr, arg, target_size);
5252     }
5253 out:
5254     g_free(big_buf);
5255     return ret;
5256 }
5257 
5258 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5259                                int cmd, abi_long arg)
5260 {
5261     void *argptr;
5262     int target_size;
5263     const argtype *arg_type = ie->arg_type;
5264     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5265     abi_long ret;
5266 
5267     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5268     struct blkpg_partition host_part;
5269 
5270     /* Read and convert blkpg */
5271     arg_type++;
5272     target_size = thunk_type_size(arg_type, 0);
5273     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5274     if (!argptr) {
5275         ret = -TARGET_EFAULT;
5276         goto out;
5277     }
5278     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5279     unlock_user(argptr, arg, 0);
5280 
5281     switch (host_blkpg->op) {
5282     case BLKPG_ADD_PARTITION:
5283     case BLKPG_DEL_PARTITION:
5284         /* payload is struct blkpg_partition */
5285         break;
5286     default:
5287         /* Unknown opcode */
5288         ret = -TARGET_EINVAL;
5289         goto out;
5290     }
5291 
5292     /* Read and convert blkpg->data */
5293     arg = (abi_long)(uintptr_t)host_blkpg->data;
5294     target_size = thunk_type_size(part_arg_type, 0);
5295     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5296     if (!argptr) {
5297         ret = -TARGET_EFAULT;
5298         goto out;
5299     }
5300     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5301     unlock_user(argptr, arg, 0);
5302 
5303     /* Swizzle the data pointer to our local copy and call! */
5304     host_blkpg->data = &host_part;
5305     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5306 
5307 out:
5308     return ret;
5309 }
5310 
5311 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5312                                 int fd, int cmd, abi_long arg)
5313 {
5314     const argtype *arg_type = ie->arg_type;
5315     const StructEntry *se;
5316     const argtype *field_types;
5317     const int *dst_offsets, *src_offsets;
5318     int target_size;
5319     void *argptr;
5320     abi_ulong *target_rt_dev_ptr = NULL;
5321     unsigned long *host_rt_dev_ptr = NULL;
5322     abi_long ret;
5323     int i;
5324 
5325     assert(ie->access == IOC_W);
5326     assert(*arg_type == TYPE_PTR);
5327     arg_type++;
5328     assert(*arg_type == TYPE_STRUCT);
5329     target_size = thunk_type_size(arg_type, 0);
5330     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5331     if (!argptr) {
5332         return -TARGET_EFAULT;
5333     }
5334     arg_type++;
5335     assert(*arg_type == (int)STRUCT_rtentry);
5336     se = struct_entries + *arg_type++;
5337     assert(se->convert[0] == NULL);
5338     /* convert struct here to be able to catch rt_dev string */
5339     field_types = se->field_types;
5340     dst_offsets = se->field_offsets[THUNK_HOST];
5341     src_offsets = se->field_offsets[THUNK_TARGET];
5342     for (i = 0; i < se->nb_fields; i++) {
5343         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5344             assert(*field_types == TYPE_PTRVOID);
5345             target_rt_dev_ptr = argptr + src_offsets[i];
5346             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5347             if (*target_rt_dev_ptr != 0) {
5348                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5349                                                   tswapal(*target_rt_dev_ptr));
5350                 if (!*host_rt_dev_ptr) {
5351                     unlock_user(argptr, arg, 0);
5352                     return -TARGET_EFAULT;
5353                 }
5354             } else {
5355                 *host_rt_dev_ptr = 0;
5356             }
5357             field_types++;
5358             continue;
5359         }
5360         field_types = thunk_convert(buf_temp + dst_offsets[i],
5361                                     argptr + src_offsets[i],
5362                                     field_types, THUNK_HOST);
5363     }
5364     unlock_user(argptr, arg, 0);
5365 
5366     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5367 
5368     assert(host_rt_dev_ptr != NULL);
5369     assert(target_rt_dev_ptr != NULL);
5370     if (*host_rt_dev_ptr != 0) {
5371         unlock_user((void *)*host_rt_dev_ptr,
5372                     *target_rt_dev_ptr, 0);
5373     }
5374     return ret;
5375 }
5376 
5377 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5378                                      int fd, int cmd, abi_long arg)
5379 {
5380     int sig = target_to_host_signal(arg);
5381     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5382 }
5383 
5384 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5385                                     int fd, int cmd, abi_long arg)
5386 {
5387     struct timeval tv;
5388     abi_long ret;
5389 
5390     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5391     if (is_error(ret)) {
5392         return ret;
5393     }
5394 
5395     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5396         if (copy_to_user_timeval(arg, &tv)) {
5397             return -TARGET_EFAULT;
5398         }
5399     } else {
5400         if (copy_to_user_timeval64(arg, &tv)) {
5401             return -TARGET_EFAULT;
5402         }
5403     }
5404 
5405     return ret;
5406 }
5407 
5408 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5409                                       int fd, int cmd, abi_long arg)
5410 {
5411     struct timespec ts;
5412     abi_long ret;
5413 
5414     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5415     if (is_error(ret)) {
5416         return ret;
5417     }
5418 
5419     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5420         if (host_to_target_timespec(arg, &ts)) {
5421             return -TARGET_EFAULT;
5422         }
5423     } else{
5424         if (host_to_target_timespec64(arg, &ts)) {
5425             return -TARGET_EFAULT;
5426         }
5427     }
5428 
5429     return ret;
5430 }
5431 
5432 #ifdef TIOCGPTPEER
5433 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5434                                      int fd, int cmd, abi_long arg)
5435 {
5436     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5437     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5438 }
5439 #endif
5440 
5441 #ifdef HAVE_DRM_H
5442 
5443 static void unlock_drm_version(struct drm_version *host_ver,
5444                                struct target_drm_version *target_ver,
5445                                bool copy)
5446 {
5447     unlock_user(host_ver->name, target_ver->name,
5448                                 copy ? host_ver->name_len : 0);
5449     unlock_user(host_ver->date, target_ver->date,
5450                                 copy ? host_ver->date_len : 0);
5451     unlock_user(host_ver->desc, target_ver->desc,
5452                                 copy ? host_ver->desc_len : 0);
5453 }
5454 
5455 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5456                                           struct target_drm_version *target_ver)
5457 {
5458     memset(host_ver, 0, sizeof(*host_ver));
5459 
5460     __get_user(host_ver->name_len, &target_ver->name_len);
5461     if (host_ver->name_len) {
5462         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5463                                    target_ver->name_len, 0);
5464         if (!host_ver->name) {
5465             return -EFAULT;
5466         }
5467     }
5468 
5469     __get_user(host_ver->date_len, &target_ver->date_len);
5470     if (host_ver->date_len) {
5471         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5472                                    target_ver->date_len, 0);
5473         if (!host_ver->date) {
5474             goto err;
5475         }
5476     }
5477 
5478     __get_user(host_ver->desc_len, &target_ver->desc_len);
5479     if (host_ver->desc_len) {
5480         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5481                                    target_ver->desc_len, 0);
5482         if (!host_ver->desc) {
5483             goto err;
5484         }
5485     }
5486 
5487     return 0;
5488 err:
5489     unlock_drm_version(host_ver, target_ver, false);
5490     return -EFAULT;
5491 }
5492 
5493 static inline void host_to_target_drmversion(
5494                                           struct target_drm_version *target_ver,
5495                                           struct drm_version *host_ver)
5496 {
5497     __put_user(host_ver->version_major, &target_ver->version_major);
5498     __put_user(host_ver->version_minor, &target_ver->version_minor);
5499     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5500     __put_user(host_ver->name_len, &target_ver->name_len);
5501     __put_user(host_ver->date_len, &target_ver->date_len);
5502     __put_user(host_ver->desc_len, &target_ver->desc_len);
5503     unlock_drm_version(host_ver, target_ver, true);
5504 }
5505 
5506 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5507                              int fd, int cmd, abi_long arg)
5508 {
5509     struct drm_version *ver;
5510     struct target_drm_version *target_ver;
5511     abi_long ret;
5512 
5513     switch (ie->host_cmd) {
5514     case DRM_IOCTL_VERSION:
5515         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5516             return -TARGET_EFAULT;
5517         }
5518         ver = (struct drm_version *)buf_temp;
5519         ret = target_to_host_drmversion(ver, target_ver);
5520         if (!is_error(ret)) {
5521             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5522             if (is_error(ret)) {
5523                 unlock_drm_version(ver, target_ver, false);
5524             } else {
5525                 host_to_target_drmversion(target_ver, ver);
5526             }
5527         }
5528         unlock_user_struct(target_ver, arg, 0);
5529         return ret;
5530     }
5531     return -TARGET_ENOSYS;
5532 }
5533 
5534 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5535                                            struct drm_i915_getparam *gparam,
5536                                            int fd, abi_long arg)
5537 {
5538     abi_long ret;
5539     int value;
5540     struct target_drm_i915_getparam *target_gparam;
5541 
5542     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5543         return -TARGET_EFAULT;
5544     }
5545 
5546     __get_user(gparam->param, &target_gparam->param);
5547     gparam->value = &value;
5548     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5549     put_user_s32(value, target_gparam->value);
5550 
5551     unlock_user_struct(target_gparam, arg, 0);
5552     return ret;
5553 }
5554 
5555 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5556                                   int fd, int cmd, abi_long arg)
5557 {
5558     switch (ie->host_cmd) {
5559     case DRM_IOCTL_I915_GETPARAM:
5560         return do_ioctl_drm_i915_getparam(ie,
5561                                           (struct drm_i915_getparam *)buf_temp,
5562                                           fd, arg);
5563     default:
5564         return -TARGET_ENOSYS;
5565     }
5566 }
5567 
5568 #endif
5569 
5570 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5571                                         int fd, int cmd, abi_long arg)
5572 {
5573     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5574     struct tun_filter *target_filter;
5575     char *target_addr;
5576 
5577     assert(ie->access == IOC_W);
5578 
5579     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5580     if (!target_filter) {
5581         return -TARGET_EFAULT;
5582     }
5583     filter->flags = tswap16(target_filter->flags);
5584     filter->count = tswap16(target_filter->count);
5585     unlock_user(target_filter, arg, 0);
5586 
5587     if (filter->count) {
5588         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5589             MAX_STRUCT_SIZE) {
5590             return -TARGET_EFAULT;
5591         }
5592 
5593         target_addr = lock_user(VERIFY_READ,
5594                                 arg + offsetof(struct tun_filter, addr),
5595                                 filter->count * ETH_ALEN, 1);
5596         if (!target_addr) {
5597             return -TARGET_EFAULT;
5598         }
5599         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5600         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5601     }
5602 
5603     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5604 }
5605 
5606 IOCTLEntry ioctl_entries[] = {
5607 #define IOCTL(cmd, access, ...) \
5608     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5609 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5610     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5611 #define IOCTL_IGNORE(cmd) \
5612     { TARGET_ ## cmd, 0, #cmd },
5613 #include "ioctls.h"
5614     { 0, 0, },
5615 };
5616 
5617 /* ??? Implement proper locking for ioctls.  */
5618 /* do_ioctl() Must return target values and target errnos. */
5619 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5620 {
5621     const IOCTLEntry *ie;
5622     const argtype *arg_type;
5623     abi_long ret;
5624     uint8_t buf_temp[MAX_STRUCT_SIZE];
5625     int target_size;
5626     void *argptr;
5627 
5628     ie = ioctl_entries;
5629     for(;;) {
5630         if (ie->target_cmd == 0) {
5631             qemu_log_mask(
5632                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5633             return -TARGET_ENOTTY;
5634         }
5635         if (ie->target_cmd == cmd)
5636             break;
5637         ie++;
5638     }
5639     arg_type = ie->arg_type;
5640     if (ie->do_ioctl) {
5641         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5642     } else if (!ie->host_cmd) {
5643         /* Some architectures define BSD ioctls in their headers
5644            that are not implemented in Linux.  */
5645         return -TARGET_ENOTTY;
5646     }
5647 
5648     switch(arg_type[0]) {
5649     case TYPE_NULL:
5650         /* no argument */
5651         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5652         break;
5653     case TYPE_PTRVOID:
5654     case TYPE_INT:
5655     case TYPE_LONG:
5656     case TYPE_ULONG:
5657         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5658         break;
5659     case TYPE_PTR:
5660         arg_type++;
5661         target_size = thunk_type_size(arg_type, 0);
5662         switch(ie->access) {
5663         case IOC_R:
5664             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5665             if (!is_error(ret)) {
5666                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5667                 if (!argptr)
5668                     return -TARGET_EFAULT;
5669                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5670                 unlock_user(argptr, arg, target_size);
5671             }
5672             break;
5673         case IOC_W:
5674             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5675             if (!argptr)
5676                 return -TARGET_EFAULT;
5677             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5678             unlock_user(argptr, arg, 0);
5679             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5680             break;
5681         default:
5682         case IOC_RW:
5683             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5684             if (!argptr)
5685                 return -TARGET_EFAULT;
5686             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5687             unlock_user(argptr, arg, 0);
5688             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5689             if (!is_error(ret)) {
5690                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5691                 if (!argptr)
5692                     return -TARGET_EFAULT;
5693                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5694                 unlock_user(argptr, arg, target_size);
5695             }
5696             break;
5697         }
5698         break;
5699     default:
5700         qemu_log_mask(LOG_UNIMP,
5701                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5702                       (long)cmd, arg_type[0]);
5703         ret = -TARGET_ENOTTY;
5704         break;
5705     }
5706     return ret;
5707 }
5708 
5709 static const bitmask_transtbl iflag_tbl[] = {
5710         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5711         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5712         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5713         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5714         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5715         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5716         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5717         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5718         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5719         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5720         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5721         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5722         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5723         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5724         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5725 };
5726 
5727 static const bitmask_transtbl oflag_tbl[] = {
5728 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5729 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5730 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5731 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5732 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5733 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5734 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5735 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5736 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5737 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5738 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5739 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5740 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5741 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5742 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5743 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5744 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5745 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5746 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5747 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5748 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5749 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5750 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5751 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5752 };
5753 
5754 static const bitmask_transtbl cflag_tbl[] = {
5755 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5756 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5757 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5758 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5759 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5760 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5761 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5762 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5763 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5764 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5765 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5766 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5767 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5768 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5769 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5770 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5771 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5772 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5773 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5774 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5775 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5776 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5777 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5778 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5779 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5780 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5781 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5782 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5783 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5784 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5785 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5786 };
5787 
5788 static const bitmask_transtbl lflag_tbl[] = {
5789   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5790   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5791   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5792   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5793   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5794   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5795   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5796   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5797   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5798   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5799   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5800   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5801   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5802   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5803   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5804   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5805 };
5806 
5807 static void target_to_host_termios (void *dst, const void *src)
5808 {
5809     struct host_termios *host = dst;
5810     const struct target_termios *target = src;
5811 
5812     host->c_iflag =
5813         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5814     host->c_oflag =
5815         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5816     host->c_cflag =
5817         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5818     host->c_lflag =
5819         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5820     host->c_line = target->c_line;
5821 
5822     memset(host->c_cc, 0, sizeof(host->c_cc));
5823     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5824     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5825     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5826     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5827     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5828     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5829     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5830     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5831     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5832     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5833     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5834     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5835     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5836     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5837     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5838     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5839     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5840 }
5841 
5842 static void host_to_target_termios (void *dst, const void *src)
5843 {
5844     struct target_termios *target = dst;
5845     const struct host_termios *host = src;
5846 
5847     target->c_iflag =
5848         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5849     target->c_oflag =
5850         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5851     target->c_cflag =
5852         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5853     target->c_lflag =
5854         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5855     target->c_line = host->c_line;
5856 
5857     memset(target->c_cc, 0, sizeof(target->c_cc));
5858     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5859     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5860     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5861     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5862     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5863     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5864     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5865     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5866     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5867     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5868     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5869     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5870     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5871     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5872     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5873     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5874     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5875 }
5876 
5877 static const StructEntry struct_termios_def = {
5878     .convert = { host_to_target_termios, target_to_host_termios },
5879     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5880     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5881     .print = print_termios,
5882 };
5883 
5884 /* If the host does not provide these bits, they may be safely discarded. */
5885 #ifndef MAP_SYNC
5886 #define MAP_SYNC 0
5887 #endif
5888 #ifndef MAP_UNINITIALIZED
5889 #define MAP_UNINITIALIZED 0
5890 #endif
5891 
5892 static const bitmask_transtbl mmap_flags_tbl[] = {
5893     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5894     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5895       MAP_ANONYMOUS, MAP_ANONYMOUS },
5896     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5897       MAP_GROWSDOWN, MAP_GROWSDOWN },
5898     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5899       MAP_DENYWRITE, MAP_DENYWRITE },
5900     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5901       MAP_EXECUTABLE, MAP_EXECUTABLE },
5902     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5903     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5904       MAP_NORESERVE, MAP_NORESERVE },
5905     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5906     /* MAP_STACK had been ignored by the kernel for quite some time.
5907        Recognize it for the target insofar as we do not want to pass
5908        it through to the host.  */
5909     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5910     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5911     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5912     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5913       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5914     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5915       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5916 };
5917 
5918 /*
5919  * Arrange for legacy / undefined architecture specific flags to be
5920  * ignored by mmap handling code.
5921  */
5922 #ifndef TARGET_MAP_32BIT
5923 #define TARGET_MAP_32BIT 0
5924 #endif
5925 #ifndef TARGET_MAP_HUGE_2MB
5926 #define TARGET_MAP_HUGE_2MB 0
5927 #endif
5928 #ifndef TARGET_MAP_HUGE_1GB
5929 #define TARGET_MAP_HUGE_1GB 0
5930 #endif
5931 
5932 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5933                         int target_flags, int fd, off_t offset)
5934 {
5935     /*
5936      * The historical set of flags that all mmap types implicitly support.
5937      */
5938     enum {
5939         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5940                                | TARGET_MAP_PRIVATE
5941                                | TARGET_MAP_FIXED
5942                                | TARGET_MAP_ANONYMOUS
5943                                | TARGET_MAP_DENYWRITE
5944                                | TARGET_MAP_EXECUTABLE
5945                                | TARGET_MAP_UNINITIALIZED
5946                                | TARGET_MAP_GROWSDOWN
5947                                | TARGET_MAP_LOCKED
5948                                | TARGET_MAP_NORESERVE
5949                                | TARGET_MAP_POPULATE
5950                                | TARGET_MAP_NONBLOCK
5951                                | TARGET_MAP_STACK
5952                                | TARGET_MAP_HUGETLB
5953                                | TARGET_MAP_32BIT
5954                                | TARGET_MAP_HUGE_2MB
5955                                | TARGET_MAP_HUGE_1GB
5956     };
5957     int host_flags;
5958 
5959     switch (target_flags & TARGET_MAP_TYPE) {
5960     case TARGET_MAP_PRIVATE:
5961         host_flags = MAP_PRIVATE;
5962         break;
5963     case TARGET_MAP_SHARED:
5964         host_flags = MAP_SHARED;
5965         break;
5966     case TARGET_MAP_SHARED_VALIDATE:
5967         /*
5968          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5969          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5970          */
5971         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5972             return -TARGET_EOPNOTSUPP;
5973         }
5974         host_flags = MAP_SHARED_VALIDATE;
5975         if (target_flags & TARGET_MAP_SYNC) {
5976             host_flags |= MAP_SYNC;
5977         }
5978         break;
5979     default:
5980         return -TARGET_EINVAL;
5981     }
5982     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5983 
5984     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5985 }
5986 
5987 /*
5988  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5989  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5990  */
5991 #if defined(TARGET_I386)
5992 
5993 /* NOTE: there is really one LDT for all the threads */
5994 static uint8_t *ldt_table;
5995 
5996 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5997 {
5998     int size;
5999     void *p;
6000 
6001     if (!ldt_table)
6002         return 0;
6003     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6004     if (size > bytecount)
6005         size = bytecount;
6006     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6007     if (!p)
6008         return -TARGET_EFAULT;
6009     /* ??? Should this by byteswapped?  */
6010     memcpy(p, ldt_table, size);
6011     unlock_user(p, ptr, size);
6012     return size;
6013 }
6014 
6015 /* XXX: add locking support */
6016 static abi_long write_ldt(CPUX86State *env,
6017                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6018 {
6019     struct target_modify_ldt_ldt_s ldt_info;
6020     struct target_modify_ldt_ldt_s *target_ldt_info;
6021     int seg_32bit, contents, read_exec_only, limit_in_pages;
6022     int seg_not_present, useable, lm;
6023     uint32_t *lp, entry_1, entry_2;
6024 
6025     if (bytecount != sizeof(ldt_info))
6026         return -TARGET_EINVAL;
6027     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6028         return -TARGET_EFAULT;
6029     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6030     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6031     ldt_info.limit = tswap32(target_ldt_info->limit);
6032     ldt_info.flags = tswap32(target_ldt_info->flags);
6033     unlock_user_struct(target_ldt_info, ptr, 0);
6034 
6035     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6036         return -TARGET_EINVAL;
6037     seg_32bit = ldt_info.flags & 1;
6038     contents = (ldt_info.flags >> 1) & 3;
6039     read_exec_only = (ldt_info.flags >> 3) & 1;
6040     limit_in_pages = (ldt_info.flags >> 4) & 1;
6041     seg_not_present = (ldt_info.flags >> 5) & 1;
6042     useable = (ldt_info.flags >> 6) & 1;
6043 #ifdef TARGET_ABI32
6044     lm = 0;
6045 #else
6046     lm = (ldt_info.flags >> 7) & 1;
6047 #endif
6048     if (contents == 3) {
6049         if (oldmode)
6050             return -TARGET_EINVAL;
6051         if (seg_not_present == 0)
6052             return -TARGET_EINVAL;
6053     }
6054     /* allocate the LDT */
6055     if (!ldt_table) {
6056         env->ldt.base = target_mmap(0,
6057                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6058                                     PROT_READ|PROT_WRITE,
6059                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6060         if (env->ldt.base == -1)
6061             return -TARGET_ENOMEM;
6062         memset(g2h_untagged(env->ldt.base), 0,
6063                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6064         env->ldt.limit = 0xffff;
6065         ldt_table = g2h_untagged(env->ldt.base);
6066     }
6067 
6068     /* NOTE: same code as Linux kernel */
6069     /* Allow LDTs to be cleared by the user. */
6070     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6071         if (oldmode ||
6072             (contents == 0		&&
6073              read_exec_only == 1	&&
6074              seg_32bit == 0		&&
6075              limit_in_pages == 0	&&
6076              seg_not_present == 1	&&
6077              useable == 0 )) {
6078             entry_1 = 0;
6079             entry_2 = 0;
6080             goto install;
6081         }
6082     }
6083 
6084     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6085         (ldt_info.limit & 0x0ffff);
6086     entry_2 = (ldt_info.base_addr & 0xff000000) |
6087         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6088         (ldt_info.limit & 0xf0000) |
6089         ((read_exec_only ^ 1) << 9) |
6090         (contents << 10) |
6091         ((seg_not_present ^ 1) << 15) |
6092         (seg_32bit << 22) |
6093         (limit_in_pages << 23) |
6094         (lm << 21) |
6095         0x7000;
6096     if (!oldmode)
6097         entry_2 |= (useable << 20);
6098 
6099     /* Install the new entry ...  */
6100 install:
6101     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6102     lp[0] = tswap32(entry_1);
6103     lp[1] = tswap32(entry_2);
6104     return 0;
6105 }
6106 
6107 /* specific and weird i386 syscalls */
6108 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6109                               unsigned long bytecount)
6110 {
6111     abi_long ret;
6112 
6113     switch (func) {
6114     case 0:
6115         ret = read_ldt(ptr, bytecount);
6116         break;
6117     case 1:
6118         ret = write_ldt(env, ptr, bytecount, 1);
6119         break;
6120     case 0x11:
6121         ret = write_ldt(env, ptr, bytecount, 0);
6122         break;
6123     default:
6124         ret = -TARGET_ENOSYS;
6125         break;
6126     }
6127     return ret;
6128 }
6129 
6130 #if defined(TARGET_ABI32)
6131 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6132 {
6133     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6134     struct target_modify_ldt_ldt_s ldt_info;
6135     struct target_modify_ldt_ldt_s *target_ldt_info;
6136     int seg_32bit, contents, read_exec_only, limit_in_pages;
6137     int seg_not_present, useable, lm;
6138     uint32_t *lp, entry_1, entry_2;
6139     int i;
6140 
6141     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6142     if (!target_ldt_info)
6143         return -TARGET_EFAULT;
6144     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6145     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6146     ldt_info.limit = tswap32(target_ldt_info->limit);
6147     ldt_info.flags = tswap32(target_ldt_info->flags);
6148     if (ldt_info.entry_number == -1) {
6149         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6150             if (gdt_table[i] == 0) {
6151                 ldt_info.entry_number = i;
6152                 target_ldt_info->entry_number = tswap32(i);
6153                 break;
6154             }
6155         }
6156     }
6157     unlock_user_struct(target_ldt_info, ptr, 1);
6158 
6159     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6160         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6161            return -TARGET_EINVAL;
6162     seg_32bit = ldt_info.flags & 1;
6163     contents = (ldt_info.flags >> 1) & 3;
6164     read_exec_only = (ldt_info.flags >> 3) & 1;
6165     limit_in_pages = (ldt_info.flags >> 4) & 1;
6166     seg_not_present = (ldt_info.flags >> 5) & 1;
6167     useable = (ldt_info.flags >> 6) & 1;
6168 #ifdef TARGET_ABI32
6169     lm = 0;
6170 #else
6171     lm = (ldt_info.flags >> 7) & 1;
6172 #endif
6173 
6174     if (contents == 3) {
6175         if (seg_not_present == 0)
6176             return -TARGET_EINVAL;
6177     }
6178 
6179     /* NOTE: same code as Linux kernel */
6180     /* Allow LDTs to be cleared by the user. */
6181     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6182         if ((contents == 0             &&
6183              read_exec_only == 1       &&
6184              seg_32bit == 0            &&
6185              limit_in_pages == 0       &&
6186              seg_not_present == 1      &&
6187              useable == 0 )) {
6188             entry_1 = 0;
6189             entry_2 = 0;
6190             goto install;
6191         }
6192     }
6193 
6194     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6195         (ldt_info.limit & 0x0ffff);
6196     entry_2 = (ldt_info.base_addr & 0xff000000) |
6197         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6198         (ldt_info.limit & 0xf0000) |
6199         ((read_exec_only ^ 1) << 9) |
6200         (contents << 10) |
6201         ((seg_not_present ^ 1) << 15) |
6202         (seg_32bit << 22) |
6203         (limit_in_pages << 23) |
6204         (useable << 20) |
6205         (lm << 21) |
6206         0x7000;
6207 
6208     /* Install the new entry ...  */
6209 install:
6210     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6211     lp[0] = tswap32(entry_1);
6212     lp[1] = tswap32(entry_2);
6213     return 0;
6214 }
6215 
6216 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6217 {
6218     struct target_modify_ldt_ldt_s *target_ldt_info;
6219     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6220     uint32_t base_addr, limit, flags;
6221     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6222     int seg_not_present, useable, lm;
6223     uint32_t *lp, entry_1, entry_2;
6224 
6225     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6226     if (!target_ldt_info)
6227         return -TARGET_EFAULT;
6228     idx = tswap32(target_ldt_info->entry_number);
6229     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6230         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6231         unlock_user_struct(target_ldt_info, ptr, 1);
6232         return -TARGET_EINVAL;
6233     }
6234     lp = (uint32_t *)(gdt_table + idx);
6235     entry_1 = tswap32(lp[0]);
6236     entry_2 = tswap32(lp[1]);
6237 
6238     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6239     contents = (entry_2 >> 10) & 3;
6240     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6241     seg_32bit = (entry_2 >> 22) & 1;
6242     limit_in_pages = (entry_2 >> 23) & 1;
6243     useable = (entry_2 >> 20) & 1;
6244 #ifdef TARGET_ABI32
6245     lm = 0;
6246 #else
6247     lm = (entry_2 >> 21) & 1;
6248 #endif
6249     flags = (seg_32bit << 0) | (contents << 1) |
6250         (read_exec_only << 3) | (limit_in_pages << 4) |
6251         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6252     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6253     base_addr = (entry_1 >> 16) |
6254         (entry_2 & 0xff000000) |
6255         ((entry_2 & 0xff) << 16);
6256     target_ldt_info->base_addr = tswapal(base_addr);
6257     target_ldt_info->limit = tswap32(limit);
6258     target_ldt_info->flags = tswap32(flags);
6259     unlock_user_struct(target_ldt_info, ptr, 1);
6260     return 0;
6261 }
6262 
6263 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6264 {
6265     return -TARGET_ENOSYS;
6266 }
6267 #else
6268 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6269 {
6270     abi_long ret = 0;
6271     abi_ulong val;
6272     int idx;
6273 
6274     switch(code) {
6275     case TARGET_ARCH_SET_GS:
6276     case TARGET_ARCH_SET_FS:
6277         if (code == TARGET_ARCH_SET_GS)
6278             idx = R_GS;
6279         else
6280             idx = R_FS;
6281         cpu_x86_load_seg(env, idx, 0);
6282         env->segs[idx].base = addr;
6283         break;
6284     case TARGET_ARCH_GET_GS:
6285     case TARGET_ARCH_GET_FS:
6286         if (code == TARGET_ARCH_GET_GS)
6287             idx = R_GS;
6288         else
6289             idx = R_FS;
6290         val = env->segs[idx].base;
6291         if (put_user(val, addr, abi_ulong))
6292             ret = -TARGET_EFAULT;
6293         break;
6294     default:
6295         ret = -TARGET_EINVAL;
6296         break;
6297     }
6298     return ret;
6299 }
6300 #endif /* defined(TARGET_ABI32 */
6301 #endif /* defined(TARGET_I386) */
6302 
6303 /*
6304  * These constants are generic.  Supply any that are missing from the host.
6305  */
6306 #ifndef PR_SET_NAME
6307 # define PR_SET_NAME    15
6308 # define PR_GET_NAME    16
6309 #endif
6310 #ifndef PR_SET_FP_MODE
6311 # define PR_SET_FP_MODE 45
6312 # define PR_GET_FP_MODE 46
6313 # define PR_FP_MODE_FR   (1 << 0)
6314 # define PR_FP_MODE_FRE  (1 << 1)
6315 #endif
6316 #ifndef PR_SVE_SET_VL
6317 # define PR_SVE_SET_VL  50
6318 # define PR_SVE_GET_VL  51
6319 # define PR_SVE_VL_LEN_MASK  0xffff
6320 # define PR_SVE_VL_INHERIT   (1 << 17)
6321 #endif
6322 #ifndef PR_PAC_RESET_KEYS
6323 # define PR_PAC_RESET_KEYS  54
6324 # define PR_PAC_APIAKEY   (1 << 0)
6325 # define PR_PAC_APIBKEY   (1 << 1)
6326 # define PR_PAC_APDAKEY   (1 << 2)
6327 # define PR_PAC_APDBKEY   (1 << 3)
6328 # define PR_PAC_APGAKEY   (1 << 4)
6329 #endif
6330 #ifndef PR_SET_TAGGED_ADDR_CTRL
6331 # define PR_SET_TAGGED_ADDR_CTRL 55
6332 # define PR_GET_TAGGED_ADDR_CTRL 56
6333 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6334 #endif
6335 #ifndef PR_SET_IO_FLUSHER
6336 # define PR_SET_IO_FLUSHER 57
6337 # define PR_GET_IO_FLUSHER 58
6338 #endif
6339 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6340 # define PR_SET_SYSCALL_USER_DISPATCH 59
6341 #endif
6342 #ifndef PR_SME_SET_VL
6343 # define PR_SME_SET_VL  63
6344 # define PR_SME_GET_VL  64
6345 # define PR_SME_VL_LEN_MASK  0xffff
6346 # define PR_SME_VL_INHERIT   (1 << 17)
6347 #endif
6348 
6349 #include "target_prctl.h"
6350 
6351 static abi_long do_prctl_inval0(CPUArchState *env)
6352 {
6353     return -TARGET_EINVAL;
6354 }
6355 
6356 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6357 {
6358     return -TARGET_EINVAL;
6359 }
6360 
6361 #ifndef do_prctl_get_fp_mode
6362 #define do_prctl_get_fp_mode do_prctl_inval0
6363 #endif
6364 #ifndef do_prctl_set_fp_mode
6365 #define do_prctl_set_fp_mode do_prctl_inval1
6366 #endif
6367 #ifndef do_prctl_sve_get_vl
6368 #define do_prctl_sve_get_vl do_prctl_inval0
6369 #endif
6370 #ifndef do_prctl_sve_set_vl
6371 #define do_prctl_sve_set_vl do_prctl_inval1
6372 #endif
6373 #ifndef do_prctl_reset_keys
6374 #define do_prctl_reset_keys do_prctl_inval1
6375 #endif
6376 #ifndef do_prctl_set_tagged_addr_ctrl
6377 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6378 #endif
6379 #ifndef do_prctl_get_tagged_addr_ctrl
6380 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6381 #endif
6382 #ifndef do_prctl_get_unalign
6383 #define do_prctl_get_unalign do_prctl_inval1
6384 #endif
6385 #ifndef do_prctl_set_unalign
6386 #define do_prctl_set_unalign do_prctl_inval1
6387 #endif
6388 #ifndef do_prctl_sme_get_vl
6389 #define do_prctl_sme_get_vl do_prctl_inval0
6390 #endif
6391 #ifndef do_prctl_sme_set_vl
6392 #define do_prctl_sme_set_vl do_prctl_inval1
6393 #endif
6394 
6395 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6396                          abi_long arg3, abi_long arg4, abi_long arg5)
6397 {
6398     abi_long ret;
6399 
6400     switch (option) {
6401     case PR_GET_PDEATHSIG:
6402         {
6403             int deathsig;
6404             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6405                                   arg3, arg4, arg5));
6406             if (!is_error(ret) &&
6407                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6408                 return -TARGET_EFAULT;
6409             }
6410             return ret;
6411         }
6412     case PR_SET_PDEATHSIG:
6413         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6414                                arg3, arg4, arg5));
6415     case PR_GET_NAME:
6416         {
6417             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6418             if (!name) {
6419                 return -TARGET_EFAULT;
6420             }
6421             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6422                                   arg3, arg4, arg5));
6423             unlock_user(name, arg2, 16);
6424             return ret;
6425         }
6426     case PR_SET_NAME:
6427         {
6428             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6429             if (!name) {
6430                 return -TARGET_EFAULT;
6431             }
6432             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6433                                   arg3, arg4, arg5));
6434             unlock_user(name, arg2, 0);
6435             return ret;
6436         }
6437     case PR_GET_FP_MODE:
6438         return do_prctl_get_fp_mode(env);
6439     case PR_SET_FP_MODE:
6440         return do_prctl_set_fp_mode(env, arg2);
6441     case PR_SVE_GET_VL:
6442         return do_prctl_sve_get_vl(env);
6443     case PR_SVE_SET_VL:
6444         return do_prctl_sve_set_vl(env, arg2);
6445     case PR_SME_GET_VL:
6446         return do_prctl_sme_get_vl(env);
6447     case PR_SME_SET_VL:
6448         return do_prctl_sme_set_vl(env, arg2);
6449     case PR_PAC_RESET_KEYS:
6450         if (arg3 || arg4 || arg5) {
6451             return -TARGET_EINVAL;
6452         }
6453         return do_prctl_reset_keys(env, arg2);
6454     case PR_SET_TAGGED_ADDR_CTRL:
6455         if (arg3 || arg4 || arg5) {
6456             return -TARGET_EINVAL;
6457         }
6458         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6459     case PR_GET_TAGGED_ADDR_CTRL:
6460         if (arg2 || arg3 || arg4 || arg5) {
6461             return -TARGET_EINVAL;
6462         }
6463         return do_prctl_get_tagged_addr_ctrl(env);
6464 
6465     case PR_GET_UNALIGN:
6466         return do_prctl_get_unalign(env, arg2);
6467     case PR_SET_UNALIGN:
6468         return do_prctl_set_unalign(env, arg2);
6469 
6470     case PR_CAP_AMBIENT:
6471     case PR_CAPBSET_READ:
6472     case PR_CAPBSET_DROP:
6473     case PR_GET_DUMPABLE:
6474     case PR_SET_DUMPABLE:
6475     case PR_GET_KEEPCAPS:
6476     case PR_SET_KEEPCAPS:
6477     case PR_GET_SECUREBITS:
6478     case PR_SET_SECUREBITS:
6479     case PR_GET_TIMING:
6480     case PR_SET_TIMING:
6481     case PR_GET_TIMERSLACK:
6482     case PR_SET_TIMERSLACK:
6483     case PR_MCE_KILL:
6484     case PR_MCE_KILL_GET:
6485     case PR_GET_NO_NEW_PRIVS:
6486     case PR_SET_NO_NEW_PRIVS:
6487     case PR_GET_IO_FLUSHER:
6488     case PR_SET_IO_FLUSHER:
6489     case PR_SET_CHILD_SUBREAPER:
6490     case PR_GET_SPECULATION_CTRL:
6491     case PR_SET_SPECULATION_CTRL:
6492         /* Some prctl options have no pointer arguments and we can pass on. */
6493         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6494 
6495     case PR_GET_CHILD_SUBREAPER:
6496         {
6497             int val;
6498             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6499                                   arg3, arg4, arg5));
6500             if (!is_error(ret) && put_user_s32(val, arg2)) {
6501                 return -TARGET_EFAULT;
6502             }
6503             return ret;
6504         }
6505 
6506     case PR_GET_TID_ADDRESS:
6507         {
6508             TaskState *ts = get_task_state(env_cpu(env));
6509             return put_user_ual(ts->child_tidptr, arg2);
6510         }
6511 
6512     case PR_GET_FPEXC:
6513     case PR_SET_FPEXC:
6514         /* Was used for SPE on PowerPC. */
6515         return -TARGET_EINVAL;
6516 
6517     case PR_GET_ENDIAN:
6518     case PR_SET_ENDIAN:
6519     case PR_GET_FPEMU:
6520     case PR_SET_FPEMU:
6521     case PR_SET_MM:
6522     case PR_GET_SECCOMP:
6523     case PR_SET_SECCOMP:
6524     case PR_SET_SYSCALL_USER_DISPATCH:
6525     case PR_GET_THP_DISABLE:
6526     case PR_SET_THP_DISABLE:
6527     case PR_GET_TSC:
6528     case PR_SET_TSC:
6529         /* Disable to prevent the target disabling stuff we need. */
6530         return -TARGET_EINVAL;
6531 
6532     default:
6533         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6534                       option);
6535         return -TARGET_EINVAL;
6536     }
6537 }
6538 
6539 #define NEW_STACK_SIZE 0x40000
6540 
6541 
6542 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6543 typedef struct {
6544     CPUArchState *env;
6545     pthread_mutex_t mutex;
6546     pthread_cond_t cond;
6547     pthread_t thread;
6548     uint32_t tid;
6549     abi_ulong child_tidptr;
6550     abi_ulong parent_tidptr;
6551     sigset_t sigmask;
6552 } new_thread_info;
6553 
6554 static void *clone_func(void *arg)
6555 {
6556     new_thread_info *info = arg;
6557     CPUArchState *env;
6558     CPUState *cpu;
6559     TaskState *ts;
6560 
6561     rcu_register_thread();
6562     tcg_register_thread();
6563     env = info->env;
6564     cpu = env_cpu(env);
6565     thread_cpu = cpu;
6566     ts = get_task_state(cpu);
6567     info->tid = sys_gettid();
6568     task_settid(ts);
6569     if (info->child_tidptr)
6570         put_user_u32(info->tid, info->child_tidptr);
6571     if (info->parent_tidptr)
6572         put_user_u32(info->tid, info->parent_tidptr);
6573     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6574     /* Enable signals.  */
6575     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6576     /* Signal to the parent that we're ready.  */
6577     pthread_mutex_lock(&info->mutex);
6578     pthread_cond_broadcast(&info->cond);
6579     pthread_mutex_unlock(&info->mutex);
6580     /* Wait until the parent has finished initializing the tls state.  */
6581     pthread_mutex_lock(&clone_lock);
6582     pthread_mutex_unlock(&clone_lock);
6583     cpu_loop(env);
6584     /* never exits */
6585     return NULL;
6586 }
6587 
6588 /* do_fork() Must return host values and target errnos (unlike most
6589    do_*() functions). */
6590 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6591                    abi_ulong parent_tidptr, target_ulong newtls,
6592                    abi_ulong child_tidptr)
6593 {
6594     CPUState *cpu = env_cpu(env);
6595     int ret;
6596     TaskState *ts;
6597     CPUState *new_cpu;
6598     CPUArchState *new_env;
6599     sigset_t sigmask;
6600 
6601     flags &= ~CLONE_IGNORED_FLAGS;
6602 
6603     /* Emulate vfork() with fork() */
6604     if (flags & CLONE_VFORK)
6605         flags &= ~(CLONE_VFORK | CLONE_VM);
6606 
6607     if (flags & CLONE_VM) {
6608         TaskState *parent_ts = get_task_state(cpu);
6609         new_thread_info info;
6610         pthread_attr_t attr;
6611 
6612         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6613             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6614             return -TARGET_EINVAL;
6615         }
6616 
6617         ts = g_new0(TaskState, 1);
6618         init_task_state(ts);
6619 
6620         /* Grab a mutex so that thread setup appears atomic.  */
6621         pthread_mutex_lock(&clone_lock);
6622 
6623         /*
6624          * If this is our first additional thread, we need to ensure we
6625          * generate code for parallel execution and flush old translations.
6626          * Do this now so that the copy gets CF_PARALLEL too.
6627          */
6628         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6629             tcg_cflags_set(cpu, CF_PARALLEL);
6630             tb_flush(cpu);
6631         }
6632 
6633         /* we create a new CPU instance. */
6634         new_env = cpu_copy(env);
6635         /* Init regs that differ from the parent.  */
6636         cpu_clone_regs_child(new_env, newsp, flags);
6637         cpu_clone_regs_parent(env, flags);
6638         new_cpu = env_cpu(new_env);
6639         new_cpu->opaque = ts;
6640         ts->bprm = parent_ts->bprm;
6641         ts->info = parent_ts->info;
6642         ts->signal_mask = parent_ts->signal_mask;
6643 
6644         if (flags & CLONE_CHILD_CLEARTID) {
6645             ts->child_tidptr = child_tidptr;
6646         }
6647 
6648         if (flags & CLONE_SETTLS) {
6649             cpu_set_tls (new_env, newtls);
6650         }
6651 
6652         memset(&info, 0, sizeof(info));
6653         pthread_mutex_init(&info.mutex, NULL);
6654         pthread_mutex_lock(&info.mutex);
6655         pthread_cond_init(&info.cond, NULL);
6656         info.env = new_env;
6657         if (flags & CLONE_CHILD_SETTID) {
6658             info.child_tidptr = child_tidptr;
6659         }
6660         if (flags & CLONE_PARENT_SETTID) {
6661             info.parent_tidptr = parent_tidptr;
6662         }
6663 
6664         ret = pthread_attr_init(&attr);
6665         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6666         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6667         /* It is not safe to deliver signals until the child has finished
6668            initializing, so temporarily block all signals.  */
6669         sigfillset(&sigmask);
6670         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6671         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6672 
6673         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6674         /* TODO: Free new CPU state if thread creation failed.  */
6675 
6676         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6677         pthread_attr_destroy(&attr);
6678         if (ret == 0) {
6679             /* Wait for the child to initialize.  */
6680             pthread_cond_wait(&info.cond, &info.mutex);
6681             ret = info.tid;
6682         } else {
6683             ret = -1;
6684         }
6685         pthread_mutex_unlock(&info.mutex);
6686         pthread_cond_destroy(&info.cond);
6687         pthread_mutex_destroy(&info.mutex);
6688         pthread_mutex_unlock(&clone_lock);
6689     } else {
6690         /* if no CLONE_VM, we consider it is a fork */
6691         if (flags & CLONE_INVALID_FORK_FLAGS) {
6692             return -TARGET_EINVAL;
6693         }
6694 
6695         /* We can't support custom termination signals */
6696         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6697             return -TARGET_EINVAL;
6698         }
6699 
6700 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6701         if (flags & CLONE_PIDFD) {
6702             return -TARGET_EINVAL;
6703         }
6704 #endif
6705 
6706         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6707         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6708             return -TARGET_EINVAL;
6709         }
6710 
6711         if (block_signals()) {
6712             return -QEMU_ERESTARTSYS;
6713         }
6714 
6715         fork_start();
6716         ret = fork();
6717         if (ret == 0) {
6718             /* Child Process.  */
6719             cpu_clone_regs_child(env, newsp, flags);
6720             fork_end(ret);
6721             /* There is a race condition here.  The parent process could
6722                theoretically read the TID in the child process before the child
6723                tid is set.  This would require using either ptrace
6724                (not implemented) or having *_tidptr to point at a shared memory
6725                mapping.  We can't repeat the spinlock hack used above because
6726                the child process gets its own copy of the lock.  */
6727             if (flags & CLONE_CHILD_SETTID)
6728                 put_user_u32(sys_gettid(), child_tidptr);
6729             if (flags & CLONE_PARENT_SETTID)
6730                 put_user_u32(sys_gettid(), parent_tidptr);
6731             ts = get_task_state(cpu);
6732             if (flags & CLONE_SETTLS)
6733                 cpu_set_tls (env, newtls);
6734             if (flags & CLONE_CHILD_CLEARTID)
6735                 ts->child_tidptr = child_tidptr;
6736         } else {
6737             cpu_clone_regs_parent(env, flags);
6738             if (flags & CLONE_PIDFD) {
6739                 int pid_fd = 0;
6740 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6741                 int pid_child = ret;
6742                 pid_fd = pidfd_open(pid_child, 0);
6743                 if (pid_fd >= 0) {
6744                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6745                                                | FD_CLOEXEC);
6746                 } else {
6747                         pid_fd = 0;
6748                 }
6749 #endif
6750                 put_user_u32(pid_fd, parent_tidptr);
6751             }
6752             fork_end(ret);
6753         }
6754         g_assert(!cpu_in_exclusive_context(cpu));
6755     }
6756     return ret;
6757 }
6758 
6759 /* warning : doesn't handle linux specific flags... */
6760 static int target_to_host_fcntl_cmd(int cmd)
6761 {
6762     int ret;
6763 
6764     switch(cmd) {
6765     case TARGET_F_DUPFD:
6766     case TARGET_F_GETFD:
6767     case TARGET_F_SETFD:
6768     case TARGET_F_GETFL:
6769     case TARGET_F_SETFL:
6770     case TARGET_F_OFD_GETLK:
6771     case TARGET_F_OFD_SETLK:
6772     case TARGET_F_OFD_SETLKW:
6773         ret = cmd;
6774         break;
6775     case TARGET_F_GETLK:
6776         ret = F_GETLK;
6777         break;
6778     case TARGET_F_SETLK:
6779         ret = F_SETLK;
6780         break;
6781     case TARGET_F_SETLKW:
6782         ret = F_SETLKW;
6783         break;
6784     case TARGET_F_GETOWN:
6785         ret = F_GETOWN;
6786         break;
6787     case TARGET_F_SETOWN:
6788         ret = F_SETOWN;
6789         break;
6790     case TARGET_F_GETSIG:
6791         ret = F_GETSIG;
6792         break;
6793     case TARGET_F_SETSIG:
6794         ret = F_SETSIG;
6795         break;
6796 #if TARGET_ABI_BITS == 32
6797     case TARGET_F_GETLK64:
6798         ret = F_GETLK;
6799         break;
6800     case TARGET_F_SETLK64:
6801         ret = F_SETLK;
6802         break;
6803     case TARGET_F_SETLKW64:
6804         ret = F_SETLKW;
6805         break;
6806 #endif
6807     case TARGET_F_SETLEASE:
6808         ret = F_SETLEASE;
6809         break;
6810     case TARGET_F_GETLEASE:
6811         ret = F_GETLEASE;
6812         break;
6813 #ifdef F_DUPFD_CLOEXEC
6814     case TARGET_F_DUPFD_CLOEXEC:
6815         ret = F_DUPFD_CLOEXEC;
6816         break;
6817 #endif
6818     case TARGET_F_NOTIFY:
6819         ret = F_NOTIFY;
6820         break;
6821 #ifdef F_GETOWN_EX
6822     case TARGET_F_GETOWN_EX:
6823         ret = F_GETOWN_EX;
6824         break;
6825 #endif
6826 #ifdef F_SETOWN_EX
6827     case TARGET_F_SETOWN_EX:
6828         ret = F_SETOWN_EX;
6829         break;
6830 #endif
6831 #ifdef F_SETPIPE_SZ
6832     case TARGET_F_SETPIPE_SZ:
6833         ret = F_SETPIPE_SZ;
6834         break;
6835     case TARGET_F_GETPIPE_SZ:
6836         ret = F_GETPIPE_SZ;
6837         break;
6838 #endif
6839 #ifdef F_ADD_SEALS
6840     case TARGET_F_ADD_SEALS:
6841         ret = F_ADD_SEALS;
6842         break;
6843     case TARGET_F_GET_SEALS:
6844         ret = F_GET_SEALS;
6845         break;
6846 #endif
6847     default:
6848         ret = -TARGET_EINVAL;
6849         break;
6850     }
6851 
6852 #if defined(__powerpc64__)
6853     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6854      * is not supported by kernel. The glibc fcntl call actually adjusts
6855      * them to 5, 6 and 7 before making the syscall(). Since we make the
6856      * syscall directly, adjust to what is supported by the kernel.
6857      */
6858     if (ret >= F_GETLK && ret <= F_SETLKW) {
6859         ret -= F_GETLK - 5;
6860     }
6861 #endif
6862 
6863     return ret;
6864 }
6865 
6866 #define FLOCK_TRANSTBL \
6867     switch (type) { \
6868     TRANSTBL_CONVERT(F_RDLCK); \
6869     TRANSTBL_CONVERT(F_WRLCK); \
6870     TRANSTBL_CONVERT(F_UNLCK); \
6871     }
6872 
6873 static int target_to_host_flock(int type)
6874 {
6875 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6876     FLOCK_TRANSTBL
6877 #undef  TRANSTBL_CONVERT
6878     return -TARGET_EINVAL;
6879 }
6880 
6881 static int host_to_target_flock(int type)
6882 {
6883 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6884     FLOCK_TRANSTBL
6885 #undef  TRANSTBL_CONVERT
6886     /* if we don't know how to convert the value coming
6887      * from the host we copy to the target field as-is
6888      */
6889     return type;
6890 }
6891 
6892 static inline abi_long copy_from_user_flock(struct flock *fl,
6893                                             abi_ulong target_flock_addr)
6894 {
6895     struct target_flock *target_fl;
6896     int l_type;
6897 
6898     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6899         return -TARGET_EFAULT;
6900     }
6901 
6902     __get_user(l_type, &target_fl->l_type);
6903     l_type = target_to_host_flock(l_type);
6904     if (l_type < 0) {
6905         return l_type;
6906     }
6907     fl->l_type = l_type;
6908     __get_user(fl->l_whence, &target_fl->l_whence);
6909     __get_user(fl->l_start, &target_fl->l_start);
6910     __get_user(fl->l_len, &target_fl->l_len);
6911     __get_user(fl->l_pid, &target_fl->l_pid);
6912     unlock_user_struct(target_fl, target_flock_addr, 0);
6913     return 0;
6914 }
6915 
6916 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6917                                           const struct flock *fl)
6918 {
6919     struct target_flock *target_fl;
6920     short l_type;
6921 
6922     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6923         return -TARGET_EFAULT;
6924     }
6925 
6926     l_type = host_to_target_flock(fl->l_type);
6927     __put_user(l_type, &target_fl->l_type);
6928     __put_user(fl->l_whence, &target_fl->l_whence);
6929     __put_user(fl->l_start, &target_fl->l_start);
6930     __put_user(fl->l_len, &target_fl->l_len);
6931     __put_user(fl->l_pid, &target_fl->l_pid);
6932     unlock_user_struct(target_fl, target_flock_addr, 1);
6933     return 0;
6934 }
6935 
6936 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6937 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6938 
6939 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6940 struct target_oabi_flock64 {
6941     abi_short l_type;
6942     abi_short l_whence;
6943     abi_llong l_start;
6944     abi_llong l_len;
6945     abi_int   l_pid;
6946 } QEMU_PACKED;
6947 
6948 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6949                                                    abi_ulong target_flock_addr)
6950 {
6951     struct target_oabi_flock64 *target_fl;
6952     int l_type;
6953 
6954     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6955         return -TARGET_EFAULT;
6956     }
6957 
6958     __get_user(l_type, &target_fl->l_type);
6959     l_type = target_to_host_flock(l_type);
6960     if (l_type < 0) {
6961         return l_type;
6962     }
6963     fl->l_type = l_type;
6964     __get_user(fl->l_whence, &target_fl->l_whence);
6965     __get_user(fl->l_start, &target_fl->l_start);
6966     __get_user(fl->l_len, &target_fl->l_len);
6967     __get_user(fl->l_pid, &target_fl->l_pid);
6968     unlock_user_struct(target_fl, target_flock_addr, 0);
6969     return 0;
6970 }
6971 
6972 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6973                                                  const struct flock *fl)
6974 {
6975     struct target_oabi_flock64 *target_fl;
6976     short l_type;
6977 
6978     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6979         return -TARGET_EFAULT;
6980     }
6981 
6982     l_type = host_to_target_flock(fl->l_type);
6983     __put_user(l_type, &target_fl->l_type);
6984     __put_user(fl->l_whence, &target_fl->l_whence);
6985     __put_user(fl->l_start, &target_fl->l_start);
6986     __put_user(fl->l_len, &target_fl->l_len);
6987     __put_user(fl->l_pid, &target_fl->l_pid);
6988     unlock_user_struct(target_fl, target_flock_addr, 1);
6989     return 0;
6990 }
6991 #endif
6992 
6993 static inline abi_long copy_from_user_flock64(struct flock *fl,
6994                                               abi_ulong target_flock_addr)
6995 {
6996     struct target_flock64 *target_fl;
6997     int l_type;
6998 
6999     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7000         return -TARGET_EFAULT;
7001     }
7002 
7003     __get_user(l_type, &target_fl->l_type);
7004     l_type = target_to_host_flock(l_type);
7005     if (l_type < 0) {
7006         return l_type;
7007     }
7008     fl->l_type = l_type;
7009     __get_user(fl->l_whence, &target_fl->l_whence);
7010     __get_user(fl->l_start, &target_fl->l_start);
7011     __get_user(fl->l_len, &target_fl->l_len);
7012     __get_user(fl->l_pid, &target_fl->l_pid);
7013     unlock_user_struct(target_fl, target_flock_addr, 0);
7014     return 0;
7015 }
7016 
7017 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7018                                             const struct flock *fl)
7019 {
7020     struct target_flock64 *target_fl;
7021     short l_type;
7022 
7023     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7024         return -TARGET_EFAULT;
7025     }
7026 
7027     l_type = host_to_target_flock(fl->l_type);
7028     __put_user(l_type, &target_fl->l_type);
7029     __put_user(fl->l_whence, &target_fl->l_whence);
7030     __put_user(fl->l_start, &target_fl->l_start);
7031     __put_user(fl->l_len, &target_fl->l_len);
7032     __put_user(fl->l_pid, &target_fl->l_pid);
7033     unlock_user_struct(target_fl, target_flock_addr, 1);
7034     return 0;
7035 }
7036 
7037 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7038 {
7039     struct flock fl;
7040 #ifdef F_GETOWN_EX
7041     struct f_owner_ex fox;
7042     struct target_f_owner_ex *target_fox;
7043 #endif
7044     abi_long ret;
7045     int host_cmd = target_to_host_fcntl_cmd(cmd);
7046 
7047     if (host_cmd == -TARGET_EINVAL)
7048 	    return host_cmd;
7049 
7050     switch(cmd) {
7051     case TARGET_F_GETLK:
7052         ret = copy_from_user_flock(&fl, arg);
7053         if (ret) {
7054             return ret;
7055         }
7056         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7057         if (ret == 0) {
7058             ret = copy_to_user_flock(arg, &fl);
7059         }
7060         break;
7061 
7062     case TARGET_F_SETLK:
7063     case TARGET_F_SETLKW:
7064         ret = copy_from_user_flock(&fl, arg);
7065         if (ret) {
7066             return ret;
7067         }
7068         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7069         break;
7070 
7071     case TARGET_F_GETLK64:
7072     case TARGET_F_OFD_GETLK:
7073         ret = copy_from_user_flock64(&fl, arg);
7074         if (ret) {
7075             return ret;
7076         }
7077         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7078         if (ret == 0) {
7079             ret = copy_to_user_flock64(arg, &fl);
7080         }
7081         break;
7082     case TARGET_F_SETLK64:
7083     case TARGET_F_SETLKW64:
7084     case TARGET_F_OFD_SETLK:
7085     case TARGET_F_OFD_SETLKW:
7086         ret = copy_from_user_flock64(&fl, arg);
7087         if (ret) {
7088             return ret;
7089         }
7090         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7091         break;
7092 
7093     case TARGET_F_GETFL:
7094         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7095         if (ret >= 0) {
7096             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7097             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7098             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7099                 ret |= TARGET_O_LARGEFILE;
7100             }
7101         }
7102         break;
7103 
7104     case TARGET_F_SETFL:
7105         ret = get_errno(safe_fcntl(fd, host_cmd,
7106                                    target_to_host_bitmask(arg,
7107                                                           fcntl_flags_tbl)));
7108         break;
7109 
7110 #ifdef F_GETOWN_EX
7111     case TARGET_F_GETOWN_EX:
7112         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7113         if (ret >= 0) {
7114             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7115                 return -TARGET_EFAULT;
7116             target_fox->type = tswap32(fox.type);
7117             target_fox->pid = tswap32(fox.pid);
7118             unlock_user_struct(target_fox, arg, 1);
7119         }
7120         break;
7121 #endif
7122 
7123 #ifdef F_SETOWN_EX
7124     case TARGET_F_SETOWN_EX:
7125         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7126             return -TARGET_EFAULT;
7127         fox.type = tswap32(target_fox->type);
7128         fox.pid = tswap32(target_fox->pid);
7129         unlock_user_struct(target_fox, arg, 0);
7130         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7131         break;
7132 #endif
7133 
7134     case TARGET_F_SETSIG:
7135         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7136         break;
7137 
7138     case TARGET_F_GETSIG:
7139         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7140         break;
7141 
7142     case TARGET_F_SETOWN:
7143     case TARGET_F_GETOWN:
7144     case TARGET_F_SETLEASE:
7145     case TARGET_F_GETLEASE:
7146     case TARGET_F_SETPIPE_SZ:
7147     case TARGET_F_GETPIPE_SZ:
7148     case TARGET_F_ADD_SEALS:
7149     case TARGET_F_GET_SEALS:
7150         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7151         break;
7152 
7153     default:
7154         ret = get_errno(safe_fcntl(fd, cmd, arg));
7155         break;
7156     }
7157     return ret;
7158 }
7159 
7160 #ifdef USE_UID16
7161 
7162 static inline int high2lowuid(int uid)
7163 {
7164     if (uid > 65535)
7165         return 65534;
7166     else
7167         return uid;
7168 }
7169 
7170 static inline int high2lowgid(int gid)
7171 {
7172     if (gid > 65535)
7173         return 65534;
7174     else
7175         return gid;
7176 }
7177 
7178 static inline int low2highuid(int uid)
7179 {
7180     if ((int16_t)uid == -1)
7181         return -1;
7182     else
7183         return uid;
7184 }
7185 
7186 static inline int low2highgid(int gid)
7187 {
7188     if ((int16_t)gid == -1)
7189         return -1;
7190     else
7191         return gid;
7192 }
7193 static inline int tswapid(int id)
7194 {
7195     return tswap16(id);
7196 }
7197 
7198 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7199 
7200 #else /* !USE_UID16 */
7201 static inline int high2lowuid(int uid)
7202 {
7203     return uid;
7204 }
7205 static inline int high2lowgid(int gid)
7206 {
7207     return gid;
7208 }
7209 static inline int low2highuid(int uid)
7210 {
7211     return uid;
7212 }
7213 static inline int low2highgid(int gid)
7214 {
7215     return gid;
7216 }
7217 static inline int tswapid(int id)
7218 {
7219     return tswap32(id);
7220 }
7221 
7222 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7223 
7224 #endif /* USE_UID16 */
7225 
7226 /* We must do direct syscalls for setting UID/GID, because we want to
7227  * implement the Linux system call semantics of "change only for this thread",
7228  * not the libc/POSIX semantics of "change for all threads in process".
7229  * (See http://ewontfix.com/17/ for more details.)
7230  * We use the 32-bit version of the syscalls if present; if it is not
7231  * then either the host architecture supports 32-bit UIDs natively with
7232  * the standard syscall, or the 16-bit UID is the best we can do.
7233  */
7234 #ifdef __NR_setuid32
7235 #define __NR_sys_setuid __NR_setuid32
7236 #else
7237 #define __NR_sys_setuid __NR_setuid
7238 #endif
7239 #ifdef __NR_setgid32
7240 #define __NR_sys_setgid __NR_setgid32
7241 #else
7242 #define __NR_sys_setgid __NR_setgid
7243 #endif
7244 #ifdef __NR_setresuid32
7245 #define __NR_sys_setresuid __NR_setresuid32
7246 #else
7247 #define __NR_sys_setresuid __NR_setresuid
7248 #endif
7249 #ifdef __NR_setresgid32
7250 #define __NR_sys_setresgid __NR_setresgid32
7251 #else
7252 #define __NR_sys_setresgid __NR_setresgid
7253 #endif
7254 #ifdef __NR_setgroups32
7255 #define __NR_sys_setgroups __NR_setgroups32
7256 #else
7257 #define __NR_sys_setgroups __NR_setgroups
7258 #endif
7259 #ifdef __NR_sys_setreuid32
7260 #define __NR_sys_setreuid __NR_setreuid32
7261 #else
7262 #define __NR_sys_setreuid __NR_setreuid
7263 #endif
7264 #ifdef __NR_sys_setregid32
7265 #define __NR_sys_setregid __NR_setregid32
7266 #else
7267 #define __NR_sys_setregid __NR_setregid
7268 #endif
7269 
7270 _syscall1(int, sys_setuid, uid_t, uid)
7271 _syscall1(int, sys_setgid, gid_t, gid)
7272 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7273 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7274 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7275 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7276 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7277 
7278 void syscall_init(void)
7279 {
7280     IOCTLEntry *ie;
7281     const argtype *arg_type;
7282     int size;
7283 
7284     thunk_init(STRUCT_MAX);
7285 
7286 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7287 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7288 #include "syscall_types.h"
7289 #undef STRUCT
7290 #undef STRUCT_SPECIAL
7291 
7292     /* we patch the ioctl size if necessary. We rely on the fact that
7293        no ioctl has all the bits at '1' in the size field */
7294     ie = ioctl_entries;
7295     while (ie->target_cmd != 0) {
7296         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7297             TARGET_IOC_SIZEMASK) {
7298             arg_type = ie->arg_type;
7299             if (arg_type[0] != TYPE_PTR) {
7300                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7301                         ie->target_cmd);
7302                 exit(1);
7303             }
7304             arg_type++;
7305             size = thunk_type_size(arg_type, 0);
7306             ie->target_cmd = (ie->target_cmd &
7307                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7308                 (size << TARGET_IOC_SIZESHIFT);
7309         }
7310 
7311         /* automatic consistency check if same arch */
7312 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7313     (defined(__x86_64__) && defined(TARGET_X86_64))
7314         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7315             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7316                     ie->name, ie->target_cmd, ie->host_cmd);
7317         }
7318 #endif
7319         ie++;
7320     }
7321 }
7322 
7323 #ifdef TARGET_NR_truncate64
7324 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7325                                          abi_long arg2,
7326                                          abi_long arg3,
7327                                          abi_long arg4)
7328 {
7329     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7330         arg2 = arg3;
7331         arg3 = arg4;
7332     }
7333     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7334 }
7335 #endif
7336 
7337 #ifdef TARGET_NR_ftruncate64
7338 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7339                                           abi_long arg2,
7340                                           abi_long arg3,
7341                                           abi_long arg4)
7342 {
7343     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7344         arg2 = arg3;
7345         arg3 = arg4;
7346     }
7347     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7348 }
7349 #endif
7350 
7351 #if defined(TARGET_NR_timer_settime) || \
7352     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7353 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7354                                                  abi_ulong target_addr)
7355 {
7356     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7357                                 offsetof(struct target_itimerspec,
7358                                          it_interval)) ||
7359         target_to_host_timespec(&host_its->it_value, target_addr +
7360                                 offsetof(struct target_itimerspec,
7361                                          it_value))) {
7362         return -TARGET_EFAULT;
7363     }
7364 
7365     return 0;
7366 }
7367 #endif
7368 
7369 #if defined(TARGET_NR_timer_settime64) || \
7370     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7371 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7372                                                    abi_ulong target_addr)
7373 {
7374     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7375                                   offsetof(struct target__kernel_itimerspec,
7376                                            it_interval)) ||
7377         target_to_host_timespec64(&host_its->it_value, target_addr +
7378                                   offsetof(struct target__kernel_itimerspec,
7379                                            it_value))) {
7380         return -TARGET_EFAULT;
7381     }
7382 
7383     return 0;
7384 }
7385 #endif
7386 
7387 #if ((defined(TARGET_NR_timerfd_gettime) || \
7388       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7389       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7390 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7391                                                  struct itimerspec *host_its)
7392 {
7393     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7394                                                        it_interval),
7395                                 &host_its->it_interval) ||
7396         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7397                                                        it_value),
7398                                 &host_its->it_value)) {
7399         return -TARGET_EFAULT;
7400     }
7401     return 0;
7402 }
7403 #endif
7404 
7405 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7406       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7407       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7408 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7409                                                    struct itimerspec *host_its)
7410 {
7411     if (host_to_target_timespec64(target_addr +
7412                                   offsetof(struct target__kernel_itimerspec,
7413                                            it_interval),
7414                                   &host_its->it_interval) ||
7415         host_to_target_timespec64(target_addr +
7416                                   offsetof(struct target__kernel_itimerspec,
7417                                            it_value),
7418                                   &host_its->it_value)) {
7419         return -TARGET_EFAULT;
7420     }
7421     return 0;
7422 }
7423 #endif
7424 
7425 #if defined(TARGET_NR_adjtimex) || \
7426     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7427 static inline abi_long target_to_host_timex(struct timex *host_tx,
7428                                             abi_long target_addr)
7429 {
7430     struct target_timex *target_tx;
7431 
7432     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7433         return -TARGET_EFAULT;
7434     }
7435 
7436     __get_user(host_tx->modes, &target_tx->modes);
7437     __get_user(host_tx->offset, &target_tx->offset);
7438     __get_user(host_tx->freq, &target_tx->freq);
7439     __get_user(host_tx->maxerror, &target_tx->maxerror);
7440     __get_user(host_tx->esterror, &target_tx->esterror);
7441     __get_user(host_tx->status, &target_tx->status);
7442     __get_user(host_tx->constant, &target_tx->constant);
7443     __get_user(host_tx->precision, &target_tx->precision);
7444     __get_user(host_tx->tolerance, &target_tx->tolerance);
7445     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7446     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7447     __get_user(host_tx->tick, &target_tx->tick);
7448     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7449     __get_user(host_tx->jitter, &target_tx->jitter);
7450     __get_user(host_tx->shift, &target_tx->shift);
7451     __get_user(host_tx->stabil, &target_tx->stabil);
7452     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7453     __get_user(host_tx->calcnt, &target_tx->calcnt);
7454     __get_user(host_tx->errcnt, &target_tx->errcnt);
7455     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7456     __get_user(host_tx->tai, &target_tx->tai);
7457 
7458     unlock_user_struct(target_tx, target_addr, 0);
7459     return 0;
7460 }
7461 
7462 static inline abi_long host_to_target_timex(abi_long target_addr,
7463                                             struct timex *host_tx)
7464 {
7465     struct target_timex *target_tx;
7466 
7467     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7468         return -TARGET_EFAULT;
7469     }
7470 
7471     __put_user(host_tx->modes, &target_tx->modes);
7472     __put_user(host_tx->offset, &target_tx->offset);
7473     __put_user(host_tx->freq, &target_tx->freq);
7474     __put_user(host_tx->maxerror, &target_tx->maxerror);
7475     __put_user(host_tx->esterror, &target_tx->esterror);
7476     __put_user(host_tx->status, &target_tx->status);
7477     __put_user(host_tx->constant, &target_tx->constant);
7478     __put_user(host_tx->precision, &target_tx->precision);
7479     __put_user(host_tx->tolerance, &target_tx->tolerance);
7480     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7481     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7482     __put_user(host_tx->tick, &target_tx->tick);
7483     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7484     __put_user(host_tx->jitter, &target_tx->jitter);
7485     __put_user(host_tx->shift, &target_tx->shift);
7486     __put_user(host_tx->stabil, &target_tx->stabil);
7487     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7488     __put_user(host_tx->calcnt, &target_tx->calcnt);
7489     __put_user(host_tx->errcnt, &target_tx->errcnt);
7490     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7491     __put_user(host_tx->tai, &target_tx->tai);
7492 
7493     unlock_user_struct(target_tx, target_addr, 1);
7494     return 0;
7495 }
7496 #endif
7497 
7498 
7499 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7500 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7501                                               abi_long target_addr)
7502 {
7503     struct target__kernel_timex *target_tx;
7504 
7505     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7506                                  offsetof(struct target__kernel_timex,
7507                                           time))) {
7508         return -TARGET_EFAULT;
7509     }
7510 
7511     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7512         return -TARGET_EFAULT;
7513     }
7514 
7515     __get_user(host_tx->modes, &target_tx->modes);
7516     __get_user(host_tx->offset, &target_tx->offset);
7517     __get_user(host_tx->freq, &target_tx->freq);
7518     __get_user(host_tx->maxerror, &target_tx->maxerror);
7519     __get_user(host_tx->esterror, &target_tx->esterror);
7520     __get_user(host_tx->status, &target_tx->status);
7521     __get_user(host_tx->constant, &target_tx->constant);
7522     __get_user(host_tx->precision, &target_tx->precision);
7523     __get_user(host_tx->tolerance, &target_tx->tolerance);
7524     __get_user(host_tx->tick, &target_tx->tick);
7525     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7526     __get_user(host_tx->jitter, &target_tx->jitter);
7527     __get_user(host_tx->shift, &target_tx->shift);
7528     __get_user(host_tx->stabil, &target_tx->stabil);
7529     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7530     __get_user(host_tx->calcnt, &target_tx->calcnt);
7531     __get_user(host_tx->errcnt, &target_tx->errcnt);
7532     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7533     __get_user(host_tx->tai, &target_tx->tai);
7534 
7535     unlock_user_struct(target_tx, target_addr, 0);
7536     return 0;
7537 }
7538 
7539 static inline abi_long host_to_target_timex64(abi_long target_addr,
7540                                               struct timex *host_tx)
7541 {
7542     struct target__kernel_timex *target_tx;
7543 
7544    if (copy_to_user_timeval64(target_addr +
7545                               offsetof(struct target__kernel_timex, time),
7546                               &host_tx->time)) {
7547         return -TARGET_EFAULT;
7548     }
7549 
7550     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7551         return -TARGET_EFAULT;
7552     }
7553 
7554     __put_user(host_tx->modes, &target_tx->modes);
7555     __put_user(host_tx->offset, &target_tx->offset);
7556     __put_user(host_tx->freq, &target_tx->freq);
7557     __put_user(host_tx->maxerror, &target_tx->maxerror);
7558     __put_user(host_tx->esterror, &target_tx->esterror);
7559     __put_user(host_tx->status, &target_tx->status);
7560     __put_user(host_tx->constant, &target_tx->constant);
7561     __put_user(host_tx->precision, &target_tx->precision);
7562     __put_user(host_tx->tolerance, &target_tx->tolerance);
7563     __put_user(host_tx->tick, &target_tx->tick);
7564     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7565     __put_user(host_tx->jitter, &target_tx->jitter);
7566     __put_user(host_tx->shift, &target_tx->shift);
7567     __put_user(host_tx->stabil, &target_tx->stabil);
7568     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7569     __put_user(host_tx->calcnt, &target_tx->calcnt);
7570     __put_user(host_tx->errcnt, &target_tx->errcnt);
7571     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7572     __put_user(host_tx->tai, &target_tx->tai);
7573 
7574     unlock_user_struct(target_tx, target_addr, 1);
7575     return 0;
7576 }
7577 #endif
7578 
7579 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7580 #define sigev_notify_thread_id _sigev_un._tid
7581 #endif
7582 
7583 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7584                                                abi_ulong target_addr)
7585 {
7586     struct target_sigevent *target_sevp;
7587 
7588     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7589         return -TARGET_EFAULT;
7590     }
7591 
7592     /* This union is awkward on 64 bit systems because it has a 32 bit
7593      * integer and a pointer in it; we follow the conversion approach
7594      * used for handling sigval types in signal.c so the guest should get
7595      * the correct value back even if we did a 64 bit byteswap and it's
7596      * using the 32 bit integer.
7597      */
7598     host_sevp->sigev_value.sival_ptr =
7599         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7600     host_sevp->sigev_signo =
7601         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7602     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7603     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7604 
7605     unlock_user_struct(target_sevp, target_addr, 1);
7606     return 0;
7607 }
7608 
7609 #if defined(TARGET_NR_mlockall)
7610 static inline int target_to_host_mlockall_arg(int arg)
7611 {
7612     int result = 0;
7613 
7614     if (arg & TARGET_MCL_CURRENT) {
7615         result |= MCL_CURRENT;
7616     }
7617     if (arg & TARGET_MCL_FUTURE) {
7618         result |= MCL_FUTURE;
7619     }
7620 #ifdef MCL_ONFAULT
7621     if (arg & TARGET_MCL_ONFAULT) {
7622         result |= MCL_ONFAULT;
7623     }
7624 #endif
7625 
7626     return result;
7627 }
7628 #endif
7629 
7630 static inline int target_to_host_msync_arg(abi_long arg)
7631 {
7632     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7633            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7634            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7635            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7636 }
7637 
7638 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7639      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7640      defined(TARGET_NR_newfstatat))
7641 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7642                                              abi_ulong target_addr,
7643                                              struct stat *host_st)
7644 {
7645 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7646     if (cpu_env->eabi) {
7647         struct target_eabi_stat64 *target_st;
7648 
7649         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7650             return -TARGET_EFAULT;
7651         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7652         __put_user(host_st->st_dev, &target_st->st_dev);
7653         __put_user(host_st->st_ino, &target_st->st_ino);
7654 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7655         __put_user(host_st->st_ino, &target_st->__st_ino);
7656 #endif
7657         __put_user(host_st->st_mode, &target_st->st_mode);
7658         __put_user(host_st->st_nlink, &target_st->st_nlink);
7659         __put_user(host_st->st_uid, &target_st->st_uid);
7660         __put_user(host_st->st_gid, &target_st->st_gid);
7661         __put_user(host_st->st_rdev, &target_st->st_rdev);
7662         __put_user(host_st->st_size, &target_st->st_size);
7663         __put_user(host_st->st_blksize, &target_st->st_blksize);
7664         __put_user(host_st->st_blocks, &target_st->st_blocks);
7665         __put_user(host_st->st_atime, &target_st->target_st_atime);
7666         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7667         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7668 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7669         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7670         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7671         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7672 #endif
7673         unlock_user_struct(target_st, target_addr, 1);
7674     } else
7675 #endif
7676     {
7677 #if defined(TARGET_HAS_STRUCT_STAT64)
7678         struct target_stat64 *target_st;
7679 #else
7680         struct target_stat *target_st;
7681 #endif
7682 
7683         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7684             return -TARGET_EFAULT;
7685         memset(target_st, 0, sizeof(*target_st));
7686         __put_user(host_st->st_dev, &target_st->st_dev);
7687         __put_user(host_st->st_ino, &target_st->st_ino);
7688 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7689         __put_user(host_st->st_ino, &target_st->__st_ino);
7690 #endif
7691         __put_user(host_st->st_mode, &target_st->st_mode);
7692         __put_user(host_st->st_nlink, &target_st->st_nlink);
7693         __put_user(host_st->st_uid, &target_st->st_uid);
7694         __put_user(host_st->st_gid, &target_st->st_gid);
7695         __put_user(host_st->st_rdev, &target_st->st_rdev);
7696         /* XXX: better use of kernel struct */
7697         __put_user(host_st->st_size, &target_st->st_size);
7698         __put_user(host_st->st_blksize, &target_st->st_blksize);
7699         __put_user(host_st->st_blocks, &target_st->st_blocks);
7700         __put_user(host_st->st_atime, &target_st->target_st_atime);
7701         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7702         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7703 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7704         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7705         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7706         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7707 #endif
7708         unlock_user_struct(target_st, target_addr, 1);
7709     }
7710 
7711     return 0;
7712 }
7713 #endif
7714 
7715 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7716 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7717                                             abi_ulong target_addr)
7718 {
7719     struct target_statx *target_stx;
7720 
7721     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7722         return -TARGET_EFAULT;
7723     }
7724     memset(target_stx, 0, sizeof(*target_stx));
7725 
7726     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7727     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7728     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7729     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7730     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7731     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7732     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7733     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7734     __put_user(host_stx->stx_size, &target_stx->stx_size);
7735     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7736     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7737     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7738     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7739     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7740     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7741     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7742     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7743     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7744     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7745     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7746     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7747     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7748     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7749 
7750     unlock_user_struct(target_stx, target_addr, 1);
7751 
7752     return 0;
7753 }
7754 #endif
7755 
7756 static int do_sys_futex(int *uaddr, int op, int val,
7757                          const struct timespec *timeout, int *uaddr2,
7758                          int val3)
7759 {
7760 #if HOST_LONG_BITS == 64
7761 #if defined(__NR_futex)
7762     /* always a 64-bit time_t, it doesn't define _time64 version  */
7763     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7764 
7765 #endif
7766 #else /* HOST_LONG_BITS == 64 */
7767 #if defined(__NR_futex_time64)
7768     if (sizeof(timeout->tv_sec) == 8) {
7769         /* _time64 function on 32bit arch */
7770         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7771     }
7772 #endif
7773 #if defined(__NR_futex)
7774     /* old function on 32bit arch */
7775     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7776 #endif
7777 #endif /* HOST_LONG_BITS == 64 */
7778     g_assert_not_reached();
7779 }
7780 
7781 static int do_safe_futex(int *uaddr, int op, int val,
7782                          const struct timespec *timeout, int *uaddr2,
7783                          int val3)
7784 {
7785 #if HOST_LONG_BITS == 64
7786 #if defined(__NR_futex)
7787     /* always a 64-bit time_t, it doesn't define _time64 version  */
7788     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7789 #endif
7790 #else /* HOST_LONG_BITS == 64 */
7791 #if defined(__NR_futex_time64)
7792     if (sizeof(timeout->tv_sec) == 8) {
7793         /* _time64 function on 32bit arch */
7794         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7795                                            val3));
7796     }
7797 #endif
7798 #if defined(__NR_futex)
7799     /* old function on 32bit arch */
7800     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7801 #endif
7802 #endif /* HOST_LONG_BITS == 64 */
7803     return -TARGET_ENOSYS;
7804 }
7805 
7806 /* ??? Using host futex calls even when target atomic operations
7807    are not really atomic probably breaks things.  However implementing
7808    futexes locally would make futexes shared between multiple processes
7809    tricky.  However they're probably useless because guest atomic
7810    operations won't work either.  */
7811 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7812 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7813                     int op, int val, target_ulong timeout,
7814                     target_ulong uaddr2, int val3)
7815 {
7816     struct timespec ts, *pts = NULL;
7817     void *haddr2 = NULL;
7818     int base_op;
7819 
7820     /* We assume FUTEX_* constants are the same on both host and target. */
7821 #ifdef FUTEX_CMD_MASK
7822     base_op = op & FUTEX_CMD_MASK;
7823 #else
7824     base_op = op;
7825 #endif
7826     switch (base_op) {
7827     case FUTEX_WAIT:
7828     case FUTEX_WAIT_BITSET:
7829         val = tswap32(val);
7830         break;
7831     case FUTEX_WAIT_REQUEUE_PI:
7832         val = tswap32(val);
7833         haddr2 = g2h(cpu, uaddr2);
7834         break;
7835     case FUTEX_LOCK_PI:
7836     case FUTEX_LOCK_PI2:
7837         break;
7838     case FUTEX_WAKE:
7839     case FUTEX_WAKE_BITSET:
7840     case FUTEX_TRYLOCK_PI:
7841     case FUTEX_UNLOCK_PI:
7842         timeout = 0;
7843         break;
7844     case FUTEX_FD:
7845         val = target_to_host_signal(val);
7846         timeout = 0;
7847         break;
7848     case FUTEX_CMP_REQUEUE:
7849     case FUTEX_CMP_REQUEUE_PI:
7850         val3 = tswap32(val3);
7851         /* fall through */
7852     case FUTEX_REQUEUE:
7853     case FUTEX_WAKE_OP:
7854         /*
7855          * For these, the 4th argument is not TIMEOUT, but VAL2.
7856          * But the prototype of do_safe_futex takes a pointer, so
7857          * insert casts to satisfy the compiler.  We do not need
7858          * to tswap VAL2 since it's not compared to guest memory.
7859           */
7860         pts = (struct timespec *)(uintptr_t)timeout;
7861         timeout = 0;
7862         haddr2 = g2h(cpu, uaddr2);
7863         break;
7864     default:
7865         return -TARGET_ENOSYS;
7866     }
7867     if (timeout) {
7868         pts = &ts;
7869         if (time64
7870             ? target_to_host_timespec64(pts, timeout)
7871             : target_to_host_timespec(pts, timeout)) {
7872             return -TARGET_EFAULT;
7873         }
7874     }
7875     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7876 }
7877 #endif
7878 
7879 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7880 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7881                                      abi_long handle, abi_long mount_id,
7882                                      abi_long flags)
7883 {
7884     struct file_handle *target_fh;
7885     struct file_handle *fh;
7886     int mid = 0;
7887     abi_long ret;
7888     char *name;
7889     unsigned int size, total_size;
7890 
7891     if (get_user_s32(size, handle)) {
7892         return -TARGET_EFAULT;
7893     }
7894 
7895     name = lock_user_string(pathname);
7896     if (!name) {
7897         return -TARGET_EFAULT;
7898     }
7899 
7900     total_size = sizeof(struct file_handle) + size;
7901     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7902     if (!target_fh) {
7903         unlock_user(name, pathname, 0);
7904         return -TARGET_EFAULT;
7905     }
7906 
7907     fh = g_malloc0(total_size);
7908     fh->handle_bytes = size;
7909 
7910     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7911     unlock_user(name, pathname, 0);
7912 
7913     /* man name_to_handle_at(2):
7914      * Other than the use of the handle_bytes field, the caller should treat
7915      * the file_handle structure as an opaque data type
7916      */
7917 
7918     memcpy(target_fh, fh, total_size);
7919     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7920     target_fh->handle_type = tswap32(fh->handle_type);
7921     g_free(fh);
7922     unlock_user(target_fh, handle, total_size);
7923 
7924     if (put_user_s32(mid, mount_id)) {
7925         return -TARGET_EFAULT;
7926     }
7927 
7928     return ret;
7929 
7930 }
7931 #endif
7932 
7933 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7934 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7935                                      abi_long flags)
7936 {
7937     struct file_handle *target_fh;
7938     struct file_handle *fh;
7939     unsigned int size, total_size;
7940     abi_long ret;
7941 
7942     if (get_user_s32(size, handle)) {
7943         return -TARGET_EFAULT;
7944     }
7945 
7946     total_size = sizeof(struct file_handle) + size;
7947     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7948     if (!target_fh) {
7949         return -TARGET_EFAULT;
7950     }
7951 
7952     fh = g_memdup(target_fh, total_size);
7953     fh->handle_bytes = size;
7954     fh->handle_type = tswap32(target_fh->handle_type);
7955 
7956     ret = get_errno(open_by_handle_at(mount_fd, fh,
7957                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7958 
7959     g_free(fh);
7960 
7961     unlock_user(target_fh, handle, total_size);
7962 
7963     return ret;
7964 }
7965 #endif
7966 
7967 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7968 
7969 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7970 {
7971     int host_flags;
7972     target_sigset_t *target_mask;
7973     sigset_t host_mask;
7974     abi_long ret;
7975 
7976     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7977         return -TARGET_EINVAL;
7978     }
7979     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7980         return -TARGET_EFAULT;
7981     }
7982 
7983     target_to_host_sigset(&host_mask, target_mask);
7984 
7985     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7986 
7987     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7988     if (ret >= 0) {
7989         fd_trans_register(ret, &target_signalfd_trans);
7990     }
7991 
7992     unlock_user_struct(target_mask, mask, 0);
7993 
7994     return ret;
7995 }
7996 #endif
7997 
7998 /* Map host to target signal numbers for the wait family of syscalls.
7999    Assume all other status bits are the same.  */
8000 int host_to_target_waitstatus(int status)
8001 {
8002     if (WIFSIGNALED(status)) {
8003         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8004     }
8005     if (WIFSTOPPED(status)) {
8006         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8007                | (status & 0xff);
8008     }
8009     return status;
8010 }
8011 
8012 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8013 {
8014     CPUState *cpu = env_cpu(cpu_env);
8015     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8016     int i;
8017 
8018     for (i = 0; i < bprm->argc; i++) {
8019         size_t len = strlen(bprm->argv[i]) + 1;
8020 
8021         if (write(fd, bprm->argv[i], len) != len) {
8022             return -1;
8023         }
8024     }
8025 
8026     return 0;
8027 }
8028 
8029 struct open_self_maps_data {
8030     TaskState *ts;
8031     IntervalTreeRoot *host_maps;
8032     int fd;
8033     bool smaps;
8034 };
8035 
8036 /*
8037  * Subroutine to output one line of /proc/self/maps,
8038  * or one region of /proc/self/smaps.
8039  */
8040 
8041 #ifdef TARGET_HPPA
8042 # define test_stack(S, E, L)  (E == L)
8043 #else
8044 # define test_stack(S, E, L)  (S == L)
8045 #endif
8046 
8047 static void open_self_maps_4(const struct open_self_maps_data *d,
8048                              const MapInfo *mi, abi_ptr start,
8049                              abi_ptr end, unsigned flags)
8050 {
8051     const struct image_info *info = d->ts->info;
8052     const char *path = mi->path;
8053     uint64_t offset;
8054     int fd = d->fd;
8055     int count;
8056 
8057     if (test_stack(start, end, info->stack_limit)) {
8058         path = "[stack]";
8059     } else if (start == info->brk) {
8060         path = "[heap]";
8061     } else if (start == info->vdso) {
8062         path = "[vdso]";
8063 #ifdef TARGET_X86_64
8064     } else if (start == TARGET_VSYSCALL_PAGE) {
8065         path = "[vsyscall]";
8066 #endif
8067     }
8068 
8069     /* Except null device (MAP_ANON), adjust offset for this fragment. */
8070     offset = mi->offset;
8071     if (mi->dev) {
8072         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8073         offset += hstart - mi->itree.start;
8074     }
8075 
8076     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8077                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8078                     start, end,
8079                     (flags & PAGE_READ) ? 'r' : '-',
8080                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8081                     (flags & PAGE_EXEC) ? 'x' : '-',
8082                     mi->is_priv ? 'p' : 's',
8083                     offset, major(mi->dev), minor(mi->dev),
8084                     (uint64_t)mi->inode);
8085     if (path) {
8086         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8087     } else {
8088         dprintf(fd, "\n");
8089     }
8090 
8091     if (d->smaps) {
8092         unsigned long size = end - start;
8093         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8094         unsigned long size_kb = size >> 10;
8095 
8096         dprintf(fd, "Size:                  %lu kB\n"
8097                 "KernelPageSize:        %lu kB\n"
8098                 "MMUPageSize:           %lu kB\n"
8099                 "Rss:                   0 kB\n"
8100                 "Pss:                   0 kB\n"
8101                 "Pss_Dirty:             0 kB\n"
8102                 "Shared_Clean:          0 kB\n"
8103                 "Shared_Dirty:          0 kB\n"
8104                 "Private_Clean:         0 kB\n"
8105                 "Private_Dirty:         0 kB\n"
8106                 "Referenced:            0 kB\n"
8107                 "Anonymous:             %lu kB\n"
8108                 "LazyFree:              0 kB\n"
8109                 "AnonHugePages:         0 kB\n"
8110                 "ShmemPmdMapped:        0 kB\n"
8111                 "FilePmdMapped:         0 kB\n"
8112                 "Shared_Hugetlb:        0 kB\n"
8113                 "Private_Hugetlb:       0 kB\n"
8114                 "Swap:                  0 kB\n"
8115                 "SwapPss:               0 kB\n"
8116                 "Locked:                0 kB\n"
8117                 "THPeligible:    0\n"
8118                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8119                 size_kb, page_size_kb, page_size_kb,
8120                 (flags & PAGE_ANON ? size_kb : 0),
8121                 (flags & PAGE_READ) ? " rd" : "",
8122                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8123                 (flags & PAGE_EXEC) ? " ex" : "",
8124                 mi->is_priv ? "" : " sh",
8125                 (flags & PAGE_READ) ? " mr" : "",
8126                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8127                 (flags & PAGE_EXEC) ? " me" : "",
8128                 mi->is_priv ? "" : " ms");
8129     }
8130 }
8131 
8132 /*
8133  * Callback for walk_memory_regions, when read_self_maps() fails.
8134  * Proceed without the benefit of host /proc/self/maps cross-check.
8135  */
8136 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8137                             target_ulong guest_end, unsigned long flags)
8138 {
8139     static const MapInfo mi = { .is_priv = true };
8140 
8141     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8142     return 0;
8143 }
8144 
8145 /*
8146  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8147  */
8148 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8149                             target_ulong guest_end, unsigned long flags)
8150 {
8151     const struct open_self_maps_data *d = opaque;
8152     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8153     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8154 
8155 #ifdef TARGET_X86_64
8156     /*
8157      * Because of the extremely high position of the page within the guest
8158      * virtual address space, this is not backed by host memory at all.
8159      * Therefore the loop below would fail.  This is the only instance
8160      * of not having host backing memory.
8161      */
8162     if (guest_start == TARGET_VSYSCALL_PAGE) {
8163         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8164     }
8165 #endif
8166 
8167     while (1) {
8168         IntervalTreeNode *n =
8169             interval_tree_iter_first(d->host_maps, host_start, host_start);
8170         MapInfo *mi = container_of(n, MapInfo, itree);
8171         uintptr_t this_hlast = MIN(host_last, n->last);
8172         target_ulong this_gend = h2g(this_hlast) + 1;
8173 
8174         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8175 
8176         if (this_hlast == host_last) {
8177             return 0;
8178         }
8179         host_start = this_hlast + 1;
8180         guest_start = h2g(host_start);
8181     }
8182 }
8183 
8184 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8185 {
8186     struct open_self_maps_data d = {
8187         .ts = get_task_state(env_cpu(env)),
8188         .fd = fd,
8189         .smaps = smaps
8190     };
8191 
8192     mmap_lock();
8193     d.host_maps = read_self_maps();
8194     if (d.host_maps) {
8195         walk_memory_regions(&d, open_self_maps_2);
8196         free_self_maps(d.host_maps);
8197     } else {
8198         walk_memory_regions(&d, open_self_maps_3);
8199     }
8200     mmap_unlock();
8201     return 0;
8202 }
8203 
8204 static int open_self_maps(CPUArchState *cpu_env, int fd)
8205 {
8206     return open_self_maps_1(cpu_env, fd, false);
8207 }
8208 
8209 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8210 {
8211     return open_self_maps_1(cpu_env, fd, true);
8212 }
8213 
8214 static int open_self_stat(CPUArchState *cpu_env, int fd)
8215 {
8216     CPUState *cpu = env_cpu(cpu_env);
8217     TaskState *ts = get_task_state(cpu);
8218     g_autoptr(GString) buf = g_string_new(NULL);
8219     int i;
8220 
8221     for (i = 0; i < 44; i++) {
8222         if (i == 0) {
8223             /* pid */
8224             g_string_printf(buf, FMT_pid " ", getpid());
8225         } else if (i == 1) {
8226             /* app name */
8227             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8228             bin = bin ? bin + 1 : ts->bprm->argv[0];
8229             g_string_printf(buf, "(%.15s) ", bin);
8230         } else if (i == 2) {
8231             /* task state */
8232             g_string_assign(buf, "R "); /* we are running right now */
8233         } else if (i == 3) {
8234             /* ppid */
8235             g_string_printf(buf, FMT_pid " ", getppid());
8236         } else if (i == 19) {
8237             /* num_threads */
8238             int cpus = 0;
8239             WITH_RCU_READ_LOCK_GUARD() {
8240                 CPUState *cpu_iter;
8241                 CPU_FOREACH(cpu_iter) {
8242                     cpus++;
8243                 }
8244             }
8245             g_string_printf(buf, "%d ", cpus);
8246         } else if (i == 21) {
8247             /* starttime */
8248             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8249         } else if (i == 27) {
8250             /* stack bottom */
8251             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8252         } else {
8253             /* for the rest, there is MasterCard */
8254             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8255         }
8256 
8257         if (write(fd, buf->str, buf->len) != buf->len) {
8258             return -1;
8259         }
8260     }
8261 
8262     return 0;
8263 }
8264 
8265 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8266 {
8267     CPUState *cpu = env_cpu(cpu_env);
8268     TaskState *ts = get_task_state(cpu);
8269     abi_ulong auxv = ts->info->saved_auxv;
8270     abi_ulong len = ts->info->auxv_len;
8271     char *ptr;
8272 
8273     /*
8274      * Auxiliary vector is stored in target process stack.
8275      * read in whole auxv vector and copy it to file
8276      */
8277     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8278     if (ptr != NULL) {
8279         while (len > 0) {
8280             ssize_t r;
8281             r = write(fd, ptr, len);
8282             if (r <= 0) {
8283                 break;
8284             }
8285             len -= r;
8286             ptr += r;
8287         }
8288         lseek(fd, 0, SEEK_SET);
8289         unlock_user(ptr, auxv, len);
8290     }
8291 
8292     return 0;
8293 }
8294 
8295 static int is_proc_myself(const char *filename, const char *entry)
8296 {
8297     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8298         filename += strlen("/proc/");
8299         if (!strncmp(filename, "self/", strlen("self/"))) {
8300             filename += strlen("self/");
8301         } else if (*filename >= '1' && *filename <= '9') {
8302             char myself[80];
8303             snprintf(myself, sizeof(myself), "%d/", getpid());
8304             if (!strncmp(filename, myself, strlen(myself))) {
8305                 filename += strlen(myself);
8306             } else {
8307                 return 0;
8308             }
8309         } else {
8310             return 0;
8311         }
8312         if (!strcmp(filename, entry)) {
8313             return 1;
8314         }
8315     }
8316     return 0;
8317 }
8318 
8319 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8320                       const char *fmt, int code)
8321 {
8322     if (logfile) {
8323         CPUState *cs = env_cpu(env);
8324 
8325         fprintf(logfile, fmt, code);
8326         fprintf(logfile, "Failing executable: %s\n", exec_path);
8327         cpu_dump_state(cs, logfile, 0);
8328         open_self_maps(env, fileno(logfile));
8329     }
8330 }
8331 
8332 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8333 {
8334     /* dump to console */
8335     excp_dump_file(stderr, env, fmt, code);
8336 
8337     /* dump to log file */
8338     if (qemu_log_separate()) {
8339         FILE *logfile = qemu_log_trylock();
8340 
8341         excp_dump_file(logfile, env, fmt, code);
8342         qemu_log_unlock(logfile);
8343     }
8344 }
8345 
8346 #include "target_proc.h"
8347 
8348 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8349     defined(HAVE_ARCH_PROC_CPUINFO) || \
8350     defined(HAVE_ARCH_PROC_HARDWARE)
8351 static int is_proc(const char *filename, const char *entry)
8352 {
8353     return strcmp(filename, entry) == 0;
8354 }
8355 #endif
8356 
8357 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8358 static int open_net_route(CPUArchState *cpu_env, int fd)
8359 {
8360     FILE *fp;
8361     char *line = NULL;
8362     size_t len = 0;
8363     ssize_t read;
8364 
8365     fp = fopen("/proc/net/route", "r");
8366     if (fp == NULL) {
8367         return -1;
8368     }
8369 
8370     /* read header */
8371 
8372     read = getline(&line, &len, fp);
8373     dprintf(fd, "%s", line);
8374 
8375     /* read routes */
8376 
8377     while ((read = getline(&line, &len, fp)) != -1) {
8378         char iface[16];
8379         uint32_t dest, gw, mask;
8380         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8381         int fields;
8382 
8383         fields = sscanf(line,
8384                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8385                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8386                         &mask, &mtu, &window, &irtt);
8387         if (fields != 11) {
8388             continue;
8389         }
8390         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8391                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8392                 metric, tswap32(mask), mtu, window, irtt);
8393     }
8394 
8395     free(line);
8396     fclose(fp);
8397 
8398     return 0;
8399 }
8400 #endif
8401 
8402 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8403                               const char *fname, int flags, mode_t mode,
8404                               int openat2_resolve, bool safe)
8405 {
8406     g_autofree char *proc_name = NULL;
8407     const char *pathname;
8408     struct fake_open {
8409         const char *filename;
8410         int (*fill)(CPUArchState *cpu_env, int fd);
8411         int (*cmp)(const char *s1, const char *s2);
8412     };
8413     const struct fake_open *fake_open;
8414     static const struct fake_open fakes[] = {
8415         { "maps", open_self_maps, is_proc_myself },
8416         { "smaps", open_self_smaps, is_proc_myself },
8417         { "stat", open_self_stat, is_proc_myself },
8418         { "auxv", open_self_auxv, is_proc_myself },
8419         { "cmdline", open_self_cmdline, is_proc_myself },
8420 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8421         { "/proc/net/route", open_net_route, is_proc },
8422 #endif
8423 #if defined(HAVE_ARCH_PROC_CPUINFO)
8424         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8425 #endif
8426 #if defined(HAVE_ARCH_PROC_HARDWARE)
8427         { "/proc/hardware", open_hardware, is_proc },
8428 #endif
8429         { NULL, NULL, NULL }
8430     };
8431 
8432     /* if this is a file from /proc/ filesystem, expand full name */
8433     proc_name = realpath(fname, NULL);
8434     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8435         pathname = proc_name;
8436     } else {
8437         pathname = fname;
8438     }
8439 
8440     if (is_proc_myself(pathname, "exe")) {
8441         /* Honor openat2 resolve flags */
8442         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8443             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8444             errno = ELOOP;
8445             return -1;
8446         }
8447         if (safe) {
8448             return safe_openat(dirfd, exec_path, flags, mode);
8449         } else {
8450             return openat(dirfd, exec_path, flags, mode);
8451         }
8452     }
8453 
8454     for (fake_open = fakes; fake_open->filename; fake_open++) {
8455         if (fake_open->cmp(pathname, fake_open->filename)) {
8456             break;
8457         }
8458     }
8459 
8460     if (fake_open->filename) {
8461         const char *tmpdir;
8462         char filename[PATH_MAX];
8463         int fd, r;
8464 
8465         fd = memfd_create("qemu-open", 0);
8466         if (fd < 0) {
8467             if (errno != ENOSYS) {
8468                 return fd;
8469             }
8470             /* create temporary file to map stat to */
8471             tmpdir = getenv("TMPDIR");
8472             if (!tmpdir)
8473                 tmpdir = "/tmp";
8474             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8475             fd = mkstemp(filename);
8476             if (fd < 0) {
8477                 return fd;
8478             }
8479             unlink(filename);
8480         }
8481 
8482         if ((r = fake_open->fill(cpu_env, fd))) {
8483             int e = errno;
8484             close(fd);
8485             errno = e;
8486             return r;
8487         }
8488         lseek(fd, 0, SEEK_SET);
8489 
8490         return fd;
8491     }
8492 
8493     return -2;
8494 }
8495 
8496 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8497                     int flags, mode_t mode, bool safe)
8498 {
8499     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8500     if (fd > -2) {
8501         return fd;
8502     }
8503 
8504     if (safe) {
8505         return safe_openat(dirfd, path(pathname), flags, mode);
8506     } else {
8507         return openat(dirfd, path(pathname), flags, mode);
8508     }
8509 }
8510 
8511 
8512 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8513                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8514                       abi_ulong guest_size)
8515 {
8516     struct open_how_ver0 how = {0};
8517     char *pathname;
8518     int ret;
8519 
8520     if (guest_size < sizeof(struct target_open_how_ver0)) {
8521         return -TARGET_EINVAL;
8522     }
8523     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8524     if (ret) {
8525         if (ret == -TARGET_E2BIG) {
8526             qemu_log_mask(LOG_UNIMP,
8527                           "Unimplemented openat2 open_how size: "
8528                           TARGET_ABI_FMT_lu "\n", guest_size);
8529         }
8530         return ret;
8531     }
8532     pathname = lock_user_string(guest_pathname);
8533     if (!pathname) {
8534         return -TARGET_EFAULT;
8535     }
8536 
8537     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8538     how.mode = tswap64(how.mode);
8539     how.resolve = tswap64(how.resolve);
8540     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8541                                 how.resolve, true);
8542     if (fd > -2) {
8543         ret = get_errno(fd);
8544     } else {
8545         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8546                                      sizeof(struct open_how_ver0)));
8547     }
8548 
8549     fd_trans_unregister(ret);
8550     unlock_user(pathname, guest_pathname, 0);
8551     return ret;
8552 }
8553 
8554 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8555 {
8556     ssize_t ret;
8557 
8558     if (!pathname || !buf) {
8559         errno = EFAULT;
8560         return -1;
8561     }
8562 
8563     if (!bufsiz) {
8564         /* Short circuit this for the magic exe check. */
8565         errno = EINVAL;
8566         return -1;
8567     }
8568 
8569     if (is_proc_myself((const char *)pathname, "exe")) {
8570         /*
8571          * Don't worry about sign mismatch as earlier mapping
8572          * logic would have thrown a bad address error.
8573          */
8574         ret = MIN(strlen(exec_path), bufsiz);
8575         /* We cannot NUL terminate the string. */
8576         memcpy(buf, exec_path, ret);
8577     } else {
8578         ret = readlink(path(pathname), buf, bufsiz);
8579     }
8580 
8581     return ret;
8582 }
8583 
8584 static int do_execv(CPUArchState *cpu_env, int dirfd,
8585                     abi_long pathname, abi_long guest_argp,
8586                     abi_long guest_envp, int flags, bool is_execveat)
8587 {
8588     int ret;
8589     char **argp, **envp;
8590     int argc, envc;
8591     abi_ulong gp;
8592     abi_ulong addr;
8593     char **q;
8594     void *p;
8595 
8596     argc = 0;
8597 
8598     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8599         if (get_user_ual(addr, gp)) {
8600             return -TARGET_EFAULT;
8601         }
8602         if (!addr) {
8603             break;
8604         }
8605         argc++;
8606     }
8607     envc = 0;
8608     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8609         if (get_user_ual(addr, gp)) {
8610             return -TARGET_EFAULT;
8611         }
8612         if (!addr) {
8613             break;
8614         }
8615         envc++;
8616     }
8617 
8618     argp = g_new0(char *, argc + 1);
8619     envp = g_new0(char *, envc + 1);
8620 
8621     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8622         if (get_user_ual(addr, gp)) {
8623             goto execve_efault;
8624         }
8625         if (!addr) {
8626             break;
8627         }
8628         *q = lock_user_string(addr);
8629         if (!*q) {
8630             goto execve_efault;
8631         }
8632     }
8633     *q = NULL;
8634 
8635     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8636         if (get_user_ual(addr, gp)) {
8637             goto execve_efault;
8638         }
8639         if (!addr) {
8640             break;
8641         }
8642         *q = lock_user_string(addr);
8643         if (!*q) {
8644             goto execve_efault;
8645         }
8646     }
8647     *q = NULL;
8648 
8649     /*
8650      * Although execve() is not an interruptible syscall it is
8651      * a special case where we must use the safe_syscall wrapper:
8652      * if we allow a signal to happen before we make the host
8653      * syscall then we will 'lose' it, because at the point of
8654      * execve the process leaves QEMU's control. So we use the
8655      * safe syscall wrapper to ensure that we either take the
8656      * signal as a guest signal, or else it does not happen
8657      * before the execve completes and makes it the other
8658      * program's problem.
8659      */
8660     p = lock_user_string(pathname);
8661     if (!p) {
8662         goto execve_efault;
8663     }
8664 
8665     const char *exe = p;
8666     if (is_proc_myself(p, "exe")) {
8667         exe = exec_path;
8668     }
8669     ret = is_execveat
8670         ? safe_execveat(dirfd, exe, argp, envp, flags)
8671         : safe_execve(exe, argp, envp);
8672     ret = get_errno(ret);
8673 
8674     unlock_user(p, pathname, 0);
8675 
8676     goto execve_end;
8677 
8678 execve_efault:
8679     ret = -TARGET_EFAULT;
8680 
8681 execve_end:
8682     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8683         if (get_user_ual(addr, gp) || !addr) {
8684             break;
8685         }
8686         unlock_user(*q, addr, 0);
8687     }
8688     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8689         if (get_user_ual(addr, gp) || !addr) {
8690             break;
8691         }
8692         unlock_user(*q, addr, 0);
8693     }
8694 
8695     g_free(argp);
8696     g_free(envp);
8697     return ret;
8698 }
8699 
8700 #define TIMER_MAGIC 0x0caf0000
8701 #define TIMER_MAGIC_MASK 0xffff0000
8702 
8703 /* Convert QEMU provided timer ID back to internal 16bit index format */
8704 static target_timer_t get_timer_id(abi_long arg)
8705 {
8706     target_timer_t timerid = arg;
8707 
8708     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8709         return -TARGET_EINVAL;
8710     }
8711 
8712     timerid &= 0xffff;
8713 
8714     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8715         return -TARGET_EINVAL;
8716     }
8717 
8718     return timerid;
8719 }
8720 
8721 static int target_to_host_cpu_mask(unsigned long *host_mask,
8722                                    size_t host_size,
8723                                    abi_ulong target_addr,
8724                                    size_t target_size)
8725 {
8726     unsigned target_bits = sizeof(abi_ulong) * 8;
8727     unsigned host_bits = sizeof(*host_mask) * 8;
8728     abi_ulong *target_mask;
8729     unsigned i, j;
8730 
8731     assert(host_size >= target_size);
8732 
8733     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8734     if (!target_mask) {
8735         return -TARGET_EFAULT;
8736     }
8737     memset(host_mask, 0, host_size);
8738 
8739     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8740         unsigned bit = i * target_bits;
8741         abi_ulong val;
8742 
8743         __get_user(val, &target_mask[i]);
8744         for (j = 0; j < target_bits; j++, bit++) {
8745             if (val & (1UL << j)) {
8746                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8747             }
8748         }
8749     }
8750 
8751     unlock_user(target_mask, target_addr, 0);
8752     return 0;
8753 }
8754 
8755 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8756                                    size_t host_size,
8757                                    abi_ulong target_addr,
8758                                    size_t target_size)
8759 {
8760     unsigned target_bits = sizeof(abi_ulong) * 8;
8761     unsigned host_bits = sizeof(*host_mask) * 8;
8762     abi_ulong *target_mask;
8763     unsigned i, j;
8764 
8765     assert(host_size >= target_size);
8766 
8767     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8768     if (!target_mask) {
8769         return -TARGET_EFAULT;
8770     }
8771 
8772     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8773         unsigned bit = i * target_bits;
8774         abi_ulong val = 0;
8775 
8776         for (j = 0; j < target_bits; j++, bit++) {
8777             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8778                 val |= 1UL << j;
8779             }
8780         }
8781         __put_user(val, &target_mask[i]);
8782     }
8783 
8784     unlock_user(target_mask, target_addr, target_size);
8785     return 0;
8786 }
8787 
8788 #ifdef TARGET_NR_getdents
8789 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8790 {
8791     g_autofree void *hdirp = NULL;
8792     void *tdirp;
8793     int hlen, hoff, toff;
8794     int hreclen, treclen;
8795     off_t prev_diroff = 0;
8796 
8797     hdirp = g_try_malloc(count);
8798     if (!hdirp) {
8799         return -TARGET_ENOMEM;
8800     }
8801 
8802 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8803     hlen = sys_getdents(dirfd, hdirp, count);
8804 #else
8805     hlen = sys_getdents64(dirfd, hdirp, count);
8806 #endif
8807 
8808     hlen = get_errno(hlen);
8809     if (is_error(hlen)) {
8810         return hlen;
8811     }
8812 
8813     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8814     if (!tdirp) {
8815         return -TARGET_EFAULT;
8816     }
8817 
8818     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8819 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8820         struct linux_dirent *hde = hdirp + hoff;
8821 #else
8822         struct linux_dirent64 *hde = hdirp + hoff;
8823 #endif
8824         struct target_dirent *tde = tdirp + toff;
8825         int namelen;
8826         uint8_t type;
8827 
8828         namelen = strlen(hde->d_name);
8829         hreclen = hde->d_reclen;
8830         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8831         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8832 
8833         if (toff + treclen > count) {
8834             /*
8835              * If the host struct is smaller than the target struct, or
8836              * requires less alignment and thus packs into less space,
8837              * then the host can return more entries than we can pass
8838              * on to the guest.
8839              */
8840             if (toff == 0) {
8841                 toff = -TARGET_EINVAL; /* result buffer is too small */
8842                 break;
8843             }
8844             /*
8845              * Return what we have, resetting the file pointer to the
8846              * location of the first record not returned.
8847              */
8848             lseek(dirfd, prev_diroff, SEEK_SET);
8849             break;
8850         }
8851 
8852         prev_diroff = hde->d_off;
8853         tde->d_ino = tswapal(hde->d_ino);
8854         tde->d_off = tswapal(hde->d_off);
8855         tde->d_reclen = tswap16(treclen);
8856         memcpy(tde->d_name, hde->d_name, namelen + 1);
8857 
8858         /*
8859          * The getdents type is in what was formerly a padding byte at the
8860          * end of the structure.
8861          */
8862 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8863         type = *((uint8_t *)hde + hreclen - 1);
8864 #else
8865         type = hde->d_type;
8866 #endif
8867         *((uint8_t *)tde + treclen - 1) = type;
8868     }
8869 
8870     unlock_user(tdirp, arg2, toff);
8871     return toff;
8872 }
8873 #endif /* TARGET_NR_getdents */
8874 
8875 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8876 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8877 {
8878     g_autofree void *hdirp = NULL;
8879     void *tdirp;
8880     int hlen, hoff, toff;
8881     int hreclen, treclen;
8882     off_t prev_diroff = 0;
8883 
8884     hdirp = g_try_malloc(count);
8885     if (!hdirp) {
8886         return -TARGET_ENOMEM;
8887     }
8888 
8889     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8890     if (is_error(hlen)) {
8891         return hlen;
8892     }
8893 
8894     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8895     if (!tdirp) {
8896         return -TARGET_EFAULT;
8897     }
8898 
8899     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8900         struct linux_dirent64 *hde = hdirp + hoff;
8901         struct target_dirent64 *tde = tdirp + toff;
8902         int namelen;
8903 
8904         namelen = strlen(hde->d_name) + 1;
8905         hreclen = hde->d_reclen;
8906         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8907         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8908 
8909         if (toff + treclen > count) {
8910             /*
8911              * If the host struct is smaller than the target struct, or
8912              * requires less alignment and thus packs into less space,
8913              * then the host can return more entries than we can pass
8914              * on to the guest.
8915              */
8916             if (toff == 0) {
8917                 toff = -TARGET_EINVAL; /* result buffer is too small */
8918                 break;
8919             }
8920             /*
8921              * Return what we have, resetting the file pointer to the
8922              * location of the first record not returned.
8923              */
8924             lseek(dirfd, prev_diroff, SEEK_SET);
8925             break;
8926         }
8927 
8928         prev_diroff = hde->d_off;
8929         tde->d_ino = tswap64(hde->d_ino);
8930         tde->d_off = tswap64(hde->d_off);
8931         tde->d_reclen = tswap16(treclen);
8932         tde->d_type = hde->d_type;
8933         memcpy(tde->d_name, hde->d_name, namelen);
8934     }
8935 
8936     unlock_user(tdirp, arg2, toff);
8937     return toff;
8938 }
8939 #endif /* TARGET_NR_getdents64 */
8940 
8941 #if defined(TARGET_NR_riscv_hwprobe)
8942 
8943 #define RISCV_HWPROBE_KEY_MVENDORID     0
8944 #define RISCV_HWPROBE_KEY_MARCHID       1
8945 #define RISCV_HWPROBE_KEY_MIMPID        2
8946 
8947 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8948 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8949 
8950 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8951 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8952 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8953 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8954 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8955 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8956 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8957 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8958 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8959 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8960 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8961 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8962 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8963 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8964 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8965 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8966 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8967 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8968 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8969 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8970 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8971 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8972 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8973 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8974 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8975 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8976 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8977 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8978 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8979 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8980 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8981 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8982 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8983 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8984 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8985 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8986 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8987 
8988 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8989 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8990 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8991 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8992 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8993 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8994 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8995 
8996 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8997 
8998 struct riscv_hwprobe {
8999     abi_llong  key;
9000     abi_ullong value;
9001 };
9002 
9003 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9004                                     struct riscv_hwprobe *pair,
9005                                     size_t pair_count)
9006 {
9007     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9008 
9009     for (; pair_count > 0; pair_count--, pair++) {
9010         abi_llong key;
9011         abi_ullong value;
9012         __put_user(0, &pair->value);
9013         __get_user(key, &pair->key);
9014         switch (key) {
9015         case RISCV_HWPROBE_KEY_MVENDORID:
9016             __put_user(cfg->mvendorid, &pair->value);
9017             break;
9018         case RISCV_HWPROBE_KEY_MARCHID:
9019             __put_user(cfg->marchid, &pair->value);
9020             break;
9021         case RISCV_HWPROBE_KEY_MIMPID:
9022             __put_user(cfg->mimpid, &pair->value);
9023             break;
9024         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9025             value = riscv_has_ext(env, RVI) &&
9026                     riscv_has_ext(env, RVM) &&
9027                     riscv_has_ext(env, RVA) ?
9028                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9029             __put_user(value, &pair->value);
9030             break;
9031         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9032             value = riscv_has_ext(env, RVF) &&
9033                     riscv_has_ext(env, RVD) ?
9034                     RISCV_HWPROBE_IMA_FD : 0;
9035             value |= riscv_has_ext(env, RVC) ?
9036                      RISCV_HWPROBE_IMA_C : 0;
9037             value |= riscv_has_ext(env, RVV) ?
9038                      RISCV_HWPROBE_IMA_V : 0;
9039             value |= cfg->ext_zba ?
9040                      RISCV_HWPROBE_EXT_ZBA : 0;
9041             value |= cfg->ext_zbb ?
9042                      RISCV_HWPROBE_EXT_ZBB : 0;
9043             value |= cfg->ext_zbs ?
9044                      RISCV_HWPROBE_EXT_ZBS : 0;
9045             value |= cfg->ext_zicboz ?
9046                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9047             value |= cfg->ext_zbc ?
9048                      RISCV_HWPROBE_EXT_ZBC : 0;
9049             value |= cfg->ext_zbkb ?
9050                      RISCV_HWPROBE_EXT_ZBKB : 0;
9051             value |= cfg->ext_zbkc ?
9052                      RISCV_HWPROBE_EXT_ZBKC : 0;
9053             value |= cfg->ext_zbkx ?
9054                      RISCV_HWPROBE_EXT_ZBKX : 0;
9055             value |= cfg->ext_zknd ?
9056                      RISCV_HWPROBE_EXT_ZKND : 0;
9057             value |= cfg->ext_zkne ?
9058                      RISCV_HWPROBE_EXT_ZKNE : 0;
9059             value |= cfg->ext_zknh ?
9060                      RISCV_HWPROBE_EXT_ZKNH : 0;
9061             value |= cfg->ext_zksed ?
9062                      RISCV_HWPROBE_EXT_ZKSED : 0;
9063             value |= cfg->ext_zksh ?
9064                      RISCV_HWPROBE_EXT_ZKSH : 0;
9065             value |= cfg->ext_zkt ?
9066                      RISCV_HWPROBE_EXT_ZKT : 0;
9067             value |= cfg->ext_zvbb ?
9068                      RISCV_HWPROBE_EXT_ZVBB : 0;
9069             value |= cfg->ext_zvbc ?
9070                      RISCV_HWPROBE_EXT_ZVBC : 0;
9071             value |= cfg->ext_zvkb ?
9072                      RISCV_HWPROBE_EXT_ZVKB : 0;
9073             value |= cfg->ext_zvkg ?
9074                      RISCV_HWPROBE_EXT_ZVKG : 0;
9075             value |= cfg->ext_zvkned ?
9076                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9077             value |= cfg->ext_zvknha ?
9078                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9079             value |= cfg->ext_zvknhb ?
9080                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9081             value |= cfg->ext_zvksed ?
9082                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9083             value |= cfg->ext_zvksh ?
9084                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9085             value |= cfg->ext_zvkt ?
9086                      RISCV_HWPROBE_EXT_ZVKT : 0;
9087             value |= cfg->ext_zfh ?
9088                      RISCV_HWPROBE_EXT_ZFH : 0;
9089             value |= cfg->ext_zfhmin ?
9090                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9091             value |= cfg->ext_zihintntl ?
9092                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9093             value |= cfg->ext_zvfh ?
9094                      RISCV_HWPROBE_EXT_ZVFH : 0;
9095             value |= cfg->ext_zvfhmin ?
9096                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9097             value |= cfg->ext_zfa ?
9098                      RISCV_HWPROBE_EXT_ZFA : 0;
9099             value |= cfg->ext_ztso ?
9100                      RISCV_HWPROBE_EXT_ZTSO : 0;
9101             value |= cfg->ext_zacas ?
9102                      RISCV_HWPROBE_EXT_ZACAS : 0;
9103             value |= cfg->ext_zicond ?
9104                      RISCV_HWPROBE_EXT_ZICOND : 0;
9105             __put_user(value, &pair->value);
9106             break;
9107         case RISCV_HWPROBE_KEY_CPUPERF_0:
9108             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9109             break;
9110         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9111             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9112             __put_user(value, &pair->value);
9113             break;
9114         default:
9115             __put_user(-1, &pair->key);
9116             break;
9117         }
9118     }
9119 }
9120 
9121 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9122 {
9123     int ret, i, tmp;
9124     size_t host_mask_size, target_mask_size;
9125     unsigned long *host_mask;
9126 
9127     /*
9128      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9129      * arg3 contains the cpu count.
9130      */
9131     tmp = (8 * sizeof(abi_ulong));
9132     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9133     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9134                      ~(sizeof(*host_mask) - 1);
9135 
9136     host_mask = alloca(host_mask_size);
9137 
9138     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9139                                   arg4, target_mask_size);
9140     if (ret != 0) {
9141         return ret;
9142     }
9143 
9144     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9145         if (host_mask[i] != 0) {
9146             return 0;
9147         }
9148     }
9149     return -TARGET_EINVAL;
9150 }
9151 
9152 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9153                                  abi_long arg2, abi_long arg3,
9154                                  abi_long arg4, abi_long arg5)
9155 {
9156     int ret;
9157     struct riscv_hwprobe *host_pairs;
9158 
9159     /* flags must be 0 */
9160     if (arg5 != 0) {
9161         return -TARGET_EINVAL;
9162     }
9163 
9164     /* check cpu_set */
9165     if (arg3 != 0) {
9166         ret = cpu_set_valid(arg3, arg4);
9167         if (ret != 0) {
9168             return ret;
9169         }
9170     } else if (arg4 != 0) {
9171         return -TARGET_EINVAL;
9172     }
9173 
9174     /* no pairs */
9175     if (arg2 == 0) {
9176         return 0;
9177     }
9178 
9179     host_pairs = lock_user(VERIFY_WRITE, arg1,
9180                            sizeof(*host_pairs) * (size_t)arg2, 0);
9181     if (host_pairs == NULL) {
9182         return -TARGET_EFAULT;
9183     }
9184     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9185     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9186     return 0;
9187 }
9188 #endif /* TARGET_NR_riscv_hwprobe */
9189 
9190 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9191 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9192 #endif
9193 
9194 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9195 #define __NR_sys_open_tree __NR_open_tree
9196 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9197           unsigned int, __flags)
9198 #endif
9199 
9200 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9201 #define __NR_sys_move_mount __NR_move_mount
9202 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9203            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9204 #endif
9205 
9206 /* This is an internal helper for do_syscall so that it is easier
9207  * to have a single return point, so that actions, such as logging
9208  * of syscall results, can be performed.
9209  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9210  */
9211 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9212                             abi_long arg2, abi_long arg3, abi_long arg4,
9213                             abi_long arg5, abi_long arg6, abi_long arg7,
9214                             abi_long arg8)
9215 {
9216     CPUState *cpu = env_cpu(cpu_env);
9217     abi_long ret;
9218 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9219     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9220     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9221     || defined(TARGET_NR_statx)
9222     struct stat st;
9223 #endif
9224 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9225     || defined(TARGET_NR_fstatfs)
9226     struct statfs stfs;
9227 #endif
9228     void *p;
9229 
9230     switch(num) {
9231     case TARGET_NR_exit:
9232         /* In old applications this may be used to implement _exit(2).
9233            However in threaded applications it is used for thread termination,
9234            and _exit_group is used for application termination.
9235            Do thread termination if we have more then one thread.  */
9236 
9237         if (block_signals()) {
9238             return -QEMU_ERESTARTSYS;
9239         }
9240 
9241         pthread_mutex_lock(&clone_lock);
9242 
9243         if (CPU_NEXT(first_cpu)) {
9244             TaskState *ts = get_task_state(cpu);
9245 
9246             if (ts->child_tidptr) {
9247                 put_user_u32(0, ts->child_tidptr);
9248                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9249                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9250             }
9251 
9252             object_unparent(OBJECT(cpu));
9253             object_unref(OBJECT(cpu));
9254             /*
9255              * At this point the CPU should be unrealized and removed
9256              * from cpu lists. We can clean-up the rest of the thread
9257              * data without the lock held.
9258              */
9259 
9260             pthread_mutex_unlock(&clone_lock);
9261 
9262             thread_cpu = NULL;
9263             g_free(ts);
9264             rcu_unregister_thread();
9265             pthread_exit(NULL);
9266         }
9267 
9268         pthread_mutex_unlock(&clone_lock);
9269         preexit_cleanup(cpu_env, arg1);
9270         _exit(arg1);
9271         return 0; /* avoid warning */
9272     case TARGET_NR_read:
9273         if (arg2 == 0 && arg3 == 0) {
9274             return get_errno(safe_read(arg1, 0, 0));
9275         } else {
9276             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9277                 return -TARGET_EFAULT;
9278             ret = get_errno(safe_read(arg1, p, arg3));
9279             if (ret >= 0 &&
9280                 fd_trans_host_to_target_data(arg1)) {
9281                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9282             }
9283             unlock_user(p, arg2, ret);
9284         }
9285         return ret;
9286     case TARGET_NR_write:
9287         if (arg2 == 0 && arg3 == 0) {
9288             return get_errno(safe_write(arg1, 0, 0));
9289         }
9290         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9291             return -TARGET_EFAULT;
9292         if (fd_trans_target_to_host_data(arg1)) {
9293             void *copy = g_malloc(arg3);
9294             memcpy(copy, p, arg3);
9295             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9296             if (ret >= 0) {
9297                 ret = get_errno(safe_write(arg1, copy, ret));
9298             }
9299             g_free(copy);
9300         } else {
9301             ret = get_errno(safe_write(arg1, p, arg3));
9302         }
9303         unlock_user(p, arg2, 0);
9304         return ret;
9305 
9306 #ifdef TARGET_NR_open
9307     case TARGET_NR_open:
9308         if (!(p = lock_user_string(arg1)))
9309             return -TARGET_EFAULT;
9310         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9311                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9312                                   arg3, true));
9313         fd_trans_unregister(ret);
9314         unlock_user(p, arg1, 0);
9315         return ret;
9316 #endif
9317     case TARGET_NR_openat:
9318         if (!(p = lock_user_string(arg2)))
9319             return -TARGET_EFAULT;
9320         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9321                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9322                                   arg4, true));
9323         fd_trans_unregister(ret);
9324         unlock_user(p, arg2, 0);
9325         return ret;
9326     case TARGET_NR_openat2:
9327         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9328         return ret;
9329 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9330     case TARGET_NR_name_to_handle_at:
9331         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9332         return ret;
9333 #endif
9334 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9335     case TARGET_NR_open_by_handle_at:
9336         ret = do_open_by_handle_at(arg1, arg2, arg3);
9337         fd_trans_unregister(ret);
9338         return ret;
9339 #endif
9340 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9341     case TARGET_NR_pidfd_open:
9342         return get_errno(pidfd_open(arg1, arg2));
9343 #endif
9344 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9345     case TARGET_NR_pidfd_send_signal:
9346         {
9347             siginfo_t uinfo, *puinfo;
9348 
9349             if (arg3) {
9350                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9351                 if (!p) {
9352                     return -TARGET_EFAULT;
9353                  }
9354                  target_to_host_siginfo(&uinfo, p);
9355                  unlock_user(p, arg3, 0);
9356                  puinfo = &uinfo;
9357             } else {
9358                  puinfo = NULL;
9359             }
9360             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9361                                               puinfo, arg4));
9362         }
9363         return ret;
9364 #endif
9365 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9366     case TARGET_NR_pidfd_getfd:
9367         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9368 #endif
9369     case TARGET_NR_close:
9370         fd_trans_unregister(arg1);
9371         return get_errno(close(arg1));
9372 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9373     case TARGET_NR_close_range:
9374         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9375         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9376             abi_long fd, maxfd;
9377             maxfd = MIN(arg2, target_fd_max);
9378             for (fd = arg1; fd < maxfd; fd++) {
9379                 fd_trans_unregister(fd);
9380             }
9381         }
9382         return ret;
9383 #endif
9384 
9385     case TARGET_NR_brk:
9386         return do_brk(arg1);
9387 #ifdef TARGET_NR_fork
9388     case TARGET_NR_fork:
9389         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9390 #endif
9391 #ifdef TARGET_NR_waitpid
9392     case TARGET_NR_waitpid:
9393         {
9394             int status;
9395             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9396             if (!is_error(ret) && arg2 && ret
9397                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9398                 return -TARGET_EFAULT;
9399         }
9400         return ret;
9401 #endif
9402 #ifdef TARGET_NR_waitid
9403     case TARGET_NR_waitid:
9404         {
9405             struct rusage ru;
9406             siginfo_t info;
9407 
9408             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9409                                         arg4, (arg5 ? &ru : NULL)));
9410             if (!is_error(ret)) {
9411                 if (arg3) {
9412                     p = lock_user(VERIFY_WRITE, arg3,
9413                                   sizeof(target_siginfo_t), 0);
9414                     if (!p) {
9415                         return -TARGET_EFAULT;
9416                     }
9417                     host_to_target_siginfo(p, &info);
9418                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9419                 }
9420                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9421                     return -TARGET_EFAULT;
9422                 }
9423             }
9424         }
9425         return ret;
9426 #endif
9427 #ifdef TARGET_NR_creat /* not on alpha */
9428     case TARGET_NR_creat:
9429         if (!(p = lock_user_string(arg1)))
9430             return -TARGET_EFAULT;
9431         ret = get_errno(creat(p, arg2));
9432         fd_trans_unregister(ret);
9433         unlock_user(p, arg1, 0);
9434         return ret;
9435 #endif
9436 #ifdef TARGET_NR_link
9437     case TARGET_NR_link:
9438         {
9439             void * p2;
9440             p = lock_user_string(arg1);
9441             p2 = lock_user_string(arg2);
9442             if (!p || !p2)
9443                 ret = -TARGET_EFAULT;
9444             else
9445                 ret = get_errno(link(p, p2));
9446             unlock_user(p2, arg2, 0);
9447             unlock_user(p, arg1, 0);
9448         }
9449         return ret;
9450 #endif
9451 #if defined(TARGET_NR_linkat)
9452     case TARGET_NR_linkat:
9453         {
9454             void * p2 = NULL;
9455             if (!arg2 || !arg4)
9456                 return -TARGET_EFAULT;
9457             p  = lock_user_string(arg2);
9458             p2 = lock_user_string(arg4);
9459             if (!p || !p2)
9460                 ret = -TARGET_EFAULT;
9461             else
9462                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9463             unlock_user(p, arg2, 0);
9464             unlock_user(p2, arg4, 0);
9465         }
9466         return ret;
9467 #endif
9468 #ifdef TARGET_NR_unlink
9469     case TARGET_NR_unlink:
9470         if (!(p = lock_user_string(arg1)))
9471             return -TARGET_EFAULT;
9472         ret = get_errno(unlink(p));
9473         unlock_user(p, arg1, 0);
9474         return ret;
9475 #endif
9476 #if defined(TARGET_NR_unlinkat)
9477     case TARGET_NR_unlinkat:
9478         if (!(p = lock_user_string(arg2)))
9479             return -TARGET_EFAULT;
9480         ret = get_errno(unlinkat(arg1, p, arg3));
9481         unlock_user(p, arg2, 0);
9482         return ret;
9483 #endif
9484     case TARGET_NR_execveat:
9485         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9486     case TARGET_NR_execve:
9487         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9488     case TARGET_NR_chdir:
9489         if (!(p = lock_user_string(arg1)))
9490             return -TARGET_EFAULT;
9491         ret = get_errno(chdir(p));
9492         unlock_user(p, arg1, 0);
9493         return ret;
9494 #ifdef TARGET_NR_time
9495     case TARGET_NR_time:
9496         {
9497             time_t host_time;
9498             ret = get_errno(time(&host_time));
9499             if (!is_error(ret)
9500                 && arg1
9501                 && put_user_sal(host_time, arg1))
9502                 return -TARGET_EFAULT;
9503         }
9504         return ret;
9505 #endif
9506 #ifdef TARGET_NR_mknod
9507     case TARGET_NR_mknod:
9508         if (!(p = lock_user_string(arg1)))
9509             return -TARGET_EFAULT;
9510         ret = get_errno(mknod(p, arg2, arg3));
9511         unlock_user(p, arg1, 0);
9512         return ret;
9513 #endif
9514 #if defined(TARGET_NR_mknodat)
9515     case TARGET_NR_mknodat:
9516         if (!(p = lock_user_string(arg2)))
9517             return -TARGET_EFAULT;
9518         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9519         unlock_user(p, arg2, 0);
9520         return ret;
9521 #endif
9522 #ifdef TARGET_NR_chmod
9523     case TARGET_NR_chmod:
9524         if (!(p = lock_user_string(arg1)))
9525             return -TARGET_EFAULT;
9526         ret = get_errno(chmod(p, arg2));
9527         unlock_user(p, arg1, 0);
9528         return ret;
9529 #endif
9530 #ifdef TARGET_NR_lseek
9531     case TARGET_NR_lseek:
9532         return get_errno(lseek(arg1, arg2, arg3));
9533 #endif
9534 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9535     /* Alpha specific */
9536     case TARGET_NR_getxpid:
9537         cpu_env->ir[IR_A4] = getppid();
9538         return get_errno(getpid());
9539 #endif
9540 #ifdef TARGET_NR_getpid
9541     case TARGET_NR_getpid:
9542         return get_errno(getpid());
9543 #endif
9544     case TARGET_NR_mount:
9545         {
9546             /* need to look at the data field */
9547             void *p2, *p3;
9548 
9549             if (arg1) {
9550                 p = lock_user_string(arg1);
9551                 if (!p) {
9552                     return -TARGET_EFAULT;
9553                 }
9554             } else {
9555                 p = NULL;
9556             }
9557 
9558             p2 = lock_user_string(arg2);
9559             if (!p2) {
9560                 if (arg1) {
9561                     unlock_user(p, arg1, 0);
9562                 }
9563                 return -TARGET_EFAULT;
9564             }
9565 
9566             if (arg3) {
9567                 p3 = lock_user_string(arg3);
9568                 if (!p3) {
9569                     if (arg1) {
9570                         unlock_user(p, arg1, 0);
9571                     }
9572                     unlock_user(p2, arg2, 0);
9573                     return -TARGET_EFAULT;
9574                 }
9575             } else {
9576                 p3 = NULL;
9577             }
9578 
9579             /* FIXME - arg5 should be locked, but it isn't clear how to
9580              * do that since it's not guaranteed to be a NULL-terminated
9581              * string.
9582              */
9583             if (!arg5) {
9584                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9585             } else {
9586                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9587             }
9588             ret = get_errno(ret);
9589 
9590             if (arg1) {
9591                 unlock_user(p, arg1, 0);
9592             }
9593             unlock_user(p2, arg2, 0);
9594             if (arg3) {
9595                 unlock_user(p3, arg3, 0);
9596             }
9597         }
9598         return ret;
9599 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9600 #if defined(TARGET_NR_umount)
9601     case TARGET_NR_umount:
9602 #endif
9603 #if defined(TARGET_NR_oldumount)
9604     case TARGET_NR_oldumount:
9605 #endif
9606         if (!(p = lock_user_string(arg1)))
9607             return -TARGET_EFAULT;
9608         ret = get_errno(umount(p));
9609         unlock_user(p, arg1, 0);
9610         return ret;
9611 #endif
9612 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9613     case TARGET_NR_move_mount:
9614         {
9615             void *p2, *p4;
9616 
9617             if (!arg2 || !arg4) {
9618                 return -TARGET_EFAULT;
9619             }
9620 
9621             p2 = lock_user_string(arg2);
9622             if (!p2) {
9623                 return -TARGET_EFAULT;
9624             }
9625 
9626             p4 = lock_user_string(arg4);
9627             if (!p4) {
9628                 unlock_user(p2, arg2, 0);
9629                 return -TARGET_EFAULT;
9630             }
9631             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9632 
9633             unlock_user(p2, arg2, 0);
9634             unlock_user(p4, arg4, 0);
9635 
9636             return ret;
9637         }
9638 #endif
9639 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9640     case TARGET_NR_open_tree:
9641         {
9642             void *p2;
9643             int host_flags;
9644 
9645             if (!arg2) {
9646                 return -TARGET_EFAULT;
9647             }
9648 
9649             p2 = lock_user_string(arg2);
9650             if (!p2) {
9651                 return -TARGET_EFAULT;
9652             }
9653 
9654             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9655             if (arg3 & TARGET_O_CLOEXEC) {
9656                 host_flags |= O_CLOEXEC;
9657             }
9658 
9659             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9660 
9661             unlock_user(p2, arg2, 0);
9662 
9663             return ret;
9664         }
9665 #endif
9666 #ifdef TARGET_NR_stime /* not on alpha */
9667     case TARGET_NR_stime:
9668         {
9669             struct timespec ts;
9670             ts.tv_nsec = 0;
9671             if (get_user_sal(ts.tv_sec, arg1)) {
9672                 return -TARGET_EFAULT;
9673             }
9674             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9675         }
9676 #endif
9677 #ifdef TARGET_NR_alarm /* not on alpha */
9678     case TARGET_NR_alarm:
9679         return alarm(arg1);
9680 #endif
9681 #ifdef TARGET_NR_pause /* not on alpha */
9682     case TARGET_NR_pause:
9683         if (!block_signals()) {
9684             sigsuspend(&get_task_state(cpu)->signal_mask);
9685         }
9686         return -TARGET_EINTR;
9687 #endif
9688 #ifdef TARGET_NR_utime
9689     case TARGET_NR_utime:
9690         {
9691             struct utimbuf tbuf, *host_tbuf;
9692             struct target_utimbuf *target_tbuf;
9693             if (arg2) {
9694                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9695                     return -TARGET_EFAULT;
9696                 tbuf.actime = tswapal(target_tbuf->actime);
9697                 tbuf.modtime = tswapal(target_tbuf->modtime);
9698                 unlock_user_struct(target_tbuf, arg2, 0);
9699                 host_tbuf = &tbuf;
9700             } else {
9701                 host_tbuf = NULL;
9702             }
9703             if (!(p = lock_user_string(arg1)))
9704                 return -TARGET_EFAULT;
9705             ret = get_errno(utime(p, host_tbuf));
9706             unlock_user(p, arg1, 0);
9707         }
9708         return ret;
9709 #endif
9710 #ifdef TARGET_NR_utimes
9711     case TARGET_NR_utimes:
9712         {
9713             struct timeval *tvp, tv[2];
9714             if (arg2) {
9715                 if (copy_from_user_timeval(&tv[0], arg2)
9716                     || copy_from_user_timeval(&tv[1],
9717                                               arg2 + sizeof(struct target_timeval)))
9718                     return -TARGET_EFAULT;
9719                 tvp = tv;
9720             } else {
9721                 tvp = NULL;
9722             }
9723             if (!(p = lock_user_string(arg1)))
9724                 return -TARGET_EFAULT;
9725             ret = get_errno(utimes(p, tvp));
9726             unlock_user(p, arg1, 0);
9727         }
9728         return ret;
9729 #endif
9730 #if defined(TARGET_NR_futimesat)
9731     case TARGET_NR_futimesat:
9732         {
9733             struct timeval *tvp, tv[2];
9734             if (arg3) {
9735                 if (copy_from_user_timeval(&tv[0], arg3)
9736                     || copy_from_user_timeval(&tv[1],
9737                                               arg3 + sizeof(struct target_timeval)))
9738                     return -TARGET_EFAULT;
9739                 tvp = tv;
9740             } else {
9741                 tvp = NULL;
9742             }
9743             if (!(p = lock_user_string(arg2))) {
9744                 return -TARGET_EFAULT;
9745             }
9746             ret = get_errno(futimesat(arg1, path(p), tvp));
9747             unlock_user(p, arg2, 0);
9748         }
9749         return ret;
9750 #endif
9751 #ifdef TARGET_NR_access
9752     case TARGET_NR_access:
9753         if (!(p = lock_user_string(arg1))) {
9754             return -TARGET_EFAULT;
9755         }
9756         ret = get_errno(access(path(p), arg2));
9757         unlock_user(p, arg1, 0);
9758         return ret;
9759 #endif
9760 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9761     case TARGET_NR_faccessat:
9762         if (!(p = lock_user_string(arg2))) {
9763             return -TARGET_EFAULT;
9764         }
9765         ret = get_errno(faccessat(arg1, p, arg3, 0));
9766         unlock_user(p, arg2, 0);
9767         return ret;
9768 #endif
9769 #if defined(TARGET_NR_faccessat2)
9770     case TARGET_NR_faccessat2:
9771         if (!(p = lock_user_string(arg2))) {
9772             return -TARGET_EFAULT;
9773         }
9774         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9775         unlock_user(p, arg2, 0);
9776         return ret;
9777 #endif
9778 #ifdef TARGET_NR_nice /* not on alpha */
9779     case TARGET_NR_nice:
9780         return get_errno(nice(arg1));
9781 #endif
9782     case TARGET_NR_sync:
9783         sync();
9784         return 0;
9785 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9786     case TARGET_NR_syncfs:
9787         return get_errno(syncfs(arg1));
9788 #endif
9789     case TARGET_NR_kill:
9790         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9791 #ifdef TARGET_NR_rename
9792     case TARGET_NR_rename:
9793         {
9794             void *p2;
9795             p = lock_user_string(arg1);
9796             p2 = lock_user_string(arg2);
9797             if (!p || !p2)
9798                 ret = -TARGET_EFAULT;
9799             else
9800                 ret = get_errno(rename(p, p2));
9801             unlock_user(p2, arg2, 0);
9802             unlock_user(p, arg1, 0);
9803         }
9804         return ret;
9805 #endif
9806 #if defined(TARGET_NR_renameat)
9807     case TARGET_NR_renameat:
9808         {
9809             void *p2;
9810             p  = lock_user_string(arg2);
9811             p2 = lock_user_string(arg4);
9812             if (!p || !p2)
9813                 ret = -TARGET_EFAULT;
9814             else
9815                 ret = get_errno(renameat(arg1, p, arg3, p2));
9816             unlock_user(p2, arg4, 0);
9817             unlock_user(p, arg2, 0);
9818         }
9819         return ret;
9820 #endif
9821 #if defined(TARGET_NR_renameat2)
9822     case TARGET_NR_renameat2:
9823         {
9824             void *p2;
9825             p  = lock_user_string(arg2);
9826             p2 = lock_user_string(arg4);
9827             if (!p || !p2) {
9828                 ret = -TARGET_EFAULT;
9829             } else {
9830                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9831             }
9832             unlock_user(p2, arg4, 0);
9833             unlock_user(p, arg2, 0);
9834         }
9835         return ret;
9836 #endif
9837 #ifdef TARGET_NR_mkdir
9838     case TARGET_NR_mkdir:
9839         if (!(p = lock_user_string(arg1)))
9840             return -TARGET_EFAULT;
9841         ret = get_errno(mkdir(p, arg2));
9842         unlock_user(p, arg1, 0);
9843         return ret;
9844 #endif
9845 #if defined(TARGET_NR_mkdirat)
9846     case TARGET_NR_mkdirat:
9847         if (!(p = lock_user_string(arg2)))
9848             return -TARGET_EFAULT;
9849         ret = get_errno(mkdirat(arg1, p, arg3));
9850         unlock_user(p, arg2, 0);
9851         return ret;
9852 #endif
9853 #ifdef TARGET_NR_rmdir
9854     case TARGET_NR_rmdir:
9855         if (!(p = lock_user_string(arg1)))
9856             return -TARGET_EFAULT;
9857         ret = get_errno(rmdir(p));
9858         unlock_user(p, arg1, 0);
9859         return ret;
9860 #endif
9861     case TARGET_NR_dup:
9862         ret = get_errno(dup(arg1));
9863         if (ret >= 0) {
9864             fd_trans_dup(arg1, ret);
9865         }
9866         return ret;
9867 #ifdef TARGET_NR_pipe
9868     case TARGET_NR_pipe:
9869         return do_pipe(cpu_env, arg1, 0, 0);
9870 #endif
9871 #ifdef TARGET_NR_pipe2
9872     case TARGET_NR_pipe2:
9873         return do_pipe(cpu_env, arg1,
9874                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9875 #endif
9876     case TARGET_NR_times:
9877         {
9878             struct target_tms *tmsp;
9879             struct tms tms;
9880             ret = get_errno(times(&tms));
9881             if (arg1) {
9882                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9883                 if (!tmsp)
9884                     return -TARGET_EFAULT;
9885                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9886                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9887                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9888                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9889             }
9890             if (!is_error(ret))
9891                 ret = host_to_target_clock_t(ret);
9892         }
9893         return ret;
9894     case TARGET_NR_acct:
9895         if (arg1 == 0) {
9896             ret = get_errno(acct(NULL));
9897         } else {
9898             if (!(p = lock_user_string(arg1))) {
9899                 return -TARGET_EFAULT;
9900             }
9901             ret = get_errno(acct(path(p)));
9902             unlock_user(p, arg1, 0);
9903         }
9904         return ret;
9905 #ifdef TARGET_NR_umount2
9906     case TARGET_NR_umount2:
9907         if (!(p = lock_user_string(arg1)))
9908             return -TARGET_EFAULT;
9909         ret = get_errno(umount2(p, arg2));
9910         unlock_user(p, arg1, 0);
9911         return ret;
9912 #endif
9913     case TARGET_NR_ioctl:
9914         return do_ioctl(arg1, arg2, arg3);
9915 #ifdef TARGET_NR_fcntl
9916     case TARGET_NR_fcntl:
9917         return do_fcntl(arg1, arg2, arg3);
9918 #endif
9919     case TARGET_NR_setpgid:
9920         return get_errno(setpgid(arg1, arg2));
9921     case TARGET_NR_umask:
9922         return get_errno(umask(arg1));
9923     case TARGET_NR_chroot:
9924         if (!(p = lock_user_string(arg1)))
9925             return -TARGET_EFAULT;
9926         ret = get_errno(chroot(p));
9927         unlock_user(p, arg1, 0);
9928         return ret;
9929 #ifdef TARGET_NR_dup2
9930     case TARGET_NR_dup2:
9931         ret = get_errno(dup2(arg1, arg2));
9932         if (ret >= 0) {
9933             fd_trans_dup(arg1, arg2);
9934         }
9935         return ret;
9936 #endif
9937 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9938     case TARGET_NR_dup3:
9939     {
9940         int host_flags;
9941 
9942         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9943             return -EINVAL;
9944         }
9945         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9946         ret = get_errno(dup3(arg1, arg2, host_flags));
9947         if (ret >= 0) {
9948             fd_trans_dup(arg1, arg2);
9949         }
9950         return ret;
9951     }
9952 #endif
9953 #ifdef TARGET_NR_getppid /* not on alpha */
9954     case TARGET_NR_getppid:
9955         return get_errno(getppid());
9956 #endif
9957 #ifdef TARGET_NR_getpgrp
9958     case TARGET_NR_getpgrp:
9959         return get_errno(getpgrp());
9960 #endif
9961     case TARGET_NR_setsid:
9962         return get_errno(setsid());
9963 #ifdef TARGET_NR_sigaction
9964     case TARGET_NR_sigaction:
9965         {
9966 #if defined(TARGET_MIPS)
9967 	    struct target_sigaction act, oact, *pact, *old_act;
9968 
9969 	    if (arg2) {
9970                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9971                     return -TARGET_EFAULT;
9972 		act._sa_handler = old_act->_sa_handler;
9973 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9974 		act.sa_flags = old_act->sa_flags;
9975 		unlock_user_struct(old_act, arg2, 0);
9976 		pact = &act;
9977 	    } else {
9978 		pact = NULL;
9979 	    }
9980 
9981         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9982 
9983 	    if (!is_error(ret) && arg3) {
9984                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9985                     return -TARGET_EFAULT;
9986 		old_act->_sa_handler = oact._sa_handler;
9987 		old_act->sa_flags = oact.sa_flags;
9988 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9989 		old_act->sa_mask.sig[1] = 0;
9990 		old_act->sa_mask.sig[2] = 0;
9991 		old_act->sa_mask.sig[3] = 0;
9992 		unlock_user_struct(old_act, arg3, 1);
9993 	    }
9994 #else
9995             struct target_old_sigaction *old_act;
9996             struct target_sigaction act, oact, *pact;
9997             if (arg2) {
9998                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9999                     return -TARGET_EFAULT;
10000                 act._sa_handler = old_act->_sa_handler;
10001                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10002                 act.sa_flags = old_act->sa_flags;
10003 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10004                 act.sa_restorer = old_act->sa_restorer;
10005 #endif
10006                 unlock_user_struct(old_act, arg2, 0);
10007                 pact = &act;
10008             } else {
10009                 pact = NULL;
10010             }
10011             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10012             if (!is_error(ret) && arg3) {
10013                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10014                     return -TARGET_EFAULT;
10015                 old_act->_sa_handler = oact._sa_handler;
10016                 old_act->sa_mask = oact.sa_mask.sig[0];
10017                 old_act->sa_flags = oact.sa_flags;
10018 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10019                 old_act->sa_restorer = oact.sa_restorer;
10020 #endif
10021                 unlock_user_struct(old_act, arg3, 1);
10022             }
10023 #endif
10024         }
10025         return ret;
10026 #endif
10027     case TARGET_NR_rt_sigaction:
10028         {
10029             /*
10030              * For Alpha and SPARC this is a 5 argument syscall, with
10031              * a 'restorer' parameter which must be copied into the
10032              * sa_restorer field of the sigaction struct.
10033              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10034              * and arg5 is the sigsetsize.
10035              */
10036 #if defined(TARGET_ALPHA)
10037             target_ulong sigsetsize = arg4;
10038             target_ulong restorer = arg5;
10039 #elif defined(TARGET_SPARC)
10040             target_ulong restorer = arg4;
10041             target_ulong sigsetsize = arg5;
10042 #else
10043             target_ulong sigsetsize = arg4;
10044             target_ulong restorer = 0;
10045 #endif
10046             struct target_sigaction *act = NULL;
10047             struct target_sigaction *oact = NULL;
10048 
10049             if (sigsetsize != sizeof(target_sigset_t)) {
10050                 return -TARGET_EINVAL;
10051             }
10052             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10053                 return -TARGET_EFAULT;
10054             }
10055             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10056                 ret = -TARGET_EFAULT;
10057             } else {
10058                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10059                 if (oact) {
10060                     unlock_user_struct(oact, arg3, 1);
10061                 }
10062             }
10063             if (act) {
10064                 unlock_user_struct(act, arg2, 0);
10065             }
10066         }
10067         return ret;
10068 #ifdef TARGET_NR_sgetmask /* not on alpha */
10069     case TARGET_NR_sgetmask:
10070         {
10071             sigset_t cur_set;
10072             abi_ulong target_set;
10073             ret = do_sigprocmask(0, NULL, &cur_set);
10074             if (!ret) {
10075                 host_to_target_old_sigset(&target_set, &cur_set);
10076                 ret = target_set;
10077             }
10078         }
10079         return ret;
10080 #endif
10081 #ifdef TARGET_NR_ssetmask /* not on alpha */
10082     case TARGET_NR_ssetmask:
10083         {
10084             sigset_t set, oset;
10085             abi_ulong target_set = arg1;
10086             target_to_host_old_sigset(&set, &target_set);
10087             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10088             if (!ret) {
10089                 host_to_target_old_sigset(&target_set, &oset);
10090                 ret = target_set;
10091             }
10092         }
10093         return ret;
10094 #endif
10095 #ifdef TARGET_NR_sigprocmask
10096     case TARGET_NR_sigprocmask:
10097         {
10098 #if defined(TARGET_ALPHA)
10099             sigset_t set, oldset;
10100             abi_ulong mask;
10101             int how;
10102 
10103             switch (arg1) {
10104             case TARGET_SIG_BLOCK:
10105                 how = SIG_BLOCK;
10106                 break;
10107             case TARGET_SIG_UNBLOCK:
10108                 how = SIG_UNBLOCK;
10109                 break;
10110             case TARGET_SIG_SETMASK:
10111                 how = SIG_SETMASK;
10112                 break;
10113             default:
10114                 return -TARGET_EINVAL;
10115             }
10116             mask = arg2;
10117             target_to_host_old_sigset(&set, &mask);
10118 
10119             ret = do_sigprocmask(how, &set, &oldset);
10120             if (!is_error(ret)) {
10121                 host_to_target_old_sigset(&mask, &oldset);
10122                 ret = mask;
10123                 cpu_env->ir[IR_V0] = 0; /* force no error */
10124             }
10125 #else
10126             sigset_t set, oldset, *set_ptr;
10127             int how;
10128 
10129             if (arg2) {
10130                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10131                 if (!p) {
10132                     return -TARGET_EFAULT;
10133                 }
10134                 target_to_host_old_sigset(&set, p);
10135                 unlock_user(p, arg2, 0);
10136                 set_ptr = &set;
10137                 switch (arg1) {
10138                 case TARGET_SIG_BLOCK:
10139                     how = SIG_BLOCK;
10140                     break;
10141                 case TARGET_SIG_UNBLOCK:
10142                     how = SIG_UNBLOCK;
10143                     break;
10144                 case TARGET_SIG_SETMASK:
10145                     how = SIG_SETMASK;
10146                     break;
10147                 default:
10148                     return -TARGET_EINVAL;
10149                 }
10150             } else {
10151                 how = 0;
10152                 set_ptr = NULL;
10153             }
10154             ret = do_sigprocmask(how, set_ptr, &oldset);
10155             if (!is_error(ret) && arg3) {
10156                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10157                     return -TARGET_EFAULT;
10158                 host_to_target_old_sigset(p, &oldset);
10159                 unlock_user(p, arg3, sizeof(target_sigset_t));
10160             }
10161 #endif
10162         }
10163         return ret;
10164 #endif
10165     case TARGET_NR_rt_sigprocmask:
10166         {
10167             int how = arg1;
10168             sigset_t set, oldset, *set_ptr;
10169 
10170             if (arg4 != sizeof(target_sigset_t)) {
10171                 return -TARGET_EINVAL;
10172             }
10173 
10174             if (arg2) {
10175                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10176                 if (!p) {
10177                     return -TARGET_EFAULT;
10178                 }
10179                 target_to_host_sigset(&set, p);
10180                 unlock_user(p, arg2, 0);
10181                 set_ptr = &set;
10182                 switch(how) {
10183                 case TARGET_SIG_BLOCK:
10184                     how = SIG_BLOCK;
10185                     break;
10186                 case TARGET_SIG_UNBLOCK:
10187                     how = SIG_UNBLOCK;
10188                     break;
10189                 case TARGET_SIG_SETMASK:
10190                     how = SIG_SETMASK;
10191                     break;
10192                 default:
10193                     return -TARGET_EINVAL;
10194                 }
10195             } else {
10196                 how = 0;
10197                 set_ptr = NULL;
10198             }
10199             ret = do_sigprocmask(how, set_ptr, &oldset);
10200             if (!is_error(ret) && arg3) {
10201                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10202                     return -TARGET_EFAULT;
10203                 host_to_target_sigset(p, &oldset);
10204                 unlock_user(p, arg3, sizeof(target_sigset_t));
10205             }
10206         }
10207         return ret;
10208 #ifdef TARGET_NR_sigpending
10209     case TARGET_NR_sigpending:
10210         {
10211             sigset_t set;
10212             ret = get_errno(sigpending(&set));
10213             if (!is_error(ret)) {
10214                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10215                     return -TARGET_EFAULT;
10216                 host_to_target_old_sigset(p, &set);
10217                 unlock_user(p, arg1, sizeof(target_sigset_t));
10218             }
10219         }
10220         return ret;
10221 #endif
10222     case TARGET_NR_rt_sigpending:
10223         {
10224             sigset_t set;
10225 
10226             /* Yes, this check is >, not != like most. We follow the kernel's
10227              * logic and it does it like this because it implements
10228              * NR_sigpending through the same code path, and in that case
10229              * the old_sigset_t is smaller in size.
10230              */
10231             if (arg2 > sizeof(target_sigset_t)) {
10232                 return -TARGET_EINVAL;
10233             }
10234 
10235             ret = get_errno(sigpending(&set));
10236             if (!is_error(ret)) {
10237                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10238                     return -TARGET_EFAULT;
10239                 host_to_target_sigset(p, &set);
10240                 unlock_user(p, arg1, sizeof(target_sigset_t));
10241             }
10242         }
10243         return ret;
10244 #ifdef TARGET_NR_sigsuspend
10245     case TARGET_NR_sigsuspend:
10246         {
10247             sigset_t *set;
10248 
10249 #if defined(TARGET_ALPHA)
10250             TaskState *ts = get_task_state(cpu);
10251             /* target_to_host_old_sigset will bswap back */
10252             abi_ulong mask = tswapal(arg1);
10253             set = &ts->sigsuspend_mask;
10254             target_to_host_old_sigset(set, &mask);
10255 #else
10256             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10257             if (ret != 0) {
10258                 return ret;
10259             }
10260 #endif
10261             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10262             finish_sigsuspend_mask(ret);
10263         }
10264         return ret;
10265 #endif
10266     case TARGET_NR_rt_sigsuspend:
10267         {
10268             sigset_t *set;
10269 
10270             ret = process_sigsuspend_mask(&set, arg1, arg2);
10271             if (ret != 0) {
10272                 return ret;
10273             }
10274             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10275             finish_sigsuspend_mask(ret);
10276         }
10277         return ret;
10278 #ifdef TARGET_NR_rt_sigtimedwait
10279     case TARGET_NR_rt_sigtimedwait:
10280         {
10281             sigset_t set;
10282             struct timespec uts, *puts;
10283             siginfo_t uinfo;
10284 
10285             if (arg4 != sizeof(target_sigset_t)) {
10286                 return -TARGET_EINVAL;
10287             }
10288 
10289             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10290                 return -TARGET_EFAULT;
10291             target_to_host_sigset(&set, p);
10292             unlock_user(p, arg1, 0);
10293             if (arg3) {
10294                 puts = &uts;
10295                 if (target_to_host_timespec(puts, arg3)) {
10296                     return -TARGET_EFAULT;
10297                 }
10298             } else {
10299                 puts = NULL;
10300             }
10301             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10302                                                  SIGSET_T_SIZE));
10303             if (!is_error(ret)) {
10304                 if (arg2) {
10305                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10306                                   0);
10307                     if (!p) {
10308                         return -TARGET_EFAULT;
10309                     }
10310                     host_to_target_siginfo(p, &uinfo);
10311                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10312                 }
10313                 ret = host_to_target_signal(ret);
10314             }
10315         }
10316         return ret;
10317 #endif
10318 #ifdef TARGET_NR_rt_sigtimedwait_time64
10319     case TARGET_NR_rt_sigtimedwait_time64:
10320         {
10321             sigset_t set;
10322             struct timespec uts, *puts;
10323             siginfo_t uinfo;
10324 
10325             if (arg4 != sizeof(target_sigset_t)) {
10326                 return -TARGET_EINVAL;
10327             }
10328 
10329             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10330             if (!p) {
10331                 return -TARGET_EFAULT;
10332             }
10333             target_to_host_sigset(&set, p);
10334             unlock_user(p, arg1, 0);
10335             if (arg3) {
10336                 puts = &uts;
10337                 if (target_to_host_timespec64(puts, arg3)) {
10338                     return -TARGET_EFAULT;
10339                 }
10340             } else {
10341                 puts = NULL;
10342             }
10343             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10344                                                  SIGSET_T_SIZE));
10345             if (!is_error(ret)) {
10346                 if (arg2) {
10347                     p = lock_user(VERIFY_WRITE, arg2,
10348                                   sizeof(target_siginfo_t), 0);
10349                     if (!p) {
10350                         return -TARGET_EFAULT;
10351                     }
10352                     host_to_target_siginfo(p, &uinfo);
10353                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10354                 }
10355                 ret = host_to_target_signal(ret);
10356             }
10357         }
10358         return ret;
10359 #endif
10360     case TARGET_NR_rt_sigqueueinfo:
10361         {
10362             siginfo_t uinfo;
10363 
10364             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10365             if (!p) {
10366                 return -TARGET_EFAULT;
10367             }
10368             target_to_host_siginfo(&uinfo, p);
10369             unlock_user(p, arg3, 0);
10370             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10371         }
10372         return ret;
10373     case TARGET_NR_rt_tgsigqueueinfo:
10374         {
10375             siginfo_t uinfo;
10376 
10377             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10378             if (!p) {
10379                 return -TARGET_EFAULT;
10380             }
10381             target_to_host_siginfo(&uinfo, p);
10382             unlock_user(p, arg4, 0);
10383             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10384         }
10385         return ret;
10386 #ifdef TARGET_NR_sigreturn
10387     case TARGET_NR_sigreturn:
10388         if (block_signals()) {
10389             return -QEMU_ERESTARTSYS;
10390         }
10391         return do_sigreturn(cpu_env);
10392 #endif
10393     case TARGET_NR_rt_sigreturn:
10394         if (block_signals()) {
10395             return -QEMU_ERESTARTSYS;
10396         }
10397         return do_rt_sigreturn(cpu_env);
10398     case TARGET_NR_sethostname:
10399         if (!(p = lock_user_string(arg1)))
10400             return -TARGET_EFAULT;
10401         ret = get_errno(sethostname(p, arg2));
10402         unlock_user(p, arg1, 0);
10403         return ret;
10404 #ifdef TARGET_NR_setrlimit
10405     case TARGET_NR_setrlimit:
10406         {
10407             int resource = target_to_host_resource(arg1);
10408             struct target_rlimit *target_rlim;
10409             struct rlimit rlim;
10410             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10411                 return -TARGET_EFAULT;
10412             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10413             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10414             unlock_user_struct(target_rlim, arg2, 0);
10415             /*
10416              * If we just passed through resource limit settings for memory then
10417              * they would also apply to QEMU's own allocations, and QEMU will
10418              * crash or hang or die if its allocations fail. Ideally we would
10419              * track the guest allocations in QEMU and apply the limits ourselves.
10420              * For now, just tell the guest the call succeeded but don't actually
10421              * limit anything.
10422              */
10423             if (resource != RLIMIT_AS &&
10424                 resource != RLIMIT_DATA &&
10425                 resource != RLIMIT_STACK) {
10426                 return get_errno(setrlimit(resource, &rlim));
10427             } else {
10428                 return 0;
10429             }
10430         }
10431 #endif
10432 #ifdef TARGET_NR_getrlimit
10433     case TARGET_NR_getrlimit:
10434         {
10435             int resource = target_to_host_resource(arg1);
10436             struct target_rlimit *target_rlim;
10437             struct rlimit rlim;
10438 
10439             ret = get_errno(getrlimit(resource, &rlim));
10440             if (!is_error(ret)) {
10441                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10442                     return -TARGET_EFAULT;
10443                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10444                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10445                 unlock_user_struct(target_rlim, arg2, 1);
10446             }
10447         }
10448         return ret;
10449 #endif
10450     case TARGET_NR_getrusage:
10451         {
10452             struct rusage rusage;
10453             ret = get_errno(getrusage(arg1, &rusage));
10454             if (!is_error(ret)) {
10455                 ret = host_to_target_rusage(arg2, &rusage);
10456             }
10457         }
10458         return ret;
10459 #if defined(TARGET_NR_gettimeofday)
10460     case TARGET_NR_gettimeofday:
10461         {
10462             struct timeval tv;
10463             struct timezone tz;
10464 
10465             ret = get_errno(gettimeofday(&tv, &tz));
10466             if (!is_error(ret)) {
10467                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10468                     return -TARGET_EFAULT;
10469                 }
10470                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10471                     return -TARGET_EFAULT;
10472                 }
10473             }
10474         }
10475         return ret;
10476 #endif
10477 #if defined(TARGET_NR_settimeofday)
10478     case TARGET_NR_settimeofday:
10479         {
10480             struct timeval tv, *ptv = NULL;
10481             struct timezone tz, *ptz = NULL;
10482 
10483             if (arg1) {
10484                 if (copy_from_user_timeval(&tv, arg1)) {
10485                     return -TARGET_EFAULT;
10486                 }
10487                 ptv = &tv;
10488             }
10489 
10490             if (arg2) {
10491                 if (copy_from_user_timezone(&tz, arg2)) {
10492                     return -TARGET_EFAULT;
10493                 }
10494                 ptz = &tz;
10495             }
10496 
10497             return get_errno(settimeofday(ptv, ptz));
10498         }
10499 #endif
10500 #if defined(TARGET_NR_select)
10501     case TARGET_NR_select:
10502 #if defined(TARGET_WANT_NI_OLD_SELECT)
10503         /* some architectures used to have old_select here
10504          * but now ENOSYS it.
10505          */
10506         ret = -TARGET_ENOSYS;
10507 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10508         ret = do_old_select(arg1);
10509 #else
10510         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10511 #endif
10512         return ret;
10513 #endif
10514 #ifdef TARGET_NR_pselect6
10515     case TARGET_NR_pselect6:
10516         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10517 #endif
10518 #ifdef TARGET_NR_pselect6_time64
10519     case TARGET_NR_pselect6_time64:
10520         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10521 #endif
10522 #ifdef TARGET_NR_symlink
10523     case TARGET_NR_symlink:
10524         {
10525             void *p2;
10526             p = lock_user_string(arg1);
10527             p2 = lock_user_string(arg2);
10528             if (!p || !p2)
10529                 ret = -TARGET_EFAULT;
10530             else
10531                 ret = get_errno(symlink(p, p2));
10532             unlock_user(p2, arg2, 0);
10533             unlock_user(p, arg1, 0);
10534         }
10535         return ret;
10536 #endif
10537 #if defined(TARGET_NR_symlinkat)
10538     case TARGET_NR_symlinkat:
10539         {
10540             void *p2;
10541             p  = lock_user_string(arg1);
10542             p2 = lock_user_string(arg3);
10543             if (!p || !p2)
10544                 ret = -TARGET_EFAULT;
10545             else
10546                 ret = get_errno(symlinkat(p, arg2, p2));
10547             unlock_user(p2, arg3, 0);
10548             unlock_user(p, arg1, 0);
10549         }
10550         return ret;
10551 #endif
10552 #ifdef TARGET_NR_readlink
10553     case TARGET_NR_readlink:
10554         {
10555             void *p2;
10556             p = lock_user_string(arg1);
10557             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10558             ret = get_errno(do_guest_readlink(p, p2, arg3));
10559             unlock_user(p2, arg2, ret);
10560             unlock_user(p, arg1, 0);
10561         }
10562         return ret;
10563 #endif
10564 #if defined(TARGET_NR_readlinkat)
10565     case TARGET_NR_readlinkat:
10566         {
10567             void *p2;
10568             p  = lock_user_string(arg2);
10569             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10570             if (!p || !p2) {
10571                 ret = -TARGET_EFAULT;
10572             } else if (!arg4) {
10573                 /* Short circuit this for the magic exe check. */
10574                 ret = -TARGET_EINVAL;
10575             } else if (is_proc_myself((const char *)p, "exe")) {
10576                 /*
10577                  * Don't worry about sign mismatch as earlier mapping
10578                  * logic would have thrown a bad address error.
10579                  */
10580                 ret = MIN(strlen(exec_path), arg4);
10581                 /* We cannot NUL terminate the string. */
10582                 memcpy(p2, exec_path, ret);
10583             } else {
10584                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10585             }
10586             unlock_user(p2, arg3, ret);
10587             unlock_user(p, arg2, 0);
10588         }
10589         return ret;
10590 #endif
10591 #ifdef TARGET_NR_swapon
10592     case TARGET_NR_swapon:
10593         if (!(p = lock_user_string(arg1)))
10594             return -TARGET_EFAULT;
10595         ret = get_errno(swapon(p, arg2));
10596         unlock_user(p, arg1, 0);
10597         return ret;
10598 #endif
10599     case TARGET_NR_reboot:
10600         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10601            /* arg4 must be ignored in all other cases */
10602            p = lock_user_string(arg4);
10603            if (!p) {
10604                return -TARGET_EFAULT;
10605            }
10606            ret = get_errno(reboot(arg1, arg2, arg3, p));
10607            unlock_user(p, arg4, 0);
10608         } else {
10609            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10610         }
10611         return ret;
10612 #ifdef TARGET_NR_mmap
10613     case TARGET_NR_mmap:
10614 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10615         {
10616             abi_ulong *v;
10617             abi_ulong v1, v2, v3, v4, v5, v6;
10618             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10619                 return -TARGET_EFAULT;
10620             v1 = tswapal(v[0]);
10621             v2 = tswapal(v[1]);
10622             v3 = tswapal(v[2]);
10623             v4 = tswapal(v[3]);
10624             v5 = tswapal(v[4]);
10625             v6 = tswapal(v[5]);
10626             unlock_user(v, arg1, 0);
10627             return do_mmap(v1, v2, v3, v4, v5, v6);
10628         }
10629 #else
10630         /* mmap pointers are always untagged */
10631         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10632 #endif
10633 #endif
10634 #ifdef TARGET_NR_mmap2
10635     case TARGET_NR_mmap2:
10636 #ifndef MMAP_SHIFT
10637 #define MMAP_SHIFT 12
10638 #endif
10639         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10640                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10641 #endif
10642     case TARGET_NR_munmap:
10643         arg1 = cpu_untagged_addr(cpu, arg1);
10644         return get_errno(target_munmap(arg1, arg2));
10645     case TARGET_NR_mprotect:
10646         arg1 = cpu_untagged_addr(cpu, arg1);
10647         {
10648             TaskState *ts = get_task_state(cpu);
10649             /* Special hack to detect libc making the stack executable.  */
10650             if ((arg3 & PROT_GROWSDOWN)
10651                 && arg1 >= ts->info->stack_limit
10652                 && arg1 <= ts->info->start_stack) {
10653                 arg3 &= ~PROT_GROWSDOWN;
10654                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10655                 arg1 = ts->info->stack_limit;
10656             }
10657         }
10658         return get_errno(target_mprotect(arg1, arg2, arg3));
10659 #ifdef TARGET_NR_mremap
10660     case TARGET_NR_mremap:
10661         arg1 = cpu_untagged_addr(cpu, arg1);
10662         /* mremap new_addr (arg5) is always untagged */
10663         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10664 #endif
10665         /* ??? msync/mlock/munlock are broken for softmmu.  */
10666 #ifdef TARGET_NR_msync
10667     case TARGET_NR_msync:
10668         return get_errno(msync(g2h(cpu, arg1), arg2,
10669                                target_to_host_msync_arg(arg3)));
10670 #endif
10671 #ifdef TARGET_NR_mlock
10672     case TARGET_NR_mlock:
10673         return get_errno(mlock(g2h(cpu, arg1), arg2));
10674 #endif
10675 #ifdef TARGET_NR_munlock
10676     case TARGET_NR_munlock:
10677         return get_errno(munlock(g2h(cpu, arg1), arg2));
10678 #endif
10679 #ifdef TARGET_NR_mlockall
10680     case TARGET_NR_mlockall:
10681         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10682 #endif
10683 #ifdef TARGET_NR_munlockall
10684     case TARGET_NR_munlockall:
10685         return get_errno(munlockall());
10686 #endif
10687 #ifdef TARGET_NR_truncate
10688     case TARGET_NR_truncate:
10689         if (!(p = lock_user_string(arg1)))
10690             return -TARGET_EFAULT;
10691         ret = get_errno(truncate(p, arg2));
10692         unlock_user(p, arg1, 0);
10693         return ret;
10694 #endif
10695 #ifdef TARGET_NR_ftruncate
10696     case TARGET_NR_ftruncate:
10697         return get_errno(ftruncate(arg1, arg2));
10698 #endif
10699     case TARGET_NR_fchmod:
10700         return get_errno(fchmod(arg1, arg2));
10701 #if defined(TARGET_NR_fchmodat)
10702     case TARGET_NR_fchmodat:
10703         if (!(p = lock_user_string(arg2)))
10704             return -TARGET_EFAULT;
10705         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10706         unlock_user(p, arg2, 0);
10707         return ret;
10708 #endif
10709     case TARGET_NR_getpriority:
10710         /* Note that negative values are valid for getpriority, so we must
10711            differentiate based on errno settings.  */
10712         errno = 0;
10713         ret = getpriority(arg1, arg2);
10714         if (ret == -1 && errno != 0) {
10715             return -host_to_target_errno(errno);
10716         }
10717 #ifdef TARGET_ALPHA
10718         /* Return value is the unbiased priority.  Signal no error.  */
10719         cpu_env->ir[IR_V0] = 0;
10720 #else
10721         /* Return value is a biased priority to avoid negative numbers.  */
10722         ret = 20 - ret;
10723 #endif
10724         return ret;
10725     case TARGET_NR_setpriority:
10726         return get_errno(setpriority(arg1, arg2, arg3));
10727 #ifdef TARGET_NR_statfs
10728     case TARGET_NR_statfs:
10729         if (!(p = lock_user_string(arg1))) {
10730             return -TARGET_EFAULT;
10731         }
10732         ret = get_errno(statfs(path(p), &stfs));
10733         unlock_user(p, arg1, 0);
10734     convert_statfs:
10735         if (!is_error(ret)) {
10736             struct target_statfs *target_stfs;
10737 
10738             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10739                 return -TARGET_EFAULT;
10740             __put_user(stfs.f_type, &target_stfs->f_type);
10741             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10742             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10743             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10744             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10745             __put_user(stfs.f_files, &target_stfs->f_files);
10746             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10747             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10748             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10749             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10750             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10751 #ifdef _STATFS_F_FLAGS
10752             __put_user(stfs.f_flags, &target_stfs->f_flags);
10753 #else
10754             __put_user(0, &target_stfs->f_flags);
10755 #endif
10756             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10757             unlock_user_struct(target_stfs, arg2, 1);
10758         }
10759         return ret;
10760 #endif
10761 #ifdef TARGET_NR_fstatfs
10762     case TARGET_NR_fstatfs:
10763         ret = get_errno(fstatfs(arg1, &stfs));
10764         goto convert_statfs;
10765 #endif
10766 #ifdef TARGET_NR_statfs64
10767     case TARGET_NR_statfs64:
10768         if (!(p = lock_user_string(arg1))) {
10769             return -TARGET_EFAULT;
10770         }
10771         ret = get_errno(statfs(path(p), &stfs));
10772         unlock_user(p, arg1, 0);
10773     convert_statfs64:
10774         if (!is_error(ret)) {
10775             struct target_statfs64 *target_stfs;
10776 
10777             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10778                 return -TARGET_EFAULT;
10779             __put_user(stfs.f_type, &target_stfs->f_type);
10780             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10781             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10782             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10783             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10784             __put_user(stfs.f_files, &target_stfs->f_files);
10785             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10786             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10787             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10788             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10789             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10790 #ifdef _STATFS_F_FLAGS
10791             __put_user(stfs.f_flags, &target_stfs->f_flags);
10792 #else
10793             __put_user(0, &target_stfs->f_flags);
10794 #endif
10795             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10796             unlock_user_struct(target_stfs, arg3, 1);
10797         }
10798         return ret;
10799     case TARGET_NR_fstatfs64:
10800         ret = get_errno(fstatfs(arg1, &stfs));
10801         goto convert_statfs64;
10802 #endif
10803 #ifdef TARGET_NR_socketcall
10804     case TARGET_NR_socketcall:
10805         return do_socketcall(arg1, arg2);
10806 #endif
10807 #ifdef TARGET_NR_accept
10808     case TARGET_NR_accept:
10809         return do_accept4(arg1, arg2, arg3, 0);
10810 #endif
10811 #ifdef TARGET_NR_accept4
10812     case TARGET_NR_accept4:
10813         return do_accept4(arg1, arg2, arg3, arg4);
10814 #endif
10815 #ifdef TARGET_NR_bind
10816     case TARGET_NR_bind:
10817         return do_bind(arg1, arg2, arg3);
10818 #endif
10819 #ifdef TARGET_NR_connect
10820     case TARGET_NR_connect:
10821         return do_connect(arg1, arg2, arg3);
10822 #endif
10823 #ifdef TARGET_NR_getpeername
10824     case TARGET_NR_getpeername:
10825         return do_getpeername(arg1, arg2, arg3);
10826 #endif
10827 #ifdef TARGET_NR_getsockname
10828     case TARGET_NR_getsockname:
10829         return do_getsockname(arg1, arg2, arg3);
10830 #endif
10831 #ifdef TARGET_NR_getsockopt
10832     case TARGET_NR_getsockopt:
10833         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10834 #endif
10835 #ifdef TARGET_NR_listen
10836     case TARGET_NR_listen:
10837         return get_errno(listen(arg1, arg2));
10838 #endif
10839 #ifdef TARGET_NR_recv
10840     case TARGET_NR_recv:
10841         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10842 #endif
10843 #ifdef TARGET_NR_recvfrom
10844     case TARGET_NR_recvfrom:
10845         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10846 #endif
10847 #ifdef TARGET_NR_recvmsg
10848     case TARGET_NR_recvmsg:
10849         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10850 #endif
10851 #ifdef TARGET_NR_send
10852     case TARGET_NR_send:
10853         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10854 #endif
10855 #ifdef TARGET_NR_sendmsg
10856     case TARGET_NR_sendmsg:
10857         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10858 #endif
10859 #ifdef TARGET_NR_sendmmsg
10860     case TARGET_NR_sendmmsg:
10861         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10862 #endif
10863 #ifdef TARGET_NR_recvmmsg
10864     case TARGET_NR_recvmmsg:
10865         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10866 #endif
10867 #ifdef TARGET_NR_sendto
10868     case TARGET_NR_sendto:
10869         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10870 #endif
10871 #ifdef TARGET_NR_shutdown
10872     case TARGET_NR_shutdown:
10873         return get_errno(shutdown(arg1, arg2));
10874 #endif
10875 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10876     case TARGET_NR_getrandom:
10877         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10878         if (!p) {
10879             return -TARGET_EFAULT;
10880         }
10881         ret = get_errno(getrandom(p, arg2, arg3));
10882         unlock_user(p, arg1, ret);
10883         return ret;
10884 #endif
10885 #ifdef TARGET_NR_socket
10886     case TARGET_NR_socket:
10887         return do_socket(arg1, arg2, arg3);
10888 #endif
10889 #ifdef TARGET_NR_socketpair
10890     case TARGET_NR_socketpair:
10891         return do_socketpair(arg1, arg2, arg3, arg4);
10892 #endif
10893 #ifdef TARGET_NR_setsockopt
10894     case TARGET_NR_setsockopt:
10895         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10896 #endif
10897 #if defined(TARGET_NR_syslog)
10898     case TARGET_NR_syslog:
10899         {
10900             int len = arg2;
10901 
10902             switch (arg1) {
10903             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10904             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10905             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10906             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10907             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10908             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10909             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10910             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10911                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10912             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10913             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10914             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10915                 {
10916                     if (len < 0) {
10917                         return -TARGET_EINVAL;
10918                     }
10919                     if (len == 0) {
10920                         return 0;
10921                     }
10922                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10923                     if (!p) {
10924                         return -TARGET_EFAULT;
10925                     }
10926                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10927                     unlock_user(p, arg2, arg3);
10928                 }
10929                 return ret;
10930             default:
10931                 return -TARGET_EINVAL;
10932             }
10933         }
10934         break;
10935 #endif
10936     case TARGET_NR_setitimer:
10937         {
10938             struct itimerval value, ovalue, *pvalue;
10939 
10940             if (arg2) {
10941                 pvalue = &value;
10942                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10943                     || copy_from_user_timeval(&pvalue->it_value,
10944                                               arg2 + sizeof(struct target_timeval)))
10945                     return -TARGET_EFAULT;
10946             } else {
10947                 pvalue = NULL;
10948             }
10949             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10950             if (!is_error(ret) && arg3) {
10951                 if (copy_to_user_timeval(arg3,
10952                                          &ovalue.it_interval)
10953                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10954                                             &ovalue.it_value))
10955                     return -TARGET_EFAULT;
10956             }
10957         }
10958         return ret;
10959     case TARGET_NR_getitimer:
10960         {
10961             struct itimerval value;
10962 
10963             ret = get_errno(getitimer(arg1, &value));
10964             if (!is_error(ret) && arg2) {
10965                 if (copy_to_user_timeval(arg2,
10966                                          &value.it_interval)
10967                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10968                                             &value.it_value))
10969                     return -TARGET_EFAULT;
10970             }
10971         }
10972         return ret;
10973 #ifdef TARGET_NR_stat
10974     case TARGET_NR_stat:
10975         if (!(p = lock_user_string(arg1))) {
10976             return -TARGET_EFAULT;
10977         }
10978         ret = get_errno(stat(path(p), &st));
10979         unlock_user(p, arg1, 0);
10980         goto do_stat;
10981 #endif
10982 #ifdef TARGET_NR_lstat
10983     case TARGET_NR_lstat:
10984         if (!(p = lock_user_string(arg1))) {
10985             return -TARGET_EFAULT;
10986         }
10987         ret = get_errno(lstat(path(p), &st));
10988         unlock_user(p, arg1, 0);
10989         goto do_stat;
10990 #endif
10991 #ifdef TARGET_NR_fstat
10992     case TARGET_NR_fstat:
10993         {
10994             ret = get_errno(fstat(arg1, &st));
10995 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10996         do_stat:
10997 #endif
10998             if (!is_error(ret)) {
10999                 struct target_stat *target_st;
11000 
11001                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11002                     return -TARGET_EFAULT;
11003                 memset(target_st, 0, sizeof(*target_st));
11004                 __put_user(st.st_dev, &target_st->st_dev);
11005                 __put_user(st.st_ino, &target_st->st_ino);
11006                 __put_user(st.st_mode, &target_st->st_mode);
11007                 __put_user(st.st_uid, &target_st->st_uid);
11008                 __put_user(st.st_gid, &target_st->st_gid);
11009                 __put_user(st.st_nlink, &target_st->st_nlink);
11010                 __put_user(st.st_rdev, &target_st->st_rdev);
11011                 __put_user(st.st_size, &target_st->st_size);
11012                 __put_user(st.st_blksize, &target_st->st_blksize);
11013                 __put_user(st.st_blocks, &target_st->st_blocks);
11014                 __put_user(st.st_atime, &target_st->target_st_atime);
11015                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11016                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11017 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11018                 __put_user(st.st_atim.tv_nsec,
11019                            &target_st->target_st_atime_nsec);
11020                 __put_user(st.st_mtim.tv_nsec,
11021                            &target_st->target_st_mtime_nsec);
11022                 __put_user(st.st_ctim.tv_nsec,
11023                            &target_st->target_st_ctime_nsec);
11024 #endif
11025                 unlock_user_struct(target_st, arg2, 1);
11026             }
11027         }
11028         return ret;
11029 #endif
11030     case TARGET_NR_vhangup:
11031         return get_errno(vhangup());
11032 #ifdef TARGET_NR_syscall
11033     case TARGET_NR_syscall:
11034         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11035                           arg6, arg7, arg8, 0);
11036 #endif
11037 #if defined(TARGET_NR_wait4)
11038     case TARGET_NR_wait4:
11039         {
11040             int status;
11041             abi_long status_ptr = arg2;
11042             struct rusage rusage, *rusage_ptr;
11043             abi_ulong target_rusage = arg4;
11044             abi_long rusage_err;
11045             if (target_rusage)
11046                 rusage_ptr = &rusage;
11047             else
11048                 rusage_ptr = NULL;
11049             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11050             if (!is_error(ret)) {
11051                 if (status_ptr && ret) {
11052                     status = host_to_target_waitstatus(status);
11053                     if (put_user_s32(status, status_ptr))
11054                         return -TARGET_EFAULT;
11055                 }
11056                 if (target_rusage) {
11057                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11058                     if (rusage_err) {
11059                         ret = rusage_err;
11060                     }
11061                 }
11062             }
11063         }
11064         return ret;
11065 #endif
11066 #ifdef TARGET_NR_swapoff
11067     case TARGET_NR_swapoff:
11068         if (!(p = lock_user_string(arg1)))
11069             return -TARGET_EFAULT;
11070         ret = get_errno(swapoff(p));
11071         unlock_user(p, arg1, 0);
11072         return ret;
11073 #endif
11074     case TARGET_NR_sysinfo:
11075         {
11076             struct target_sysinfo *target_value;
11077             struct sysinfo value;
11078             ret = get_errno(sysinfo(&value));
11079             if (!is_error(ret) && arg1)
11080             {
11081                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11082                     return -TARGET_EFAULT;
11083                 __put_user(value.uptime, &target_value->uptime);
11084                 __put_user(value.loads[0], &target_value->loads[0]);
11085                 __put_user(value.loads[1], &target_value->loads[1]);
11086                 __put_user(value.loads[2], &target_value->loads[2]);
11087                 __put_user(value.totalram, &target_value->totalram);
11088                 __put_user(value.freeram, &target_value->freeram);
11089                 __put_user(value.sharedram, &target_value->sharedram);
11090                 __put_user(value.bufferram, &target_value->bufferram);
11091                 __put_user(value.totalswap, &target_value->totalswap);
11092                 __put_user(value.freeswap, &target_value->freeswap);
11093                 __put_user(value.procs, &target_value->procs);
11094                 __put_user(value.totalhigh, &target_value->totalhigh);
11095                 __put_user(value.freehigh, &target_value->freehigh);
11096                 __put_user(value.mem_unit, &target_value->mem_unit);
11097                 unlock_user_struct(target_value, arg1, 1);
11098             }
11099         }
11100         return ret;
11101 #ifdef TARGET_NR_ipc
11102     case TARGET_NR_ipc:
11103         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11104 #endif
11105 #ifdef TARGET_NR_semget
11106     case TARGET_NR_semget:
11107         return get_errno(semget(arg1, arg2, arg3));
11108 #endif
11109 #ifdef TARGET_NR_semop
11110     case TARGET_NR_semop:
11111         return do_semtimedop(arg1, arg2, arg3, 0, false);
11112 #endif
11113 #ifdef TARGET_NR_semtimedop
11114     case TARGET_NR_semtimedop:
11115         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11116 #endif
11117 #ifdef TARGET_NR_semtimedop_time64
11118     case TARGET_NR_semtimedop_time64:
11119         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11120 #endif
11121 #ifdef TARGET_NR_semctl
11122     case TARGET_NR_semctl:
11123         return do_semctl(arg1, arg2, arg3, arg4);
11124 #endif
11125 #ifdef TARGET_NR_msgctl
11126     case TARGET_NR_msgctl:
11127         return do_msgctl(arg1, arg2, arg3);
11128 #endif
11129 #ifdef TARGET_NR_msgget
11130     case TARGET_NR_msgget:
11131         return get_errno(msgget(arg1, arg2));
11132 #endif
11133 #ifdef TARGET_NR_msgrcv
11134     case TARGET_NR_msgrcv:
11135         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11136 #endif
11137 #ifdef TARGET_NR_msgsnd
11138     case TARGET_NR_msgsnd:
11139         return do_msgsnd(arg1, arg2, arg3, arg4);
11140 #endif
11141 #ifdef TARGET_NR_shmget
11142     case TARGET_NR_shmget:
11143         return get_errno(shmget(arg1, arg2, arg3));
11144 #endif
11145 #ifdef TARGET_NR_shmctl
11146     case TARGET_NR_shmctl:
11147         return do_shmctl(arg1, arg2, arg3);
11148 #endif
11149 #ifdef TARGET_NR_shmat
11150     case TARGET_NR_shmat:
11151         return target_shmat(cpu_env, arg1, arg2, arg3);
11152 #endif
11153 #ifdef TARGET_NR_shmdt
11154     case TARGET_NR_shmdt:
11155         return target_shmdt(arg1);
11156 #endif
11157     case TARGET_NR_fsync:
11158         return get_errno(fsync(arg1));
11159     case TARGET_NR_clone:
11160         /* Linux manages to have three different orderings for its
11161          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11162          * match the kernel's CONFIG_CLONE_* settings.
11163          * Microblaze is further special in that it uses a sixth
11164          * implicit argument to clone for the TLS pointer.
11165          */
11166 #if defined(TARGET_MICROBLAZE)
11167         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11168 #elif defined(TARGET_CLONE_BACKWARDS)
11169         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11170 #elif defined(TARGET_CLONE_BACKWARDS2)
11171         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11172 #else
11173         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11174 #endif
11175         return ret;
11176 #ifdef __NR_exit_group
11177         /* new thread calls */
11178     case TARGET_NR_exit_group:
11179         preexit_cleanup(cpu_env, arg1);
11180         return get_errno(exit_group(arg1));
11181 #endif
11182     case TARGET_NR_setdomainname:
11183         if (!(p = lock_user_string(arg1)))
11184             return -TARGET_EFAULT;
11185         ret = get_errno(setdomainname(p, arg2));
11186         unlock_user(p, arg1, 0);
11187         return ret;
11188     case TARGET_NR_uname:
11189         /* no need to transcode because we use the linux syscall */
11190         {
11191             struct new_utsname * buf;
11192 
11193             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11194                 return -TARGET_EFAULT;
11195             ret = get_errno(sys_uname(buf));
11196             if (!is_error(ret)) {
11197                 /* Overwrite the native machine name with whatever is being
11198                    emulated. */
11199                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11200                           sizeof(buf->machine));
11201                 /* Allow the user to override the reported release.  */
11202                 if (qemu_uname_release && *qemu_uname_release) {
11203                     g_strlcpy(buf->release, qemu_uname_release,
11204                               sizeof(buf->release));
11205                 }
11206             }
11207             unlock_user_struct(buf, arg1, 1);
11208         }
11209         return ret;
11210 #ifdef TARGET_I386
11211     case TARGET_NR_modify_ldt:
11212         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11213 #if !defined(TARGET_X86_64)
11214     case TARGET_NR_vm86:
11215         return do_vm86(cpu_env, arg1, arg2);
11216 #endif
11217 #endif
11218 #if defined(TARGET_NR_adjtimex)
11219     case TARGET_NR_adjtimex:
11220         {
11221             struct timex host_buf;
11222 
11223             if (target_to_host_timex(&host_buf, arg1) != 0) {
11224                 return -TARGET_EFAULT;
11225             }
11226             ret = get_errno(adjtimex(&host_buf));
11227             if (!is_error(ret)) {
11228                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11229                     return -TARGET_EFAULT;
11230                 }
11231             }
11232         }
11233         return ret;
11234 #endif
11235 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11236     case TARGET_NR_clock_adjtime:
11237         {
11238             struct timex htx;
11239 
11240             if (target_to_host_timex(&htx, arg2) != 0) {
11241                 return -TARGET_EFAULT;
11242             }
11243             ret = get_errno(clock_adjtime(arg1, &htx));
11244             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11245                 return -TARGET_EFAULT;
11246             }
11247         }
11248         return ret;
11249 #endif
11250 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11251     case TARGET_NR_clock_adjtime64:
11252         {
11253             struct timex htx;
11254 
11255             if (target_to_host_timex64(&htx, arg2) != 0) {
11256                 return -TARGET_EFAULT;
11257             }
11258             ret = get_errno(clock_adjtime(arg1, &htx));
11259             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11260                     return -TARGET_EFAULT;
11261             }
11262         }
11263         return ret;
11264 #endif
11265     case TARGET_NR_getpgid:
11266         return get_errno(getpgid(arg1));
11267     case TARGET_NR_fchdir:
11268         return get_errno(fchdir(arg1));
11269     case TARGET_NR_personality:
11270         return get_errno(personality(arg1));
11271 #ifdef TARGET_NR__llseek /* Not on alpha */
11272     case TARGET_NR__llseek:
11273         {
11274             int64_t res;
11275 #if !defined(__NR_llseek)
11276             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11277             if (res == -1) {
11278                 ret = get_errno(res);
11279             } else {
11280                 ret = 0;
11281             }
11282 #else
11283             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11284 #endif
11285             if ((ret == 0) && put_user_s64(res, arg4)) {
11286                 return -TARGET_EFAULT;
11287             }
11288         }
11289         return ret;
11290 #endif
11291 #ifdef TARGET_NR_getdents
11292     case TARGET_NR_getdents:
11293         return do_getdents(arg1, arg2, arg3);
11294 #endif /* TARGET_NR_getdents */
11295 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11296     case TARGET_NR_getdents64:
11297         return do_getdents64(arg1, arg2, arg3);
11298 #endif /* TARGET_NR_getdents64 */
11299 #if defined(TARGET_NR__newselect)
11300     case TARGET_NR__newselect:
11301         return do_select(arg1, arg2, arg3, arg4, arg5);
11302 #endif
11303 #ifdef TARGET_NR_poll
11304     case TARGET_NR_poll:
11305         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11306 #endif
11307 #ifdef TARGET_NR_ppoll
11308     case TARGET_NR_ppoll:
11309         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11310 #endif
11311 #ifdef TARGET_NR_ppoll_time64
11312     case TARGET_NR_ppoll_time64:
11313         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11314 #endif
11315     case TARGET_NR_flock:
11316         /* NOTE: the flock constant seems to be the same for every
11317            Linux platform */
11318         return get_errno(safe_flock(arg1, arg2));
11319     case TARGET_NR_readv:
11320         {
11321             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11322             if (vec != NULL) {
11323                 ret = get_errno(safe_readv(arg1, vec, arg3));
11324                 unlock_iovec(vec, arg2, arg3, 1);
11325             } else {
11326                 ret = -host_to_target_errno(errno);
11327             }
11328         }
11329         return ret;
11330     case TARGET_NR_writev:
11331         {
11332             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11333             if (vec != NULL) {
11334                 ret = get_errno(safe_writev(arg1, vec, arg3));
11335                 unlock_iovec(vec, arg2, arg3, 0);
11336             } else {
11337                 ret = -host_to_target_errno(errno);
11338             }
11339         }
11340         return ret;
11341 #if defined(TARGET_NR_preadv)
11342     case TARGET_NR_preadv:
11343         {
11344             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11345             if (vec != NULL) {
11346                 unsigned long low, high;
11347 
11348                 target_to_host_low_high(arg4, arg5, &low, &high);
11349                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11350                 unlock_iovec(vec, arg2, arg3, 1);
11351             } else {
11352                 ret = -host_to_target_errno(errno);
11353            }
11354         }
11355         return ret;
11356 #endif
11357 #if defined(TARGET_NR_pwritev)
11358     case TARGET_NR_pwritev:
11359         {
11360             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11361             if (vec != NULL) {
11362                 unsigned long low, high;
11363 
11364                 target_to_host_low_high(arg4, arg5, &low, &high);
11365                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11366                 unlock_iovec(vec, arg2, arg3, 0);
11367             } else {
11368                 ret = -host_to_target_errno(errno);
11369            }
11370         }
11371         return ret;
11372 #endif
11373     case TARGET_NR_getsid:
11374         return get_errno(getsid(arg1));
11375 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11376     case TARGET_NR_fdatasync:
11377         return get_errno(fdatasync(arg1));
11378 #endif
11379     case TARGET_NR_sched_getaffinity:
11380         {
11381             unsigned int mask_size;
11382             unsigned long *mask;
11383 
11384             /*
11385              * sched_getaffinity needs multiples of ulong, so need to take
11386              * care of mismatches between target ulong and host ulong sizes.
11387              */
11388             if (arg2 & (sizeof(abi_ulong) - 1)) {
11389                 return -TARGET_EINVAL;
11390             }
11391             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11392 
11393             mask = alloca(mask_size);
11394             memset(mask, 0, mask_size);
11395             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11396 
11397             if (!is_error(ret)) {
11398                 if (ret > arg2) {
11399                     /* More data returned than the caller's buffer will fit.
11400                      * This only happens if sizeof(abi_long) < sizeof(long)
11401                      * and the caller passed us a buffer holding an odd number
11402                      * of abi_longs. If the host kernel is actually using the
11403                      * extra 4 bytes then fail EINVAL; otherwise we can just
11404                      * ignore them and only copy the interesting part.
11405                      */
11406                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11407                     if (numcpus > arg2 * 8) {
11408                         return -TARGET_EINVAL;
11409                     }
11410                     ret = arg2;
11411                 }
11412 
11413                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11414                     return -TARGET_EFAULT;
11415                 }
11416             }
11417         }
11418         return ret;
11419     case TARGET_NR_sched_setaffinity:
11420         {
11421             unsigned int mask_size;
11422             unsigned long *mask;
11423 
11424             /*
11425              * sched_setaffinity needs multiples of ulong, so need to take
11426              * care of mismatches between target ulong and host ulong sizes.
11427              */
11428             if (arg2 & (sizeof(abi_ulong) - 1)) {
11429                 return -TARGET_EINVAL;
11430             }
11431             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11432             mask = alloca(mask_size);
11433 
11434             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11435             if (ret) {
11436                 return ret;
11437             }
11438 
11439             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11440         }
11441     case TARGET_NR_getcpu:
11442         {
11443             unsigned cpuid, node;
11444             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11445                                        arg2 ? &node : NULL,
11446                                        NULL));
11447             if (is_error(ret)) {
11448                 return ret;
11449             }
11450             if (arg1 && put_user_u32(cpuid, arg1)) {
11451                 return -TARGET_EFAULT;
11452             }
11453             if (arg2 && put_user_u32(node, arg2)) {
11454                 return -TARGET_EFAULT;
11455             }
11456         }
11457         return ret;
11458     case TARGET_NR_sched_setparam:
11459         {
11460             struct target_sched_param *target_schp;
11461             struct sched_param schp;
11462 
11463             if (arg2 == 0) {
11464                 return -TARGET_EINVAL;
11465             }
11466             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11467                 return -TARGET_EFAULT;
11468             }
11469             schp.sched_priority = tswap32(target_schp->sched_priority);
11470             unlock_user_struct(target_schp, arg2, 0);
11471             return get_errno(sys_sched_setparam(arg1, &schp));
11472         }
11473     case TARGET_NR_sched_getparam:
11474         {
11475             struct target_sched_param *target_schp;
11476             struct sched_param schp;
11477 
11478             if (arg2 == 0) {
11479                 return -TARGET_EINVAL;
11480             }
11481             ret = get_errno(sys_sched_getparam(arg1, &schp));
11482             if (!is_error(ret)) {
11483                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11484                     return -TARGET_EFAULT;
11485                 }
11486                 target_schp->sched_priority = tswap32(schp.sched_priority);
11487                 unlock_user_struct(target_schp, arg2, 1);
11488             }
11489         }
11490         return ret;
11491     case TARGET_NR_sched_setscheduler:
11492         {
11493             struct target_sched_param *target_schp;
11494             struct sched_param schp;
11495             if (arg3 == 0) {
11496                 return -TARGET_EINVAL;
11497             }
11498             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11499                 return -TARGET_EFAULT;
11500             }
11501             schp.sched_priority = tswap32(target_schp->sched_priority);
11502             unlock_user_struct(target_schp, arg3, 0);
11503             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11504         }
11505     case TARGET_NR_sched_getscheduler:
11506         return get_errno(sys_sched_getscheduler(arg1));
11507     case TARGET_NR_sched_getattr:
11508         {
11509             struct target_sched_attr *target_scha;
11510             struct sched_attr scha;
11511             if (arg2 == 0) {
11512                 return -TARGET_EINVAL;
11513             }
11514             if (arg3 > sizeof(scha)) {
11515                 arg3 = sizeof(scha);
11516             }
11517             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11518             if (!is_error(ret)) {
11519                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11520                 if (!target_scha) {
11521                     return -TARGET_EFAULT;
11522                 }
11523                 target_scha->size = tswap32(scha.size);
11524                 target_scha->sched_policy = tswap32(scha.sched_policy);
11525                 target_scha->sched_flags = tswap64(scha.sched_flags);
11526                 target_scha->sched_nice = tswap32(scha.sched_nice);
11527                 target_scha->sched_priority = tswap32(scha.sched_priority);
11528                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11529                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11530                 target_scha->sched_period = tswap64(scha.sched_period);
11531                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11532                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11533                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11534                 }
11535                 unlock_user(target_scha, arg2, arg3);
11536             }
11537             return ret;
11538         }
11539     case TARGET_NR_sched_setattr:
11540         {
11541             struct target_sched_attr *target_scha;
11542             struct sched_attr scha;
11543             uint32_t size;
11544             int zeroed;
11545             if (arg2 == 0) {
11546                 return -TARGET_EINVAL;
11547             }
11548             if (get_user_u32(size, arg2)) {
11549                 return -TARGET_EFAULT;
11550             }
11551             if (!size) {
11552                 size = offsetof(struct target_sched_attr, sched_util_min);
11553             }
11554             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11555                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11556                     return -TARGET_EFAULT;
11557                 }
11558                 return -TARGET_E2BIG;
11559             }
11560 
11561             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11562             if (zeroed < 0) {
11563                 return zeroed;
11564             } else if (zeroed == 0) {
11565                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11566                     return -TARGET_EFAULT;
11567                 }
11568                 return -TARGET_E2BIG;
11569             }
11570             if (size > sizeof(struct target_sched_attr)) {
11571                 size = sizeof(struct target_sched_attr);
11572             }
11573 
11574             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11575             if (!target_scha) {
11576                 return -TARGET_EFAULT;
11577             }
11578             scha.size = size;
11579             scha.sched_policy = tswap32(target_scha->sched_policy);
11580             scha.sched_flags = tswap64(target_scha->sched_flags);
11581             scha.sched_nice = tswap32(target_scha->sched_nice);
11582             scha.sched_priority = tswap32(target_scha->sched_priority);
11583             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11584             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11585             scha.sched_period = tswap64(target_scha->sched_period);
11586             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11587                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11588                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11589             }
11590             unlock_user(target_scha, arg2, 0);
11591             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11592         }
11593     case TARGET_NR_sched_yield:
11594         return get_errno(sched_yield());
11595     case TARGET_NR_sched_get_priority_max:
11596         return get_errno(sched_get_priority_max(arg1));
11597     case TARGET_NR_sched_get_priority_min:
11598         return get_errno(sched_get_priority_min(arg1));
11599 #ifdef TARGET_NR_sched_rr_get_interval
11600     case TARGET_NR_sched_rr_get_interval:
11601         {
11602             struct timespec ts;
11603             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11604             if (!is_error(ret)) {
11605                 ret = host_to_target_timespec(arg2, &ts);
11606             }
11607         }
11608         return ret;
11609 #endif
11610 #ifdef TARGET_NR_sched_rr_get_interval_time64
11611     case TARGET_NR_sched_rr_get_interval_time64:
11612         {
11613             struct timespec ts;
11614             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11615             if (!is_error(ret)) {
11616                 ret = host_to_target_timespec64(arg2, &ts);
11617             }
11618         }
11619         return ret;
11620 #endif
11621 #if defined(TARGET_NR_nanosleep)
11622     case TARGET_NR_nanosleep:
11623         {
11624             struct timespec req, rem;
11625             target_to_host_timespec(&req, arg1);
11626             ret = get_errno(safe_nanosleep(&req, &rem));
11627             if (is_error(ret) && arg2) {
11628                 host_to_target_timespec(arg2, &rem);
11629             }
11630         }
11631         return ret;
11632 #endif
11633     case TARGET_NR_prctl:
11634         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11635         break;
11636 #ifdef TARGET_NR_arch_prctl
11637     case TARGET_NR_arch_prctl:
11638         return do_arch_prctl(cpu_env, arg1, arg2);
11639 #endif
11640 #ifdef TARGET_NR_pread64
11641     case TARGET_NR_pread64:
11642         if (regpairs_aligned(cpu_env, num)) {
11643             arg4 = arg5;
11644             arg5 = arg6;
11645         }
11646         if (arg2 == 0 && arg3 == 0) {
11647             /* Special-case NULL buffer and zero length, which should succeed */
11648             p = 0;
11649         } else {
11650             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11651             if (!p) {
11652                 return -TARGET_EFAULT;
11653             }
11654         }
11655         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11656         unlock_user(p, arg2, ret);
11657         return ret;
11658     case TARGET_NR_pwrite64:
11659         if (regpairs_aligned(cpu_env, num)) {
11660             arg4 = arg5;
11661             arg5 = arg6;
11662         }
11663         if (arg2 == 0 && arg3 == 0) {
11664             /* Special-case NULL buffer and zero length, which should succeed */
11665             p = 0;
11666         } else {
11667             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11668             if (!p) {
11669                 return -TARGET_EFAULT;
11670             }
11671         }
11672         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11673         unlock_user(p, arg2, 0);
11674         return ret;
11675 #endif
11676     case TARGET_NR_getcwd:
11677         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11678             return -TARGET_EFAULT;
11679         ret = get_errno(sys_getcwd1(p, arg2));
11680         unlock_user(p, arg1, ret);
11681         return ret;
11682     case TARGET_NR_capget:
11683     case TARGET_NR_capset:
11684     {
11685         struct target_user_cap_header *target_header;
11686         struct target_user_cap_data *target_data = NULL;
11687         struct __user_cap_header_struct header;
11688         struct __user_cap_data_struct data[2];
11689         struct __user_cap_data_struct *dataptr = NULL;
11690         int i, target_datalen;
11691         int data_items = 1;
11692 
11693         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11694             return -TARGET_EFAULT;
11695         }
11696         header.version = tswap32(target_header->version);
11697         header.pid = tswap32(target_header->pid);
11698 
11699         if (header.version != _LINUX_CAPABILITY_VERSION) {
11700             /* Version 2 and up takes pointer to two user_data structs */
11701             data_items = 2;
11702         }
11703 
11704         target_datalen = sizeof(*target_data) * data_items;
11705 
11706         if (arg2) {
11707             if (num == TARGET_NR_capget) {
11708                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11709             } else {
11710                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11711             }
11712             if (!target_data) {
11713                 unlock_user_struct(target_header, arg1, 0);
11714                 return -TARGET_EFAULT;
11715             }
11716 
11717             if (num == TARGET_NR_capset) {
11718                 for (i = 0; i < data_items; i++) {
11719                     data[i].effective = tswap32(target_data[i].effective);
11720                     data[i].permitted = tswap32(target_data[i].permitted);
11721                     data[i].inheritable = tswap32(target_data[i].inheritable);
11722                 }
11723             }
11724 
11725             dataptr = data;
11726         }
11727 
11728         if (num == TARGET_NR_capget) {
11729             ret = get_errno(capget(&header, dataptr));
11730         } else {
11731             ret = get_errno(capset(&header, dataptr));
11732         }
11733 
11734         /* The kernel always updates version for both capget and capset */
11735         target_header->version = tswap32(header.version);
11736         unlock_user_struct(target_header, arg1, 1);
11737 
11738         if (arg2) {
11739             if (num == TARGET_NR_capget) {
11740                 for (i = 0; i < data_items; i++) {
11741                     target_data[i].effective = tswap32(data[i].effective);
11742                     target_data[i].permitted = tswap32(data[i].permitted);
11743                     target_data[i].inheritable = tswap32(data[i].inheritable);
11744                 }
11745                 unlock_user(target_data, arg2, target_datalen);
11746             } else {
11747                 unlock_user(target_data, arg2, 0);
11748             }
11749         }
11750         return ret;
11751     }
11752     case TARGET_NR_sigaltstack:
11753         return do_sigaltstack(arg1, arg2, cpu_env);
11754 
11755 #ifdef CONFIG_SENDFILE
11756 #ifdef TARGET_NR_sendfile
11757     case TARGET_NR_sendfile:
11758     {
11759         off_t *offp = NULL;
11760         off_t off;
11761         if (arg3) {
11762             ret = get_user_sal(off, arg3);
11763             if (is_error(ret)) {
11764                 return ret;
11765             }
11766             offp = &off;
11767         }
11768         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11769         if (!is_error(ret) && arg3) {
11770             abi_long ret2 = put_user_sal(off, arg3);
11771             if (is_error(ret2)) {
11772                 ret = ret2;
11773             }
11774         }
11775         return ret;
11776     }
11777 #endif
11778 #ifdef TARGET_NR_sendfile64
11779     case TARGET_NR_sendfile64:
11780     {
11781         off_t *offp = NULL;
11782         off_t off;
11783         if (arg3) {
11784             ret = get_user_s64(off, arg3);
11785             if (is_error(ret)) {
11786                 return ret;
11787             }
11788             offp = &off;
11789         }
11790         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11791         if (!is_error(ret) && arg3) {
11792             abi_long ret2 = put_user_s64(off, arg3);
11793             if (is_error(ret2)) {
11794                 ret = ret2;
11795             }
11796         }
11797         return ret;
11798     }
11799 #endif
11800 #endif
11801 #ifdef TARGET_NR_vfork
11802     case TARGET_NR_vfork:
11803         return get_errno(do_fork(cpu_env,
11804                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11805                          0, 0, 0, 0));
11806 #endif
11807 #ifdef TARGET_NR_ugetrlimit
11808     case TARGET_NR_ugetrlimit:
11809     {
11810 	struct rlimit rlim;
11811 	int resource = target_to_host_resource(arg1);
11812 	ret = get_errno(getrlimit(resource, &rlim));
11813 	if (!is_error(ret)) {
11814 	    struct target_rlimit *target_rlim;
11815             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11816                 return -TARGET_EFAULT;
11817 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11818 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11819             unlock_user_struct(target_rlim, arg2, 1);
11820 	}
11821         return ret;
11822     }
11823 #endif
11824 #ifdef TARGET_NR_truncate64
11825     case TARGET_NR_truncate64:
11826         if (!(p = lock_user_string(arg1)))
11827             return -TARGET_EFAULT;
11828 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11829         unlock_user(p, arg1, 0);
11830         return ret;
11831 #endif
11832 #ifdef TARGET_NR_ftruncate64
11833     case TARGET_NR_ftruncate64:
11834         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11835 #endif
11836 #ifdef TARGET_NR_stat64
11837     case TARGET_NR_stat64:
11838         if (!(p = lock_user_string(arg1))) {
11839             return -TARGET_EFAULT;
11840         }
11841         ret = get_errno(stat(path(p), &st));
11842         unlock_user(p, arg1, 0);
11843         if (!is_error(ret))
11844             ret = host_to_target_stat64(cpu_env, arg2, &st);
11845         return ret;
11846 #endif
11847 #ifdef TARGET_NR_lstat64
11848     case TARGET_NR_lstat64:
11849         if (!(p = lock_user_string(arg1))) {
11850             return -TARGET_EFAULT;
11851         }
11852         ret = get_errno(lstat(path(p), &st));
11853         unlock_user(p, arg1, 0);
11854         if (!is_error(ret))
11855             ret = host_to_target_stat64(cpu_env, arg2, &st);
11856         return ret;
11857 #endif
11858 #ifdef TARGET_NR_fstat64
11859     case TARGET_NR_fstat64:
11860         ret = get_errno(fstat(arg1, &st));
11861         if (!is_error(ret))
11862             ret = host_to_target_stat64(cpu_env, arg2, &st);
11863         return ret;
11864 #endif
11865 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11866 #ifdef TARGET_NR_fstatat64
11867     case TARGET_NR_fstatat64:
11868 #endif
11869 #ifdef TARGET_NR_newfstatat
11870     case TARGET_NR_newfstatat:
11871 #endif
11872         if (!(p = lock_user_string(arg2))) {
11873             return -TARGET_EFAULT;
11874         }
11875         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11876         unlock_user(p, arg2, 0);
11877         if (!is_error(ret))
11878             ret = host_to_target_stat64(cpu_env, arg3, &st);
11879         return ret;
11880 #endif
11881 #if defined(TARGET_NR_statx)
11882     case TARGET_NR_statx:
11883         {
11884             struct target_statx *target_stx;
11885             int dirfd = arg1;
11886             int flags = arg3;
11887 
11888             p = lock_user_string(arg2);
11889             if (p == NULL) {
11890                 return -TARGET_EFAULT;
11891             }
11892 #if defined(__NR_statx)
11893             {
11894                 /*
11895                  * It is assumed that struct statx is architecture independent.
11896                  */
11897                 struct target_statx host_stx;
11898                 int mask = arg4;
11899 
11900                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11901                 if (!is_error(ret)) {
11902                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11903                         unlock_user(p, arg2, 0);
11904                         return -TARGET_EFAULT;
11905                     }
11906                 }
11907 
11908                 if (ret != -TARGET_ENOSYS) {
11909                     unlock_user(p, arg2, 0);
11910                     return ret;
11911                 }
11912             }
11913 #endif
11914             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11915             unlock_user(p, arg2, 0);
11916 
11917             if (!is_error(ret)) {
11918                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11919                     return -TARGET_EFAULT;
11920                 }
11921                 memset(target_stx, 0, sizeof(*target_stx));
11922                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11923                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11924                 __put_user(st.st_ino, &target_stx->stx_ino);
11925                 __put_user(st.st_mode, &target_stx->stx_mode);
11926                 __put_user(st.st_uid, &target_stx->stx_uid);
11927                 __put_user(st.st_gid, &target_stx->stx_gid);
11928                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11929                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11930                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11931                 __put_user(st.st_size, &target_stx->stx_size);
11932                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11933                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11934                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11935                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11936                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11937                 unlock_user_struct(target_stx, arg5, 1);
11938             }
11939         }
11940         return ret;
11941 #endif
11942 #ifdef TARGET_NR_lchown
11943     case TARGET_NR_lchown:
11944         if (!(p = lock_user_string(arg1)))
11945             return -TARGET_EFAULT;
11946         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11947         unlock_user(p, arg1, 0);
11948         return ret;
11949 #endif
11950 #ifdef TARGET_NR_getuid
11951     case TARGET_NR_getuid:
11952         return get_errno(high2lowuid(getuid()));
11953 #endif
11954 #ifdef TARGET_NR_getgid
11955     case TARGET_NR_getgid:
11956         return get_errno(high2lowgid(getgid()));
11957 #endif
11958 #ifdef TARGET_NR_geteuid
11959     case TARGET_NR_geteuid:
11960         return get_errno(high2lowuid(geteuid()));
11961 #endif
11962 #ifdef TARGET_NR_getegid
11963     case TARGET_NR_getegid:
11964         return get_errno(high2lowgid(getegid()));
11965 #endif
11966     case TARGET_NR_setreuid:
11967         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11968     case TARGET_NR_setregid:
11969         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11970     case TARGET_NR_getgroups:
11971         { /* the same code as for TARGET_NR_getgroups32 */
11972             int gidsetsize = arg1;
11973             target_id *target_grouplist;
11974             g_autofree gid_t *grouplist = NULL;
11975             int i;
11976 
11977             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11978                 return -TARGET_EINVAL;
11979             }
11980             if (gidsetsize > 0) {
11981                 grouplist = g_try_new(gid_t, gidsetsize);
11982                 if (!grouplist) {
11983                     return -TARGET_ENOMEM;
11984                 }
11985             }
11986             ret = get_errno(getgroups(gidsetsize, grouplist));
11987             if (!is_error(ret) && gidsetsize > 0) {
11988                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11989                                              gidsetsize * sizeof(target_id), 0);
11990                 if (!target_grouplist) {
11991                     return -TARGET_EFAULT;
11992                 }
11993                 for (i = 0; i < ret; i++) {
11994                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11995                 }
11996                 unlock_user(target_grouplist, arg2,
11997                             gidsetsize * sizeof(target_id));
11998             }
11999             return ret;
12000         }
12001     case TARGET_NR_setgroups:
12002         { /* the same code as for TARGET_NR_setgroups32 */
12003             int gidsetsize = arg1;
12004             target_id *target_grouplist;
12005             g_autofree gid_t *grouplist = NULL;
12006             int i;
12007 
12008             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12009                 return -TARGET_EINVAL;
12010             }
12011             if (gidsetsize > 0) {
12012                 grouplist = g_try_new(gid_t, gidsetsize);
12013                 if (!grouplist) {
12014                     return -TARGET_ENOMEM;
12015                 }
12016                 target_grouplist = lock_user(VERIFY_READ, arg2,
12017                                              gidsetsize * sizeof(target_id), 1);
12018                 if (!target_grouplist) {
12019                     return -TARGET_EFAULT;
12020                 }
12021                 for (i = 0; i < gidsetsize; i++) {
12022                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12023                 }
12024                 unlock_user(target_grouplist, arg2,
12025                             gidsetsize * sizeof(target_id));
12026             }
12027             return get_errno(sys_setgroups(gidsetsize, grouplist));
12028         }
12029     case TARGET_NR_fchown:
12030         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12031 #if defined(TARGET_NR_fchownat)
12032     case TARGET_NR_fchownat:
12033         if (!(p = lock_user_string(arg2)))
12034             return -TARGET_EFAULT;
12035         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12036                                  low2highgid(arg4), arg5));
12037         unlock_user(p, arg2, 0);
12038         return ret;
12039 #endif
12040 #ifdef TARGET_NR_setresuid
12041     case TARGET_NR_setresuid:
12042         return get_errno(sys_setresuid(low2highuid(arg1),
12043                                        low2highuid(arg2),
12044                                        low2highuid(arg3)));
12045 #endif
12046 #ifdef TARGET_NR_getresuid
12047     case TARGET_NR_getresuid:
12048         {
12049             uid_t ruid, euid, suid;
12050             ret = get_errno(getresuid(&ruid, &euid, &suid));
12051             if (!is_error(ret)) {
12052                 if (put_user_id(high2lowuid(ruid), arg1)
12053                     || put_user_id(high2lowuid(euid), arg2)
12054                     || put_user_id(high2lowuid(suid), arg3))
12055                     return -TARGET_EFAULT;
12056             }
12057         }
12058         return ret;
12059 #endif
12060 #ifdef TARGET_NR_getresgid
12061     case TARGET_NR_setresgid:
12062         return get_errno(sys_setresgid(low2highgid(arg1),
12063                                        low2highgid(arg2),
12064                                        low2highgid(arg3)));
12065 #endif
12066 #ifdef TARGET_NR_getresgid
12067     case TARGET_NR_getresgid:
12068         {
12069             gid_t rgid, egid, sgid;
12070             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12071             if (!is_error(ret)) {
12072                 if (put_user_id(high2lowgid(rgid), arg1)
12073                     || put_user_id(high2lowgid(egid), arg2)
12074                     || put_user_id(high2lowgid(sgid), arg3))
12075                     return -TARGET_EFAULT;
12076             }
12077         }
12078         return ret;
12079 #endif
12080 #ifdef TARGET_NR_chown
12081     case TARGET_NR_chown:
12082         if (!(p = lock_user_string(arg1)))
12083             return -TARGET_EFAULT;
12084         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12085         unlock_user(p, arg1, 0);
12086         return ret;
12087 #endif
12088     case TARGET_NR_setuid:
12089         return get_errno(sys_setuid(low2highuid(arg1)));
12090     case TARGET_NR_setgid:
12091         return get_errno(sys_setgid(low2highgid(arg1)));
12092     case TARGET_NR_setfsuid:
12093         return get_errno(setfsuid(arg1));
12094     case TARGET_NR_setfsgid:
12095         return get_errno(setfsgid(arg1));
12096 
12097 #ifdef TARGET_NR_lchown32
12098     case TARGET_NR_lchown32:
12099         if (!(p = lock_user_string(arg1)))
12100             return -TARGET_EFAULT;
12101         ret = get_errno(lchown(p, arg2, arg3));
12102         unlock_user(p, arg1, 0);
12103         return ret;
12104 #endif
12105 #ifdef TARGET_NR_getuid32
12106     case TARGET_NR_getuid32:
12107         return get_errno(getuid());
12108 #endif
12109 
12110 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12111    /* Alpha specific */
12112     case TARGET_NR_getxuid:
12113          {
12114             uid_t euid;
12115             euid=geteuid();
12116             cpu_env->ir[IR_A4]=euid;
12117          }
12118         return get_errno(getuid());
12119 #endif
12120 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12121    /* Alpha specific */
12122     case TARGET_NR_getxgid:
12123          {
12124             uid_t egid;
12125             egid=getegid();
12126             cpu_env->ir[IR_A4]=egid;
12127          }
12128         return get_errno(getgid());
12129 #endif
12130 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12131     /* Alpha specific */
12132     case TARGET_NR_osf_getsysinfo:
12133         ret = -TARGET_EOPNOTSUPP;
12134         switch (arg1) {
12135           case TARGET_GSI_IEEE_FP_CONTROL:
12136             {
12137                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12138                 uint64_t swcr = cpu_env->swcr;
12139 
12140                 swcr &= ~SWCR_STATUS_MASK;
12141                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12142 
12143                 if (put_user_u64 (swcr, arg2))
12144                         return -TARGET_EFAULT;
12145                 ret = 0;
12146             }
12147             break;
12148 
12149           /* case GSI_IEEE_STATE_AT_SIGNAL:
12150              -- Not implemented in linux kernel.
12151              case GSI_UACPROC:
12152              -- Retrieves current unaligned access state; not much used.
12153              case GSI_PROC_TYPE:
12154              -- Retrieves implver information; surely not used.
12155              case GSI_GET_HWRPB:
12156              -- Grabs a copy of the HWRPB; surely not used.
12157           */
12158         }
12159         return ret;
12160 #endif
12161 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12162     /* Alpha specific */
12163     case TARGET_NR_osf_setsysinfo:
12164         ret = -TARGET_EOPNOTSUPP;
12165         switch (arg1) {
12166           case TARGET_SSI_IEEE_FP_CONTROL:
12167             {
12168                 uint64_t swcr, fpcr;
12169 
12170                 if (get_user_u64 (swcr, arg2)) {
12171                     return -TARGET_EFAULT;
12172                 }
12173 
12174                 /*
12175                  * The kernel calls swcr_update_status to update the
12176                  * status bits from the fpcr at every point that it
12177                  * could be queried.  Therefore, we store the status
12178                  * bits only in FPCR.
12179                  */
12180                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12181 
12182                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12183                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12184                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12185                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12186                 ret = 0;
12187             }
12188             break;
12189 
12190           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12191             {
12192                 uint64_t exc, fpcr, fex;
12193 
12194                 if (get_user_u64(exc, arg2)) {
12195                     return -TARGET_EFAULT;
12196                 }
12197                 exc &= SWCR_STATUS_MASK;
12198                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12199 
12200                 /* Old exceptions are not signaled.  */
12201                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12202                 fex = exc & ~fex;
12203                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12204                 fex &= (cpu_env)->swcr;
12205 
12206                 /* Update the hardware fpcr.  */
12207                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12208                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12209 
12210                 if (fex) {
12211                     int si_code = TARGET_FPE_FLTUNK;
12212                     target_siginfo_t info;
12213 
12214                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12215                         si_code = TARGET_FPE_FLTUND;
12216                     }
12217                     if (fex & SWCR_TRAP_ENABLE_INE) {
12218                         si_code = TARGET_FPE_FLTRES;
12219                     }
12220                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12221                         si_code = TARGET_FPE_FLTUND;
12222                     }
12223                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12224                         si_code = TARGET_FPE_FLTOVF;
12225                     }
12226                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12227                         si_code = TARGET_FPE_FLTDIV;
12228                     }
12229                     if (fex & SWCR_TRAP_ENABLE_INV) {
12230                         si_code = TARGET_FPE_FLTINV;
12231                     }
12232 
12233                     info.si_signo = SIGFPE;
12234                     info.si_errno = 0;
12235                     info.si_code = si_code;
12236                     info._sifields._sigfault._addr = (cpu_env)->pc;
12237                     queue_signal(cpu_env, info.si_signo,
12238                                  QEMU_SI_FAULT, &info);
12239                 }
12240                 ret = 0;
12241             }
12242             break;
12243 
12244           /* case SSI_NVPAIRS:
12245              -- Used with SSIN_UACPROC to enable unaligned accesses.
12246              case SSI_IEEE_STATE_AT_SIGNAL:
12247              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12248              -- Not implemented in linux kernel
12249           */
12250         }
12251         return ret;
12252 #endif
12253 #ifdef TARGET_NR_osf_sigprocmask
12254     /* Alpha specific.  */
12255     case TARGET_NR_osf_sigprocmask:
12256         {
12257             abi_ulong mask;
12258             int how;
12259             sigset_t set, oldset;
12260 
12261             switch(arg1) {
12262             case TARGET_SIG_BLOCK:
12263                 how = SIG_BLOCK;
12264                 break;
12265             case TARGET_SIG_UNBLOCK:
12266                 how = SIG_UNBLOCK;
12267                 break;
12268             case TARGET_SIG_SETMASK:
12269                 how = SIG_SETMASK;
12270                 break;
12271             default:
12272                 return -TARGET_EINVAL;
12273             }
12274             mask = arg2;
12275             target_to_host_old_sigset(&set, &mask);
12276             ret = do_sigprocmask(how, &set, &oldset);
12277             if (!ret) {
12278                 host_to_target_old_sigset(&mask, &oldset);
12279                 ret = mask;
12280             }
12281         }
12282         return ret;
12283 #endif
12284 
12285 #ifdef TARGET_NR_getgid32
12286     case TARGET_NR_getgid32:
12287         return get_errno(getgid());
12288 #endif
12289 #ifdef TARGET_NR_geteuid32
12290     case TARGET_NR_geteuid32:
12291         return get_errno(geteuid());
12292 #endif
12293 #ifdef TARGET_NR_getegid32
12294     case TARGET_NR_getegid32:
12295         return get_errno(getegid());
12296 #endif
12297 #ifdef TARGET_NR_setreuid32
12298     case TARGET_NR_setreuid32:
12299         return get_errno(sys_setreuid(arg1, arg2));
12300 #endif
12301 #ifdef TARGET_NR_setregid32
12302     case TARGET_NR_setregid32:
12303         return get_errno(sys_setregid(arg1, arg2));
12304 #endif
12305 #ifdef TARGET_NR_getgroups32
12306     case TARGET_NR_getgroups32:
12307         { /* the same code as for TARGET_NR_getgroups */
12308             int gidsetsize = arg1;
12309             uint32_t *target_grouplist;
12310             g_autofree gid_t *grouplist = NULL;
12311             int i;
12312 
12313             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12314                 return -TARGET_EINVAL;
12315             }
12316             if (gidsetsize > 0) {
12317                 grouplist = g_try_new(gid_t, gidsetsize);
12318                 if (!grouplist) {
12319                     return -TARGET_ENOMEM;
12320                 }
12321             }
12322             ret = get_errno(getgroups(gidsetsize, grouplist));
12323             if (!is_error(ret) && gidsetsize > 0) {
12324                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12325                                              gidsetsize * 4, 0);
12326                 if (!target_grouplist) {
12327                     return -TARGET_EFAULT;
12328                 }
12329                 for (i = 0; i < ret; i++) {
12330                     target_grouplist[i] = tswap32(grouplist[i]);
12331                 }
12332                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12333             }
12334             return ret;
12335         }
12336 #endif
12337 #ifdef TARGET_NR_setgroups32
12338     case TARGET_NR_setgroups32:
12339         { /* the same code as for TARGET_NR_setgroups */
12340             int gidsetsize = arg1;
12341             uint32_t *target_grouplist;
12342             g_autofree gid_t *grouplist = NULL;
12343             int i;
12344 
12345             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12346                 return -TARGET_EINVAL;
12347             }
12348             if (gidsetsize > 0) {
12349                 grouplist = g_try_new(gid_t, gidsetsize);
12350                 if (!grouplist) {
12351                     return -TARGET_ENOMEM;
12352                 }
12353                 target_grouplist = lock_user(VERIFY_READ, arg2,
12354                                              gidsetsize * 4, 1);
12355                 if (!target_grouplist) {
12356                     return -TARGET_EFAULT;
12357                 }
12358                 for (i = 0; i < gidsetsize; i++) {
12359                     grouplist[i] = tswap32(target_grouplist[i]);
12360                 }
12361                 unlock_user(target_grouplist, arg2, 0);
12362             }
12363             return get_errno(sys_setgroups(gidsetsize, grouplist));
12364         }
12365 #endif
12366 #ifdef TARGET_NR_fchown32
12367     case TARGET_NR_fchown32:
12368         return get_errno(fchown(arg1, arg2, arg3));
12369 #endif
12370 #ifdef TARGET_NR_setresuid32
12371     case TARGET_NR_setresuid32:
12372         return get_errno(sys_setresuid(arg1, arg2, arg3));
12373 #endif
12374 #ifdef TARGET_NR_getresuid32
12375     case TARGET_NR_getresuid32:
12376         {
12377             uid_t ruid, euid, suid;
12378             ret = get_errno(getresuid(&ruid, &euid, &suid));
12379             if (!is_error(ret)) {
12380                 if (put_user_u32(ruid, arg1)
12381                     || put_user_u32(euid, arg2)
12382                     || put_user_u32(suid, arg3))
12383                     return -TARGET_EFAULT;
12384             }
12385         }
12386         return ret;
12387 #endif
12388 #ifdef TARGET_NR_setresgid32
12389     case TARGET_NR_setresgid32:
12390         return get_errno(sys_setresgid(arg1, arg2, arg3));
12391 #endif
12392 #ifdef TARGET_NR_getresgid32
12393     case TARGET_NR_getresgid32:
12394         {
12395             gid_t rgid, egid, sgid;
12396             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12397             if (!is_error(ret)) {
12398                 if (put_user_u32(rgid, arg1)
12399                     || put_user_u32(egid, arg2)
12400                     || put_user_u32(sgid, arg3))
12401                     return -TARGET_EFAULT;
12402             }
12403         }
12404         return ret;
12405 #endif
12406 #ifdef TARGET_NR_chown32
12407     case TARGET_NR_chown32:
12408         if (!(p = lock_user_string(arg1)))
12409             return -TARGET_EFAULT;
12410         ret = get_errno(chown(p, arg2, arg3));
12411         unlock_user(p, arg1, 0);
12412         return ret;
12413 #endif
12414 #ifdef TARGET_NR_setuid32
12415     case TARGET_NR_setuid32:
12416         return get_errno(sys_setuid(arg1));
12417 #endif
12418 #ifdef TARGET_NR_setgid32
12419     case TARGET_NR_setgid32:
12420         return get_errno(sys_setgid(arg1));
12421 #endif
12422 #ifdef TARGET_NR_setfsuid32
12423     case TARGET_NR_setfsuid32:
12424         return get_errno(setfsuid(arg1));
12425 #endif
12426 #ifdef TARGET_NR_setfsgid32
12427     case TARGET_NR_setfsgid32:
12428         return get_errno(setfsgid(arg1));
12429 #endif
12430 #ifdef TARGET_NR_mincore
12431     case TARGET_NR_mincore:
12432         {
12433             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12434             if (!a) {
12435                 return -TARGET_ENOMEM;
12436             }
12437             p = lock_user_string(arg3);
12438             if (!p) {
12439                 ret = -TARGET_EFAULT;
12440             } else {
12441                 ret = get_errno(mincore(a, arg2, p));
12442                 unlock_user(p, arg3, ret);
12443             }
12444             unlock_user(a, arg1, 0);
12445         }
12446         return ret;
12447 #endif
12448 #ifdef TARGET_NR_arm_fadvise64_64
12449     case TARGET_NR_arm_fadvise64_64:
12450         /* arm_fadvise64_64 looks like fadvise64_64 but
12451          * with different argument order: fd, advice, offset, len
12452          * rather than the usual fd, offset, len, advice.
12453          * Note that offset and len are both 64-bit so appear as
12454          * pairs of 32-bit registers.
12455          */
12456         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12457                             target_offset64(arg5, arg6), arg2);
12458         return -host_to_target_errno(ret);
12459 #endif
12460 
12461 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12462 
12463 #ifdef TARGET_NR_fadvise64_64
12464     case TARGET_NR_fadvise64_64:
12465 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12466         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12467         ret = arg2;
12468         arg2 = arg3;
12469         arg3 = arg4;
12470         arg4 = arg5;
12471         arg5 = arg6;
12472         arg6 = ret;
12473 #else
12474         /* 6 args: fd, offset (high, low), len (high, low), advice */
12475         if (regpairs_aligned(cpu_env, num)) {
12476             /* offset is in (3,4), len in (5,6) and advice in 7 */
12477             arg2 = arg3;
12478             arg3 = arg4;
12479             arg4 = arg5;
12480             arg5 = arg6;
12481             arg6 = arg7;
12482         }
12483 #endif
12484         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12485                             target_offset64(arg4, arg5), arg6);
12486         return -host_to_target_errno(ret);
12487 #endif
12488 
12489 #ifdef TARGET_NR_fadvise64
12490     case TARGET_NR_fadvise64:
12491         /* 5 args: fd, offset (high, low), len, advice */
12492         if (regpairs_aligned(cpu_env, num)) {
12493             /* offset is in (3,4), len in 5 and advice in 6 */
12494             arg2 = arg3;
12495             arg3 = arg4;
12496             arg4 = arg5;
12497             arg5 = arg6;
12498         }
12499         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12500         return -host_to_target_errno(ret);
12501 #endif
12502 
12503 #else /* not a 32-bit ABI */
12504 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12505 #ifdef TARGET_NR_fadvise64_64
12506     case TARGET_NR_fadvise64_64:
12507 #endif
12508 #ifdef TARGET_NR_fadvise64
12509     case TARGET_NR_fadvise64:
12510 #endif
12511 #ifdef TARGET_S390X
12512         switch (arg4) {
12513         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12514         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12515         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12516         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12517         default: break;
12518         }
12519 #endif
12520         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12521 #endif
12522 #endif /* end of 64-bit ABI fadvise handling */
12523 
12524 #ifdef TARGET_NR_madvise
12525     case TARGET_NR_madvise:
12526         return target_madvise(arg1, arg2, arg3);
12527 #endif
12528 #ifdef TARGET_NR_fcntl64
12529     case TARGET_NR_fcntl64:
12530     {
12531         int cmd;
12532         struct flock fl;
12533         from_flock64_fn *copyfrom = copy_from_user_flock64;
12534         to_flock64_fn *copyto = copy_to_user_flock64;
12535 
12536 #ifdef TARGET_ARM
12537         if (!cpu_env->eabi) {
12538             copyfrom = copy_from_user_oabi_flock64;
12539             copyto = copy_to_user_oabi_flock64;
12540         }
12541 #endif
12542 
12543         cmd = target_to_host_fcntl_cmd(arg2);
12544         if (cmd == -TARGET_EINVAL) {
12545             return cmd;
12546         }
12547 
12548         switch(arg2) {
12549         case TARGET_F_GETLK64:
12550             ret = copyfrom(&fl, arg3);
12551             if (ret) {
12552                 break;
12553             }
12554             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12555             if (ret == 0) {
12556                 ret = copyto(arg3, &fl);
12557             }
12558 	    break;
12559 
12560         case TARGET_F_SETLK64:
12561         case TARGET_F_SETLKW64:
12562             ret = copyfrom(&fl, arg3);
12563             if (ret) {
12564                 break;
12565             }
12566             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12567 	    break;
12568         default:
12569             ret = do_fcntl(arg1, arg2, arg3);
12570             break;
12571         }
12572         return ret;
12573     }
12574 #endif
12575 #ifdef TARGET_NR_cacheflush
12576     case TARGET_NR_cacheflush:
12577         /* self-modifying code is handled automatically, so nothing needed */
12578         return 0;
12579 #endif
12580 #ifdef TARGET_NR_getpagesize
12581     case TARGET_NR_getpagesize:
12582         return TARGET_PAGE_SIZE;
12583 #endif
12584     case TARGET_NR_gettid:
12585         return get_errno(sys_gettid());
12586 #ifdef TARGET_NR_readahead
12587     case TARGET_NR_readahead:
12588 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12589         if (regpairs_aligned(cpu_env, num)) {
12590             arg2 = arg3;
12591             arg3 = arg4;
12592             arg4 = arg5;
12593         }
12594         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12595 #else
12596         ret = get_errno(readahead(arg1, arg2, arg3));
12597 #endif
12598         return ret;
12599 #endif
12600 #ifdef CONFIG_ATTR
12601 #ifdef TARGET_NR_setxattr
12602     case TARGET_NR_listxattr:
12603     case TARGET_NR_llistxattr:
12604     {
12605         void *b = 0;
12606         if (arg2) {
12607             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12608             if (!b) {
12609                 return -TARGET_EFAULT;
12610             }
12611         }
12612         p = lock_user_string(arg1);
12613         if (p) {
12614             if (num == TARGET_NR_listxattr) {
12615                 ret = get_errno(listxattr(p, b, arg3));
12616             } else {
12617                 ret = get_errno(llistxattr(p, b, arg3));
12618             }
12619         } else {
12620             ret = -TARGET_EFAULT;
12621         }
12622         unlock_user(p, arg1, 0);
12623         unlock_user(b, arg2, arg3);
12624         return ret;
12625     }
12626     case TARGET_NR_flistxattr:
12627     {
12628         void *b = 0;
12629         if (arg2) {
12630             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12631             if (!b) {
12632                 return -TARGET_EFAULT;
12633             }
12634         }
12635         ret = get_errno(flistxattr(arg1, b, arg3));
12636         unlock_user(b, arg2, arg3);
12637         return ret;
12638     }
12639     case TARGET_NR_setxattr:
12640     case TARGET_NR_lsetxattr:
12641         {
12642             void *n, *v = 0;
12643             if (arg3) {
12644                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12645                 if (!v) {
12646                     return -TARGET_EFAULT;
12647                 }
12648             }
12649             p = lock_user_string(arg1);
12650             n = lock_user_string(arg2);
12651             if (p && n) {
12652                 if (num == TARGET_NR_setxattr) {
12653                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12654                 } else {
12655                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12656                 }
12657             } else {
12658                 ret = -TARGET_EFAULT;
12659             }
12660             unlock_user(p, arg1, 0);
12661             unlock_user(n, arg2, 0);
12662             unlock_user(v, arg3, 0);
12663         }
12664         return ret;
12665     case TARGET_NR_fsetxattr:
12666         {
12667             void *n, *v = 0;
12668             if (arg3) {
12669                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12670                 if (!v) {
12671                     return -TARGET_EFAULT;
12672                 }
12673             }
12674             n = lock_user_string(arg2);
12675             if (n) {
12676                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12677             } else {
12678                 ret = -TARGET_EFAULT;
12679             }
12680             unlock_user(n, arg2, 0);
12681             unlock_user(v, arg3, 0);
12682         }
12683         return ret;
12684     case TARGET_NR_getxattr:
12685     case TARGET_NR_lgetxattr:
12686         {
12687             void *n, *v = 0;
12688             if (arg3) {
12689                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12690                 if (!v) {
12691                     return -TARGET_EFAULT;
12692                 }
12693             }
12694             p = lock_user_string(arg1);
12695             n = lock_user_string(arg2);
12696             if (p && n) {
12697                 if (num == TARGET_NR_getxattr) {
12698                     ret = get_errno(getxattr(p, n, v, arg4));
12699                 } else {
12700                     ret = get_errno(lgetxattr(p, n, v, arg4));
12701                 }
12702             } else {
12703                 ret = -TARGET_EFAULT;
12704             }
12705             unlock_user(p, arg1, 0);
12706             unlock_user(n, arg2, 0);
12707             unlock_user(v, arg3, arg4);
12708         }
12709         return ret;
12710     case TARGET_NR_fgetxattr:
12711         {
12712             void *n, *v = 0;
12713             if (arg3) {
12714                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12715                 if (!v) {
12716                     return -TARGET_EFAULT;
12717                 }
12718             }
12719             n = lock_user_string(arg2);
12720             if (n) {
12721                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12722             } else {
12723                 ret = -TARGET_EFAULT;
12724             }
12725             unlock_user(n, arg2, 0);
12726             unlock_user(v, arg3, arg4);
12727         }
12728         return ret;
12729     case TARGET_NR_removexattr:
12730     case TARGET_NR_lremovexattr:
12731         {
12732             void *n;
12733             p = lock_user_string(arg1);
12734             n = lock_user_string(arg2);
12735             if (p && n) {
12736                 if (num == TARGET_NR_removexattr) {
12737                     ret = get_errno(removexattr(p, n));
12738                 } else {
12739                     ret = get_errno(lremovexattr(p, n));
12740                 }
12741             } else {
12742                 ret = -TARGET_EFAULT;
12743             }
12744             unlock_user(p, arg1, 0);
12745             unlock_user(n, arg2, 0);
12746         }
12747         return ret;
12748     case TARGET_NR_fremovexattr:
12749         {
12750             void *n;
12751             n = lock_user_string(arg2);
12752             if (n) {
12753                 ret = get_errno(fremovexattr(arg1, n));
12754             } else {
12755                 ret = -TARGET_EFAULT;
12756             }
12757             unlock_user(n, arg2, 0);
12758         }
12759         return ret;
12760 #endif
12761 #endif /* CONFIG_ATTR */
12762 #ifdef TARGET_NR_set_thread_area
12763     case TARGET_NR_set_thread_area:
12764 #if defined(TARGET_MIPS)
12765       cpu_env->active_tc.CP0_UserLocal = arg1;
12766       return 0;
12767 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12768       return do_set_thread_area(cpu_env, arg1);
12769 #elif defined(TARGET_M68K)
12770       {
12771           TaskState *ts = get_task_state(cpu);
12772           ts->tp_value = arg1;
12773           return 0;
12774       }
12775 #else
12776       return -TARGET_ENOSYS;
12777 #endif
12778 #endif
12779 #ifdef TARGET_NR_get_thread_area
12780     case TARGET_NR_get_thread_area:
12781 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12782         return do_get_thread_area(cpu_env, arg1);
12783 #elif defined(TARGET_M68K)
12784         {
12785             TaskState *ts = get_task_state(cpu);
12786             return ts->tp_value;
12787         }
12788 #else
12789         return -TARGET_ENOSYS;
12790 #endif
12791 #endif
12792 #ifdef TARGET_NR_getdomainname
12793     case TARGET_NR_getdomainname:
12794         return -TARGET_ENOSYS;
12795 #endif
12796 
12797 #ifdef TARGET_NR_clock_settime
12798     case TARGET_NR_clock_settime:
12799     {
12800         struct timespec ts;
12801 
12802         ret = target_to_host_timespec(&ts, arg2);
12803         if (!is_error(ret)) {
12804             ret = get_errno(clock_settime(arg1, &ts));
12805         }
12806         return ret;
12807     }
12808 #endif
12809 #ifdef TARGET_NR_clock_settime64
12810     case TARGET_NR_clock_settime64:
12811     {
12812         struct timespec ts;
12813 
12814         ret = target_to_host_timespec64(&ts, arg2);
12815         if (!is_error(ret)) {
12816             ret = get_errno(clock_settime(arg1, &ts));
12817         }
12818         return ret;
12819     }
12820 #endif
12821 #ifdef TARGET_NR_clock_gettime
12822     case TARGET_NR_clock_gettime:
12823     {
12824         struct timespec ts;
12825         ret = get_errno(clock_gettime(arg1, &ts));
12826         if (!is_error(ret)) {
12827             ret = host_to_target_timespec(arg2, &ts);
12828         }
12829         return ret;
12830     }
12831 #endif
12832 #ifdef TARGET_NR_clock_gettime64
12833     case TARGET_NR_clock_gettime64:
12834     {
12835         struct timespec ts;
12836         ret = get_errno(clock_gettime(arg1, &ts));
12837         if (!is_error(ret)) {
12838             ret = host_to_target_timespec64(arg2, &ts);
12839         }
12840         return ret;
12841     }
12842 #endif
12843 #ifdef TARGET_NR_clock_getres
12844     case TARGET_NR_clock_getres:
12845     {
12846         struct timespec ts;
12847         ret = get_errno(clock_getres(arg1, &ts));
12848         if (!is_error(ret)) {
12849             host_to_target_timespec(arg2, &ts);
12850         }
12851         return ret;
12852     }
12853 #endif
12854 #ifdef TARGET_NR_clock_getres_time64
12855     case TARGET_NR_clock_getres_time64:
12856     {
12857         struct timespec ts;
12858         ret = get_errno(clock_getres(arg1, &ts));
12859         if (!is_error(ret)) {
12860             host_to_target_timespec64(arg2, &ts);
12861         }
12862         return ret;
12863     }
12864 #endif
12865 #ifdef TARGET_NR_clock_nanosleep
12866     case TARGET_NR_clock_nanosleep:
12867     {
12868         struct timespec ts;
12869         if (target_to_host_timespec(&ts, arg3)) {
12870             return -TARGET_EFAULT;
12871         }
12872         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12873                                              &ts, arg4 ? &ts : NULL));
12874         /*
12875          * if the call is interrupted by a signal handler, it fails
12876          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12877          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12878          */
12879         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12880             host_to_target_timespec(arg4, &ts)) {
12881               return -TARGET_EFAULT;
12882         }
12883 
12884         return ret;
12885     }
12886 #endif
12887 #ifdef TARGET_NR_clock_nanosleep_time64
12888     case TARGET_NR_clock_nanosleep_time64:
12889     {
12890         struct timespec ts;
12891 
12892         if (target_to_host_timespec64(&ts, arg3)) {
12893             return -TARGET_EFAULT;
12894         }
12895 
12896         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12897                                              &ts, arg4 ? &ts : NULL));
12898 
12899         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12900             host_to_target_timespec64(arg4, &ts)) {
12901             return -TARGET_EFAULT;
12902         }
12903         return ret;
12904     }
12905 #endif
12906 
12907 #if defined(TARGET_NR_set_tid_address)
12908     case TARGET_NR_set_tid_address:
12909     {
12910         TaskState *ts = get_task_state(cpu);
12911         ts->child_tidptr = arg1;
12912         /* do not call host set_tid_address() syscall, instead return tid() */
12913         return get_errno(sys_gettid());
12914     }
12915 #endif
12916 
12917     case TARGET_NR_tkill:
12918         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12919 
12920     case TARGET_NR_tgkill:
12921         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12922                          target_to_host_signal(arg3)));
12923 
12924 #ifdef TARGET_NR_set_robust_list
12925     case TARGET_NR_set_robust_list:
12926     case TARGET_NR_get_robust_list:
12927         /* The ABI for supporting robust futexes has userspace pass
12928          * the kernel a pointer to a linked list which is updated by
12929          * userspace after the syscall; the list is walked by the kernel
12930          * when the thread exits. Since the linked list in QEMU guest
12931          * memory isn't a valid linked list for the host and we have
12932          * no way to reliably intercept the thread-death event, we can't
12933          * support these. Silently return ENOSYS so that guest userspace
12934          * falls back to a non-robust futex implementation (which should
12935          * be OK except in the corner case of the guest crashing while
12936          * holding a mutex that is shared with another process via
12937          * shared memory).
12938          */
12939         return -TARGET_ENOSYS;
12940 #endif
12941 
12942 #if defined(TARGET_NR_utimensat)
12943     case TARGET_NR_utimensat:
12944         {
12945             struct timespec *tsp, ts[2];
12946             if (!arg3) {
12947                 tsp = NULL;
12948             } else {
12949                 if (target_to_host_timespec(ts, arg3)) {
12950                     return -TARGET_EFAULT;
12951                 }
12952                 if (target_to_host_timespec(ts + 1, arg3 +
12953                                             sizeof(struct target_timespec))) {
12954                     return -TARGET_EFAULT;
12955                 }
12956                 tsp = ts;
12957             }
12958             if (!arg2)
12959                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12960             else {
12961                 if (!(p = lock_user_string(arg2))) {
12962                     return -TARGET_EFAULT;
12963                 }
12964                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12965                 unlock_user(p, arg2, 0);
12966             }
12967         }
12968         return ret;
12969 #endif
12970 #ifdef TARGET_NR_utimensat_time64
12971     case TARGET_NR_utimensat_time64:
12972         {
12973             struct timespec *tsp, ts[2];
12974             if (!arg3) {
12975                 tsp = NULL;
12976             } else {
12977                 if (target_to_host_timespec64(ts, arg3)) {
12978                     return -TARGET_EFAULT;
12979                 }
12980                 if (target_to_host_timespec64(ts + 1, arg3 +
12981                                      sizeof(struct target__kernel_timespec))) {
12982                     return -TARGET_EFAULT;
12983                 }
12984                 tsp = ts;
12985             }
12986             if (!arg2)
12987                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12988             else {
12989                 p = lock_user_string(arg2);
12990                 if (!p) {
12991                     return -TARGET_EFAULT;
12992                 }
12993                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12994                 unlock_user(p, arg2, 0);
12995             }
12996         }
12997         return ret;
12998 #endif
12999 #ifdef TARGET_NR_futex
13000     case TARGET_NR_futex:
13001         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13002 #endif
13003 #ifdef TARGET_NR_futex_time64
13004     case TARGET_NR_futex_time64:
13005         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13006 #endif
13007 #ifdef CONFIG_INOTIFY
13008 #if defined(TARGET_NR_inotify_init)
13009     case TARGET_NR_inotify_init:
13010         ret = get_errno(inotify_init());
13011         if (ret >= 0) {
13012             fd_trans_register(ret, &target_inotify_trans);
13013         }
13014         return ret;
13015 #endif
13016 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13017     case TARGET_NR_inotify_init1:
13018         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13019                                           fcntl_flags_tbl)));
13020         if (ret >= 0) {
13021             fd_trans_register(ret, &target_inotify_trans);
13022         }
13023         return ret;
13024 #endif
13025 #if defined(TARGET_NR_inotify_add_watch)
13026     case TARGET_NR_inotify_add_watch:
13027         p = lock_user_string(arg2);
13028         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13029         unlock_user(p, arg2, 0);
13030         return ret;
13031 #endif
13032 #if defined(TARGET_NR_inotify_rm_watch)
13033     case TARGET_NR_inotify_rm_watch:
13034         return get_errno(inotify_rm_watch(arg1, arg2));
13035 #endif
13036 #endif
13037 
13038 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13039     case TARGET_NR_mq_open:
13040         {
13041             struct mq_attr posix_mq_attr;
13042             struct mq_attr *pposix_mq_attr;
13043             int host_flags;
13044 
13045             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13046             pposix_mq_attr = NULL;
13047             if (arg4) {
13048                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13049                     return -TARGET_EFAULT;
13050                 }
13051                 pposix_mq_attr = &posix_mq_attr;
13052             }
13053             p = lock_user_string(arg1 - 1);
13054             if (!p) {
13055                 return -TARGET_EFAULT;
13056             }
13057             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13058             unlock_user (p, arg1, 0);
13059         }
13060         return ret;
13061 
13062     case TARGET_NR_mq_unlink:
13063         p = lock_user_string(arg1 - 1);
13064         if (!p) {
13065             return -TARGET_EFAULT;
13066         }
13067         ret = get_errno(mq_unlink(p));
13068         unlock_user (p, arg1, 0);
13069         return ret;
13070 
13071 #ifdef TARGET_NR_mq_timedsend
13072     case TARGET_NR_mq_timedsend:
13073         {
13074             struct timespec ts;
13075 
13076             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13077             if (arg5 != 0) {
13078                 if (target_to_host_timespec(&ts, arg5)) {
13079                     return -TARGET_EFAULT;
13080                 }
13081                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13082                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13083                     return -TARGET_EFAULT;
13084                 }
13085             } else {
13086                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13087             }
13088             unlock_user (p, arg2, arg3);
13089         }
13090         return ret;
13091 #endif
13092 #ifdef TARGET_NR_mq_timedsend_time64
13093     case TARGET_NR_mq_timedsend_time64:
13094         {
13095             struct timespec ts;
13096 
13097             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13098             if (arg5 != 0) {
13099                 if (target_to_host_timespec64(&ts, arg5)) {
13100                     return -TARGET_EFAULT;
13101                 }
13102                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13103                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13104                     return -TARGET_EFAULT;
13105                 }
13106             } else {
13107                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13108             }
13109             unlock_user(p, arg2, arg3);
13110         }
13111         return ret;
13112 #endif
13113 
13114 #ifdef TARGET_NR_mq_timedreceive
13115     case TARGET_NR_mq_timedreceive:
13116         {
13117             struct timespec ts;
13118             unsigned int prio;
13119 
13120             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13121             if (arg5 != 0) {
13122                 if (target_to_host_timespec(&ts, arg5)) {
13123                     return -TARGET_EFAULT;
13124                 }
13125                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13126                                                      &prio, &ts));
13127                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13128                     return -TARGET_EFAULT;
13129                 }
13130             } else {
13131                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13132                                                      &prio, NULL));
13133             }
13134             unlock_user (p, arg2, arg3);
13135             if (arg4 != 0)
13136                 put_user_u32(prio, arg4);
13137         }
13138         return ret;
13139 #endif
13140 #ifdef TARGET_NR_mq_timedreceive_time64
13141     case TARGET_NR_mq_timedreceive_time64:
13142         {
13143             struct timespec ts;
13144             unsigned int prio;
13145 
13146             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13147             if (arg5 != 0) {
13148                 if (target_to_host_timespec64(&ts, arg5)) {
13149                     return -TARGET_EFAULT;
13150                 }
13151                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13152                                                      &prio, &ts));
13153                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13154                     return -TARGET_EFAULT;
13155                 }
13156             } else {
13157                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13158                                                      &prio, NULL));
13159             }
13160             unlock_user(p, arg2, arg3);
13161             if (arg4 != 0) {
13162                 put_user_u32(prio, arg4);
13163             }
13164         }
13165         return ret;
13166 #endif
13167 
13168     /* Not implemented for now... */
13169 /*     case TARGET_NR_mq_notify: */
13170 /*         break; */
13171 
13172     case TARGET_NR_mq_getsetattr:
13173         {
13174             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13175             ret = 0;
13176             if (arg2 != 0) {
13177                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13178                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13179                                            &posix_mq_attr_out));
13180             } else if (arg3 != 0) {
13181                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13182             }
13183             if (ret == 0 && arg3 != 0) {
13184                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13185             }
13186         }
13187         return ret;
13188 #endif
13189 
13190 #ifdef CONFIG_SPLICE
13191 #ifdef TARGET_NR_tee
13192     case TARGET_NR_tee:
13193         {
13194             ret = get_errno(tee(arg1,arg2,arg3,arg4));
13195         }
13196         return ret;
13197 #endif
13198 #ifdef TARGET_NR_splice
13199     case TARGET_NR_splice:
13200         {
13201             loff_t loff_in, loff_out;
13202             loff_t *ploff_in = NULL, *ploff_out = NULL;
13203             if (arg2) {
13204                 if (get_user_u64(loff_in, arg2)) {
13205                     return -TARGET_EFAULT;
13206                 }
13207                 ploff_in = &loff_in;
13208             }
13209             if (arg4) {
13210                 if (get_user_u64(loff_out, arg4)) {
13211                     return -TARGET_EFAULT;
13212                 }
13213                 ploff_out = &loff_out;
13214             }
13215             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13216             if (arg2) {
13217                 if (put_user_u64(loff_in, arg2)) {
13218                     return -TARGET_EFAULT;
13219                 }
13220             }
13221             if (arg4) {
13222                 if (put_user_u64(loff_out, arg4)) {
13223                     return -TARGET_EFAULT;
13224                 }
13225             }
13226         }
13227         return ret;
13228 #endif
13229 #ifdef TARGET_NR_vmsplice
13230 	case TARGET_NR_vmsplice:
13231         {
13232             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13233             if (vec != NULL) {
13234                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13235                 unlock_iovec(vec, arg2, arg3, 0);
13236             } else {
13237                 ret = -host_to_target_errno(errno);
13238             }
13239         }
13240         return ret;
13241 #endif
13242 #endif /* CONFIG_SPLICE */
13243 #ifdef CONFIG_EVENTFD
13244 #if defined(TARGET_NR_eventfd)
13245     case TARGET_NR_eventfd:
13246         ret = get_errno(eventfd(arg1, 0));
13247         if (ret >= 0) {
13248             fd_trans_register(ret, &target_eventfd_trans);
13249         }
13250         return ret;
13251 #endif
13252 #if defined(TARGET_NR_eventfd2)
13253     case TARGET_NR_eventfd2:
13254     {
13255         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13256         if (arg2 & TARGET_O_NONBLOCK) {
13257             host_flags |= O_NONBLOCK;
13258         }
13259         if (arg2 & TARGET_O_CLOEXEC) {
13260             host_flags |= O_CLOEXEC;
13261         }
13262         ret = get_errno(eventfd(arg1, host_flags));
13263         if (ret >= 0) {
13264             fd_trans_register(ret, &target_eventfd_trans);
13265         }
13266         return ret;
13267     }
13268 #endif
13269 #endif /* CONFIG_EVENTFD  */
13270 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13271     case TARGET_NR_fallocate:
13272 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13273         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13274                                   target_offset64(arg5, arg6)));
13275 #else
13276         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13277 #endif
13278         return ret;
13279 #endif
13280 #if defined(CONFIG_SYNC_FILE_RANGE)
13281 #if defined(TARGET_NR_sync_file_range)
13282     case TARGET_NR_sync_file_range:
13283 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13284 #if defined(TARGET_MIPS)
13285         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13286                                         target_offset64(arg5, arg6), arg7));
13287 #else
13288         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13289                                         target_offset64(arg4, arg5), arg6));
13290 #endif /* !TARGET_MIPS */
13291 #else
13292         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13293 #endif
13294         return ret;
13295 #endif
13296 #if defined(TARGET_NR_sync_file_range2) || \
13297     defined(TARGET_NR_arm_sync_file_range)
13298 #if defined(TARGET_NR_sync_file_range2)
13299     case TARGET_NR_sync_file_range2:
13300 #endif
13301 #if defined(TARGET_NR_arm_sync_file_range)
13302     case TARGET_NR_arm_sync_file_range:
13303 #endif
13304         /* This is like sync_file_range but the arguments are reordered */
13305 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13306         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13307                                         target_offset64(arg5, arg6), arg2));
13308 #else
13309         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13310 #endif
13311         return ret;
13312 #endif
13313 #endif
13314 #if defined(TARGET_NR_signalfd4)
13315     case TARGET_NR_signalfd4:
13316         return do_signalfd4(arg1, arg2, arg4);
13317 #endif
13318 #if defined(TARGET_NR_signalfd)
13319     case TARGET_NR_signalfd:
13320         return do_signalfd4(arg1, arg2, 0);
13321 #endif
13322 #if defined(CONFIG_EPOLL)
13323 #if defined(TARGET_NR_epoll_create)
13324     case TARGET_NR_epoll_create:
13325         return get_errno(epoll_create(arg1));
13326 #endif
13327 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13328     case TARGET_NR_epoll_create1:
13329         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13330 #endif
13331 #if defined(TARGET_NR_epoll_ctl)
13332     case TARGET_NR_epoll_ctl:
13333     {
13334         struct epoll_event ep;
13335         struct epoll_event *epp = 0;
13336         if (arg4) {
13337             if (arg2 != EPOLL_CTL_DEL) {
13338                 struct target_epoll_event *target_ep;
13339                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13340                     return -TARGET_EFAULT;
13341                 }
13342                 ep.events = tswap32(target_ep->events);
13343                 /*
13344                  * The epoll_data_t union is just opaque data to the kernel,
13345                  * so we transfer all 64 bits across and need not worry what
13346                  * actual data type it is.
13347                  */
13348                 ep.data.u64 = tswap64(target_ep->data.u64);
13349                 unlock_user_struct(target_ep, arg4, 0);
13350             }
13351             /*
13352              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13353              * non-null pointer, even though this argument is ignored.
13354              *
13355              */
13356             epp = &ep;
13357         }
13358         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13359     }
13360 #endif
13361 
13362 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13363 #if defined(TARGET_NR_epoll_wait)
13364     case TARGET_NR_epoll_wait:
13365 #endif
13366 #if defined(TARGET_NR_epoll_pwait)
13367     case TARGET_NR_epoll_pwait:
13368 #endif
13369     {
13370         struct target_epoll_event *target_ep;
13371         struct epoll_event *ep;
13372         int epfd = arg1;
13373         int maxevents = arg3;
13374         int timeout = arg4;
13375 
13376         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13377             return -TARGET_EINVAL;
13378         }
13379 
13380         target_ep = lock_user(VERIFY_WRITE, arg2,
13381                               maxevents * sizeof(struct target_epoll_event), 1);
13382         if (!target_ep) {
13383             return -TARGET_EFAULT;
13384         }
13385 
13386         ep = g_try_new(struct epoll_event, maxevents);
13387         if (!ep) {
13388             unlock_user(target_ep, arg2, 0);
13389             return -TARGET_ENOMEM;
13390         }
13391 
13392         switch (num) {
13393 #if defined(TARGET_NR_epoll_pwait)
13394         case TARGET_NR_epoll_pwait:
13395         {
13396             sigset_t *set = NULL;
13397 
13398             if (arg5) {
13399                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13400                 if (ret != 0) {
13401                     break;
13402                 }
13403             }
13404 
13405             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13406                                              set, SIGSET_T_SIZE));
13407 
13408             if (set) {
13409                 finish_sigsuspend_mask(ret);
13410             }
13411             break;
13412         }
13413 #endif
13414 #if defined(TARGET_NR_epoll_wait)
13415         case TARGET_NR_epoll_wait:
13416             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13417                                              NULL, 0));
13418             break;
13419 #endif
13420         default:
13421             ret = -TARGET_ENOSYS;
13422         }
13423         if (!is_error(ret)) {
13424             int i;
13425             for (i = 0; i < ret; i++) {
13426                 target_ep[i].events = tswap32(ep[i].events);
13427                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13428             }
13429             unlock_user(target_ep, arg2,
13430                         ret * sizeof(struct target_epoll_event));
13431         } else {
13432             unlock_user(target_ep, arg2, 0);
13433         }
13434         g_free(ep);
13435         return ret;
13436     }
13437 #endif
13438 #endif
13439 #ifdef TARGET_NR_prlimit64
13440     case TARGET_NR_prlimit64:
13441     {
13442         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13443         struct target_rlimit64 *target_rnew, *target_rold;
13444         struct host_rlimit64 rnew, rold, *rnewp = 0;
13445         int resource = target_to_host_resource(arg2);
13446 
13447         if (arg3 && (resource != RLIMIT_AS &&
13448                      resource != RLIMIT_DATA &&
13449                      resource != RLIMIT_STACK)) {
13450             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13451                 return -TARGET_EFAULT;
13452             }
13453             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13454             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13455             unlock_user_struct(target_rnew, arg3, 0);
13456             rnewp = &rnew;
13457         }
13458 
13459         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13460         if (!is_error(ret) && arg4) {
13461             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13462                 return -TARGET_EFAULT;
13463             }
13464             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13465             __put_user(rold.rlim_max, &target_rold->rlim_max);
13466             unlock_user_struct(target_rold, arg4, 1);
13467         }
13468         return ret;
13469     }
13470 #endif
13471 #ifdef TARGET_NR_gethostname
13472     case TARGET_NR_gethostname:
13473     {
13474         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13475         if (name) {
13476             ret = get_errno(gethostname(name, arg2));
13477             unlock_user(name, arg1, arg2);
13478         } else {
13479             ret = -TARGET_EFAULT;
13480         }
13481         return ret;
13482     }
13483 #endif
13484 #ifdef TARGET_NR_atomic_cmpxchg_32
13485     case TARGET_NR_atomic_cmpxchg_32:
13486     {
13487         /* should use start_exclusive from main.c */
13488         abi_ulong mem_value;
13489         if (get_user_u32(mem_value, arg6)) {
13490             target_siginfo_t info;
13491             info.si_signo = SIGSEGV;
13492             info.si_errno = 0;
13493             info.si_code = TARGET_SEGV_MAPERR;
13494             info._sifields._sigfault._addr = arg6;
13495             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13496             ret = 0xdeadbeef;
13497 
13498         }
13499         if (mem_value == arg2)
13500             put_user_u32(arg1, arg6);
13501         return mem_value;
13502     }
13503 #endif
13504 #ifdef TARGET_NR_atomic_barrier
13505     case TARGET_NR_atomic_barrier:
13506         /* Like the kernel implementation and the
13507            qemu arm barrier, no-op this? */
13508         return 0;
13509 #endif
13510 
13511 #ifdef TARGET_NR_timer_create
13512     case TARGET_NR_timer_create:
13513     {
13514         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13515 
13516         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13517 
13518         int clkid = arg1;
13519         int timer_index = next_free_host_timer();
13520 
13521         if (timer_index < 0) {
13522             ret = -TARGET_EAGAIN;
13523         } else {
13524             timer_t *phtimer = g_posix_timers  + timer_index;
13525 
13526             if (arg2) {
13527                 phost_sevp = &host_sevp;
13528                 ret = target_to_host_sigevent(phost_sevp, arg2);
13529                 if (ret != 0) {
13530                     free_host_timer_slot(timer_index);
13531                     return ret;
13532                 }
13533             }
13534 
13535             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13536             if (ret) {
13537                 free_host_timer_slot(timer_index);
13538             } else {
13539                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13540                     timer_delete(*phtimer);
13541                     free_host_timer_slot(timer_index);
13542                     return -TARGET_EFAULT;
13543                 }
13544             }
13545         }
13546         return ret;
13547     }
13548 #endif
13549 
13550 #ifdef TARGET_NR_timer_settime
13551     case TARGET_NR_timer_settime:
13552     {
13553         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13554          * struct itimerspec * old_value */
13555         target_timer_t timerid = get_timer_id(arg1);
13556 
13557         if (timerid < 0) {
13558             ret = timerid;
13559         } else if (arg3 == 0) {
13560             ret = -TARGET_EINVAL;
13561         } else {
13562             timer_t htimer = g_posix_timers[timerid];
13563             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13564 
13565             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13566                 return -TARGET_EFAULT;
13567             }
13568             ret = get_errno(
13569                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13570             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13571                 return -TARGET_EFAULT;
13572             }
13573         }
13574         return ret;
13575     }
13576 #endif
13577 
13578 #ifdef TARGET_NR_timer_settime64
13579     case TARGET_NR_timer_settime64:
13580     {
13581         target_timer_t timerid = get_timer_id(arg1);
13582 
13583         if (timerid < 0) {
13584             ret = timerid;
13585         } else if (arg3 == 0) {
13586             ret = -TARGET_EINVAL;
13587         } else {
13588             timer_t htimer = g_posix_timers[timerid];
13589             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13590 
13591             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13592                 return -TARGET_EFAULT;
13593             }
13594             ret = get_errno(
13595                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13596             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13597                 return -TARGET_EFAULT;
13598             }
13599         }
13600         return ret;
13601     }
13602 #endif
13603 
13604 #ifdef TARGET_NR_timer_gettime
13605     case TARGET_NR_timer_gettime:
13606     {
13607         /* args: timer_t timerid, struct itimerspec *curr_value */
13608         target_timer_t timerid = get_timer_id(arg1);
13609 
13610         if (timerid < 0) {
13611             ret = timerid;
13612         } else if (!arg2) {
13613             ret = -TARGET_EFAULT;
13614         } else {
13615             timer_t htimer = g_posix_timers[timerid];
13616             struct itimerspec hspec;
13617             ret = get_errno(timer_gettime(htimer, &hspec));
13618 
13619             if (host_to_target_itimerspec(arg2, &hspec)) {
13620                 ret = -TARGET_EFAULT;
13621             }
13622         }
13623         return ret;
13624     }
13625 #endif
13626 
13627 #ifdef TARGET_NR_timer_gettime64
13628     case TARGET_NR_timer_gettime64:
13629     {
13630         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13631         target_timer_t timerid = get_timer_id(arg1);
13632 
13633         if (timerid < 0) {
13634             ret = timerid;
13635         } else if (!arg2) {
13636             ret = -TARGET_EFAULT;
13637         } else {
13638             timer_t htimer = g_posix_timers[timerid];
13639             struct itimerspec hspec;
13640             ret = get_errno(timer_gettime(htimer, &hspec));
13641 
13642             if (host_to_target_itimerspec64(arg2, &hspec)) {
13643                 ret = -TARGET_EFAULT;
13644             }
13645         }
13646         return ret;
13647     }
13648 #endif
13649 
13650 #ifdef TARGET_NR_timer_getoverrun
13651     case TARGET_NR_timer_getoverrun:
13652     {
13653         /* args: timer_t timerid */
13654         target_timer_t timerid = get_timer_id(arg1);
13655 
13656         if (timerid < 0) {
13657             ret = timerid;
13658         } else {
13659             timer_t htimer = g_posix_timers[timerid];
13660             ret = get_errno(timer_getoverrun(htimer));
13661         }
13662         return ret;
13663     }
13664 #endif
13665 
13666 #ifdef TARGET_NR_timer_delete
13667     case TARGET_NR_timer_delete:
13668     {
13669         /* args: timer_t timerid */
13670         target_timer_t timerid = get_timer_id(arg1);
13671 
13672         if (timerid < 0) {
13673             ret = timerid;
13674         } else {
13675             timer_t htimer = g_posix_timers[timerid];
13676             ret = get_errno(timer_delete(htimer));
13677             free_host_timer_slot(timerid);
13678         }
13679         return ret;
13680     }
13681 #endif
13682 
13683 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13684     case TARGET_NR_timerfd_create:
13685         ret = get_errno(timerfd_create(arg1,
13686                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13687         if (ret >= 0) {
13688             fd_trans_register(ret, &target_timerfd_trans);
13689         }
13690         return ret;
13691 #endif
13692 
13693 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13694     case TARGET_NR_timerfd_gettime:
13695         {
13696             struct itimerspec its_curr;
13697 
13698             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13699 
13700             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13701                 return -TARGET_EFAULT;
13702             }
13703         }
13704         return ret;
13705 #endif
13706 
13707 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13708     case TARGET_NR_timerfd_gettime64:
13709         {
13710             struct itimerspec its_curr;
13711 
13712             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13713 
13714             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13715                 return -TARGET_EFAULT;
13716             }
13717         }
13718         return ret;
13719 #endif
13720 
13721 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13722     case TARGET_NR_timerfd_settime:
13723         {
13724             struct itimerspec its_new, its_old, *p_new;
13725 
13726             if (arg3) {
13727                 if (target_to_host_itimerspec(&its_new, arg3)) {
13728                     return -TARGET_EFAULT;
13729                 }
13730                 p_new = &its_new;
13731             } else {
13732                 p_new = NULL;
13733             }
13734 
13735             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13736 
13737             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13738                 return -TARGET_EFAULT;
13739             }
13740         }
13741         return ret;
13742 #endif
13743 
13744 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13745     case TARGET_NR_timerfd_settime64:
13746         {
13747             struct itimerspec its_new, its_old, *p_new;
13748 
13749             if (arg3) {
13750                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13751                     return -TARGET_EFAULT;
13752                 }
13753                 p_new = &its_new;
13754             } else {
13755                 p_new = NULL;
13756             }
13757 
13758             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13759 
13760             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13761                 return -TARGET_EFAULT;
13762             }
13763         }
13764         return ret;
13765 #endif
13766 
13767 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13768     case TARGET_NR_ioprio_get:
13769         return get_errno(ioprio_get(arg1, arg2));
13770 #endif
13771 
13772 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13773     case TARGET_NR_ioprio_set:
13774         return get_errno(ioprio_set(arg1, arg2, arg3));
13775 #endif
13776 
13777 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13778     case TARGET_NR_setns:
13779         return get_errno(setns(arg1, arg2));
13780 #endif
13781 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13782     case TARGET_NR_unshare:
13783         return get_errno(unshare(arg1));
13784 #endif
13785 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13786     case TARGET_NR_kcmp:
13787         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13788 #endif
13789 #ifdef TARGET_NR_swapcontext
13790     case TARGET_NR_swapcontext:
13791         /* PowerPC specific.  */
13792         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13793 #endif
13794 #ifdef TARGET_NR_memfd_create
13795     case TARGET_NR_memfd_create:
13796         p = lock_user_string(arg1);
13797         if (!p) {
13798             return -TARGET_EFAULT;
13799         }
13800         ret = get_errno(memfd_create(p, arg2));
13801         fd_trans_unregister(ret);
13802         unlock_user(p, arg1, 0);
13803         return ret;
13804 #endif
13805 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13806     case TARGET_NR_membarrier:
13807         return get_errno(membarrier(arg1, arg2));
13808 #endif
13809 
13810 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13811     case TARGET_NR_copy_file_range:
13812         {
13813             loff_t inoff, outoff;
13814             loff_t *pinoff = NULL, *poutoff = NULL;
13815 
13816             if (arg2) {
13817                 if (get_user_u64(inoff, arg2)) {
13818                     return -TARGET_EFAULT;
13819                 }
13820                 pinoff = &inoff;
13821             }
13822             if (arg4) {
13823                 if (get_user_u64(outoff, arg4)) {
13824                     return -TARGET_EFAULT;
13825                 }
13826                 poutoff = &outoff;
13827             }
13828             /* Do not sign-extend the count parameter. */
13829             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13830                                                  (abi_ulong)arg5, arg6));
13831             if (!is_error(ret) && ret > 0) {
13832                 if (arg2) {
13833                     if (put_user_u64(inoff, arg2)) {
13834                         return -TARGET_EFAULT;
13835                     }
13836                 }
13837                 if (arg4) {
13838                     if (put_user_u64(outoff, arg4)) {
13839                         return -TARGET_EFAULT;
13840                     }
13841                 }
13842             }
13843         }
13844         return ret;
13845 #endif
13846 
13847 #if defined(TARGET_NR_pivot_root)
13848     case TARGET_NR_pivot_root:
13849         {
13850             void *p2;
13851             p = lock_user_string(arg1); /* new_root */
13852             p2 = lock_user_string(arg2); /* put_old */
13853             if (!p || !p2) {
13854                 ret = -TARGET_EFAULT;
13855             } else {
13856                 ret = get_errno(pivot_root(p, p2));
13857             }
13858             unlock_user(p2, arg2, 0);
13859             unlock_user(p, arg1, 0);
13860         }
13861         return ret;
13862 #endif
13863 
13864 #if defined(TARGET_NR_riscv_hwprobe)
13865     case TARGET_NR_riscv_hwprobe:
13866         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13867 #endif
13868 
13869     default:
13870         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13871         return -TARGET_ENOSYS;
13872     }
13873     return ret;
13874 }
13875 
13876 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13877                     abi_long arg2, abi_long arg3, abi_long arg4,
13878                     abi_long arg5, abi_long arg6, abi_long arg7,
13879                     abi_long arg8)
13880 {
13881     CPUState *cpu = env_cpu(cpu_env);
13882     abi_long ret;
13883 
13884 #ifdef DEBUG_ERESTARTSYS
13885     /* Debug-only code for exercising the syscall-restart code paths
13886      * in the per-architecture cpu main loops: restart every syscall
13887      * the guest makes once before letting it through.
13888      */
13889     {
13890         static bool flag;
13891         flag = !flag;
13892         if (flag) {
13893             return -QEMU_ERESTARTSYS;
13894         }
13895     }
13896 #endif
13897 
13898     record_syscall_start(cpu, num, arg1,
13899                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13900 
13901     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13902         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13903     }
13904 
13905     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13906                       arg5, arg6, arg7, arg8);
13907 
13908     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13909         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13910                           arg3, arg4, arg5, arg6);
13911     }
13912 
13913     record_syscall_return(cpu, num, ret);
13914     return ret;
13915 }
13916