xref: /qemu/linux-user/syscall.c (revision 897c68fb795cf03b89b6688a6f945d68a765c3e4)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/tb-flush.h"
30 #include "exec/translation-block.h"
31 #include <elf.h>
32 #include <endian.h>
33 #include <grp.h>
34 #include <sys/ipc.h>
35 #include <sys/msg.h>
36 #include <sys/wait.h>
37 #include <sys/mount.h>
38 #include <sys/file.h>
39 #include <sys/fsuid.h>
40 #include <sys/personality.h>
41 #include <sys/prctl.h>
42 #include <sys/resource.h>
43 #include <sys/swap.h>
44 #include <linux/capability.h>
45 #include <sched.h>
46 #include <sys/timex.h>
47 #include <sys/socket.h>
48 #include <linux/sockios.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/signalfd.h>
59 #include <netinet/in.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 #include <linux/wireless.h>
64 #include <linux/icmp.h>
65 #include <linux/icmpv6.h>
66 #include <linux/if_tun.h>
67 #include <linux/in6.h>
68 #include <linux/errqueue.h>
69 #include <linux/random.h>
70 #ifdef CONFIG_TIMERFD
71 #include <sys/timerfd.h>
72 #endif
73 #ifdef CONFIG_EVENTFD
74 #include <sys/eventfd.h>
75 #endif
76 #ifdef CONFIG_EPOLL
77 #include <sys/epoll.h>
78 #endif
79 #ifdef CONFIG_ATTR
80 #include "qemu/xattr.h"
81 #endif
82 #ifdef CONFIG_SENDFILE
83 #include <sys/sendfile.h>
84 #endif
85 #ifdef HAVE_SYS_KCOV_H
86 #include <sys/kcov.h>
87 #endif
88 
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
95 
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #include <linux/fd.h>
105 #if defined(CONFIG_FIEMAP)
106 #include <linux/fiemap.h>
107 #endif
108 #include <linux/fb.h>
109 #if defined(CONFIG_USBFS)
110 #include <linux/usbdevice_fs.h>
111 #include <linux/usb/ch9.h>
112 #endif
113 #include <linux/vt.h>
114 #include <linux/dm-ioctl.h>
115 #include <linux/reboot.h>
116 #include <linux/route.h>
117 #include <linux/filter.h>
118 #include <linux/blkpg.h>
119 #include <netpacket/packet.h>
120 #include <linux/netlink.h>
121 #include <linux/if_alg.h>
122 #include <linux/rtc.h>
123 #include <sound/asound.h>
124 #ifdef HAVE_BTRFS_H
125 #include <linux/btrfs.h>
126 #endif
127 #ifdef HAVE_DRM_H
128 #include <libdrm/drm.h>
129 #include <libdrm/i915_drm.h>
130 #endif
131 #include "linux_loop.h"
132 #include "uname.h"
133 
134 #include "qemu.h"
135 #include "user-internals.h"
136 #include "strace.h"
137 #include "signal-common.h"
138 #include "loader.h"
139 #include "user-mmap.h"
140 #include "user/page-protection.h"
141 #include "user/safe-syscall.h"
142 #include "user/signal.h"
143 #include "qemu/guest-random.h"
144 #include "qemu/selfmap.h"
145 #include "user/syscall-trace.h"
146 #include "special-errno.h"
147 #include "qapi/error.h"
148 #include "fd-trans.h"
149 #include "user/cpu_loop.h"
150 
151 #ifndef CLONE_IO
152 #define CLONE_IO                0x80000000      /* Clone io context */
153 #endif
154 
155 /* We can't directly call the host clone syscall, because this will
156  * badly confuse libc (breaking mutexes, for example). So we must
157  * divide clone flags into:
158  *  * flag combinations that look like pthread_create()
159  *  * flag combinations that look like fork()
160  *  * flags we can implement within QEMU itself
161  *  * flags we can't support and will return an error for
162  */
163 /* For thread creation, all these flags must be present; for
164  * fork, none must be present.
165  */
166 #define CLONE_THREAD_FLAGS                              \
167     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
168      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
169 
170 /* These flags are ignored:
171  * CLONE_DETACHED is now ignored by the kernel;
172  * CLONE_IO is just an optimisation hint to the I/O scheduler
173  */
174 #define CLONE_IGNORED_FLAGS                     \
175     (CLONE_DETACHED | CLONE_IO)
176 
177 #ifndef CLONE_PIDFD
178 # define CLONE_PIDFD 0x00001000
179 #endif
180 
181 /* Flags for fork which we can implement within QEMU itself */
182 #define CLONE_OPTIONAL_FORK_FLAGS               \
183     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
184      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
185 
186 /* Flags for thread creation which we can implement within QEMU itself */
187 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
188     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
189      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
190 
191 #define CLONE_INVALID_FORK_FLAGS                                        \
192     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
193 
194 #define CLONE_INVALID_THREAD_FLAGS                                      \
195     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
196        CLONE_IGNORED_FLAGS))
197 
198 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
199  * have almost all been allocated. We cannot support any of
200  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
201  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
202  * The checks against the invalid thread masks above will catch these.
203  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
204  */
205 
206 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
207  * once. This exercises the codepaths for restart.
208  */
209 //#define DEBUG_ERESTARTSYS
210 
211 //#include <linux/msdos_fs.h>
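/* Local equivalents of the VFAT readdir ioctl numbers that the header above
 * would provide, reconstructed with _IOC() (using the target dirent layout
 * for the size field) so the header does not have to be included.
 */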
212 #define VFAT_IOCTL_READDIR_BOTH \
213     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
214 #define VFAT_IOCTL_READDIR_SHORT \
215     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
216 
217 #undef _syscall0
218 #undef _syscall1
219 #undef _syscall2
220 #undef _syscall3
221 #undef _syscall4
222 #undef _syscall5
223 #undef _syscall6
224 
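/* The _syscallN macros below generate small static wrappers that invoke the
 * host syscall directly via syscall(2), bypassing whatever libc wrapper may
 * (or may not) exist for it.
 */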
225 #define _syscall0(type,name)		\
226 static type name (void)			\
227 {					\
228 	return syscall(__NR_##name);	\
229 }
230 
231 #define _syscall1(type,name,type1,arg1)		\
232 static type name (type1 arg1)			\
233 {						\
234 	return syscall(__NR_##name, arg1);	\
235 }
236 
237 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
238 static type name (type1 arg1,type2 arg2)		\
239 {							\
240 	return syscall(__NR_##name, arg1, arg2);	\
241 }
242 
243 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
244 static type name (type1 arg1,type2 arg2,type3 arg3)		\
245 {								\
246 	return syscall(__NR_##name, arg1, arg2, arg3);		\
247 }
248 
249 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
250 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
251 {										\
252 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
253 }
254 
255 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
256 		  type5,arg5)							\
257 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
258 {										\
259 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
260 }
261 
262 
263 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
264 		  type5,arg5,type6,arg6)					\
265 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
266                   type6 arg6)							\
267 {										\
268 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
269 }
270 
271 
272 #define __NR_sys_uname __NR_uname
273 #define __NR_sys_getcwd1 __NR_getcwd
274 #define __NR_sys_getdents __NR_getdents
275 #define __NR_sys_getdents64 __NR_getdents64
276 #define __NR_sys_getpriority __NR_getpriority
277 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
278 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
279 #define __NR_sys_syslog __NR_syslog
280 #if defined(__NR_futex)
281 # define __NR_sys_futex __NR_futex
282 #endif
283 #if defined(__NR_futex_time64)
284 # define __NR_sys_futex_time64 __NR_futex_time64
285 #endif
286 #define __NR_sys_statx __NR_statx
287 
288 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
289 #define __NR__llseek __NR_lseek
290 #endif
291 
292 /* Newer kernel ports have llseek() instead of _llseek() */
293 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
294 #define TARGET_NR__llseek TARGET_NR_llseek
295 #endif
296 
297 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
298 #ifndef TARGET_O_NONBLOCK_MASK
299 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
300 #endif
301 
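/* Direct wrapper for gettid: older glibc versions provide no gettid() function. */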
302 #define __NR_sys_gettid __NR_gettid
303 _syscall0(int, sys_gettid)
304 
305 /* In the case of a 64-bit guest on a 32-bit host we must emulate
306  * getdents using getdents64, because otherwise the host
307  * might hand us back more dirent records than we can fit
308  * into the guest buffer after structure format conversion.
309  * Otherwise we emulate getdents with the host's own getdents, if it has one.
310  */
311 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
312 #define EMULATE_GETDENTS_WITH_GETDENTS
313 #endif
314 
315 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
316 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
317 #endif
318 #if (defined(TARGET_NR_getdents) && \
319       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
320     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
321 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
322 #endif
323 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
324 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
325           loff_t *, res, unsigned int, wh);
326 #endif
327 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
328 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
329           siginfo_t *, uinfo)
330 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
331 #ifdef __NR_exit_group
332 _syscall1(int,exit_group,int,error_code)
333 #endif
334 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
335 #define __NR_sys_close_range __NR_close_range
336 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
337 #ifndef CLOSE_RANGE_CLOEXEC
338 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
339 #endif
340 #endif
341 #if defined(__NR_futex)
342 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_futex_time64)
346 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
347           const struct timespec *,timeout,int *,uaddr2,int,val3)
348 #endif
349 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
350 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
353 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
354                              unsigned int, flags);
355 #endif
356 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
357 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
358 #endif
359 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
360 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
361           unsigned long *, user_mask_ptr);
362 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
363 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
364           unsigned long *, user_mask_ptr);
365 /* struct sched_attr is not defined by glibc < 2.41 */
366 #ifndef SCHED_ATTR_SIZE_VER0
367 struct sched_attr {
368     uint32_t size;
369     uint32_t sched_policy;
370     uint64_t sched_flags;
371     int32_t sched_nice;
372     uint32_t sched_priority;
373     uint64_t sched_runtime;
374     uint64_t sched_deadline;
375     uint64_t sched_period;
376     uint32_t sched_util_min;
377     uint32_t sched_util_max;
378 };
379 #endif
380 #define __NR_sys_sched_getattr __NR_sched_getattr
381 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
382           unsigned int, size, unsigned int, flags);
383 #define __NR_sys_sched_setattr __NR_sched_setattr
384 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
385           unsigned int, flags);
386 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
387 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
388 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
389 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
390           const struct sched_param *, param);
391 #define __NR_sys_sched_getparam __NR_sched_getparam
392 _syscall2(int, sys_sched_getparam, pid_t, pid,
393           struct sched_param *, param);
394 #define __NR_sys_sched_setparam __NR_sched_setparam
395 _syscall2(int, sys_sched_setparam, pid_t, pid,
396           const struct sched_param *, param);
397 #define __NR_sys_getcpu __NR_getcpu
398 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
399 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
400           void *, arg);
401 _syscall2(int, capget, struct __user_cap_header_struct *, header,
402           struct __user_cap_data_struct *, data);
403 _syscall2(int, capset, struct __user_cap_header_struct *, header,
404           struct __user_cap_data_struct *, data);
405 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
406 _syscall2(int, ioprio_get, int, which, int, who)
407 #endif
408 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
409 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
410 #endif
411 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
412 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
413 #endif
414 
415 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
416 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
417           unsigned long, idx1, unsigned long, idx2)
418 #endif
419 
420 /*
421  * It is assumed that struct statx is architecture independent.
422  */
423 #if defined(TARGET_NR_statx) && defined(__NR_statx)
424 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
425           unsigned int, mask, struct target_statx *, statxbuf)
426 #endif
427 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
428 _syscall2(int, membarrier, int, cmd, int, flags)
429 #endif
430 
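/* Translation table for open(2)/fcntl(2) file status flags: each entry maps
 * a (target mask, target bits) pair to the corresponding host mask and bits.
 */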
431 static const bitmask_transtbl fcntl_flags_tbl[] = {
432   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
433   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
434   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
435   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
436   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
437   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
438   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
439   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
440   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
441   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
442   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
443   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
444   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
445 #if defined(O_DIRECT)
446   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
447 #endif
448 #if defined(O_NOATIME)
449   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
450 #endif
451 #if defined(O_CLOEXEC)
452   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
453 #endif
454 #if defined(O_PATH)
455   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
456 #endif
457 #if defined(O_TMPFILE)
458   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
459 #endif
460   /* Don't terminate the list prematurely on 64-bit host+guest.  */
461 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
462   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
463 #endif
464 };
465 
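/* Raw getcwd(2) wrapper; the "1" suffix keeps the name distinct from the libc
 * getcwd() declaration.
 */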
466 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
467 
468 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
469 #if defined(__NR_utimensat)
470 #define __NR_sys_utimensat __NR_utimensat
471 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
472           const struct timespec *,tsp,int,flags)
473 #else
474 static int sys_utimensat(int dirfd, const char *pathname,
475                          const struct timespec times[2], int flags)
476 {
477     errno = ENOSYS;
478     return -1;
479 }
480 #endif
481 #endif /* TARGET_NR_utimensat */
482 
483 #ifdef TARGET_NR_renameat2
484 #if defined(__NR_renameat2)
485 #define __NR_sys_renameat2 __NR_renameat2
486 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
487           const char *, new, unsigned int, flags)
488 #else
489 static int sys_renameat2(int oldfd, const char *old,
490                          int newfd, const char *new, int flags)
491 {
492     if (flags == 0) {
493         return renameat(oldfd, old, newfd, new);
494     }
495     errno = ENOSYS;
496     return -1;
497 }
498 #endif
499 #endif /* TARGET_NR_renameat2 */
500 
501 #ifdef CONFIG_INOTIFY
502 #include <sys/inotify.h>
503 #else
504 /* Userspace can usually survive runtime without inotify */
505 #undef TARGET_NR_inotify_init
506 #undef TARGET_NR_inotify_init1
507 #undef TARGET_NR_inotify_add_watch
508 #undef TARGET_NR_inotify_rm_watch
509 #endif /* CONFIG_INOTIFY  */
510 
511 #if defined(TARGET_NR_prlimit64)
512 #ifndef __NR_prlimit64
513 # define __NR_prlimit64 -1
514 #endif
515 #define __NR_sys_prlimit64 __NR_prlimit64
516 /* The glibc rlimit structure may not be the one used by the underlying syscall */
517 struct host_rlimit64 {
518     uint64_t rlim_cur;
519     uint64_t rlim_max;
520 };
521 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
522           const struct host_rlimit64 *, new_limit,
523           struct host_rlimit64 *, old_limit)
524 #endif
525 
526 
527 #if defined(TARGET_NR_timer_create)
528 /* Maximum of 32 active POSIX timers allowed at any one time. */
529 #define GUEST_TIMER_MAX 32
530 static timer_t g_posix_timers[GUEST_TIMER_MAX];
531 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
532 
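/* Claim the first free timer slot.  The atomic exchange both tests and marks
 * the slot, so two guest threads cannot allocate the same host timer; returns
 * -1 if all GUEST_TIMER_MAX slots are in use.
 */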
533 static inline int next_free_host_timer(void)
534 {
535     int k;
536     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
537         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
538             return k;
539         }
540     }
541     return -1;
542 }
543 
544 static inline void free_host_timer_slot(int id)
545 {
546     qatomic_store_release(g_posix_timer_allocated + id, 0);
547 }
548 #endif
549 
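/* Errno translation in both directions: errnos.c.inc expands the E() macro
 * once per errno value known to QEMU; anything not listed there is passed
 * through unchanged.
 */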
550 static inline int host_to_target_errno(int host_errno)
551 {
552     switch (host_errno) {
553 #define E(X)  case X: return TARGET_##X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return host_errno;
558     }
559 }
560 
561 static inline int target_to_host_errno(int target_errno)
562 {
563     switch (target_errno) {
564 #define E(X)  case TARGET_##X: return X;
565 #include "errnos.c.inc"
566 #undef E
567     default:
568         return target_errno;
569     }
570 }
571 
572 abi_long get_errno(abi_long ret)
573 {
574     if (ret == -1)
575         return -host_to_target_errno(errno);
576     else
577         return ret;
578 }
579 
580 const char *target_strerror(int err)
581 {
582     if (err == QEMU_ERESTARTSYS) {
583         return "To be restarted";
584     }
585     if (err == QEMU_ESIGRETURN) {
586         return "Successful exit from sigreturn";
587     }
588 
589     return strerror(target_to_host_errno(err));
590 }
591 
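/* Check that the user-supplied bytes beyond the kernel-known struct size are
 * all zero.  Returns 1 if they are (or if usize <= ksize), 0 if a non-zero
 * byte is found, and -TARGET_EFAULT if the guest memory cannot be read.
 */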
592 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
593 {
594     int i;
595     uint8_t b;
596     if (usize <= ksize) {
597         return 1;
598     }
599     for (i = ksize; i < usize; i++) {
600         if (get_user_u8(b, addr + i)) {
601             return -TARGET_EFAULT;
602         }
603         if (b != 0) {
604             return 0;
605         }
606     }
607     return 1;
608 }
609 
610 /*
611  * Copies a target struct to a host struct, in a way that guarantees
612  * backwards-compatibility for struct syscall arguments.
613  *
614  * Similar to the kernel's uaccess.h:copy_struct_from_user()
615  */
616 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
617 {
618     size_t size = MIN(ksize, usize);
619     size_t rest = MAX(ksize, usize) - size;
620 
621     /* Deal with trailing bytes. */
622     if (usize < ksize) {
623         memset(dst + size, 0, rest);
624     } else if (usize > ksize) {
625         int ret = check_zeroed_user(src, ksize, usize);
626         if (ret <= 0) {
627             return ret ?: -TARGET_E2BIG;
628         }
629     }
630     /* Copy the interoperable parts of the struct. */
631     if (copy_from_user(dst, src, size)) {
632         return -TARGET_EFAULT;
633     }
634     return 0;
635 }
636 
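/* The safe_syscallN macros mirror the _syscallN ones above, but route the
 * call through safe_syscall() (see user/safe-syscall.h) so that a guest
 * signal arriving while the host call is blocked can interrupt it and be
 * handled before the syscall is restarted.
 */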
637 #define safe_syscall0(type, name) \
638 static type safe_##name(void) \
639 { \
640     return safe_syscall(__NR_##name); \
641 }
642 
643 #define safe_syscall1(type, name, type1, arg1) \
644 static type safe_##name(type1 arg1) \
645 { \
646     return safe_syscall(__NR_##name, arg1); \
647 }
648 
649 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
650 static type safe_##name(type1 arg1, type2 arg2) \
651 { \
652     return safe_syscall(__NR_##name, arg1, arg2); \
653 }
654 
655 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
656 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
657 { \
658     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
659 }
660 
661 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
662     type4, arg4) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
664 { \
665     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
666 }
667 
668 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
669     type4, arg4, type5, arg5) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
671     type5 arg5) \
672 { \
673     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
674 }
675 
676 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
677     type4, arg4, type5, arg5, type6, arg6) \
678 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
679     type5 arg5, type6 arg6) \
680 { \
681     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
682 }
683 
684 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
685 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
686 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
687               int, flags, mode_t, mode)
688 
689 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
690               const struct open_how_ver0 *, how, size_t, size)
691 
692 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694               struct rusage *, rusage)
695 #endif
696 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
697               int, options, struct rusage *, rusage)
698 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
699 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
700               char **, argv, char **, envp, int, flags)
701 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
702     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
703 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
704               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
705 #endif
706 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
707 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
708               struct timespec *, tsp, const sigset_t *, sigmask,
709               size_t, sigsetsize)
710 #endif
711 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
712               int, maxevents, int, timeout, const sigset_t *, sigmask,
713               size_t, sigsetsize)
714 #if defined(__NR_futex)
715 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
716               const struct timespec *,timeout,int *,uaddr2,int,val3)
717 #endif
718 #if defined(__NR_futex_time64)
719 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
720               const struct timespec *,timeout,int *,uaddr2,int,val3)
721 #endif
722 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
723 safe_syscall2(int, kill, pid_t, pid, int, sig)
724 safe_syscall2(int, tkill, int, tid, int, sig)
725 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
726 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
727 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
729               unsigned long, pos_l, unsigned long, pos_h)
730 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
731               unsigned long, pos_l, unsigned long, pos_h)
732 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
733               socklen_t, addrlen)
734 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
735               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
736 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
737               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
738 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
739 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
740 safe_syscall2(int, flock, int, fd, int, operation)
741 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
742 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
743               const struct timespec *, uts, size_t, sigsetsize)
744 #endif
745 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
746               int, flags)
747 #if defined(TARGET_NR_nanosleep)
748 safe_syscall2(int, nanosleep, const struct timespec *, req,
749               struct timespec *, rem)
750 #endif
751 #if defined(TARGET_NR_clock_nanosleep) || \
752     defined(TARGET_NR_clock_nanosleep_time64)
753 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
754               const struct timespec *, req, struct timespec *, rem)
755 #endif
756 #ifdef __NR_ipc
757 #ifdef __s390x__
758 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
759               void *, ptr)
760 #else
761 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
762               void *, ptr, long, fifth)
763 #endif
764 #endif
765 #ifdef __NR_msgsnd
766 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
767               int, flags)
768 #endif
769 #ifdef __NR_msgrcv
770 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
771               long, msgtype, int, flags)
772 #endif
773 #ifdef __NR_semtimedop
774 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
775               unsigned, nsops, const struct timespec *, timeout)
776 #endif
777 #if defined(TARGET_NR_mq_timedsend) || \
778     defined(TARGET_NR_mq_timedsend_time64)
779 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
780               size_t, len, unsigned, prio, const struct timespec *, timeout)
781 #endif
782 #if defined(TARGET_NR_mq_timedreceive) || \
783     defined(TARGET_NR_mq_timedreceive_time64)
784 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
785               size_t, len, unsigned *, prio, const struct timespec *, timeout)
786 #endif
787 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
788 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
789               int, outfd, loff_t *, poutoff, size_t, length,
790               unsigned int, flags)
791 #endif
792 
793 /* We do ioctl like this rather than via safe_syscall3 to preserve the
794  * "third argument might be integer or pointer or not present" behaviour of
795  * the libc function.
796  */
797 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
798 /* Similarly for fcntl. Since we always build with LFS enabled,
799  * we should be using the 64-bit structures automatically.
800  */
801 #ifdef __NR_fcntl64
802 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
803 #else
804 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
805 #endif
806 
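/* Translate a host socket type value, including the SOCK_CLOEXEC and
 * SOCK_NONBLOCK modifier bits, into the target's encoding.
 */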
807 static inline int host_to_target_sock_type(int host_type)
808 {
809     int target_type;
810 
811     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
812     case SOCK_DGRAM:
813         target_type = TARGET_SOCK_DGRAM;
814         break;
815     case SOCK_STREAM:
816         target_type = TARGET_SOCK_STREAM;
817         break;
818     default:
819         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
820         break;
821     }
822 
823 #if defined(SOCK_CLOEXEC)
824     if (host_type & SOCK_CLOEXEC) {
825         target_type |= TARGET_SOCK_CLOEXEC;
826     }
827 #endif
828 
829 #if defined(SOCK_NONBLOCK)
830     if (host_type & SOCK_NONBLOCK) {
831         target_type |= TARGET_SOCK_NONBLOCK;
832     }
833 #endif
834 
835     return target_type;
836 }
837 
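/* target_brk is the guest's current program break; initial_target_brk is the
 * break established at load time, below which do_brk() refuses to shrink.
 */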
838 static abi_ulong target_brk, initial_target_brk;
839 
840 void target_set_brk(abi_ulong new_brk)
841 {
842     target_brk = TARGET_PAGE_ALIGN(new_brk);
843     initial_target_brk = target_brk;
844 }
845 
846 /* do_brk() must return target values and target errnos. */
847 abi_long do_brk(abi_ulong brk_val)
848 {
849     abi_long mapped_addr;
850     abi_ulong new_brk;
851     abi_ulong old_brk;
852 
853     /* brk pointers are always untagged */
854 
855     /* do not allow the heap to shrink below the initial brk value */
856     if (brk_val < initial_target_brk) {
857         return target_brk;
858     }
859 
860     new_brk = TARGET_PAGE_ALIGN(brk_val);
861     old_brk = TARGET_PAGE_ALIGN(target_brk);
862 
863     /* new and old target_brk might be on the same page */
864     if (new_brk == old_brk) {
865         target_brk = brk_val;
866         return target_brk;
867     }
868 
869     /* Release heap if necessary */
870     if (new_brk < old_brk) {
871         target_munmap(new_brk, old_brk - new_brk);
872 
873         target_brk = brk_val;
874         return target_brk;
875     }
876 
877     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
878                               PROT_READ | PROT_WRITE,
879                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
880                               -1, 0);
881 
882     if (mapped_addr == old_brk) {
883         target_brk = brk_val;
884         return target_brk;
885     }
886 
887 #if defined(TARGET_ALPHA)
888     /* We (partially) emulate OSF/1 on Alpha, which requires we
889        return a proper errno, not an unchanged brk value.  */
890     return -TARGET_ENOMEM;
891 #endif
892     /* For everything else, return the previous break. */
893     return target_brk;
894 }
895 
896 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
897     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
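/* Unpack a guest fd_set, stored as an array of abi_ulong bit words, into a
 * host fd_set.
 */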
898 static inline abi_long copy_from_user_fdset(fd_set *fds,
899                                             abi_ulong target_fds_addr,
900                                             int n)
901 {
902     int i, nw, j, k;
903     abi_ulong b, *target_fds;
904 
905     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
906     if (!(target_fds = lock_user(VERIFY_READ,
907                                  target_fds_addr,
908                                  sizeof(abi_ulong) * nw,
909                                  1)))
910         return -TARGET_EFAULT;
911 
912     FD_ZERO(fds);
913     k = 0;
914     for (i = 0; i < nw; i++) {
915         /* grab the abi_ulong */
916         __get_user(b, &target_fds[i]);
917         for (j = 0; j < TARGET_ABI_BITS; j++) {
918             /* check the bit inside the abi_ulong */
919             if ((b >> j) & 1)
920                 FD_SET(k, fds);
921             k++;
922         }
923     }
924 
925     unlock_user(target_fds, target_fds_addr, 0);
926 
927     return 0;
928 }
929 
930 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
931                                                  abi_ulong target_fds_addr,
932                                                  int n)
933 {
934     if (target_fds_addr) {
935         if (copy_from_user_fdset(fds, target_fds_addr, n))
936             return -TARGET_EFAULT;
937         *fds_ptr = fds;
938     } else {
939         *fds_ptr = NULL;
940     }
941     return 0;
942 }
943 
944 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
945                                           const fd_set *fds,
946                                           int n)
947 {
948     int i, nw, j, k;
949     abi_long v;
950     abi_ulong *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_WRITE,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  0)))
957         return -TARGET_EFAULT;
958 
959     k = 0;
960     for (i = 0; i < nw; i++) {
961         v = 0;
962         for (j = 0; j < TARGET_ABI_BITS; j++) {
963             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
964             k++;
965         }
966         __put_user(v, &target_fds[i]);
967     }
968 
969     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
970 
971     return 0;
972 }
973 #endif
974 
975 #if defined(__alpha__)
976 #define HOST_HZ 1024
977 #else
978 #define HOST_HZ 100
979 #endif
980 
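/* Rescale a clock_t tick count from the host's HOST_HZ to the target's
 * TARGET_HZ.
 */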
981 static inline abi_long host_to_target_clock_t(long ticks)
982 {
983 #if HOST_HZ == TARGET_HZ
984     return ticks;
985 #else
986     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
987 #endif
988 }
989 
990 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
991                                              const struct rusage *rusage)
992 {
993     struct target_rusage *target_rusage;
994 
995     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
996         return -TARGET_EFAULT;
997     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
998     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
999     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1000     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1001     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1002     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1003     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1004     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1005     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1006     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1007     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1008     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1009     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1010     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1011     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1012     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1013     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1014     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1015     unlock_user_struct(target_rusage, target_addr, 1);
1016 
1017     return 0;
1018 }
1019 
1020 #ifdef TARGET_NR_setrlimit
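/* Convert a target rlimit value to the host rlim_t, mapping the target's
 * RLIM_INFINITY (and any value that does not fit in rlim_t) to the host's
 * RLIM_INFINITY.
 */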
1021 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1022 {
1023     abi_ulong target_rlim_swap;
1024     rlim_t result;
1025 
1026     target_rlim_swap = tswapal(target_rlim);
1027     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1028         return RLIM_INFINITY;
1029 
1030     result = target_rlim_swap;
1031     if (target_rlim_swap != (rlim_t)result)
1032         return RLIM_INFINITY;
1033 
1034     return result;
1035 }
1036 #endif
1037 
1038 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1039 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1040 {
1041     abi_ulong target_rlim_swap;
1042     abi_ulong result;
1043 
1044     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1045         target_rlim_swap = TARGET_RLIM_INFINITY;
1046     else
1047         target_rlim_swap = rlim;
1048     result = tswapal(target_rlim_swap);
1049 
1050     return result;
1051 }
1052 #endif
1053 
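/* Map a target RLIMIT_* resource number to the host's numbering; unknown
 * values are passed through unchanged.
 */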
1054 static inline int target_to_host_resource(int code)
1055 {
1056     switch (code) {
1057     case TARGET_RLIMIT_AS:
1058         return RLIMIT_AS;
1059     case TARGET_RLIMIT_CORE:
1060         return RLIMIT_CORE;
1061     case TARGET_RLIMIT_CPU:
1062         return RLIMIT_CPU;
1063     case TARGET_RLIMIT_DATA:
1064         return RLIMIT_DATA;
1065     case TARGET_RLIMIT_FSIZE:
1066         return RLIMIT_FSIZE;
1067     case TARGET_RLIMIT_LOCKS:
1068         return RLIMIT_LOCKS;
1069     case TARGET_RLIMIT_MEMLOCK:
1070         return RLIMIT_MEMLOCK;
1071     case TARGET_RLIMIT_MSGQUEUE:
1072         return RLIMIT_MSGQUEUE;
1073     case TARGET_RLIMIT_NICE:
1074         return RLIMIT_NICE;
1075     case TARGET_RLIMIT_NOFILE:
1076         return RLIMIT_NOFILE;
1077     case TARGET_RLIMIT_NPROC:
1078         return RLIMIT_NPROC;
1079     case TARGET_RLIMIT_RSS:
1080         return RLIMIT_RSS;
1081     case TARGET_RLIMIT_RTPRIO:
1082         return RLIMIT_RTPRIO;
1083 #ifdef RLIMIT_RTTIME
1084     case TARGET_RLIMIT_RTTIME:
1085         return RLIMIT_RTTIME;
1086 #endif
1087     case TARGET_RLIMIT_SIGPENDING:
1088         return RLIMIT_SIGPENDING;
1089     case TARGET_RLIMIT_STACK:
1090         return RLIMIT_STACK;
1091     default:
1092         return code;
1093     }
1094 }
1095 
1096 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1097                                               abi_ulong target_tv_addr)
1098 {
1099     struct target_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __get_user(tv->tv_sec, &target_tv->tv_sec);
1106     __get_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 0);
1109 
1110     return 0;
1111 }
1112 
1113 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1114                                             const struct timeval *tv)
1115 {
1116     struct target_timeval *target_tv;
1117 
1118     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1119         return -TARGET_EFAULT;
1120     }
1121 
1122     __put_user(tv->tv_sec, &target_tv->tv_sec);
1123     __put_user(tv->tv_usec, &target_tv->tv_usec);
1124 
1125     unlock_user_struct(target_tv, target_tv_addr, 1);
1126 
1127     return 0;
1128 }
1129 
1130 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1131 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1132                                                 abi_ulong target_tv_addr)
1133 {
1134     struct target__kernel_sock_timeval *target_tv;
1135 
1136     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1137         return -TARGET_EFAULT;
1138     }
1139 
1140     __get_user(tv->tv_sec, &target_tv->tv_sec);
1141     __get_user(tv->tv_usec, &target_tv->tv_usec);
1142 
1143     unlock_user_struct(target_tv, target_tv_addr, 0);
1144 
1145     return 0;
1146 }
1147 #endif
1148 
1149 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1150                                               const struct timeval *tv)
1151 {
1152     struct target__kernel_sock_timeval *target_tv;
1153 
1154     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1155         return -TARGET_EFAULT;
1156     }
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 #if defined(TARGET_NR_futex) || \
1167     defined(TARGET_NR_rt_sigtimedwait) || \
1168     defined(TARGET_NR_pselect6) || \
1169     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1170     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1171     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1172     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1173     defined(TARGET_NR_timer_settime) || \
1174     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1175 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1176                                                abi_ulong target_addr)
1177 {
1178     struct target_timespec *target_ts;
1179 
1180     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1181         return -TARGET_EFAULT;
1182     }
1183     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1184     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1185     unlock_user_struct(target_ts, target_addr, 0);
1186     return 0;
1187 }
1188 #endif
1189 
1190 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1191     defined(TARGET_NR_timer_settime64) || \
1192     defined(TARGET_NR_mq_timedsend_time64) || \
1193     defined(TARGET_NR_mq_timedreceive_time64) || \
1194     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1195     defined(TARGET_NR_clock_nanosleep_time64) || \
1196     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1197     defined(TARGET_NR_utimensat) || \
1198     defined(TARGET_NR_utimensat_time64) || \
1199     defined(TARGET_NR_semtimedop_time64) || \
1200     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1201 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1202                                                  abi_ulong target_addr)
1203 {
1204     struct target__kernel_timespec *target_ts;
1205 
1206     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1207         return -TARGET_EFAULT;
1208     }
1209     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1210     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1211     /* in 32bit mode, this drops the padding */
1212     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1213     unlock_user_struct(target_ts, target_addr, 0);
1214     return 0;
1215 }
1216 #endif
1217 
1218 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1219                                                struct timespec *host_ts)
1220 {
1221     struct target_timespec *target_ts;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1227     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1228     unlock_user_struct(target_ts, target_addr, 1);
1229     return 0;
1230 }
1231 
1232 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1233                                                  struct timespec *host_ts)
1234 {
1235     struct target__kernel_timespec *target_ts;
1236 
1237     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1238         return -TARGET_EFAULT;
1239     }
1240     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1241     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1242     unlock_user_struct(target_ts, target_addr, 1);
1243     return 0;
1244 }
1245 
1246 #if defined(TARGET_NR_gettimeofday)
1247 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1248                                              struct timezone *tz)
1249 {
1250     struct target_timezone *target_tz;
1251 
1252     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1253         return -TARGET_EFAULT;
1254     }
1255 
1256     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1257     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1258 
1259     unlock_user_struct(target_tz, target_tz_addr, 1);
1260 
1261     return 0;
1262 }
1263 #endif
1264 
1265 #if defined(TARGET_NR_settimeofday)
1266 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1267                                                abi_ulong target_tz_addr)
1268 {
1269     struct target_timezone *target_tz;
1270 
1271     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1272         return -TARGET_EFAULT;
1273     }
1274 
1275     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1276     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1277 
1278     unlock_user_struct(target_tz, target_tz_addr, 0);
1279 
1280     return 0;
1281 }
1282 #endif
1283 
1284 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1285 #include <mqueue.h>
1286 
1287 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1288                                               abi_ulong target_mq_attr_addr)
1289 {
1290     struct target_mq_attr *target_mq_attr;
1291 
1292     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1293                           target_mq_attr_addr, 1))
1294         return -TARGET_EFAULT;
1295 
1296     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1297     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1298     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1299     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1300 
1301     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1302 
1303     return 0;
1304 }
1305 
1306 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1307                                             const struct mq_attr *attr)
1308 {
1309     struct target_mq_attr *target_mq_attr;
1310 
1311     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1312                           target_mq_attr_addr, 0))
1313         return -TARGET_EFAULT;
1314 
1315     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1316     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1317     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1318     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1319 
1320     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1321 
1322     return 0;
1323 }
1324 #endif
1325 
1326 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1327 /* do_select() must return target values and target errnos. */
1328 static abi_long do_select(int n,
1329                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1330                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1331 {
1332     fd_set rfds, wfds, efds;
1333     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1334     struct timeval tv;
1335     struct timespec ts, *ts_ptr;
1336     abi_long ret;
1337 
1338     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1339     if (ret) {
1340         return ret;
1341     }
1342     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1347     if (ret) {
1348         return ret;
1349     }
1350 
1351     if (target_tv_addr) {
1352         if (copy_from_user_timeval(&tv, target_tv_addr))
1353             return -TARGET_EFAULT;
1354         ts.tv_sec = tv.tv_sec;
1355         ts.tv_nsec = tv.tv_usec * 1000;
1356         ts_ptr = &ts;
1357     } else {
1358         ts_ptr = NULL;
1359     }
1360 
1361     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1362                                   ts_ptr, NULL));
1363 
1364     if (!is_error(ret)) {
1365         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1366             return -TARGET_EFAULT;
1367         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1368             return -TARGET_EFAULT;
1369         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1370             return -TARGET_EFAULT;
1371 
1372         if (target_tv_addr) {
1373             tv.tv_sec = ts.tv_sec;
1374             tv.tv_usec = ts.tv_nsec / 1000;
1375             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1376                 return -TARGET_EFAULT;
1377             }
1378         }
1379     }
1380 
1381     return ret;
1382 }
1383 
1384 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1385 static abi_long do_old_select(abi_ulong arg1)
1386 {
1387     struct target_sel_arg_struct *sel;
1388     abi_ulong inp, outp, exp, tvp;
1389     long nsel;
1390 
1391     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1392         return -TARGET_EFAULT;
1393     }
1394 
1395     nsel = tswapal(sel->n);
1396     inp = tswapal(sel->inp);
1397     outp = tswapal(sel->outp);
1398     exp = tswapal(sel->exp);
1399     tvp = tswapal(sel->tvp);
1400 
1401     unlock_user_struct(sel, arg1, 0);
1402 
1403     return do_select(nsel, inp, outp, exp, tvp);
1404 }
1405 #endif
1406 #endif
1407 
1408 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1409 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1410                             abi_long arg4, abi_long arg5, abi_long arg6,
1411                             bool time64)
1412 {
1413     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1414     fd_set rfds, wfds, efds;
1415     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1416     struct timespec ts, *ts_ptr;
1417     abi_long ret;
1418 
1419     /*
1420      * The 6th arg is actually two args smashed together,
1421      * so we cannot use the C library.
1422      */
1423     struct {
1424         sigset_t *set;
1425         size_t size;
1426     } sig, *sig_ptr;
1427 
1428     abi_ulong arg_sigset, arg_sigsize, *arg7;
1429 
1430     n = arg1;
1431     rfd_addr = arg2;
1432     wfd_addr = arg3;
1433     efd_addr = arg4;
1434     ts_addr = arg5;
1435 
1436     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1437     if (ret) {
1438         return ret;
1439     }
1440     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1445     if (ret) {
1446         return ret;
1447     }
1448 
1449     /*
1450      * This takes a timespec, and not a timeval, so we cannot
1451      * use the do_select() helper ...
1452      */
1453     if (ts_addr) {
1454         if (time64) {
1455             if (target_to_host_timespec64(&ts, ts_addr)) {
1456                 return -TARGET_EFAULT;
1457             }
1458         } else {
1459             if (target_to_host_timespec(&ts, ts_addr)) {
1460                 return -TARGET_EFAULT;
1461             }
1462         }
1463         ts_ptr = &ts;
1464     } else {
1465         ts_ptr = NULL;
1466     }
1467 
1468     /* Extract the two packed args for the sigset */
1469     sig_ptr = NULL;
1470     if (arg6) {
1471         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1472         if (!arg7) {
1473             return -TARGET_EFAULT;
1474         }
1475         arg_sigset = tswapal(arg7[0]);
1476         arg_sigsize = tswapal(arg7[1]);
1477         unlock_user(arg7, arg6, 0);
1478 
1479         if (arg_sigset) {
1480             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1481             if (ret != 0) {
1482                 return ret;
1483             }
1484             sig_ptr = &sig;
1485             sig.size = SIGSET_T_SIZE;
1486         }
1487     }
1488 
1489     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1490                                   ts_ptr, sig_ptr));
1491 
1492     if (sig_ptr) {
1493         finish_sigsuspend_mask(ret);
1494     }
1495 
1496     if (!is_error(ret)) {
1497         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1501             return -TARGET_EFAULT;
1502         }
1503         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1504             return -TARGET_EFAULT;
1505         }
1506         if (time64) {
1507             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1508                 return -TARGET_EFAULT;
1509             }
1510         } else {
1511             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1512                 return -TARGET_EFAULT;
1513             }
1514         }
1515     }
1516     return ret;
1517 }
1518 #endif
1519 
1520 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1521     defined(TARGET_NR_ppoll_time64)
1522 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1523                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1524 {
1525     struct target_pollfd *target_pfd;
1526     unsigned int nfds = arg2;
1527     struct pollfd *pfd;
1528     unsigned int i;
1529     abi_long ret;
1530 
1531     pfd = NULL;
1532     target_pfd = NULL;
1533     if (nfds) {
1534         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1535             return -TARGET_EINVAL;
1536         }
1537         target_pfd = lock_user(VERIFY_WRITE, arg1,
1538                                sizeof(struct target_pollfd) * nfds, 1);
1539         if (!target_pfd) {
1540             return -TARGET_EFAULT;
1541         }
1542 
1543         pfd = alloca(sizeof(struct pollfd) * nfds);
1544         for (i = 0; i < nfds; i++) {
1545             pfd[i].fd = tswap32(target_pfd[i].fd);
1546             pfd[i].events = tswap16(target_pfd[i].events);
1547         }
1548     }
1549     if (ppoll) {
1550         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1551         sigset_t *set = NULL;
1552 
1553         if (arg3) {
1554             if (time64) {
1555                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1556                     unlock_user(target_pfd, arg1, 0);
1557                     return -TARGET_EFAULT;
1558                 }
1559             } else {
1560                 if (target_to_host_timespec(timeout_ts, arg3)) {
1561                     unlock_user(target_pfd, arg1, 0);
1562                     return -TARGET_EFAULT;
1563                 }
1564             }
1565         } else {
1566             timeout_ts = NULL;
1567         }
1568 
1569         if (arg4) {
1570             ret = process_sigsuspend_mask(&set, arg4, arg5);
1571             if (ret != 0) {
1572                 unlock_user(target_pfd, arg1, 0);
1573                 return ret;
1574             }
1575         }
1576 
1577         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1578                                    set, SIGSET_T_SIZE));
1579 
1580         if (set) {
1581             finish_sigsuspend_mask(ret);
1582         }
1583         if (!is_error(ret) && arg3) {
1584             if (time64) {
1585                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1586                     return -TARGET_EFAULT;
1587                 }
1588             } else {
1589                 if (host_to_target_timespec(arg3, timeout_ts)) {
1590                     return -TARGET_EFAULT;
1591                 }
1592             }
1593         }
1594     } else {
1595         struct timespec ts, *pts;
1596 
1597         if (arg3 >= 0) {
1598             /* Convert the millisecond timeout into seconds and nanoseconds. */
1599             ts.tv_sec = arg3 / 1000;
1600             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1601             pts = &ts;
1602         } else {
1603             /* A negative poll() timeout means "wait indefinitely". */
1604             pts = NULL;
1605         }
1606         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1607     }
1608 
1609     if (!is_error(ret)) {
1610         for (i = 0; i < nfds; i++) {
1611             target_pfd[i].revents = tswap16(pfd[i].revents);
1612         }
1613     }
1614     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1615     return ret;
1616 }
1617 #endif
1618 
1619 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1620                         int flags, int is_pipe2)
1621 {
1622     int host_pipe[2];
1623     abi_long ret;
1624     ret = pipe2(host_pipe, flags);
1625 
1626     if (is_error(ret))
1627         return get_errno(ret);
1628 
1629     /* Several targets have special calling conventions for the original
1630        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
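    /*
     * On the targets below the two descriptors come back in CPU
     * registers (the second one via a target-specific result register
     * set here) instead of being stored through the guest pointer.
     */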
1631     if (!is_pipe2) {
1632 #if defined(TARGET_ALPHA)
1633         cpu_env->ir[IR_A4] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_MIPS)
1636         cpu_env->active_tc.gpr[3] = host_pipe[1];
1637         return host_pipe[0];
1638 #elif defined(TARGET_SH4)
1639         cpu_env->gregs[1] = host_pipe[1];
1640         return host_pipe[0];
1641 #elif defined(TARGET_SPARC)
1642         cpu_env->regwptr[1] = host_pipe[1];
1643         return host_pipe[0];
1644 #endif
1645     }
1646 
1647     if (put_user_s32(host_pipe[0], pipedes)
1648         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1649         return -TARGET_EFAULT;
1650     return get_errno(ret);
1651 }
1652 
1653 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1654                                                abi_ulong target_addr,
1655                                                socklen_t len)
1656 {
1657     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1658     sa_family_t sa_family;
1659     struct target_sockaddr *target_saddr;
1660 
1661     if (fd_trans_target_to_host_addr(fd)) {
1662         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1663     }
1664 
1665     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1666     if (!target_saddr)
1667         return -TARGET_EFAULT;
1668 
1669     sa_family = tswap16(target_saddr->sa_family);
1670 
1671     /* The caller might send an incomplete sun_path; sun_path
1672      * must be terminated by \0 (see the manual page), but
1673      * unfortunately it is quite common to specify the sockaddr_un
1674      * length as "strlen(x->sun_path)" when it should be
1675      * "strlen(...) + 1". Fix that up here if needed; the Linux
1676      * kernel applies a similar fixup.
1677      */
1678 
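    /*
     * Example: a guest that passes a sockaddr length which stops right
     * at the last path byte (omitting the trailing NUL) gets len bumped
     * by one below, provided the terminating byte really is present in
     * memory just past the stated length.
     */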
1679     if (sa_family == AF_UNIX) {
1680         if (len < unix_maxlen && len > 0) {
1681             char *cp = (char *)target_saddr;
1682 
1683             if (cp[len - 1] && !cp[len])
1684                 len++;
1685         }
1686         if (len > unix_maxlen)
1687             len = unix_maxlen;
1688     }
1689 
1690     memcpy(addr, target_saddr, len);
1691     addr->sa_family = sa_family;
1692     if (sa_family == AF_NETLINK) {
1693         struct sockaddr_nl *nladdr;
1694 
1695         nladdr = (struct sockaddr_nl *)addr;
1696         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1697         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1698     } else if (sa_family == AF_PACKET) {
1699         struct target_sockaddr_ll *lladdr;
1700 
1701         lladdr = (struct target_sockaddr_ll *)addr;
1702         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1703         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1704     } else if (sa_family == AF_INET6) {
1705         struct sockaddr_in6 *in6addr;
1706 
1707         in6addr = (struct sockaddr_in6 *)addr;
1708         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1709     }
1710     unlock_user(target_saddr, target_addr, 0);
1711 
1712     return 0;
1713 }
1714 
1715 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1716                                                struct sockaddr *addr,
1717                                                socklen_t len)
1718 {
1719     struct target_sockaddr *target_saddr;
1720 
1721     if (len == 0) {
1722         return 0;
1723     }
1724     assert(addr);
1725 
1726     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1727     if (!target_saddr)
1728         return -TARGET_EFAULT;
1729     memcpy(target_saddr, addr, len);
1730     if (len >= offsetof(struct target_sockaddr, sa_family) +
1731         sizeof(target_saddr->sa_family)) {
1732         target_saddr->sa_family = tswap16(addr->sa_family);
1733     }
1734     if (addr->sa_family == AF_NETLINK &&
1735         len >= sizeof(struct target_sockaddr_nl)) {
1736         struct target_sockaddr_nl *target_nl =
1737                (struct target_sockaddr_nl *)target_saddr;
1738         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1739         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1740     } else if (addr->sa_family == AF_PACKET) {
1741         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1742         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1743         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1744     } else if (addr->sa_family == AF_INET6 &&
1745                len >= sizeof(struct target_sockaddr_in6)) {
1746         struct target_sockaddr_in6 *target_in6 =
1747                (struct target_sockaddr_in6 *)target_saddr;
1748         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1749     }
1750     unlock_user(target_saddr, target_addr, len);
1751 
1752     return 0;
1753 }
1754 
1755 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1756                                            struct target_msghdr *target_msgh)
1757 {
1758     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1759     abi_long msg_controllen;
1760     abi_ulong target_cmsg_addr;
1761     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1762     socklen_t space = 0;
1763 
1764     msg_controllen = tswapal(target_msgh->msg_controllen);
1765     if (msg_controllen < sizeof (struct target_cmsghdr))
1766         goto the_end;
1767     target_cmsg_addr = tswapal(target_msgh->msg_control);
1768     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1769     target_cmsg_start = target_cmsg;
1770     if (!target_cmsg)
1771         return -TARGET_EFAULT;
1772 
1773     while (cmsg && target_cmsg) {
1774         void *data = CMSG_DATA(cmsg);
1775         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1776 
1777         int len = tswapal(target_cmsg->cmsg_len)
1778             - sizeof(struct target_cmsghdr);
1779 
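        /*
         * CMSG_SPACE() counts the header, the payload and any alignment
         * padding, so this tracks how much of the host control buffer
         * each converted message actually consumes.
         */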
1780         space += CMSG_SPACE(len);
1781         if (space > msgh->msg_controllen) {
1782             space -= CMSG_SPACE(len);
1783             /* This is a QEMU bug, since we allocated the payload
1784              * area ourselves (unlike overflow in host-to-target
1785              * conversion, which is just the guest giving us a buffer
1786              * that's too small). It can't happen for the payload types
1787              * we currently support; if it becomes an issue in future
1788              * we would need to improve our allocation strategy to
1789              * something more intelligent than "twice the size of the
1790              * target buffer we're reading from".
1791              */
1792             qemu_log_mask(LOG_UNIMP,
1793                           "Unsupported ancillary data %d/%d: "
1794                           "unhandled msg size\n",
1795                           tswap32(target_cmsg->cmsg_level),
1796                           tswap32(target_cmsg->cmsg_type));
1797             break;
1798         }
1799 
1800         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1801             cmsg->cmsg_level = SOL_SOCKET;
1802         } else {
1803             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1804         }
1805         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1806         cmsg->cmsg_len = CMSG_LEN(len);
1807 
1808         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1809             int *fd = (int *)data;
1810             int *target_fd = (int *)target_data;
1811             int i, numfds = len / sizeof(int);
1812 
1813             for (i = 0; i < numfds; i++) {
1814                 __get_user(fd[i], target_fd + i);
1815             }
1816         } else if (cmsg->cmsg_level == SOL_SOCKET
1817                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1818             struct ucred *cred = (struct ucred *)data;
1819             struct target_ucred *target_cred =
1820                 (struct target_ucred *)target_data;
1821 
1822             __get_user(cred->pid, &target_cred->pid);
1823             __get_user(cred->uid, &target_cred->uid);
1824             __get_user(cred->gid, &target_cred->gid);
1825         } else if (cmsg->cmsg_level == SOL_ALG) {
1826             uint32_t *dst = (uint32_t *)data;
1827 
1828             memcpy(dst, target_data, len);
1829             /* fix endianness of first 32-bit word */
1830             if (len >= sizeof(uint32_t)) {
1831                 *dst = tswap32(*dst);
1832             }
1833         } else {
1834             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1835                           cmsg->cmsg_level, cmsg->cmsg_type);
1836             memcpy(data, target_data, len);
1837         }
1838 
1839         cmsg = CMSG_NXTHDR(msgh, cmsg);
1840         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1841                                          target_cmsg_start);
1842     }
1843     unlock_user(target_cmsg, target_cmsg_addr, 0);
1844  the_end:
1845     msgh->msg_controllen = space;
1846     return 0;
1847 }
1848 
1849 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1850                                            struct msghdr *msgh)
1851 {
1852     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1853     abi_long msg_controllen;
1854     abi_ulong target_cmsg_addr;
1855     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1856     socklen_t space = 0;
1857 
1858     msg_controllen = tswapal(target_msgh->msg_controllen);
1859     if (msg_controllen < sizeof (struct target_cmsghdr))
1860         goto the_end;
1861     target_cmsg_addr = tswapal(target_msgh->msg_control);
1862     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1863     target_cmsg_start = target_cmsg;
1864     if (!target_cmsg)
1865         return -TARGET_EFAULT;
1866 
1867     while (cmsg && target_cmsg) {
1868         void *data = CMSG_DATA(cmsg);
1869         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1870 
1871         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1872         int tgt_len, tgt_space;
1873 
1874         /* We never copy a half-header but may copy half-data;
1875          * this is Linux's behaviour in put_cmsg(). Note that
1876          * truncation here is a guest problem (which we report
1877          * to the guest via the CTRUNC bit), unlike truncation
1878          * in target_to_host_cmsg, which is a QEMU bug.
1879          */
1880         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1881             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1882             break;
1883         }
1884 
1885         if (cmsg->cmsg_level == SOL_SOCKET) {
1886             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1887         } else {
1888             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1889         }
1890         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1891 
1892         /* Payload types which need a different size of payload on
1893          * the target must adjust tgt_len here.
1894          */
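        /*
         * So far only SO_TIMESTAMP below needs this: the host's struct
         * timeval and the guest's struct target_timeval can differ in
         * size (e.g. a 64-bit host serving a 32-bit guest).
         */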
1895         tgt_len = len;
1896         switch (cmsg->cmsg_level) {
1897         case SOL_SOCKET:
1898             switch (cmsg->cmsg_type) {
1899             case SO_TIMESTAMP:
1900                 tgt_len = sizeof(struct target_timeval);
1901                 break;
1902             default:
1903                 break;
1904             }
1905             break;
1906         default:
1907             break;
1908         }
1909 
1910         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1911             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1912             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1913         }
1914 
1915         /* We must now copy-and-convert len bytes of payload
1916          * into tgt_len bytes of destination space. Bear in mind
1917          * that in both source and destination we may be dealing
1918          * with a truncated value!
1919          */
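        /*
         * For SCM_RIGHTS, for instance, only as many host file
         * descriptors as fit into the (possibly truncated) tgt_len are
         * copied out; the guest learns about the loss via MSG_CTRUNC.
         */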
1920         switch (cmsg->cmsg_level) {
1921         case SOL_SOCKET:
1922             switch (cmsg->cmsg_type) {
1923             case SCM_RIGHTS:
1924             {
1925                 int *fd = (int *)data;
1926                 int *target_fd = (int *)target_data;
1927                 int i, numfds = tgt_len / sizeof(int);
1928 
1929                 for (i = 0; i < numfds; i++) {
1930                     __put_user(fd[i], target_fd + i);
1931                 }
1932                 break;
1933             }
1934             case SO_TIMESTAMP:
1935             {
1936                 struct timeval *tv = (struct timeval *)data;
1937                 struct target_timeval *target_tv =
1938                     (struct target_timeval *)target_data;
1939 
1940                 if (len != sizeof(struct timeval) ||
1941                     tgt_len != sizeof(struct target_timeval)) {
1942                     goto unimplemented;
1943                 }
1944 
1945                 /* copy struct timeval to target */
1946                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1947                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1948                 break;
1949             }
1950             case SCM_CREDENTIALS:
1951             {
1952                 struct ucred *cred = (struct ucred *)data;
1953                 struct target_ucred *target_cred =
1954                     (struct target_ucred *)target_data;
1955 
1956                 __put_user(cred->pid, &target_cred->pid);
1957                 __put_user(cred->uid, &target_cred->uid);
1958                 __put_user(cred->gid, &target_cred->gid);
1959                 break;
1960             }
1961             default:
1962                 goto unimplemented;
1963             }
1964             break;
1965 
1966         case SOL_IP:
1967             switch (cmsg->cmsg_type) {
1968             case IP_TTL:
1969             {
1970                 uint32_t *v = (uint32_t *)data;
1971                 uint32_t *t_int = (uint32_t *)target_data;
1972 
1973                 if (len != sizeof(uint32_t) ||
1974                     tgt_len != sizeof(uint32_t)) {
1975                     goto unimplemented;
1976                 }
1977                 __put_user(*v, t_int);
1978                 break;
1979             }
1980             case IP_RECVERR:
1981             {
1982                 struct errhdr_t {
1983                    struct sock_extended_err ee;
1984                    struct sockaddr_in offender;
1985                 };
1986                 struct errhdr_t *errh = (struct errhdr_t *)data;
1987                 struct errhdr_t *target_errh =
1988                     (struct errhdr_t *)target_data;
1989 
1990                 if (len != sizeof(struct errhdr_t) ||
1991                     tgt_len != sizeof(struct errhdr_t)) {
1992                     goto unimplemented;
1993                 }
1994                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1995                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1996                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1997                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1998                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1999                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2000                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2001                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2002                     (void *) &errh->offender, sizeof(errh->offender));
2003                 break;
2004             }
2005             case IP_PKTINFO:
2006             {
2007                 struct in_pktinfo *pkti = data;
2008                 struct target_in_pktinfo *target_pi = target_data;
2009 
2010                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2011                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2012                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2013                 break;
2014             }
2015             default:
2016                 goto unimplemented;
2017             }
2018             break;
2019 
2020         case SOL_IPV6:
2021             switch (cmsg->cmsg_type) {
2022             case IPV6_HOPLIMIT:
2023             {
2024                 uint32_t *v = (uint32_t *)data;
2025                 uint32_t *t_int = (uint32_t *)target_data;
2026 
2027                 if (len != sizeof(uint32_t) ||
2028                     tgt_len != sizeof(uint32_t)) {
2029                     goto unimplemented;
2030                 }
2031                 __put_user(*v, t_int);
2032                 break;
2033             }
2034             case IPV6_RECVERR:
2035             {
2036                 struct errhdr6_t {
2037                    struct sock_extended_err ee;
2038                    struct sockaddr_in6 offender;
2039                 };
2040                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2041                 struct errhdr6_t *target_errh =
2042                     (struct errhdr6_t *)target_data;
2043 
2044                 if (len != sizeof(struct errhdr6_t) ||
2045                     tgt_len != sizeof(struct errhdr6_t)) {
2046                     goto unimplemented;
2047                 }
2048                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2049                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2050                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2051                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2052                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2053                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2054                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2055                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2056                     (void *) &errh->offender, sizeof(errh->offender));
2057                 break;
2058             }
2059             default:
2060                 goto unimplemented;
2061             }
2062             break;
2063 
2064         default:
2065         unimplemented:
2066             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2067                           cmsg->cmsg_level, cmsg->cmsg_type);
2068             memcpy(target_data, data, MIN(len, tgt_len));
2069             if (tgt_len > len) {
2070                 memset(target_data + len, 0, tgt_len - len);
2071             }
2072         }
2073 
2074         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2075         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2076         if (msg_controllen < tgt_space) {
2077             tgt_space = msg_controllen;
2078         }
2079         msg_controllen -= tgt_space;
2080         space += tgt_space;
2081         cmsg = CMSG_NXTHDR(msgh, cmsg);
2082         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2083                                          target_cmsg_start);
2084     }
2085     unlock_user(target_cmsg, target_cmsg_addr, space);
2086  the_end:
2087     target_msgh->msg_controllen = tswapal(space);
2088     return 0;
2089 }
2090 
2091 /* do_setsockopt() Must return target values and target errnos. */
2092 static abi_long do_setsockopt(int sockfd, int level, int optname,
2093                               abi_ulong optval_addr, socklen_t optlen)
2094 {
2095     abi_long ret;
2096     int val;
2097 
2098     switch(level) {
2099     case SOL_TCP:
2100     case SOL_UDP:
2101         /* TCP and UDP options all take an 'int' value.  */
2102         if (optlen < sizeof(uint32_t))
2103             return -TARGET_EINVAL;
2104 
2105         if (get_user_u32(val, optval_addr))
2106             return -TARGET_EFAULT;
2107         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2108         break;
2109     case SOL_IP:
2110         switch(optname) {
2111         case IP_TOS:
2112         case IP_TTL:
2113         case IP_HDRINCL:
2114         case IP_ROUTER_ALERT:
2115         case IP_RECVOPTS:
2116         case IP_RETOPTS:
2117         case IP_PKTINFO:
2118         case IP_MTU_DISCOVER:
2119         case IP_RECVERR:
2120         case IP_RECVTTL:
2121         case IP_RECVTOS:
2122 #ifdef IP_FREEBIND
2123         case IP_FREEBIND:
2124 #endif
2125         case IP_MULTICAST_TTL:
2126         case IP_MULTICAST_LOOP:
2127             val = 0;
2128             if (optlen >= sizeof(uint32_t)) {
2129                 if (get_user_u32(val, optval_addr))
2130                     return -TARGET_EFAULT;
2131             } else if (optlen >= 1) {
2132                 if (get_user_u8(val, optval_addr))
2133                     return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2136             break;
2137         case IP_MULTICAST_IF:
2138         case IP_ADD_MEMBERSHIP:
2139         case IP_DROP_MEMBERSHIP:
2140         {
2141             struct ip_mreqn ip_mreq;
2142             struct target_ip_mreqn *target_smreqn;
2143             int min_size;
2144 
2145             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2146                               sizeof(struct target_ip_mreq));
2147 
2148             if (optname == IP_MULTICAST_IF) {
2149                 min_size = sizeof(struct in_addr);
2150             } else {
2151                 min_size = sizeof(struct target_ip_mreq);
2152             }
2153             if (optlen < min_size ||
2154                 optlen > sizeof (struct target_ip_mreqn)) {
2155                 return -TARGET_EINVAL;
2156             }
2157 
2158             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2159             if (!target_smreqn) {
2160                 return -TARGET_EFAULT;
2161             }
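            /*
             * The in_addr members are already in network byte order, so
             * they are copied through untouched; only the optional
             * imr_ifindex of ip_mreqn is a host-endian integer that
             * needs byte-swapping.
             */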
2162             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2163             if (optlen >= sizeof(struct target_ip_mreq)) {
2164                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2165                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2166                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2167                     optlen = sizeof(struct ip_mreqn);
2168                 }
2169             }
2170             unlock_user(target_smreqn, optval_addr, 0);
2171             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2172             break;
2173         }
2174         case IP_BLOCK_SOURCE:
2175         case IP_UNBLOCK_SOURCE:
2176         case IP_ADD_SOURCE_MEMBERSHIP:
2177         case IP_DROP_SOURCE_MEMBERSHIP:
2178         {
2179             struct ip_mreq_source *ip_mreq_source;
2180 
2181             if (optlen != sizeof (struct target_ip_mreq_source))
2182                 return -TARGET_EINVAL;
2183 
2184             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2185             if (!ip_mreq_source) {
2186                 return -TARGET_EFAULT;
2187             }
2188             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2189             unlock_user (ip_mreq_source, optval_addr, 0);
2190             break;
2191         }
2192         default:
2193             goto unimplemented;
2194         }
2195         break;
2196     case SOL_IPV6:
2197         switch (optname) {
2198         case IPV6_MTU_DISCOVER:
2199         case IPV6_MTU:
2200         case IPV6_V6ONLY:
2201         case IPV6_RECVPKTINFO:
2202         case IPV6_UNICAST_HOPS:
2203         case IPV6_MULTICAST_HOPS:
2204         case IPV6_MULTICAST_LOOP:
2205         case IPV6_RECVERR:
2206         case IPV6_RECVHOPLIMIT:
2207         case IPV6_2292HOPLIMIT:
2208         case IPV6_CHECKSUM:
2209         case IPV6_ADDRFORM:
2210         case IPV6_2292PKTINFO:
2211         case IPV6_RECVTCLASS:
2212         case IPV6_RECVRTHDR:
2213         case IPV6_2292RTHDR:
2214         case IPV6_RECVHOPOPTS:
2215         case IPV6_2292HOPOPTS:
2216         case IPV6_RECVDSTOPTS:
2217         case IPV6_2292DSTOPTS:
2218         case IPV6_TCLASS:
2219         case IPV6_ADDR_PREFERENCES:
2220 #ifdef IPV6_RECVPATHMTU
2221         case IPV6_RECVPATHMTU:
2222 #endif
2223 #ifdef IPV6_TRANSPARENT
2224         case IPV6_TRANSPARENT:
2225 #endif
2226 #ifdef IPV6_FREEBIND
2227         case IPV6_FREEBIND:
2228 #endif
2229 #ifdef IPV6_RECVORIGDSTADDR
2230         case IPV6_RECVORIGDSTADDR:
2231 #endif
2232             val = 0;
2233             if (optlen < sizeof(uint32_t)) {
2234                 return -TARGET_EINVAL;
2235             }
2236             if (get_user_u32(val, optval_addr)) {
2237                 return -TARGET_EFAULT;
2238             }
2239             ret = get_errno(setsockopt(sockfd, level, optname,
2240                                        &val, sizeof(val)));
2241             break;
2242         case IPV6_PKTINFO:
2243         {
2244             struct in6_pktinfo pki;
2245 
2246             if (optlen < sizeof(pki)) {
2247                 return -TARGET_EINVAL;
2248             }
2249 
2250             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2251                 return -TARGET_EFAULT;
2252             }
2253 
2254             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2255 
2256             ret = get_errno(setsockopt(sockfd, level, optname,
2257                                        &pki, sizeof(pki)));
2258             break;
2259         }
2260         case IPV6_ADD_MEMBERSHIP:
2261         case IPV6_DROP_MEMBERSHIP:
2262         {
2263             struct ipv6_mreq ipv6mreq;
2264 
2265             if (optlen < sizeof(ipv6mreq)) {
2266                 return -TARGET_EINVAL;
2267             }
2268 
2269             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2270                 return -TARGET_EFAULT;
2271             }
2272 
2273             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2274 
2275             ret = get_errno(setsockopt(sockfd, level, optname,
2276                                        &ipv6mreq, sizeof(ipv6mreq)));
2277             break;
2278         }
2279         default:
2280             goto unimplemented;
2281         }
2282         break;
2283     case SOL_ICMPV6:
2284         switch (optname) {
2285         case ICMPV6_FILTER:
2286         {
2287             struct icmp6_filter icmp6f;
2288 
2289             if (optlen > sizeof(icmp6f)) {
2290                 optlen = sizeof(icmp6f);
2291             }
2292 
2293             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2294                 return -TARGET_EFAULT;
2295             }
2296 
2297             for (val = 0; val < 8; val++) {
2298                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2299             }
2300 
2301             ret = get_errno(setsockopt(sockfd, level, optname,
2302                                        &icmp6f, optlen));
2303             break;
2304         }
2305         default:
2306             goto unimplemented;
2307         }
2308         break;
2309     case SOL_RAW:
2310         switch (optname) {
2311         case ICMP_FILTER:
2312         case IPV6_CHECKSUM:
2313             /* these options take a u32 value */
2314             if (optlen < sizeof(uint32_t)) {
2315                 return -TARGET_EINVAL;
2316             }
2317 
2318             if (get_user_u32(val, optval_addr)) {
2319                 return -TARGET_EFAULT;
2320             }
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        &val, sizeof(val)));
2323             break;
2324 
2325         default:
2326             goto unimplemented;
2327         }
2328         break;
2329 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2330     case SOL_ALG:
2331         switch (optname) {
2332         case ALG_SET_KEY:
2333         {
2334             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2335             if (!alg_key) {
2336                 return -TARGET_EFAULT;
2337             }
2338             ret = get_errno(setsockopt(sockfd, level, optname,
2339                                        alg_key, optlen));
2340             unlock_user(alg_key, optval_addr, optlen);
2341             break;
2342         }
2343         case ALG_SET_AEAD_AUTHSIZE:
2344         {
2345             ret = get_errno(setsockopt(sockfd, level, optname,
2346                                        NULL, optlen));
2347             break;
2348         }
2349         default:
2350             goto unimplemented;
2351         }
2352         break;
2353 #endif
2354     case TARGET_SOL_SOCKET:
2355         switch (optname) {
2356         case TARGET_SO_RCVTIMEO:
2357         case TARGET_SO_SNDTIMEO:
2358         {
2359                 struct timeval tv;
2360 
2361                 if (optlen != sizeof(struct target_timeval)) {
2362                     return -TARGET_EINVAL;
2363                 }
2364 
2365                 if (copy_from_user_timeval(&tv, optval_addr)) {
2366                     return -TARGET_EFAULT;
2367                 }
2368 
2369                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2370                                 optname == TARGET_SO_RCVTIMEO ?
2371                                     SO_RCVTIMEO : SO_SNDTIMEO,
2372                                 &tv, sizeof(tv)));
2373                 return ret;
2374         }
2375         case TARGET_SO_ATTACH_FILTER:
2376         {
2377                 struct target_sock_fprog *tfprog;
2378                 struct target_sock_filter *tfilter;
2379                 struct sock_fprog fprog;
2380                 struct sock_filter *filter;
2381                 int i;
2382 
2383                 if (optlen != sizeof(*tfprog)) {
2384                     return -TARGET_EINVAL;
2385                 }
2386                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2387                     return -TARGET_EFAULT;
2388                 }
2389                 if (!lock_user_struct(VERIFY_READ, tfilter,
2390                                       tswapal(tfprog->filter), 0)) {
2391                     unlock_user_struct(tfprog, optval_addr, 1);
2392                     return -TARGET_EFAULT;
2393                 }
2394 
2395                 fprog.len = tswap16(tfprog->len);
2396                 filter = g_try_new(struct sock_filter, fprog.len);
2397                 if (filter == NULL) {
2398                     unlock_user_struct(tfilter, tfprog->filter, 1);
2399                     unlock_user_struct(tfprog, optval_addr, 1);
2400                     return -TARGET_ENOMEM;
2401                 }
2402                 for (i = 0; i < fprog.len; i++) {
2403                     filter[i].code = tswap16(tfilter[i].code);
2404                     filter[i].jt = tfilter[i].jt;
2405                     filter[i].jf = tfilter[i].jf;
2406                     filter[i].k = tswap32(tfilter[i].k);
2407                 }
2408                 fprog.filter = filter;
2409 
2410                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2411                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2412                 g_free(filter);
2413 
2414                 unlock_user_struct(tfilter, tfprog->filter, 1);
2415                 unlock_user_struct(tfprog, optval_addr, 1);
2416                 return ret;
2417         }
2418         case TARGET_SO_BINDTODEVICE:
2419         {
2420                 char *dev_ifname, *addr_ifname;
2421 
2422                 if (optlen > IFNAMSIZ - 1) {
2423                     optlen = IFNAMSIZ - 1;
2424                 }
2425                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2426                 if (!dev_ifname) {
2427                     return -TARGET_EFAULT;
2428                 }
2429                 optname = SO_BINDTODEVICE;
2430                 addr_ifname = alloca(IFNAMSIZ);
2431                 memcpy(addr_ifname, dev_ifname, optlen);
2432                 addr_ifname[optlen] = 0;
2433                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2434                                            addr_ifname, optlen));
2435                 unlock_user(dev_ifname, optval_addr, 0);
2436                 return ret;
2437         }
2438         case TARGET_SO_LINGER:
2439         {
2440                 struct linger lg;
2441                 struct target_linger *tlg;
2442 
2443                 if (optlen != sizeof(struct target_linger)) {
2444                     return -TARGET_EINVAL;
2445                 }
2446                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2447                     return -TARGET_EFAULT;
2448                 }
2449                 __get_user(lg.l_onoff, &tlg->l_onoff);
2450                 __get_user(lg.l_linger, &tlg->l_linger);
2451                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2452                                 &lg, sizeof(lg)));
2453                 unlock_user_struct(tlg, optval_addr, 0);
2454                 return ret;
2455         }
2456             /* Options with 'int' argument.  */
2457         case TARGET_SO_DEBUG:
2458                 optname = SO_DEBUG;
2459                 break;
2460         case TARGET_SO_REUSEADDR:
2461                 optname = SO_REUSEADDR;
2462                 break;
2463 #ifdef SO_REUSEPORT
2464         case TARGET_SO_REUSEPORT:
2465                 optname = SO_REUSEPORT;
2466                 break;
2467 #endif
2468         case TARGET_SO_TYPE:
2469                 optname = SO_TYPE;
2470                 break;
2471         case TARGET_SO_ERROR:
2472                 optname = SO_ERROR;
2473                 break;
2474         case TARGET_SO_DONTROUTE:
2475                 optname = SO_DONTROUTE;
2476                 break;
2477         case TARGET_SO_BROADCAST:
2478                 optname = SO_BROADCAST;
2479                 break;
2480         case TARGET_SO_SNDBUF:
2481                 optname = SO_SNDBUF;
2482                 break;
2483         case TARGET_SO_SNDBUFFORCE:
2484                 optname = SO_SNDBUFFORCE;
2485                 break;
2486         case TARGET_SO_RCVBUF:
2487                 optname = SO_RCVBUF;
2488                 break;
2489         case TARGET_SO_RCVBUFFORCE:
2490                 optname = SO_RCVBUFFORCE;
2491                 break;
2492         case TARGET_SO_KEEPALIVE:
2493                 optname = SO_KEEPALIVE;
2494                 break;
2495         case TARGET_SO_OOBINLINE:
2496                 optname = SO_OOBINLINE;
2497                 break;
2498         case TARGET_SO_NO_CHECK:
2499                 optname = SO_NO_CHECK;
2500                 break;
2501         case TARGET_SO_PRIORITY:
2502                 optname = SO_PRIORITY;
2503                 break;
2504 #ifdef SO_BSDCOMPAT
2505         case TARGET_SO_BSDCOMPAT:
2506                 optname = SO_BSDCOMPAT;
2507                 break;
2508 #endif
2509         case TARGET_SO_PASSCRED:
2510                 optname = SO_PASSCRED;
2511                 break;
2512         case TARGET_SO_PASSSEC:
2513                 optname = SO_PASSSEC;
2514                 break;
2515         case TARGET_SO_TIMESTAMP:
2516                 optname = SO_TIMESTAMP;
2517                 break;
2518         case TARGET_SO_RCVLOWAT:
2519                 optname = SO_RCVLOWAT;
2520                 break;
2521         default:
2522             goto unimplemented;
2523         }
2524         if (optlen < sizeof(uint32_t))
2525             return -TARGET_EINVAL;
2526 
2527         if (get_user_u32(val, optval_addr))
2528             return -TARGET_EFAULT;
2529         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2530         break;
2531 #ifdef SOL_NETLINK
2532     case SOL_NETLINK:
2533         switch (optname) {
2534         case NETLINK_PKTINFO:
2535         case NETLINK_ADD_MEMBERSHIP:
2536         case NETLINK_DROP_MEMBERSHIP:
2537         case NETLINK_BROADCAST_ERROR:
2538         case NETLINK_NO_ENOBUFS:
2539 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2540         case NETLINK_LISTEN_ALL_NSID:
2541         case NETLINK_CAP_ACK:
2542 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2543 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2544         case NETLINK_EXT_ACK:
2545 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2546 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2547         case NETLINK_GET_STRICT_CHK:
2548 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2549             break;
2550         default:
2551             goto unimplemented;
2552         }
2553         val = 0;
2554         if (optlen < sizeof(uint32_t)) {
2555             return -TARGET_EINVAL;
2556         }
2557         if (get_user_u32(val, optval_addr)) {
2558             return -TARGET_EFAULT;
2559         }
2560         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2561                                    sizeof(val)));
2562         break;
2563 #endif /* SOL_NETLINK */
2564     default:
2565     unimplemented:
2566         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2567                       level, optname);
2568         ret = -TARGET_ENOPROTOOPT;
2569     }
2570     return ret;
2571 }
2572 
2573 /* do_getsockopt() Must return target values and target errnos. */
2574 static abi_long do_getsockopt(int sockfd, int level, int optname,
2575                               abi_ulong optval_addr, abi_ulong optlen)
2576 {
2577     abi_long ret;
2578     int len, val;
2579     socklen_t lv;
2580 
2581     switch(level) {
2582     case TARGET_SOL_SOCKET:
2583         level = SOL_SOCKET;
2584         switch (optname) {
2585         /* These don't just return a single integer */
2586         case TARGET_SO_PEERNAME:
2587             goto unimplemented;
2588         case TARGET_SO_RCVTIMEO: {
2589             struct timeval tv;
2590             socklen_t tvlen;
2591 
2592             optname = SO_RCVTIMEO;
2593 
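            /*
             * The TARGET_SO_SNDTIMEO case below jumps to this label after
             * selecting SO_SNDTIMEO; the timeval conversion that follows
             * is shared by both options.
             */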
2594 get_timeout:
2595             if (get_user_u32(len, optlen)) {
2596                 return -TARGET_EFAULT;
2597             }
2598             if (len < 0) {
2599                 return -TARGET_EINVAL;
2600             }
2601 
2602             tvlen = sizeof(tv);
2603             ret = get_errno(getsockopt(sockfd, level, optname,
2604                                        &tv, &tvlen));
2605             if (ret < 0) {
2606                 return ret;
2607             }
2608             if (len > sizeof(struct target_timeval)) {
2609                 len = sizeof(struct target_timeval);
2610             }
2611             if (copy_to_user_timeval(optval_addr, &tv)) {
2612                 return -TARGET_EFAULT;
2613             }
2614             if (put_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             break;
2618         }
2619         case TARGET_SO_SNDTIMEO:
2620             optname = SO_SNDTIMEO;
2621             goto get_timeout;
2622         case TARGET_SO_PEERCRED: {
2623             struct ucred cr;
2624             socklen_t crlen;
2625             struct target_ucred *tcr;
2626 
2627             if (get_user_u32(len, optlen)) {
2628                 return -TARGET_EFAULT;
2629             }
2630             if (len < 0) {
2631                 return -TARGET_EINVAL;
2632             }
2633 
2634             crlen = sizeof(cr);
2635             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2636                                        &cr, &crlen));
2637             if (ret < 0) {
2638                 return ret;
2639             }
2640             if (len > crlen) {
2641                 len = crlen;
2642             }
2643             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2644                 return -TARGET_EFAULT;
2645             }
2646             __put_user(cr.pid, &tcr->pid);
2647             __put_user(cr.uid, &tcr->uid);
2648             __put_user(cr.gid, &tcr->gid);
2649             unlock_user_struct(tcr, optval_addr, 1);
2650             if (put_user_u32(len, optlen)) {
2651                 return -TARGET_EFAULT;
2652             }
2653             break;
2654         }
2655         case TARGET_SO_PEERSEC: {
2656             char *name;
2657 
2658             if (get_user_u32(len, optlen)) {
2659                 return -TARGET_EFAULT;
2660             }
2661             if (len < 0) {
2662                 return -TARGET_EINVAL;
2663             }
2664             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2665             if (!name) {
2666                 return -TARGET_EFAULT;
2667             }
2668             lv = len;
2669             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2670                                        name, &lv));
2671             if (put_user_u32(lv, optlen)) {
2672                 ret = -TARGET_EFAULT;
2673             }
2674             unlock_user(name, optval_addr, lv);
2675             break;
2676         }
2677         case TARGET_SO_LINGER:
2678         {
2679             struct linger lg;
2680             socklen_t lglen;
2681             struct target_linger *tlg;
2682 
2683             if (get_user_u32(len, optlen)) {
2684                 return -TARGET_EFAULT;
2685             }
2686             if (len < 0) {
2687                 return -TARGET_EINVAL;
2688             }
2689 
2690             lglen = sizeof(lg);
2691             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2692                                        &lg, &lglen));
2693             if (ret < 0) {
2694                 return ret;
2695             }
2696             if (len > lglen) {
2697                 len = lglen;
2698             }
2699             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2700                 return -TARGET_EFAULT;
2701             }
2702             __put_user(lg.l_onoff, &tlg->l_onoff);
2703             __put_user(lg.l_linger, &tlg->l_linger);
2704             unlock_user_struct(tlg, optval_addr, 1);
2705             if (put_user_u32(len, optlen)) {
2706                 return -TARGET_EFAULT;
2707             }
2708             break;
2709         }
2710         /* Options with 'int' argument.  */
2711         case TARGET_SO_DEBUG:
2712             optname = SO_DEBUG;
2713             goto int_case;
2714         case TARGET_SO_REUSEADDR:
2715             optname = SO_REUSEADDR;
2716             goto int_case;
2717 #ifdef SO_REUSEPORT
2718         case TARGET_SO_REUSEPORT:
2719             optname = SO_REUSEPORT;
2720             goto int_case;
2721 #endif
2722         case TARGET_SO_TYPE:
2723             optname = SO_TYPE;
2724             goto int_case;
2725         case TARGET_SO_ERROR:
2726             optname = SO_ERROR;
2727             goto int_case;
2728         case TARGET_SO_DONTROUTE:
2729             optname = SO_DONTROUTE;
2730             goto int_case;
2731         case TARGET_SO_BROADCAST:
2732             optname = SO_BROADCAST;
2733             goto int_case;
2734         case TARGET_SO_SNDBUF:
2735             optname = SO_SNDBUF;
2736             goto int_case;
2737         case TARGET_SO_RCVBUF:
2738             optname = SO_RCVBUF;
2739             goto int_case;
2740         case TARGET_SO_KEEPALIVE:
2741             optname = SO_KEEPALIVE;
2742             goto int_case;
2743         case TARGET_SO_OOBINLINE:
2744             optname = SO_OOBINLINE;
2745             goto int_case;
2746         case TARGET_SO_NO_CHECK:
2747             optname = SO_NO_CHECK;
2748             goto int_case;
2749         case TARGET_SO_PRIORITY:
2750             optname = SO_PRIORITY;
2751             goto int_case;
2752 #ifdef SO_BSDCOMPAT
2753         case TARGET_SO_BSDCOMPAT:
2754             optname = SO_BSDCOMPAT;
2755             goto int_case;
2756 #endif
2757         case TARGET_SO_PASSCRED:
2758             optname = SO_PASSCRED;
2759             goto int_case;
2760         case TARGET_SO_TIMESTAMP:
2761             optname = SO_TIMESTAMP;
2762             goto int_case;
2763         case TARGET_SO_RCVLOWAT:
2764             optname = SO_RCVLOWAT;
2765             goto int_case;
2766         case TARGET_SO_ACCEPTCONN:
2767             optname = SO_ACCEPTCONN;
2768             goto int_case;
2769         case TARGET_SO_PROTOCOL:
2770             optname = SO_PROTOCOL;
2771             goto int_case;
2772         case TARGET_SO_DOMAIN:
2773             optname = SO_DOMAIN;
2774             goto int_case;
2775         default:
2776             goto int_case;
2777         }
2778         break;
2779     case SOL_TCP:
2780     case SOL_UDP:
2781         /* TCP and UDP options all take an 'int' value.  */
2782     int_case:
2783         if (get_user_u32(len, optlen))
2784             return -TARGET_EFAULT;
2785         if (len < 0)
2786             return -TARGET_EINVAL;
2787         lv = sizeof(lv);
2788         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2789         if (ret < 0)
2790             return ret;
2791         switch (optname) {
2792         case SO_TYPE:
2793             val = host_to_target_sock_type(val);
2794             break;
2795         case SO_ERROR:
2796             val = host_to_target_errno(val);
2797             break;
2798         }
2799         if (len > lv)
2800             len = lv;
2801         if (len == 4) {
2802             if (put_user_u32(val, optval_addr))
2803                 return -TARGET_EFAULT;
2804         } else {
2805             if (put_user_u8(val, optval_addr))
2806                 return -TARGET_EFAULT;
2807         }
2808         if (put_user_u32(len, optlen))
2809             return -TARGET_EFAULT;
2810         break;
2811     case SOL_IP:
2812         switch(optname) {
2813         case IP_TOS:
2814         case IP_TTL:
2815         case IP_HDRINCL:
2816         case IP_ROUTER_ALERT:
2817         case IP_RECVOPTS:
2818         case IP_RETOPTS:
2819         case IP_PKTINFO:
2820         case IP_MTU_DISCOVER:
2821         case IP_RECVERR:
2822         case IP_RECVTOS:
2823 #ifdef IP_FREEBIND
2824         case IP_FREEBIND:
2825 #endif
2826         case IP_MULTICAST_TTL:
2827         case IP_MULTICAST_LOOP:
2828             if (get_user_u32(len, optlen))
2829                 return -TARGET_EFAULT;
2830             if (len < 0)
2831                 return -TARGET_EINVAL;
2832             lv = sizeof(lv);
2833             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2834             if (ret < 0)
2835                 return ret;
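            /*
             * This mirrors the kernel's ip_getsockopt() behaviour: if the
             * guest buffer is smaller than an int and the value fits in a
             * byte, return a single byte.
             */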
2836             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2837                 len = 1;
2838                 if (put_user_u32(len, optlen)
2839                     || put_user_u8(val, optval_addr))
2840                     return -TARGET_EFAULT;
2841             } else {
2842                 if (len > sizeof(int))
2843                     len = sizeof(int);
2844                 if (put_user_u32(len, optlen)
2845                     || put_user_u32(val, optval_addr))
2846                     return -TARGET_EFAULT;
2847             }
2848             break;
2849         default:
2850             ret = -TARGET_ENOPROTOOPT;
2851             break;
2852         }
2853         break;
2854     case SOL_IPV6:
2855         switch (optname) {
2856         case IPV6_MTU_DISCOVER:
2857         case IPV6_MTU:
2858         case IPV6_V6ONLY:
2859         case IPV6_RECVPKTINFO:
2860         case IPV6_UNICAST_HOPS:
2861         case IPV6_MULTICAST_HOPS:
2862         case IPV6_MULTICAST_LOOP:
2863         case IPV6_RECVERR:
2864         case IPV6_RECVHOPLIMIT:
2865         case IPV6_2292HOPLIMIT:
2866         case IPV6_CHECKSUM:
2867         case IPV6_ADDRFORM:
2868         case IPV6_2292PKTINFO:
2869         case IPV6_RECVTCLASS:
2870         case IPV6_RECVRTHDR:
2871         case IPV6_2292RTHDR:
2872         case IPV6_RECVHOPOPTS:
2873         case IPV6_2292HOPOPTS:
2874         case IPV6_RECVDSTOPTS:
2875         case IPV6_2292DSTOPTS:
2876         case IPV6_TCLASS:
2877         case IPV6_ADDR_PREFERENCES:
2878 #ifdef IPV6_RECVPATHMTU
2879         case IPV6_RECVPATHMTU:
2880 #endif
2881 #ifdef IPV6_TRANSPARENT
2882         case IPV6_TRANSPARENT:
2883 #endif
2884 #ifdef IPV6_FREEBIND
2885         case IPV6_FREEBIND:
2886 #endif
2887 #ifdef IPV6_RECVORIGDSTADDR
2888         case IPV6_RECVORIGDSTADDR:
2889 #endif
2890             if (get_user_u32(len, optlen))
2891                 return -TARGET_EFAULT;
2892             if (len < 0)
2893                 return -TARGET_EINVAL;
2894             lv = sizeof(lv);
2895             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2896             if (ret < 0)
2897                 return ret;
2898             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2899                 len = 1;
2900                 if (put_user_u32(len, optlen)
2901                     || put_user_u8(val, optval_addr))
2902                     return -TARGET_EFAULT;
2903             } else {
2904                 if (len > sizeof(int))
2905                     len = sizeof(int);
2906                 if (put_user_u32(len, optlen)
2907                     || put_user_u32(val, optval_addr))
2908                     return -TARGET_EFAULT;
2909             }
2910             break;
2911         default:
2912             ret = -TARGET_ENOPROTOOPT;
2913             break;
2914         }
2915         break;
2916 #ifdef SOL_NETLINK
2917     case SOL_NETLINK:
2918         switch (optname) {
2919         case NETLINK_PKTINFO:
2920         case NETLINK_BROADCAST_ERROR:
2921         case NETLINK_NO_ENOBUFS:
2922 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2923         case NETLINK_LISTEN_ALL_NSID:
2924         case NETLINK_CAP_ACK:
2925 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2927         case NETLINK_EXT_ACK:
2928 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2929 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2930         case NETLINK_GET_STRICT_CHK:
2931 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2932             if (get_user_u32(len, optlen)) {
2933                 return -TARGET_EFAULT;
2934             }
2935             if (len != sizeof(val)) {
2936                 return -TARGET_EINVAL;
2937             }
2938             lv = len;
2939             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2940             if (ret < 0) {
2941                 return ret;
2942             }
2943             if (put_user_u32(lv, optlen)
2944                 || put_user_u32(val, optval_addr)) {
2945                 return -TARGET_EFAULT;
2946             }
2947             break;
2948 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2949         case NETLINK_LIST_MEMBERSHIPS:
2950         {
2951             uint32_t *results;
2952             int i;
2953             if (get_user_u32(len, optlen)) {
2954                 return -TARGET_EFAULT;
2955             }
2956             if (len < 0) {
2957                 return -TARGET_EINVAL;
2958             }
2959             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2960             if (!results && len > 0) {
2961                 return -TARGET_EFAULT;
2962             }
2963             lv = len;
2964             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2965             if (ret < 0) {
2966                 unlock_user(results, optval_addr, 0);
2967                 return ret;
2968             }
2969             /* swap host endianness to target endianness. */
2970             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2971                 results[i] = tswap32(results[i]);
2972             }
2973             if (put_user_u32(lv, optlen)) {
2974                 return -TARGET_EFAULT;
2975             }
2976             unlock_user(results, optval_addr, 0);
2977             break;
2978         }
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2980         default:
2981             goto unimplemented;
2982         }
2983         break;
2984 #endif /* SOL_NETLINK */
2985     default:
2986     unimplemented:
2987         qemu_log_mask(LOG_UNIMP,
2988                       "getsockopt level=%d optname=%d not yet supported\n",
2989                       level, optname);
2990         ret = -TARGET_EOPNOTSUPP;
2991         break;
2992     }
2993     return ret;
2994 }
2995 
2996 /* Convert target low/high pair representing file offset into the host
2997  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2998  * as the kernel doesn't handle them either.
2999  */
3000 static void target_to_host_low_high(abi_ulong tlow,
3001                                     abi_ulong thigh,
3002                                     unsigned long *hlow,
3003                                     unsigned long *hhigh)
3004 {
3005     uint64_t off = tlow |
3006         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3007         TARGET_LONG_BITS / 2;
3008 
3009     *hlow = off;
3010     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3011 }
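/*
 * Example: a 32-bit guest passing tlow = 0x00001000, thigh = 0x2 to
 * target_to_host_low_high() yields off = 0x200001000; on a 64-bit host
 * that lands entirely in *hlow (*hhigh = 0), while a 32-bit host gets
 * *hlow = 0x00001000 and *hhigh = 0x2.
 */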
3012 
3013 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3014                                 abi_ulong count, int copy)
3015 {
3016     struct target_iovec *target_vec;
3017     struct iovec *vec;
3018     abi_ulong total_len, max_len;
3019     int i;
3020     int err = 0;
3021     bool bad_address = false;
3022 
3023     if (count == 0) {
3024         errno = 0;
3025         return NULL;
3026     }
3027     if (count > IOV_MAX) {
3028         errno = EINVAL;
3029         return NULL;
3030     }
3031 
3032     vec = g_try_new0(struct iovec, count);
3033     if (vec == NULL) {
3034         errno = ENOMEM;
3035         return NULL;
3036     }
3037 
3038     target_vec = lock_user(VERIFY_READ, target_addr,
3039                            count * sizeof(struct target_iovec), 1);
3040     if (target_vec == NULL) {
3041         err = EFAULT;
3042         goto fail2;
3043     }
3044 
3045     /* ??? If host page size > target page size, this will result in a
3046        value larger than what we can actually support.  */
3047     max_len = 0x7fffffff & TARGET_PAGE_MASK;
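    /*
     * Clamping the accumulated length against max_len below matches the
     * kernel's MAX_RW_COUNT limit (INT_MAX rounded down to a page
     * boundary): oversized iovecs are shortened rather than rejected.
     */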
3048     total_len = 0;
3049 
3050     for (i = 0; i < count; i++) {
3051         abi_ulong base = tswapal(target_vec[i].iov_base);
3052         abi_long len = tswapal(target_vec[i].iov_len);
3053 
3054         if (len < 0) {
3055             err = EINVAL;
3056             goto fail;
3057         } else if (len == 0) {
3058             /* Zero length pointer is ignored.  */
3059             vec[i].iov_base = 0;
3060         } else {
3061             vec[i].iov_base = lock_user(type, base, len, copy);
3062             /* If the first buffer pointer is bad, this is a fault.  But
3063              * subsequent bad buffers will result in a partial write; this
3064              * is realized by filling the vector with null pointers and
3065              * zero lengths. */
3066             if (!vec[i].iov_base) {
3067                 if (i == 0) {
3068                     err = EFAULT;
3069                     goto fail;
3070                 } else {
3071                     bad_address = true;
3072                 }
3073             }
3074             if (bad_address) {
3075                 len = 0;
3076             }
3077             if (len > max_len - total_len) {
3078                 len = max_len - total_len;
3079             }
3080         }
3081         vec[i].iov_len = len;
3082         total_len += len;
3083     }
3084 
3085     unlock_user(target_vec, target_addr, 0);
3086     return vec;
3087 
3088  fail:
3089     while (--i >= 0) {
3090         if (tswapal(target_vec[i].iov_len) > 0) {
3091             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3092         }
3093     }
3094     unlock_user(target_vec, target_addr, 0);
3095  fail2:
3096     g_free(vec);
3097     errno = err;
3098     return NULL;
3099 }
3100 
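/*
 * Release the buffers locked by lock_iovec().  When 'copy' is nonzero the
 * contents of each host buffer are copied back to target memory (the data
 * direction used for readv/recvmsg style calls).
 */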
3101 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3102                          abi_ulong count, int copy)
3103 {
3104     struct target_iovec *target_vec;
3105     int i;
3106 
3107     target_vec = lock_user(VERIFY_READ, target_addr,
3108                            count * sizeof(struct target_iovec), 1);
3109     if (target_vec) {
3110         for (i = 0; i < count; i++) {
3111             abi_ulong base = tswapal(target_vec[i].iov_base);
3112             abi_long len = tswapal(target_vec[i].iov_len);
3113             if (len < 0) {
3114                 break;
3115             }
3116             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3117         }
3118         unlock_user(target_vec, target_addr, 0);
3119     }
3120 
3121     g_free(vec);
3122 }
3123 
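/*
 * Translate the TARGET_SOCK_* type and flag bits into host SOCK_* values.
 * Returns -TARGET_EINVAL if a requested flag cannot be represented on this
 * host; a missing SOCK_NONBLOCK is tolerated when O_NONBLOCK exists because
 * sock_flags_fixup() applies it with fcntl() after the socket is created.
 */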
3124 static inline int target_to_host_sock_type(int *type)
3125 {
3126     int host_type = 0;
3127     int target_type = *type;
3128 
3129     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3130     case TARGET_SOCK_DGRAM:
3131         host_type = SOCK_DGRAM;
3132         break;
3133     case TARGET_SOCK_STREAM:
3134         host_type = SOCK_STREAM;
3135         break;
3136     default:
3137         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3138         break;
3139     }
3140     if (target_type & TARGET_SOCK_CLOEXEC) {
3141 #if defined(SOCK_CLOEXEC)
3142         host_type |= SOCK_CLOEXEC;
3143 #else
3144         return -TARGET_EINVAL;
3145 #endif
3146     }
3147     if (target_type & TARGET_SOCK_NONBLOCK) {
3148 #if defined(SOCK_NONBLOCK)
3149         host_type |= SOCK_NONBLOCK;
3150 #elif !defined(O_NONBLOCK)
3151         return -TARGET_EINVAL;
3152 #endif
3153     }
3154     *type = host_type;
3155     return 0;
3156 }
3157 
3158 /* Try to emulate socket type flags after socket creation.  */
3159 static int sock_flags_fixup(int fd, int target_type)
3160 {
3161 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3162     if (target_type & TARGET_SOCK_NONBLOCK) {
3163         int flags = fcntl(fd, F_GETFL);
3164         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3165             close(fd);
3166             return -TARGET_EINVAL;
3167         }
3168     }
3169 #endif
3170     return fd;
3171 }
3172 
3173 /* do_socket() Must return target values and target errnos. */
3174 static abi_long do_socket(int domain, int type, int protocol)
3175 {
3176     int target_type = type;
3177     int ret;
3178 
3179     ret = target_to_host_sock_type(&type);
3180     if (ret) {
3181         return ret;
3182     }
3183 
3184     if (domain == PF_NETLINK && !(
3185 #ifdef CONFIG_RTNETLINK
3186          protocol == NETLINK_ROUTE ||
3187 #endif
3188          protocol == NETLINK_KOBJECT_UEVENT ||
3189          protocol == NETLINK_AUDIT)) {
3190         return -TARGET_EPROTONOSUPPORT;
3191     }
3192 
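    /*
     * For packet sockets the protocol field is a 16-bit ethertype that the
     * target supplied in network byte order, so byte-swap it when target and
     * host endianness differ (tswap16 is a no-op otherwise).
     */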
3193     if (domain == AF_PACKET ||
3194         (domain == AF_INET && type == SOCK_PACKET)) {
3195         protocol = tswap16(protocol);
3196     }
3197 
3198     ret = get_errno(socket(domain, type, protocol));
3199     if (ret >= 0) {
3200         ret = sock_flags_fixup(ret, target_type);
3201         if (type == SOCK_PACKET) {
3202             /* Handle an obsolete case:
3203              * if the socket type is SOCK_PACKET, the socket is bound by name.
3204              */
3205             fd_trans_register(ret, &target_packet_trans);
3206         } else if (domain == PF_NETLINK) {
3207             switch (protocol) {
3208 #ifdef CONFIG_RTNETLINK
3209             case NETLINK_ROUTE:
3210                 fd_trans_register(ret, &target_netlink_route_trans);
3211                 break;
3212 #endif
3213             case NETLINK_KOBJECT_UEVENT:
3214                 /* nothing to do: messages are strings */
3215                 break;
3216             case NETLINK_AUDIT:
3217                 fd_trans_register(ret, &target_netlink_audit_trans);
3218                 break;
3219             default:
3220                 g_assert_not_reached();
3221             }
3222         }
3223     }
3224     return ret;
3225 }
3226 
3227 /* do_bind() Must return target values and target errnos. */
3228 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3229                         socklen_t addrlen)
3230 {
3231     void *addr;
3232     abi_long ret;
3233 
3234     if ((int)addrlen < 0) {
3235         return -TARGET_EINVAL;
3236     }
3237 
3238     addr = alloca(addrlen+1);
3239 
3240     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3241     if (ret)
3242         return ret;
3243 
3244     return get_errno(bind(sockfd, addr, addrlen));
3245 }
3246 
3247 /* do_connect() Must return target values and target errnos. */
3248 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3249                            socklen_t addrlen)
3250 {
3251     void *addr;
3252     abi_long ret;
3253 
3254     if ((int)addrlen < 0) {
3255         return -TARGET_EINVAL;
3256     }
3257 
3258     addr = alloca(addrlen+1);
3259 
3260     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3261     if (ret)
3262         return ret;
3263 
3264     return get_errno(safe_connect(sockfd, addr, addrlen));
3265 }
3266 
3267 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3268 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3269                                       int flags, int send)
3270 {
3271     abi_long ret, len;
3272     struct msghdr msg;
3273     abi_ulong count;
3274     struct iovec *vec;
3275     abi_ulong target_vec;
3276 
3277     if (msgp->msg_name) {
3278         msg.msg_namelen = tswap32(msgp->msg_namelen);
3279         msg.msg_name = alloca(msg.msg_namelen+1);
3280         ret = target_to_host_sockaddr(fd, msg.msg_name,
3281                                       tswapal(msgp->msg_name),
3282                                       msg.msg_namelen);
3283         if (ret == -TARGET_EFAULT) {
3284             /* For connected sockets msg_name and msg_namelen must
3285              * be ignored, so returning EFAULT immediately is wrong.
3286              * Instead, pass a bad msg_name to the host kernel, and
3287              * let it decide whether to return EFAULT or not.
3288              */
3289             msg.msg_name = (void *)-1;
3290         } else if (ret) {
3291             goto out2;
3292         }
3293     } else {
3294         msg.msg_name = NULL;
3295         msg.msg_namelen = 0;
3296     }
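    /*
     * Reserve twice the target's control message space: host cmsg headers,
     * alignment and payloads can be larger than the target's, so the
     * converted messages may need more room than the guest allowed for.
     */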
3297     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3298     msg.msg_control = alloca(msg.msg_controllen);
3299     memset(msg.msg_control, 0, msg.msg_controllen);
3300 
3301     msg.msg_flags = tswap32(msgp->msg_flags);
3302 
3303     count = tswapal(msgp->msg_iovlen);
3304     target_vec = tswapal(msgp->msg_iov);
3305 
3306     if (count > IOV_MAX) {
3307         /* sendmsg/recvmsg return a different errno for this condition than
3308          * readv/writev, so we must catch it here before lock_iovec() does.
3309          */
3310         ret = -TARGET_EMSGSIZE;
3311         goto out2;
3312     }
3313 
3314     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3315                      target_vec, count, send);
3316     if (vec == NULL) {
3317         ret = -host_to_target_errno(errno);
3318         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3319         if (!send || ret) {
3320             goto out2;
3321         }
3322     }
3323     msg.msg_iovlen = count;
3324     msg.msg_iov = vec;
3325 
3326     if (send) {
3327         if (fd_trans_target_to_host_data(fd)) {
3328             void *host_msg;
3329 
3330             host_msg = g_malloc(msg.msg_iov->iov_len);
3331             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3332             ret = fd_trans_target_to_host_data(fd)(host_msg,
3333                                                    msg.msg_iov->iov_len);
3334             if (ret >= 0) {
3335                 msg.msg_iov->iov_base = host_msg;
3336                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3337             }
3338             g_free(host_msg);
3339         } else {
3340             ret = target_to_host_cmsg(&msg, msgp);
3341             if (ret == 0) {
3342                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3343             }
3344         }
3345     } else {
3346         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3347         if (!is_error(ret)) {
3348             len = ret;
3349             if (fd_trans_host_to_target_data(fd)) {
3350                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3351                                                MIN(msg.msg_iov->iov_len, len));
3352             }
3353             if (!is_error(ret)) {
3354                 ret = host_to_target_cmsg(msgp, &msg);
3355             }
3356             if (!is_error(ret)) {
3357                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3358                 msgp->msg_flags = tswap32(msg.msg_flags);
3359                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3360                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3361                                     msg.msg_name, msg.msg_namelen);
3362                     if (ret) {
3363                         goto out;
3364                     }
3365                 }
3366 
3367                 ret = len;
3368             }
3369         }
3370     }
3371 
3372 out:
3373     if (vec) {
3374         unlock_iovec(vec, target_vec, count, !send);
3375     }
3376 out2:
3377     return ret;
3378 }
3379 
3380 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3381                                int flags, int send)
3382 {
3383     abi_long ret;
3384     struct target_msghdr *msgp;
3385 
3386     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3387                           msgp,
3388                           target_msg,
3389                           send ? 1 : 0)) {
3390         return -TARGET_EFAULT;
3391     }
3392     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3393     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3394     return ret;
3395 }
3396 
3397 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3398  * so it might not have this *mmsg-specific flag either.
3399  */
3400 #ifndef MSG_WAITFORONE
3401 #define MSG_WAITFORONE 0x10000
3402 #endif
3403 
3404 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3405                                 unsigned int vlen, unsigned int flags,
3406                                 int send)
3407 {
3408     struct target_mmsghdr *mmsgp;
3409     abi_long ret = 0;
3410     int i;
3411 
3412     if (vlen > UIO_MAXIOV) {
3413         vlen = UIO_MAXIOV;
3414     }
3415 
3416     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3417     if (!mmsgp) {
3418         return -TARGET_EFAULT;
3419     }
3420 
3421     for (i = 0; i < vlen; i++) {
3422         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3423         if (is_error(ret)) {
3424             break;
3425         }
3426         mmsgp[i].msg_len = tswap32(ret);
3427         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3428         if (flags & MSG_WAITFORONE) {
3429             flags |= MSG_DONTWAIT;
3430         }
3431     }
3432 
3433     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3434 
3435     /* Return number of datagrams sent if we sent any at all;
3436      * otherwise return the error.
3437      */
3438     if (i) {
3439         return i;
3440     }
3441     return ret;
3442 }
3443 
3444 /* do_accept4() Must return target values and target errnos. */
3445 static abi_long do_accept4(int fd, abi_ulong target_addr,
3446                            abi_ulong target_addrlen_addr, int flags)
3447 {
3448     socklen_t addrlen, ret_addrlen;
3449     void *addr;
3450     abi_long ret;
3451     int host_flags;
3452 
3453     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3454         return -TARGET_EINVAL;
3455     }
3456 
3457     host_flags = 0;
3458     if (flags & TARGET_SOCK_NONBLOCK) {
3459         host_flags |= SOCK_NONBLOCK;
3460     }
3461     if (flags & TARGET_SOCK_CLOEXEC) {
3462         host_flags |= SOCK_CLOEXEC;
3463     }
3464 
3465     if (target_addr == 0) {
3466         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3467     }
3468 
3469     /* Linux returns EFAULT if the addrlen pointer is invalid */
3470     if (get_user_u32(addrlen, target_addrlen_addr))
3471         return -TARGET_EFAULT;
3472 
3473     if ((int)addrlen < 0) {
3474         return -TARGET_EINVAL;
3475     }
3476 
3477     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3478         return -TARGET_EFAULT;
3479     }
3480 
3481     addr = alloca(addrlen);
3482 
3483     ret_addrlen = addrlen;
3484     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3485     if (!is_error(ret)) {
3486         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3487         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3488             ret = -TARGET_EFAULT;
3489         }
3490     }
3491     return ret;
3492 }
3493 
3494 /* do_getpeername() Must return target values and target errnos. */
3495 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3496                                abi_ulong target_addrlen_addr)
3497 {
3498     socklen_t addrlen, ret_addrlen;
3499     void *addr;
3500     abi_long ret;
3501 
3502     if (get_user_u32(addrlen, target_addrlen_addr))
3503         return -TARGET_EFAULT;
3504 
3505     if ((int)addrlen < 0) {
3506         return -TARGET_EINVAL;
3507     }
3508 
3509     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3510         return -TARGET_EFAULT;
3511     }
3512 
3513     addr = alloca(addrlen);
3514 
3515     ret_addrlen = addrlen;
3516     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3517     if (!is_error(ret)) {
3518         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3519         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3520             ret = -TARGET_EFAULT;
3521         }
3522     }
3523     return ret;
3524 }
3525 
3526 /* do_getsockname() Must return target values and target errnos. */
3527 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3528                                abi_ulong target_addrlen_addr)
3529 {
3530     socklen_t addrlen, ret_addrlen;
3531     void *addr;
3532     abi_long ret;
3533 
3534     if (get_user_u32(addrlen, target_addrlen_addr))
3535         return -TARGET_EFAULT;
3536 
3537     if ((int)addrlen < 0) {
3538         return -TARGET_EINVAL;
3539     }
3540 
3541     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3542         return -TARGET_EFAULT;
3543     }
3544 
3545     addr = alloca(addrlen);
3546 
3547     ret_addrlen = addrlen;
3548     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3549     if (!is_error(ret)) {
3550         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3551         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3552             ret = -TARGET_EFAULT;
3553         }
3554     }
3555     return ret;
3556 }
3557 
3558 /* do_socketpair() Must return target values and target errnos. */
3559 static abi_long do_socketpair(int domain, int type, int protocol,
3560                               abi_ulong target_tab_addr)
3561 {
3562     int tab[2];
3563     abi_long ret;
3564 
3565     target_to_host_sock_type(&type);
3566 
3567     ret = get_errno(socketpair(domain, type, protocol, tab));
3568     if (!is_error(ret)) {
3569         if (put_user_s32(tab[0], target_tab_addr)
3570             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3571             ret = -TARGET_EFAULT;
3572     }
3573     return ret;
3574 }
3575 
3576 /* do_sendto() Must return target values and target errnos. */
3577 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3578                           abi_ulong target_addr, socklen_t addrlen)
3579 {
3580     void *addr;
3581     void *host_msg;
3582     void *copy_msg = NULL;
3583     abi_long ret;
3584 
3585     if ((int)addrlen < 0) {
3586         return -TARGET_EINVAL;
3587     }
3588 
3589     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3590     if (!host_msg)
3591         return -TARGET_EFAULT;
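    /*
     * If this fd has a data translator (e.g. a netlink socket), run it on a
     * private copy of the message so the guest's buffer is left untouched.
     */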
3592     if (fd_trans_target_to_host_data(fd)) {
3593         copy_msg = host_msg;
3594         host_msg = g_malloc(len);
3595         memcpy(host_msg, copy_msg, len);
3596         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3597         if (ret < 0) {
3598             goto fail;
3599         }
3600     }
3601     if (target_addr) {
3602         addr = alloca(addrlen+1);
3603         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3604         if (ret) {
3605             goto fail;
3606         }
3607         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3608     } else {
3609         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3610     }
3611 fail:
3612     if (copy_msg) {
3613         g_free(host_msg);
3614         host_msg = copy_msg;
3615     }
3616     unlock_user(host_msg, msg, 0);
3617     return ret;
3618 }
3619 
3620 /* do_recvfrom() Must return target values and target errnos. */
3621 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3622                             abi_ulong target_addr,
3623                             abi_ulong target_addrlen)
3624 {
3625     socklen_t addrlen, ret_addrlen;
3626     void *addr;
3627     void *host_msg;
3628     abi_long ret;
3629 
3630     if (!msg) {
3631         host_msg = NULL;
3632     } else {
3633         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3634         if (!host_msg) {
3635             return -TARGET_EFAULT;
3636         }
3637     }
3638     if (target_addr) {
3639         if (get_user_u32(addrlen, target_addrlen)) {
3640             ret = -TARGET_EFAULT;
3641             goto fail;
3642         }
3643         if ((int)addrlen < 0) {
3644             ret = -TARGET_EINVAL;
3645             goto fail;
3646         }
3647         addr = alloca(addrlen);
3648         ret_addrlen = addrlen;
3649         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3650                                       addr, &ret_addrlen));
3651     } else {
3652         addr = NULL; /* To keep compiler quiet.  */
3653         addrlen = 0; /* To keep compiler quiet.  */
3654         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3655     }
3656     if (!is_error(ret)) {
3657         if (fd_trans_host_to_target_data(fd)) {
3658             abi_long trans;
3659             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3660             if (is_error(trans)) {
3661                 ret = trans;
3662                 goto fail;
3663             }
3664         }
3665         if (target_addr) {
3666             host_to_target_sockaddr(target_addr, addr,
3667                                     MIN(addrlen, ret_addrlen));
3668             if (put_user_u32(ret_addrlen, target_addrlen)) {
3669                 ret = -TARGET_EFAULT;
3670                 goto fail;
3671             }
3672         }
3673         unlock_user(host_msg, msg, len);
3674     } else {
3675 fail:
3676         unlock_user(host_msg, msg, 0);
3677     }
3678     return ret;
3679 }
3680 
3681 #ifdef TARGET_NR_socketcall
3682 /* do_socketcall() must return target values and target errnos. */
3683 static abi_long do_socketcall(int num, abi_ulong vptr)
3684 {
3685     static const unsigned nargs[] = { /* number of arguments per operation */
3686         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3687         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3688         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3689         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3690         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3691         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3692         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3693         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3694         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3695         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3696         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3697         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3698         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3699         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3700         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3701         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3702         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3703         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3704         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3705         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3706     };
3707     abi_long a[6]; /* max 6 args */
3708     unsigned i;
3709 
3710     /* check the range of the first argument num */
3711     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3712     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3713         return -TARGET_EINVAL;
3714     }
3715     /* ensure we have space for args */
3716     if (nargs[num] > ARRAY_SIZE(a)) {
3717         return -TARGET_EINVAL;
3718     }
3719     /* collect the arguments in a[] according to nargs[] */
3720     for (i = 0; i < nargs[num]; ++i) {
3721         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3722             return -TARGET_EFAULT;
3723         }
3724     }
3725     /* now that we have the args, invoke the appropriate underlying function */
3726     switch (num) {
3727     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3728         return do_socket(a[0], a[1], a[2]);
3729     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3730         return do_bind(a[0], a[1], a[2]);
3731     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3732         return do_connect(a[0], a[1], a[2]);
3733     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3734         return get_errno(listen(a[0], a[1]));
3735     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3736         return do_accept4(a[0], a[1], a[2], 0);
3737     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3738         return do_getsockname(a[0], a[1], a[2]);
3739     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3740         return do_getpeername(a[0], a[1], a[2]);
3741     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3742         return do_socketpair(a[0], a[1], a[2], a[3]);
3743     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3744         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3745     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3746         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3747     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3748         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3749     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3750         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3751     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3752         return get_errno(shutdown(a[0], a[1]));
3753     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3754         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3755     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3756         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3757     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3758         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3759     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3760         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3761     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3762         return do_accept4(a[0], a[1], a[2], a[3]);
3763     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3764         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3765     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3766         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3767     default:
3768         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3769         return -TARGET_EINVAL;
3770     }
3771 }
3772 #endif
3773 
3774 #ifndef TARGET_SEMID64_DS
3775 /* asm-generic version of this struct */
3776 struct target_semid64_ds
3777 {
3778   struct target_ipc_perm sem_perm;
3779   abi_ulong sem_otime;
3780 #if TARGET_ABI_BITS == 32
3781   abi_ulong __unused1;
3782 #endif
3783   abi_ulong sem_ctime;
3784 #if TARGET_ABI_BITS == 32
3785   abi_ulong __unused2;
3786 #endif
3787   abi_ulong sem_nsems;
3788   abi_ulong __unused3;
3789   abi_ulong __unused4;
3790 };
3791 #endif
3792 
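/*
 * The mode and __seq members of the target ipc_perm are 16 bits wide on most
 * targets but 32 bits wide on Alpha, MIPS and PPC, hence the per-target
 * choice of swap width below.
 */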
3793 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3794                                                abi_ulong target_addr)
3795 {
3796     struct target_ipc_perm *target_ip;
3797     struct target_semid64_ds *target_sd;
3798 
3799     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3800         return -TARGET_EFAULT;
3801     target_ip = &(target_sd->sem_perm);
3802     host_ip->__key = tswap32(target_ip->__key);
3803     host_ip->uid = tswap32(target_ip->uid);
3804     host_ip->gid = tswap32(target_ip->gid);
3805     host_ip->cuid = tswap32(target_ip->cuid);
3806     host_ip->cgid = tswap32(target_ip->cgid);
3807 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3808     host_ip->mode = tswap32(target_ip->mode);
3809 #else
3810     host_ip->mode = tswap16(target_ip->mode);
3811 #endif
3812 #if defined(TARGET_PPC)
3813     host_ip->__seq = tswap32(target_ip->__seq);
3814 #else
3815     host_ip->__seq = tswap16(target_ip->__seq);
3816 #endif
3817     unlock_user_struct(target_sd, target_addr, 0);
3818     return 0;
3819 }
3820 
3821 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3822                                                struct ipc_perm *host_ip)
3823 {
3824     struct target_ipc_perm *target_ip;
3825     struct target_semid64_ds *target_sd;
3826 
3827     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3828         return -TARGET_EFAULT;
3829     target_ip = &(target_sd->sem_perm);
3830     target_ip->__key = tswap32(host_ip->__key);
3831     target_ip->uid = tswap32(host_ip->uid);
3832     target_ip->gid = tswap32(host_ip->gid);
3833     target_ip->cuid = tswap32(host_ip->cuid);
3834     target_ip->cgid = tswap32(host_ip->cgid);
3835 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3836     target_ip->mode = tswap32(host_ip->mode);
3837 #else
3838     target_ip->mode = tswap16(host_ip->mode);
3839 #endif
3840 #if defined(TARGET_PPC)
3841     target_ip->__seq = tswap32(host_ip->__seq);
3842 #else
3843     target_ip->__seq = tswap16(host_ip->__seq);
3844 #endif
3845     unlock_user_struct(target_sd, target_addr, 1);
3846     return 0;
3847 }
3848 
3849 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3850                                                abi_ulong target_addr)
3851 {
3852     struct target_semid64_ds *target_sd;
3853 
3854     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3855         return -TARGET_EFAULT;
3856     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3857         return -TARGET_EFAULT;
3858     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3859     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3860     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3861     unlock_user_struct(target_sd, target_addr, 0);
3862     return 0;
3863 }
3864 
3865 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3866                                                struct semid_ds *host_sd)
3867 {
3868     struct target_semid64_ds *target_sd;
3869 
3870     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3871         return -TARGET_EFAULT;
3872     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3873         return -TARGET_EFAULT;
3874     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3875     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3876     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3877     unlock_user_struct(target_sd, target_addr, 1);
3878     return 0;
3879 }
3880 
3881 struct target_seminfo {
3882     int semmap;
3883     int semmni;
3884     int semmns;
3885     int semmnu;
3886     int semmsl;
3887     int semopm;
3888     int semume;
3889     int semusz;
3890     int semvmx;
3891     int semaem;
3892 };
3893 
3894 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3895                                               struct seminfo *host_seminfo)
3896 {
3897     struct target_seminfo *target_seminfo;
3898     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3899         return -TARGET_EFAULT;
3900     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3901     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3902     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3903     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3904     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3905     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3906     __put_user(host_seminfo->semume, &target_seminfo->semume);
3907     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3908     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3909     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3910     unlock_user_struct(target_seminfo, target_addr, 1);
3911     return 0;
3912 }
3913 
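/*
 * semctl() takes a union semun argument, but the caller has to define the
 * union itself (modern C libraries do not provide it), so define the host
 * and target versions here.
 */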
3914 union semun {
3915     int val;
3916     struct semid_ds *buf;
3917     unsigned short *array;
3918     struct seminfo *__buf;
3919 };
3920 
3921 union target_semun {
3922     int val;
3923     abi_ulong buf;
3924     abi_ulong array;
3925     abi_ulong __buf;
3926 };
3927 
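/*
 * GETALL/SETALL need to know how many semaphores are in the set, which the
 * caller does not pass, so both conversion helpers query it first with
 * IPC_STAT.
 */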
3928 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3929                                                abi_ulong target_addr)
3930 {
3931     int nsems;
3932     unsigned short *array;
3933     union semun semun;
3934     struct semid_ds semid_ds;
3935     int i, ret;
3936 
3937     semun.buf = &semid_ds;
3938 
3939     ret = semctl(semid, 0, IPC_STAT, semun);
3940     if (ret == -1)
3941         return get_errno(ret);
3942 
3943     nsems = semid_ds.sem_nsems;
3944 
3945     *host_array = g_try_new(unsigned short, nsems);
3946     if (!*host_array) {
3947         return -TARGET_ENOMEM;
3948     }
3949     array = lock_user(VERIFY_READ, target_addr,
3950                       nsems*sizeof(unsigned short), 1);
3951     if (!array) {
3952         g_free(*host_array);
3953         return -TARGET_EFAULT;
3954     }
3955 
3956     for (i = 0; i < nsems; i++) {
3957         __get_user((*host_array)[i], &array[i]);
3958     }
3959     unlock_user(array, target_addr, 0);
3960 
3961     return 0;
3962 }
3963 
3964 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3965                                                unsigned short **host_array)
3966 {
3967     int nsems;
3968     unsigned short *array;
3969     union semun semun;
3970     struct semid_ds semid_ds;
3971     int i, ret;
3972 
3973     semun.buf = &semid_ds;
3974 
3975     ret = semctl(semid, 0, IPC_STAT, semun);
3976     if (ret == -1)
3977         return get_errno(ret);
3978 
3979     nsems = semid_ds.sem_nsems;
3980 
3981     array = lock_user(VERIFY_WRITE, target_addr,
3982                       nsems*sizeof(unsigned short), 0);
3983     if (!array)
3984         return -TARGET_EFAULT;
3985 
3986     for (i = 0; i < nsems; i++) {
3987         __put_user((*host_array)[i], &array[i]);
3988     }
3989     g_free(*host_array);
3990     unlock_user(array, target_addr, 1);
3991 
3992     return 0;
3993 }
3994 
3995 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3996                                  abi_ulong target_arg)
3997 {
3998     union target_semun target_su = { .buf = target_arg };
3999     union semun arg;
4000     struct semid_ds dsarg;
4001     unsigned short *array = NULL;
4002     struct seminfo seminfo;
4003     abi_long ret = -TARGET_EINVAL;
4004     abi_long err;
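    /*
     * Mask off the IPC_64 version flag that targets may OR into the command;
     * only the base command is passed to the host semctl().
     */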
4005     cmd &= 0xff;
4006 
4007     switch (cmd) {
4008     case GETVAL:
4009     case SETVAL:
4010         /* In 64 bit cross-endian situations, we will erroneously pick up
4011          * the wrong half of the union for the "val" element.  To rectify
4012          * this, the entire 8-byte structure is byteswapped, followed by
4013          * a swap of the 4 byte val field. In other cases, the data is
4014          * already in proper host byte order. */
4015         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4016             target_su.buf = tswapal(target_su.buf);
4017             arg.val = tswap32(target_su.val);
4018         } else {
4019             arg.val = target_su.val;
4020         }
4021         ret = get_errno(semctl(semid, semnum, cmd, arg));
4022         break;
4023     case GETALL:
4024     case SETALL:
4025         err = target_to_host_semarray(semid, &array, target_su.array);
4026         if (err)
4027             return err;
4028         arg.array = array;
4029         ret = get_errno(semctl(semid, semnum, cmd, arg));
4030         err = host_to_target_semarray(semid, target_su.array, &array);
4031         if (err)
4032             return err;
4033         break;
4034     case IPC_STAT:
4035     case IPC_SET:
4036     case SEM_STAT:
4037         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4038         if (err)
4039             return err;
4040         arg.buf = &dsarg;
4041         ret = get_errno(semctl(semid, semnum, cmd, arg));
4042         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4043         if (err)
4044             return err;
4045         break;
4046     case IPC_INFO:
4047     case SEM_INFO:
4048         arg.__buf = &seminfo;
4049         ret = get_errno(semctl(semid, semnum, cmd, arg));
4050         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4051         if (err)
4052             return err;
4053         break;
4054     case IPC_RMID:
4055     case GETPID:
4056     case GETNCNT:
4057     case GETZCNT:
4058         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4059         break;
4060     }
4061 
4062     return ret;
4063 }
4064 
4065 struct target_sembuf {
4066     unsigned short sem_num;
4067     short sem_op;
4068     short sem_flg;
4069 };
4070 
4071 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4072                                              abi_ulong target_addr,
4073                                              unsigned nsops)
4074 {
4075     struct target_sembuf *target_sembuf;
4076     int i;
4077 
4078     target_sembuf = lock_user(VERIFY_READ, target_addr,
4079                               nsops*sizeof(struct target_sembuf), 1);
4080     if (!target_sembuf)
4081         return -TARGET_EFAULT;
4082 
4083     for (i = 0; i < nsops; i++) {
4084         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4085         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4086         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4087     }
4088 
4089     unlock_user(target_sembuf, target_addr, 0);
4090 
4091     return 0;
4092 }
4093 
4094 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4095     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4096 
4097 /*
4098  * This macro is required to handle the s390 variants, which pass the
4099  * arguments in a different order than the default.
4100  */
4101 #ifdef __s390x__
4102 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4103   (__nsops), (__timeout), (__sops)
4104 #else
4105 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4106   (__nsops), 0, (__sops), (__timeout)
4107 #endif
4108 
4109 static inline abi_long do_semtimedop(int semid,
4110                                      abi_long ptr,
4111                                      unsigned nsops,
4112                                      abi_long timeout, bool time64)
4113 {
4114     struct sembuf *sops;
4115     struct timespec ts, *pts = NULL;
4116     abi_long ret;
4117 
4118     if (timeout) {
4119         pts = &ts;
4120         if (time64) {
4121             if (target_to_host_timespec64(pts, timeout)) {
4122                 return -TARGET_EFAULT;
4123             }
4124         } else {
4125             if (target_to_host_timespec(pts, timeout)) {
4126                 return -TARGET_EFAULT;
4127             }
4128         }
4129     }
4130 
4131     if (nsops > TARGET_SEMOPM) {
4132         return -TARGET_E2BIG;
4133     }
4134 
4135     sops = g_new(struct sembuf, nsops);
4136 
4137     if (target_to_host_sembuf(sops, ptr, nsops)) {
4138         g_free(sops);
4139         return -TARGET_EFAULT;
4140     }
4141 
4142     ret = -TARGET_ENOSYS;
4143 #ifdef __NR_semtimedop
4144     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4145 #endif
4146 #ifdef __NR_ipc
4147     if (ret == -TARGET_ENOSYS) {
4148         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4149                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4150     }
4151 #endif
4152     g_free(sops);
4153     return ret;
4154 }
4155 #endif
4156 
4157 struct target_msqid_ds
4158 {
4159     struct target_ipc_perm msg_perm;
4160     abi_ulong msg_stime;
4161 #if TARGET_ABI_BITS == 32
4162     abi_ulong __unused1;
4163 #endif
4164     abi_ulong msg_rtime;
4165 #if TARGET_ABI_BITS == 32
4166     abi_ulong __unused2;
4167 #endif
4168     abi_ulong msg_ctime;
4169 #if TARGET_ABI_BITS == 32
4170     abi_ulong __unused3;
4171 #endif
4172     abi_ulong __msg_cbytes;
4173     abi_ulong msg_qnum;
4174     abi_ulong msg_qbytes;
4175     abi_ulong msg_lspid;
4176     abi_ulong msg_lrpid;
4177     abi_ulong __unused4;
4178     abi_ulong __unused5;
4179 };
4180 
4181 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4182                                                abi_ulong target_addr)
4183 {
4184     struct target_msqid_ds *target_md;
4185 
4186     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4187         return -TARGET_EFAULT;
4188     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4189         return -TARGET_EFAULT;
4190     host_md->msg_stime = tswapal(target_md->msg_stime);
4191     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4192     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4193     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4194     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4195     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4196     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4197     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4198     unlock_user_struct(target_md, target_addr, 0);
4199     return 0;
4200 }
4201 
4202 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4203                                                struct msqid_ds *host_md)
4204 {
4205     struct target_msqid_ds *target_md;
4206 
4207     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4208         return -TARGET_EFAULT;
4209     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4210         return -TARGET_EFAULT;
4211     target_md->msg_stime = tswapal(host_md->msg_stime);
4212     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4213     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4214     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4215     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4216     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4217     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4218     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4219     unlock_user_struct(target_md, target_addr, 1);
4220     return 0;
4221 }
4222 
4223 struct target_msginfo {
4224     int msgpool;
4225     int msgmap;
4226     int msgmax;
4227     int msgmnb;
4228     int msgmni;
4229     int msgssz;
4230     int msgtql;
4231     unsigned short int msgseg;
4232 };
4233 
4234 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4235                                               struct msginfo *host_msginfo)
4236 {
4237     struct target_msginfo *target_msginfo;
4238     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4239         return -TARGET_EFAULT;
4240     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4241     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4242     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4243     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4244     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4245     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4246     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4247     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4248     unlock_user_struct(target_msginfo, target_addr, 1);
4249     return 0;
4250 }
4251 
4252 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4253 {
4254     struct msqid_ds dsarg;
4255     struct msginfo msginfo;
4256     abi_long ret = -TARGET_EINVAL;
4257 
4258     cmd &= 0xff;
4259 
4260     switch (cmd) {
4261     case IPC_STAT:
4262     case IPC_SET:
4263     case MSG_STAT:
4264         if (target_to_host_msqid_ds(&dsarg,ptr))
4265             return -TARGET_EFAULT;
4266         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4267         if (host_to_target_msqid_ds(ptr,&dsarg))
4268             return -TARGET_EFAULT;
4269         break;
4270     case IPC_RMID:
4271         ret = get_errno(msgctl(msgid, cmd, NULL));
4272         break;
4273     case IPC_INFO:
4274     case MSG_INFO:
4275         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4276         if (host_to_target_msginfo(ptr, &msginfo))
4277             return -TARGET_EFAULT;
4278         break;
4279     }
4280 
4281     return ret;
4282 }
4283 
4284 struct target_msgbuf {
4285     abi_long mtype;
4286     char mtext[1];
4287 };
4288 
4289 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4290                                  ssize_t msgsz, int msgflg)
4291 {
4292     struct target_msgbuf *target_mb;
4293     struct msgbuf *host_mb;
4294     abi_long ret = 0;
4295 
4296     if (msgsz < 0) {
4297         return -TARGET_EINVAL;
4298     }
4299 
4300     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4301         return -TARGET_EFAULT;
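    /* The host struct msgbuf is a long mtype followed by the message text,
     * hence the extra sizeof(long) in the allocation. */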
4302     host_mb = g_try_malloc(msgsz + sizeof(long));
4303     if (!host_mb) {
4304         unlock_user_struct(target_mb, msgp, 0);
4305         return -TARGET_ENOMEM;
4306     }
4307     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4308     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4309     ret = -TARGET_ENOSYS;
4310 #ifdef __NR_msgsnd
4311     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4312 #endif
4313 #ifdef __NR_ipc
4314     if (ret == -TARGET_ENOSYS) {
4315 #ifdef __s390x__
4316         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4317                                  host_mb));
4318 #else
4319         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4320                                  host_mb, 0));
4321 #endif
4322     }
4323 #endif
4324     g_free(host_mb);
4325     unlock_user_struct(target_mb, msgp, 0);
4326 
4327     return ret;
4328 }
4329 
4330 #ifdef __NR_ipc
4331 #if defined(__sparc__)
4332 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4333 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4334 #elif defined(__s390x__)
4335 /* The s390 sys_ipc variant has only five parameters.  */
4336 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4337     ((long int[]){(long int)__msgp, __msgtyp})
4338 #else
4339 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4340     ((long int[]){(long int)__msgp, __msgtyp}), 0
4341 #endif
4342 #endif
4343 
4344 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4345                                  ssize_t msgsz, abi_long msgtyp,
4346                                  int msgflg)
4347 {
4348     struct target_msgbuf *target_mb;
4349     char *target_mtext;
4350     struct msgbuf *host_mb;
4351     abi_long ret = 0;
4352 
4353     if (msgsz < 0) {
4354         return -TARGET_EINVAL;
4355     }
4356 
4357     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4358         return -TARGET_EFAULT;
4359 
4360     host_mb = g_try_malloc(msgsz + sizeof(long));
4361     if (!host_mb) {
4362         ret = -TARGET_ENOMEM;
4363         goto end;
4364     }
4365     ret = -TARGET_ENOSYS;
4366 #ifdef __NR_msgrcv
4367     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4368 #endif
4369 #ifdef __NR_ipc
4370     if (ret == -TARGET_ENOSYS) {
4371         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4372                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4373     }
4374 #endif
4375 
4376     if (ret > 0) {
4377         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4378         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4379         if (!target_mtext) {
4380             ret = -TARGET_EFAULT;
4381             goto end;
4382         }
4383         memcpy(target_mb->mtext, host_mb->mtext, ret);
4384         unlock_user(target_mtext, target_mtext_addr, ret);
4385     }
4386 
4387     target_mb->mtype = tswapal(host_mb->mtype);
4388 
4389 end:
4390     if (target_mb)
4391         unlock_user_struct(target_mb, msgp, 1);
4392     g_free(host_mb);
4393     return ret;
4394 }
4395 
4396 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4397                                                abi_ulong target_addr)
4398 {
4399     struct target_shmid_ds *target_sd;
4400 
4401     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4402         return -TARGET_EFAULT;
4403     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4404         return -TARGET_EFAULT;
4405     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4406     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4407     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4408     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4409     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4410     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4411     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4412     unlock_user_struct(target_sd, target_addr, 0);
4413     return 0;
4414 }
4415 
4416 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4417                                                struct shmid_ds *host_sd)
4418 {
4419     struct target_shmid_ds *target_sd;
4420 
4421     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4422         return -TARGET_EFAULT;
4423     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4424         return -TARGET_EFAULT;
4425     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4426     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4427     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4428     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4429     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4430     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4431     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4432     unlock_user_struct(target_sd, target_addr, 1);
4433     return 0;
4434 }
4435 
4436 struct  target_shminfo {
4437     abi_ulong shmmax;
4438     abi_ulong shmmin;
4439     abi_ulong shmmni;
4440     abi_ulong shmseg;
4441     abi_ulong shmall;
4442 };
4443 
4444 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4445                                               struct shminfo *host_shminfo)
4446 {
4447     struct target_shminfo *target_shminfo;
4448     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4451     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4452     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4453     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4454     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4455     unlock_user_struct(target_shminfo, target_addr, 1);
4456     return 0;
4457 }
4458 
4459 struct target_shm_info {
4460     int used_ids;
4461     abi_ulong shm_tot;
4462     abi_ulong shm_rss;
4463     abi_ulong shm_swp;
4464     abi_ulong swap_attempts;
4465     abi_ulong swap_successes;
4466 };
4467 
4468 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4469                                                struct shm_info *host_shm_info)
4470 {
4471     struct target_shm_info *target_shm_info;
4472     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4473         return -TARGET_EFAULT;
4474     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4475     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4476     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4477     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4478     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4479     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4480     unlock_user_struct(target_shm_info, target_addr, 1);
4481     return 0;
4482 }
4483 
4484 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4485 {
4486     struct shmid_ds dsarg;
4487     struct shminfo shminfo;
4488     struct shm_info shm_info;
4489     abi_long ret = -TARGET_EINVAL;
4490 
4491     cmd &= 0xff;
4492 
4493     switch (cmd) {
4494     case IPC_STAT:
4495     case IPC_SET:
4496     case SHM_STAT:
4497         if (target_to_host_shmid_ds(&dsarg, buf))
4498             return -TARGET_EFAULT;
4499         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4500         if (host_to_target_shmid_ds(buf, &dsarg))
4501             return -TARGET_EFAULT;
4502         break;
4503     case IPC_INFO:
4504         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4505         if (host_to_target_shminfo(buf, &shminfo))
4506             return -TARGET_EFAULT;
4507         break;
4508     case SHM_INFO:
4509         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4510         if (host_to_target_shm_info(buf, &shm_info))
4511             return -TARGET_EFAULT;
4512         break;
4513     case IPC_RMID:
4514     case SHM_LOCK:
4515     case SHM_UNLOCK:
4516         ret = get_errno(shmctl(shmid, cmd, NULL));
4517         break;
4518     }
4519 
4520     return ret;
4521 }
4522 
4523 #ifdef TARGET_NR_ipc
4524 /* ??? This only works with linear mappings.  */
4525 /* do_ipc() must return target values and target errnos. */
4526 static abi_long do_ipc(CPUArchState *cpu_env,
4527                        unsigned int call, abi_long first,
4528                        abi_long second, abi_long third,
4529                        abi_long ptr, abi_long fifth)
4530 {
4531     int version;
4532     abi_long ret = 0;
4533 
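    /*
     * The upper 16 bits of the call number carry the interface version used
     * by older C libraries; version 0 selects the legacy calling convention
     * (e.g. the msgrcv kludge handled below).
     */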
4534     version = call >> 16;
4535     call &= 0xffff;
4536 
4537     switch (call) {
4538     case IPCOP_semop:
4539         ret = do_semtimedop(first, ptr, second, 0, false);
4540         break;
4541     case IPCOP_semtimedop:
4542     /*
4543      * The s390 sys_ipc variant has only five parameters instead of six
4544      * (as in the default variant); the only difference is the handling of
4545      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4546      * to a struct timespec while the generic variant uses the fifth parameter.
4547      */
4548 #if defined(TARGET_S390X)
4549         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4550 #else
4551         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4552 #endif
4553         break;
4554 
4555     case IPCOP_semget:
4556         ret = get_errno(semget(first, second, third));
4557         break;
4558 
4559     case IPCOP_semctl: {
4560         /* The semun argument to semctl is passed by value, so dereference the
4561          * ptr argument. */
4562         abi_ulong atptr;
4563         get_user_ual(atptr, ptr);
4564         ret = do_semctl(first, second, third, atptr);
4565         break;
4566     }
4567 
4568     case IPCOP_msgget:
4569         ret = get_errno(msgget(first, second));
4570         break;
4571 
4572     case IPCOP_msgsnd:
4573         ret = do_msgsnd(first, ptr, second, third);
4574         break;
4575 
4576     case IPCOP_msgctl:
4577         ret = do_msgctl(first, second, ptr);
4578         break;
4579 
4580     case IPCOP_msgrcv:
4581         switch (version) {
4582         case 0:
4583             {
4584                 struct target_ipc_kludge {
4585                     abi_long msgp;
4586                     abi_long msgtyp;
4587                 } *tmp;
4588 
4589                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4590                     ret = -TARGET_EFAULT;
4591                     break;
4592                 }
4593 
4594                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4595 
4596                 unlock_user_struct(tmp, ptr, 0);
4597                 break;
4598             }
4599         default:
4600             ret = do_msgrcv(first, ptr, second, fifth, third);
4601         }
4602         break;
4603 
4604     case IPCOP_shmat:
4605         switch (version) {
4606         default:
4607         {
4608             abi_ulong raddr;
4609             raddr = target_shmat(cpu_env, first, ptr, second);
4610             if (is_error(raddr))
4611                 return get_errno(raddr);
4612             if (put_user_ual(raddr, third))
4613                 return -TARGET_EFAULT;
4614             break;
4615         }
4616         case 1:
4617             ret = -TARGET_EINVAL;
4618             break;
4619         }
4620         break;
4621     case IPCOP_shmdt:
4622         ret = target_shmdt(ptr);
4623         break;
4624 
4625     case IPCOP_shmget:
4626         /* IPC_* flag values are the same on all Linux platforms */
4627         ret = get_errno(shmget(first, second, third));
4628         break;
4629 
4630     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4631     case IPCOP_shmctl:
4632         ret = do_shmctl(first, second, ptr);
4633         break;
4634     default:
4635         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4636                       call, version);
4637         ret = -TARGET_ENOSYS;
4638         break;
4639     }
4640     return ret;
4641 }
4642 #endif
4643 
4644 /* kernel structure types definitions */
4645 
4646 #define STRUCT(name, ...) STRUCT_ ## name,
4647 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4648 enum {
4649 #include "syscall_types.h"
4650 STRUCT_MAX
4651 };
4652 #undef STRUCT
4653 #undef STRUCT_SPECIAL
4654 
4655 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4656 #define STRUCT_SPECIAL(name)
4657 #include "syscall_types.h"
4658 #undef STRUCT
4659 #undef STRUCT_SPECIAL
4660 
4661 #define MAX_STRUCT_SIZE 4096
4662 
4663 #ifdef CONFIG_FIEMAP
4664 /* So fiemap access checks don't overflow on 32 bit systems.
4665  * This is very slightly smaller than the limit imposed by
4666  * the underlying kernel.
4667  */
4668 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4669                             / sizeof(struct fiemap_extent))
4670 
4671 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4672                                        int fd, int cmd, abi_long arg)
4673 {
4674     /* The parameter for this ioctl is a struct fiemap followed
4675      * by an array of struct fiemap_extent whose size is set
4676      * in fiemap->fm_extent_count. The array is filled in by the
4677      * ioctl.
4678      */
4679     int target_size_in, target_size_out;
4680     struct fiemap *fm;
4681     const argtype *arg_type = ie->arg_type;
4682     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4683     void *argptr, *p;
4684     abi_long ret;
4685     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4686     uint32_t outbufsz;
4687     int free_fm = 0;
4688 
4689     assert(arg_type[0] == TYPE_PTR);
4690     assert(ie->access == IOC_RW);
4691     arg_type++;
4692     target_size_in = thunk_type_size(arg_type, 0);
4693     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4694     if (!argptr) {
4695         return -TARGET_EFAULT;
4696     }
4697     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4698     unlock_user(argptr, arg, 0);
4699     fm = (struct fiemap *)buf_temp;
4700     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4701         return -TARGET_EINVAL;
4702     }
4703 
4704     outbufsz = sizeof (*fm) +
4705         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4706 
4707     if (outbufsz > MAX_STRUCT_SIZE) {
4708         /* We can't fit all the extents into the fixed size buffer.
4709          * Allocate one that is large enough and use it instead.
4710          */
4711         fm = g_try_malloc(outbufsz);
4712         if (!fm) {
4713             return -TARGET_ENOMEM;
4714         }
4715         memcpy(fm, buf_temp, sizeof(struct fiemap));
4716         free_fm = 1;
4717     }
4718     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4719     if (!is_error(ret)) {
4720         target_size_out = target_size_in;
4721         /* An extent_count of 0 means we were only counting the extents
4722          * so there are no structs to copy
4723          */
4724         if (fm->fm_extent_count != 0) {
4725             target_size_out += fm->fm_mapped_extents * extent_size;
4726         }
4727         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4728         if (!argptr) {
4729             ret = -TARGET_EFAULT;
4730         } else {
4731             /* Convert the struct fiemap */
4732             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4733             if (fm->fm_extent_count != 0) {
4734                 p = argptr + target_size_in;
4735                 /* ...and then all the struct fiemap_extents */
4736                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4737                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4738                                   THUNK_TARGET);
4739                     p += extent_size;
4740                 }
4741             }
4742             unlock_user(argptr, arg, target_size_out);
4743         }
4744     }
4745     if (free_fm) {
4746         g_free(fm);
4747     }
4748     return ret;
4749 }
4750 #endif
4751 
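/*
 * SIOCGIFCONF: the guest passes a struct ifconf whose ifc_buf points at an
 * array of target struct ifreq.  Target and host ifreq layouts may differ
 * in size, so the guest's ifc_len is converted into a host element count,
 * the ioctl runs against a host-side buffer (allocated separately when it
 * does not fit in buf_temp), and the resulting entries are converted back
 * one by one into the guest buffer.  A NULL ifc_buf is forwarded as-is so
 * the guest can query the buffer size it needs.
 */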
4752 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4753                                 int fd, int cmd, abi_long arg)
4754 {
4755     const argtype *arg_type = ie->arg_type;
4756     int target_size;
4757     void *argptr;
4758     int ret;
4759     struct ifconf *host_ifconf;
4760     uint32_t outbufsz;
4761     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4762     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4763     int target_ifreq_size;
4764     int nb_ifreq;
4765     int free_buf = 0;
4766     int i;
4767     int target_ifc_len;
4768     abi_long target_ifc_buf;
4769     int host_ifc_len;
4770     char *host_ifc_buf;
4771 
4772     assert(arg_type[0] == TYPE_PTR);
4773     assert(ie->access == IOC_RW);
4774 
4775     arg_type++;
4776     target_size = thunk_type_size(arg_type, 0);
4777 
4778     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4779     if (!argptr)
4780         return -TARGET_EFAULT;
4781     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4782     unlock_user(argptr, arg, 0);
4783 
4784     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4785     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4786     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4787 
4788     if (target_ifc_buf != 0) {
4789         target_ifc_len = host_ifconf->ifc_len;
4790         nb_ifreq = target_ifc_len / target_ifreq_size;
4791         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4792 
4793         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4794         if (outbufsz > MAX_STRUCT_SIZE) {
4795             /*
4796              * We can't fit all the ifreq entries into the fixed size buffer.
4797              * Allocate one that is large enough and use it instead.
4798              */
4799             host_ifconf = g_try_malloc(outbufsz);
4800             if (!host_ifconf) {
4801                 return -TARGET_ENOMEM;
4802             }
4803             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4804             free_buf = 1;
4805         }
4806         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4807 
4808         host_ifconf->ifc_len = host_ifc_len;
4809     } else {
4810         host_ifc_buf = NULL;
4811     }
4812     host_ifconf->ifc_buf = host_ifc_buf;
4813 
4814     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4815     if (!is_error(ret)) {
4816         /* convert host ifc_len to target ifc_len */
4817 
4818         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4819         target_ifc_len = nb_ifreq * target_ifreq_size;
4820         host_ifconf->ifc_len = target_ifc_len;
4821 
4822         /* restore target ifc_buf */
4823 
4824         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4825 
4826         /* copy struct ifconf to target user */
4827 
4828         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4829         if (!argptr)
4830             return -TARGET_EFAULT;
4831         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4832         unlock_user(argptr, arg, target_size);
4833 
4834         if (target_ifc_buf != 0) {
4835             /* copy ifreq[] to target user */
4836             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4837             for (i = 0; i < nb_ifreq ; i++) {
4838                 thunk_convert(argptr + i * target_ifreq_size,
4839                               host_ifc_buf + i * sizeof(struct ifreq),
4840                               ifreq_arg_type, THUNK_TARGET);
4841             }
4842             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4843         }
4844     }
4845 
4846     if (free_buf) {
4847         g_free(host_ifconf);
4848     }
4849 
4850     return ret;
4851 }
4852 
4853 #if defined(CONFIG_USBFS)
4854 #if HOST_LONG_BITS > 64
4855 #error USBDEVFS thunks do not support >64 bit hosts yet.
4856 #endif
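/*
 * Each submitted USB URB is tracked by a struct live_urb wrapper that
 * records the guest URB address, the guest data buffer address and the
 * locked host pointer for that buffer, alongside the host usbdevfs_urb
 * actually handed to the kernel.  target_urb_adr is deliberately the first
 * member: a pointer to the wrapper therefore also points at its 64-bit hash
 * key, which is what the g_int64_hash table below relies on.
 */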
4857 struct live_urb {
4858     uint64_t target_urb_adr;
4859     uint64_t target_buf_adr;
4860     char *target_buf_ptr;
4861     struct usbdevfs_urb host_urb;
4862 };
4863 
4864 static GHashTable *usbdevfs_urb_hashtable(void)
4865 {
4866     static GHashTable *urb_hashtable;
4867 
4868     if (!urb_hashtable) {
4869         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4870     }
4871     return urb_hashtable;
4872 }
4873 
4874 static void urb_hashtable_insert(struct live_urb *urb)
4875 {
4876     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4877     g_hash_table_insert(urb_hashtable, urb, urb);
4878 }
4879 
4880 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4881 {
4882     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4883     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4884 }
4885 
4886 static void urb_hashtable_remove(struct live_urb *urb)
4887 {
4888     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4889     g_hash_table_remove(urb_hashtable, urb);
4890 }
4891 
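/*
 * USBDEVFS_REAPURB(NDELAY): the kernel hands back a pointer to the host
 * usbdevfs_urb that was submitted earlier.  Subtracting the offset of
 * host_urb recovers the enclosing live_urb (container_of by hand); the
 * guest data buffer is then unlocked, the completed URB fields are
 * converted back into the guest's copy, and the guest URB address is
 * written to the guest-supplied pointer slot.
 */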
4892 static abi_long
4893 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4894                           int fd, int cmd, abi_long arg)
4895 {
4896     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4897     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4898     struct live_urb *lurb;
4899     void *argptr;
4900     uint64_t hurb;
4901     int target_size;
4902     uintptr_t target_urb_adr;
4903     abi_long ret;
4904 
4905     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4906 
4907     memset(buf_temp, 0, sizeof(uint64_t));
4908     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4909     if (is_error(ret)) {
4910         return ret;
4911     }
4912 
4913     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4914     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4915     if (!lurb->target_urb_adr) {
4916         return -TARGET_EFAULT;
4917     }
4918     urb_hashtable_remove(lurb);
4919     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4920         lurb->host_urb.buffer_length);
4921     lurb->target_buf_ptr = NULL;
4922 
4923     /* restore the guest buffer pointer */
4924     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4925 
4926     /* update the guest urb struct */
4927     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4928     if (!argptr) {
4929         g_free(lurb);
4930         return -TARGET_EFAULT;
4931     }
4932     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4933     unlock_user(argptr, lurb->target_urb_adr, target_size);
4934 
4935     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4936     /* write back the urb handle */
4937     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4938     if (!argptr) {
4939         g_free(lurb);
4940         return -TARGET_EFAULT;
4941     }
4942 
4943     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4944     target_urb_adr = lurb->target_urb_adr;
4945     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4946     unlock_user(argptr, arg, target_size);
4947 
4948     g_free(lurb);
4949     return ret;
4950 }
4951 
4952 static abi_long
4953 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4954                              uint8_t *buf_temp __attribute__((unused)),
4955                              int fd, int cmd, abi_long arg)
4956 {
4957     struct live_urb *lurb;
4958 
4959     /* map target address back to host URB with metadata. */
4960     lurb = urb_hashtable_lookup(arg);
4961     if (!lurb) {
4962         return -TARGET_EFAULT;
4963     }
4964     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4965 }
4966 
4967 static abi_long
4968 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4969                             int fd, int cmd, abi_long arg)
4970 {
4971     const argtype *arg_type = ie->arg_type;
4972     int target_size;
4973     abi_long ret;
4974     void *argptr;
4975     int rw_dir;
4976     struct live_urb *lurb;
4977 
4978     /*
4979      * each submitted URB needs to map to a unique ID for the
4980      * kernel, and that unique ID needs to be a pointer to
4981      * host memory.  hence, we need to malloc for each URB.
4982      * isochronous transfers have a variable length struct.
4983      */
4984     arg_type++;
4985     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4986 
4987     /* construct host copy of urb and metadata */
4988     lurb = g_try_new0(struct live_urb, 1);
4989     if (!lurb) {
4990         return -TARGET_ENOMEM;
4991     }
4992 
4993     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4994     if (!argptr) {
4995         g_free(lurb);
4996         return -TARGET_EFAULT;
4997     }
4998     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4999     unlock_user(argptr, arg, 0);
5000 
5001     lurb->target_urb_adr = arg;
5002     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5003 
5004     /* buffer space used depends on endpoint type so lock the entire buffer */
5005     /* control type urbs should check the buffer contents for true direction */
5006     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5007     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5008         lurb->host_urb.buffer_length, 1);
5009     if (lurb->target_buf_ptr == NULL) {
5010         g_free(lurb);
5011         return -TARGET_EFAULT;
5012     }
5013 
5014     /* update buffer pointer in host copy */
5015     lurb->host_urb.buffer = lurb->target_buf_ptr;
5016 
5017     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5018     if (is_error(ret)) {
5019         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5020         g_free(lurb);
5021     } else {
5022         urb_hashtable_insert(lurb);
5023     }
5024 
5025     return ret;
5026 }
5027 #endif /* CONFIG_USBFS */
5028 
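/*
 * Device-mapper ioctls share a single header, struct dm_ioctl, followed by
 * a command-specific payload: data_start gives the payload offset and
 * data_size the total buffer size.  buf_temp is usually too small for the
 * payload, so a temporary buffer of data_size * 2 is allocated instead.
 * The payload is converted per command on the way in, and the (possibly
 * variable-length, string-carrying) results are converted back per command
 * before the header itself is copied out to the guest.
 */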
5029 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5030                             int cmd, abi_long arg)
5031 {
5032     void *argptr;
5033     struct dm_ioctl *host_dm;
5034     abi_long guest_data;
5035     uint32_t guest_data_size;
5036     int target_size;
5037     const argtype *arg_type = ie->arg_type;
5038     abi_long ret;
5039     void *big_buf = NULL;
5040     char *host_data;
5041 
5042     arg_type++;
5043     target_size = thunk_type_size(arg_type, 0);
5044     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5045     if (!argptr) {
5046         ret = -TARGET_EFAULT;
5047         goto out;
5048     }
5049     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5050     unlock_user(argptr, arg, 0);
5051 
5052     /* buf_temp is too small, so fetch things into a bigger buffer */
5053     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5054     memcpy(big_buf, buf_temp, target_size);
5055     buf_temp = big_buf;
5056     host_dm = big_buf;
5057 
5058     guest_data = arg + host_dm->data_start;
5059     if ((guest_data - arg) < 0) {
5060         ret = -TARGET_EINVAL;
5061         goto out;
5062     }
5063     guest_data_size = host_dm->data_size - host_dm->data_start;
5064     host_data = (char*)host_dm + host_dm->data_start;
5065 
5066     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5067     if (!argptr) {
5068         ret = -TARGET_EFAULT;
5069         goto out;
5070     }
5071 
5072     switch (ie->host_cmd) {
5073     case DM_REMOVE_ALL:
5074     case DM_LIST_DEVICES:
5075     case DM_DEV_CREATE:
5076     case DM_DEV_REMOVE:
5077     case DM_DEV_SUSPEND:
5078     case DM_DEV_STATUS:
5079     case DM_DEV_WAIT:
5080     case DM_TABLE_STATUS:
5081     case DM_TABLE_CLEAR:
5082     case DM_TABLE_DEPS:
5083     case DM_LIST_VERSIONS:
5084         /* no input data */
5085         break;
5086     case DM_DEV_RENAME:
5087     case DM_DEV_SET_GEOMETRY:
5088         /* data contains only strings */
5089         memcpy(host_data, argptr, guest_data_size);
5090         break;
5091     case DM_TARGET_MSG:
5092         memcpy(host_data, argptr, guest_data_size);
5093         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5094         break;
5095     case DM_TABLE_LOAD:
5096     {
5097         void *gspec = argptr;
5098         void *cur_data = host_data;
5099         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5100         int spec_size = thunk_type_size(dm_arg_type, 0);
5101         int i;
5102 
5103         for (i = 0; i < host_dm->target_count; i++) {
5104             struct dm_target_spec *spec = cur_data;
5105             uint32_t next;
5106             int slen;
5107 
5108             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5109             slen = strlen((char*)gspec + spec_size) + 1;
5110             next = spec->next;
5111             spec->next = sizeof(*spec) + slen;
5112             strcpy((char*)&spec[1], gspec + spec_size);
5113             gspec += next;
5114             cur_data += spec->next;
5115         }
5116         break;
5117     }
5118     default:
5119         ret = -TARGET_EINVAL;
5120         unlock_user(argptr, guest_data, 0);
5121         goto out;
5122     }
5123     unlock_user(argptr, guest_data, 0);
5124 
5125     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5126     if (!is_error(ret)) {
5127         guest_data = arg + host_dm->data_start;
5128         guest_data_size = host_dm->data_size - host_dm->data_start;
5129         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5130         switch (ie->host_cmd) {
5131         case DM_REMOVE_ALL:
5132         case DM_DEV_CREATE:
5133         case DM_DEV_REMOVE:
5134         case DM_DEV_RENAME:
5135         case DM_DEV_SUSPEND:
5136         case DM_DEV_STATUS:
5137         case DM_TABLE_LOAD:
5138         case DM_TABLE_CLEAR:
5139         case DM_TARGET_MSG:
5140         case DM_DEV_SET_GEOMETRY:
5141             /* no return data */
5142             break;
5143         case DM_LIST_DEVICES:
5144         {
5145             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5146             uint32_t remaining_data = guest_data_size;
5147             void *cur_data = argptr;
5148             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5149             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5150 
5151             while (1) {
5152                 uint32_t next = nl->next;
5153                 if (next) {
5154                     nl->next = nl_size + (strlen(nl->name) + 1);
5155                 }
5156                 if (remaining_data < nl->next) {
5157                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5158                     break;
5159                 }
5160                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5161                 strcpy(cur_data + nl_size, nl->name);
5162                 cur_data += nl->next;
5163                 remaining_data -= nl->next;
5164                 if (!next) {
5165                     break;
5166                 }
5167                 nl = (void*)nl + next;
5168             }
5169             break;
5170         }
5171         case DM_DEV_WAIT:
5172         case DM_TABLE_STATUS:
5173         {
5174             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5175             void *cur_data = argptr;
5176             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5177             int spec_size = thunk_type_size(dm_arg_type, 0);
5178             int i;
5179 
5180             for (i = 0; i < host_dm->target_count; i++) {
5181                 uint32_t next = spec->next;
5182                 int slen = strlen((char*)&spec[1]) + 1;
5183                 spec->next = (cur_data - argptr) + spec_size + slen;
5184                 if (guest_data_size < spec->next) {
5185                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5186                     break;
5187                 }
5188                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5189                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5190                 cur_data = argptr + spec->next;
5191                 spec = (void*)host_dm + host_dm->data_start + next;
5192             }
5193             break;
5194         }
5195         case DM_TABLE_DEPS:
5196         {
5197             void *hdata = (void*)host_dm + host_dm->data_start;
5198             int count = *(uint32_t*)hdata;
5199             uint64_t *hdev = hdata + 8;
5200             uint64_t *gdev = argptr + 8;
5201             int i;
5202 
5203             *(uint32_t*)argptr = tswap32(count);
5204             for (i = 0; i < count; i++) {
5205                 *gdev = tswap64(*hdev);
5206                 gdev++;
5207                 hdev++;
5208             }
5209             break;
5210         }
5211         case DM_LIST_VERSIONS:
5212         {
5213             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5214             uint32_t remaining_data = guest_data_size;
5215             void *cur_data = argptr;
5216             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5217             int vers_size = thunk_type_size(dm_arg_type, 0);
5218 
5219             while (1) {
5220                 uint32_t next = vers->next;
5221                 if (next) {
5222                     vers->next = vers_size + (strlen(vers->name) + 1);
5223                 }
5224                 if (remaining_data < vers->next) {
5225                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5226                     break;
5227                 }
5228                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5229                 strcpy(cur_data + vers_size, vers->name);
5230                 cur_data += vers->next;
5231                 remaining_data -= vers->next;
5232                 if (!next) {
5233                     break;
5234                 }
5235                 vers = (void*)vers + next;
5236             }
5237             break;
5238         }
5239         default:
5240             unlock_user(argptr, guest_data, 0);
5241             ret = -TARGET_EINVAL;
5242             goto out;
5243         }
5244         unlock_user(argptr, guest_data, guest_data_size);
5245 
5246         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5247         if (!argptr) {
5248             ret = -TARGET_EFAULT;
5249             goto out;
5250         }
5251         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5252         unlock_user(argptr, arg, target_size);
5253     }
5254 out:
5255     g_free(big_buf);
5256     return ret;
5257 }
5258 
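/*
 * BLKPG: struct blkpg_ioctl_arg carries an op code plus a data pointer to a
 * struct blkpg_partition.  The outer structure is converted first, then the
 * partition record is fetched from guest memory into a local host copy and
 * the data pointer is redirected at that copy before issuing the host
 * ioctl.  Only BLKPG_ADD_PARTITION and BLKPG_DEL_PARTITION are accepted
 * here, and no results are copied back to the guest.
 */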
5259 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5260                                int cmd, abi_long arg)
5261 {
5262     void *argptr;
5263     int target_size;
5264     const argtype *arg_type = ie->arg_type;
5265     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5266     abi_long ret;
5267 
5268     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5269     struct blkpg_partition host_part;
5270 
5271     /* Read and convert blkpg */
5272     arg_type++;
5273     target_size = thunk_type_size(arg_type, 0);
5274     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5275     if (!argptr) {
5276         ret = -TARGET_EFAULT;
5277         goto out;
5278     }
5279     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5280     unlock_user(argptr, arg, 0);
5281 
5282     switch (host_blkpg->op) {
5283     case BLKPG_ADD_PARTITION:
5284     case BLKPG_DEL_PARTITION:
5285         /* payload is struct blkpg_partition */
5286         break;
5287     default:
5288         /* Unknown opcode */
5289         ret = -TARGET_EINVAL;
5290         goto out;
5291     }
5292 
5293     /* Read and convert blkpg->data */
5294     arg = (abi_long)(uintptr_t)host_blkpg->data;
5295     target_size = thunk_type_size(part_arg_type, 0);
5296     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5297     if (!argptr) {
5298         ret = -TARGET_EFAULT;
5299         goto out;
5300     }
5301     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5302     unlock_user(argptr, arg, 0);
5303 
5304     /* Swizzle the data pointer to our local copy and call! */
5305     host_blkpg->data = &host_part;
5306     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5307 
5308 out:
5309     return ret;
5310 }
5311 
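/*
 * SIOCADDRT/SIOCDELRT pass a struct rtentry whose rt_dev member is a
 * pointer to a device name string, which the generic thunk conversion
 * cannot follow.  The structure is therefore converted field by field
 * using the offsets recorded in the rtentry StructEntry; when the rt_dev
 * field is reached the guest string is locked and its host address
 * substituted, and it is unlocked again once the ioctl has run.
 */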
5312 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5313                                 int fd, int cmd, abi_long arg)
5314 {
5315     const argtype *arg_type = ie->arg_type;
5316     const StructEntry *se;
5317     const argtype *field_types;
5318     const int *dst_offsets, *src_offsets;
5319     int target_size;
5320     void *argptr;
5321     abi_ulong *target_rt_dev_ptr = NULL;
5322     unsigned long *host_rt_dev_ptr = NULL;
5323     abi_long ret;
5324     int i;
5325 
5326     assert(ie->access == IOC_W);
5327     assert(*arg_type == TYPE_PTR);
5328     arg_type++;
5329     assert(*arg_type == TYPE_STRUCT);
5330     target_size = thunk_type_size(arg_type, 0);
5331     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5332     if (!argptr) {
5333         return -TARGET_EFAULT;
5334     }
5335     arg_type++;
5336     assert(*arg_type == (int)STRUCT_rtentry);
5337     se = struct_entries + *arg_type++;
5338     assert(se->convert[0] == NULL);
5339     /* convert struct here to be able to catch rt_dev string */
5340     field_types = se->field_types;
5341     dst_offsets = se->field_offsets[THUNK_HOST];
5342     src_offsets = se->field_offsets[THUNK_TARGET];
5343     for (i = 0; i < se->nb_fields; i++) {
5344         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5345             assert(*field_types == TYPE_PTRVOID);
5346             target_rt_dev_ptr = argptr + src_offsets[i];
5347             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5348             if (*target_rt_dev_ptr != 0) {
5349                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5350                                                   tswapal(*target_rt_dev_ptr));
5351                 if (!*host_rt_dev_ptr) {
5352                     unlock_user(argptr, arg, 0);
5353                     return -TARGET_EFAULT;
5354                 }
5355             } else {
5356                 *host_rt_dev_ptr = 0;
5357             }
5358             field_types++;
5359             continue;
5360         }
5361         field_types = thunk_convert(buf_temp + dst_offsets[i],
5362                                     argptr + src_offsets[i],
5363                                     field_types, THUNK_HOST);
5364     }
5365     unlock_user(argptr, arg, 0);
5366 
5367     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5368 
5369     assert(host_rt_dev_ptr != NULL);
5370     assert(target_rt_dev_ptr != NULL);
5371     if (*host_rt_dev_ptr != 0) {
5372         unlock_user((void *)*host_rt_dev_ptr,
5373                     *target_rt_dev_ptr, 0);
5374     }
5375     return ret;
5376 }
5377 
5378 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5379                                      int fd, int cmd, abi_long arg)
5380 {
5381     int sig = target_to_host_signal(arg);
5382     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5383 }
5384 
5385 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5386                                     int fd, int cmd, abi_long arg)
5387 {
5388     struct timeval tv;
5389     abi_long ret;
5390 
5391     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5392     if (is_error(ret)) {
5393         return ret;
5394     }
5395 
5396     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5397         if (copy_to_user_timeval(arg, &tv)) {
5398             return -TARGET_EFAULT;
5399         }
5400     } else {
5401         if (copy_to_user_timeval64(arg, &tv)) {
5402             return -TARGET_EFAULT;
5403         }
5404     }
5405 
5406     return ret;
5407 }
5408 
5409 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5410                                       int fd, int cmd, abi_long arg)
5411 {
5412     struct timespec ts;
5413     abi_long ret;
5414 
5415     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5416     if (is_error(ret)) {
5417         return ret;
5418     }
5419 
5420     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5421         if (host_to_target_timespec(arg, &ts)) {
5422             return -TARGET_EFAULT;
5423         }
5424     } else {
5425         if (host_to_target_timespec64(arg, &ts)) {
5426             return -TARGET_EFAULT;
5427         }
5428     }
5429 
5430     return ret;
5431 }
5432 
5433 #ifdef TIOCGPTPEER
5434 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5435                                      int fd, int cmd, abi_long arg)
5436 {
5437     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5438     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5439 }
5440 #endif
5441 
5442 #ifdef HAVE_DRM_H
5443 
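/*
 * DRM_IOCTL_VERSION fills three caller-supplied, length-prefixed string
 * buffers (name, date, desc) in addition to the numeric version fields.
 * The helpers below lock those guest buffers so that the host struct
 * drm_version can point straight at them, run the ioctl, then copy the
 * updated lengths back and unlock the buffers (discarding their contents
 * again if the ioctl failed).
 */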
5444 static void unlock_drm_version(struct drm_version *host_ver,
5445                                struct target_drm_version *target_ver,
5446                                bool copy)
5447 {
5448     unlock_user(host_ver->name, target_ver->name,
5449                                 copy ? host_ver->name_len : 0);
5450     unlock_user(host_ver->date, target_ver->date,
5451                                 copy ? host_ver->date_len : 0);
5452     unlock_user(host_ver->desc, target_ver->desc,
5453                                 copy ? host_ver->desc_len : 0);
5454 }
5455 
5456 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5457                                           struct target_drm_version *target_ver)
5458 {
5459     memset(host_ver, 0, sizeof(*host_ver));
5460 
5461     __get_user(host_ver->name_len, &target_ver->name_len);
5462     if (host_ver->name_len) {
5463         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5464                                    target_ver->name_len, 0);
5465         if (!host_ver->name) {
5466             return -EFAULT;
5467         }
5468     }
5469 
5470     __get_user(host_ver->date_len, &target_ver->date_len);
5471     if (host_ver->date_len) {
5472         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5473                                    target_ver->date_len, 0);
5474         if (!host_ver->date) {
5475             goto err;
5476         }
5477     }
5478 
5479     __get_user(host_ver->desc_len, &target_ver->desc_len);
5480     if (host_ver->desc_len) {
5481         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5482                                    target_ver->desc_len, 0);
5483         if (!host_ver->desc) {
5484             goto err;
5485         }
5486     }
5487 
5488     return 0;
5489 err:
5490     unlock_drm_version(host_ver, target_ver, false);
5491     return -EFAULT;
5492 }
5493 
5494 static inline void host_to_target_drmversion(
5495                                           struct target_drm_version *target_ver,
5496                                           struct drm_version *host_ver)
5497 {
5498     __put_user(host_ver->version_major, &target_ver->version_major);
5499     __put_user(host_ver->version_minor, &target_ver->version_minor);
5500     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5501     __put_user(host_ver->name_len, &target_ver->name_len);
5502     __put_user(host_ver->date_len, &target_ver->date_len);
5503     __put_user(host_ver->desc_len, &target_ver->desc_len);
5504     unlock_drm_version(host_ver, target_ver, true);
5505 }
5506 
5507 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5508                              int fd, int cmd, abi_long arg)
5509 {
5510     struct drm_version *ver;
5511     struct target_drm_version *target_ver;
5512     abi_long ret;
5513 
5514     switch (ie->host_cmd) {
5515     case DRM_IOCTL_VERSION:
5516         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5517             return -TARGET_EFAULT;
5518         }
5519         ver = (struct drm_version *)buf_temp;
5520         ret = target_to_host_drmversion(ver, target_ver);
5521         if (!is_error(ret)) {
5522             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5523             if (is_error(ret)) {
5524                 unlock_drm_version(ver, target_ver, false);
5525             } else {
5526                 host_to_target_drmversion(target_ver, ver);
5527             }
5528         }
5529         unlock_user_struct(target_ver, arg, 0);
5530         return ret;
5531     }
5532     return -TARGET_ENOSYS;
5533 }
5534 
5535 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5536                                            struct drm_i915_getparam *gparam,
5537                                            int fd, abi_long arg)
5538 {
5539     abi_long ret;
5540     int value;
5541     struct target_drm_i915_getparam *target_gparam;
5542 
5543     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5544         return -TARGET_EFAULT;
5545     }
5546 
5547     __get_user(gparam->param, &target_gparam->param);
5548     gparam->value = &value;
5549     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5550     put_user_s32(value, target_gparam->value);
5551 
5552     unlock_user_struct(target_gparam, arg, 0);
5553     return ret;
5554 }
5555 
5556 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5557                                   int fd, int cmd, abi_long arg)
5558 {
5559     switch (ie->host_cmd) {
5560     case DRM_IOCTL_I915_GETPARAM:
5561         return do_ioctl_drm_i915_getparam(ie,
5562                                           (struct drm_i915_getparam *)buf_temp,
5563                                           fd, arg);
5564     default:
5565         return -TARGET_ENOSYS;
5566     }
5567 }
5568 
5569 #endif
5570 
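/*
 * TUNSETTXFILTER takes a variable-length struct tun_filter: a fixed header
 * (flags, count) followed by count ETH_ALEN-byte MAC addresses.  The header
 * is converted first so the length of the address array is known, then the
 * addresses are copied verbatim; the total size is checked against
 * MAX_STRUCT_SIZE because the whole filter is assembled in buf_temp.
 */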
5571 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5572                                         int fd, int cmd, abi_long arg)
5573 {
5574     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5575     struct tun_filter *target_filter;
5576     char *target_addr;
5577 
5578     assert(ie->access == IOC_W);
5579 
5580     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5581     if (!target_filter) {
5582         return -TARGET_EFAULT;
5583     }
5584     filter->flags = tswap16(target_filter->flags);
5585     filter->count = tswap16(target_filter->count);
5586     unlock_user(target_filter, arg, 0);
5587 
5588     if (filter->count) {
5589         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5590             MAX_STRUCT_SIZE) {
5591             return -TARGET_EFAULT;
5592         }
5593 
5594         target_addr = lock_user(VERIFY_READ,
5595                                 arg + offsetof(struct tun_filter, addr),
5596                                 filter->count * ETH_ALEN, 1);
5597         if (!target_addr) {
5598             return -TARGET_EFAULT;
5599         }
5600         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5601         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5602     }
5603 
5604     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5605 }
5606 
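/*
 * The ioctl dispatch table is generated from ioctls.h.  Each IOCTL() entry
 * records the target and host command numbers, an access mode and an
 * argtype description used for generic thunk conversion; IOCTL_SPECIAL()
 * additionally names a custom handler, and IOCTL_IGNORE() registers a
 * command that is recognised but simply fails with ENOTTY instead of being
 * logged as unsupported.  A plain entry might look like this (illustrative
 * form only, not a literal quote from ioctls.h):
 *
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * do_ioctl() below scans the table linearly for the target command number.
 */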
5607 IOCTLEntry ioctl_entries[] = {
5608 #define IOCTL(cmd, access, ...) \
5609     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5610 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5611     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5612 #define IOCTL_IGNORE(cmd) \
5613     { TARGET_ ## cmd, 0, #cmd },
5614 #include "ioctls.h"
5615     { 0, 0, },
5616 };
5617 
5618 /* ??? Implement proper locking for ioctls.  */
5619 /* do_ioctl() must return target values and target errnos. */
5620 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5621 {
5622     const IOCTLEntry *ie;
5623     const argtype *arg_type;
5624     abi_long ret;
5625     uint8_t buf_temp[MAX_STRUCT_SIZE];
5626     int target_size;
5627     void *argptr;
5628 
5629     ie = ioctl_entries;
5630     for(;;) {
5631         if (ie->target_cmd == 0) {
5632             qemu_log_mask(
5633                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5634             return -TARGET_ENOTTY;
5635         }
5636         if (ie->target_cmd == cmd)
5637             break;
5638         ie++;
5639     }
5640     arg_type = ie->arg_type;
5641     if (ie->do_ioctl) {
5642         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5643     } else if (!ie->host_cmd) {
5644         /* Some architectures define BSD ioctls in their headers
5645            that are not implemented in Linux.  */
5646         return -TARGET_ENOTTY;
5647     }
5648 
5649     switch(arg_type[0]) {
5650     case TYPE_NULL:
5651         /* no argument */
5652         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5653         break;
5654     case TYPE_PTRVOID:
5655     case TYPE_INT:
5656     case TYPE_LONG:
5657     case TYPE_ULONG:
5658         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5659         break;
5660     case TYPE_PTR:
5661         arg_type++;
5662         target_size = thunk_type_size(arg_type, 0);
5663         switch(ie->access) {
5664         case IOC_R:
5665             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5666             if (!is_error(ret)) {
5667                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5668                 if (!argptr)
5669                     return -TARGET_EFAULT;
5670                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5671                 unlock_user(argptr, arg, target_size);
5672             }
5673             break;
5674         case IOC_W:
5675             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5676             if (!argptr)
5677                 return -TARGET_EFAULT;
5678             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5679             unlock_user(argptr, arg, 0);
5680             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5681             break;
5682         default:
5683         case IOC_RW:
5684             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5685             if (!argptr)
5686                 return -TARGET_EFAULT;
5687             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5688             unlock_user(argptr, arg, 0);
5689             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5690             if (!is_error(ret)) {
5691                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5692                 if (!argptr)
5693                     return -TARGET_EFAULT;
5694                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5695                 unlock_user(argptr, arg, target_size);
5696             }
5697             break;
5698         }
5699         break;
5700     default:
5701         qemu_log_mask(LOG_UNIMP,
5702                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5703                       (long)cmd, arg_type[0]);
5704         ret = -TARGET_ENOTTY;
5705         break;
5706     }
5707     return ret;
5708 }
5709 
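/*
 * Terminal flag conversion tables.  Each bitmask_transtbl entry pairs a
 * target mask/value with the corresponding host mask/value, so both simple
 * on/off flags (mask equal to value, e.g. IGNBRK) and multi-bit fields
 * (e.g. the CBAUD speed selector or the CSIZE character-size field) can be
 * translated by target_to_host_bitmask() and host_to_target_bitmask() in
 * the termios converters further down.
 */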
5710 static const bitmask_transtbl iflag_tbl[] = {
5711         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5712         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5713         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5714         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5715         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5716         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5717         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5718         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5719         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5720         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5721         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5722         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5723         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5724         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5725         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5726 };
5727 
5728 static const bitmask_transtbl oflag_tbl[] = {
5729 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5730 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5731 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5732 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5733 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5734 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5735 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5736 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5737 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5738 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5739 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5740 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5741 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5742 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5743 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5744 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5745 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5746 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5747 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5748 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5749 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5750 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5751 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5752 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5753 };
5754 
5755 static const bitmask_transtbl cflag_tbl[] = {
5756 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5757 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5758 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5759 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5760 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5761 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5762 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5763 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5764 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5765 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5766 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5767 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5768 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5769 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5770 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5771 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5772 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5773 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5774 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5775 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5776 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5777 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5778 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5779 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5780 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5781 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5782 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5783 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5784 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5785 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5786 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5787 };
5788 
5789 static const bitmask_transtbl lflag_tbl[] = {
5790   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5791   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5792   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5793   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5794   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5795   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5796   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5797   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5798   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5799   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5800   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5801   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5802   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5803   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5804   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5805   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5806 };
5807 
5808 static void target_to_host_termios (void *dst, const void *src)
5809 {
5810     struct host_termios *host = dst;
5811     const struct target_termios *target = src;
5812 
5813     host->c_iflag =
5814         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5815     host->c_oflag =
5816         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5817     host->c_cflag =
5818         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5819     host->c_lflag =
5820         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5821     host->c_line = target->c_line;
5822 
5823     memset(host->c_cc, 0, sizeof(host->c_cc));
5824     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5825     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5826     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5827     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5828     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5829     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5830     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5831     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5832     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5833     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5834     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5835     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5836     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5837     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5838     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5839     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5840     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5841 }
5842 
5843 static void host_to_target_termios (void *dst, const void *src)
5844 {
5845     struct target_termios *target = dst;
5846     const struct host_termios *host = src;
5847 
5848     target->c_iflag =
5849         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5850     target->c_oflag =
5851         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5852     target->c_cflag =
5853         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5854     target->c_lflag =
5855         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5856     target->c_line = host->c_line;
5857 
5858     memset(target->c_cc, 0, sizeof(target->c_cc));
5859     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5860     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5861     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5862     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5863     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5864     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5865     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5866     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5867     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5868     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5869     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5870     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5871     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5872     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5873     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5874     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5875     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5876 }
5877 
5878 static const StructEntry struct_termios_def = {
5879     .convert = { host_to_target_termios, target_to_host_termios },
5880     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5881     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5882     .print = print_termios,
5883 };
5884 
5885 /* If the host does not provide these bits, they may be safely discarded. */
5886 #ifndef MAP_SYNC
5887 #define MAP_SYNC 0
5888 #endif
5889 #ifndef MAP_UNINITIALIZED
5890 #define MAP_UNINITIALIZED 0
5891 #endif
5892 
5893 static const bitmask_transtbl mmap_flags_tbl[] = {
5894     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5895     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5896       MAP_ANONYMOUS, MAP_ANONYMOUS },
5897     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5898       MAP_GROWSDOWN, MAP_GROWSDOWN },
5899     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5900       MAP_DENYWRITE, MAP_DENYWRITE },
5901     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5902       MAP_EXECUTABLE, MAP_EXECUTABLE },
5903     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5904     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5905       MAP_NORESERVE, MAP_NORESERVE },
5906     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5907     /* MAP_STACK had been ignored by the kernel for quite some time.
5908        Recognize it for the target insofar as we do not want to pass
5909        it through to the host.  */
5910     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5911     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5912     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5913     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5914       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5915     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5916       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5917 };
5918 
5919 /*
5920  * Arrange for legacy / undefined architecture specific flags to be
5921  * ignored by mmap handling code.
5922  */
5923 #ifndef TARGET_MAP_32BIT
5924 #define TARGET_MAP_32BIT 0
5925 #endif
5926 #ifndef TARGET_MAP_HUGE_2MB
5927 #define TARGET_MAP_HUGE_2MB 0
5928 #endif
5929 #ifndef TARGET_MAP_HUGE_1GB
5930 #define TARGET_MAP_HUGE_1GB 0
5931 #endif
5932 
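/*
 * mmap flag handling: the mapping type bits (MAP_PRIVATE, MAP_SHARED,
 * MAP_SHARED_VALIDATE) are decoded explicitly, because MAP_SHARED_VALIDATE
 * must reject flags it does not understand and is the only type allowed to
 * carry MAP_SYNC; all remaining flags go through mmap_flags_tbl.  For
 * example, a guest request of MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK
 * becomes host MAP_PRIVATE | MAP_ANONYMOUS, MAP_STACK being deliberately
 * dropped as noted in the table above.
 */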
5933 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5934                         int target_flags, int fd, off_t offset)
5935 {
5936     /*
5937      * The historical set of flags that all mmap types implicitly support.
5938      */
5939     enum {
5940         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5941                                | TARGET_MAP_PRIVATE
5942                                | TARGET_MAP_FIXED
5943                                | TARGET_MAP_ANONYMOUS
5944                                | TARGET_MAP_DENYWRITE
5945                                | TARGET_MAP_EXECUTABLE
5946                                | TARGET_MAP_UNINITIALIZED
5947                                | TARGET_MAP_GROWSDOWN
5948                                | TARGET_MAP_LOCKED
5949                                | TARGET_MAP_NORESERVE
5950                                | TARGET_MAP_POPULATE
5951                                | TARGET_MAP_NONBLOCK
5952                                | TARGET_MAP_STACK
5953                                | TARGET_MAP_HUGETLB
5954                                | TARGET_MAP_32BIT
5955                                | TARGET_MAP_HUGE_2MB
5956                                | TARGET_MAP_HUGE_1GB
5957     };
5958     int host_flags;
5959 
5960     switch (target_flags & TARGET_MAP_TYPE) {
5961     case TARGET_MAP_PRIVATE:
5962         host_flags = MAP_PRIVATE;
5963         break;
5964     case TARGET_MAP_SHARED:
5965         host_flags = MAP_SHARED;
5966         break;
5967     case TARGET_MAP_SHARED_VALIDATE:
5968         /*
5969          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5970          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5971          */
5972         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5973             return -TARGET_EOPNOTSUPP;
5974         }
5975         host_flags = MAP_SHARED_VALIDATE;
5976         if (target_flags & TARGET_MAP_SYNC) {
5977             host_flags |= MAP_SYNC;
5978         }
5979         break;
5980     default:
5981         return -TARGET_EINVAL;
5982     }
5983     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5984 
5985     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5986 }
5987 
5988 /*
5989  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5990  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5991  */
5992 #if defined(TARGET_I386)
5993 
5994 /* NOTE: there is really one LDT for all the threads */
5995 static uint8_t *ldt_table;
5996 
5997 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5998 {
5999     int size;
6000     void *p;
6001 
6002     if (!ldt_table)
6003         return 0;
6004     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6005     if (size > bytecount)
6006         size = bytecount;
6007     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6008     if (!p)
6009         return -TARGET_EFAULT;
6010     /* ??? Should this be byteswapped?  */
6011     memcpy(p, ldt_table, size);
6012     unlock_user(p, ptr, size);
6013     return size;
6014 }
6015 
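/*
 * write_ldt() implements the store half of the guest's modify_ldt()
 * syscall.  The guest-visible LDT lives in a flat byte array mapped lazily
 * at env->ldt.base; the target_modify_ldt_ldt_s flags word is unpacked into
 * its descriptor bits (seg_32bit, contents, read_exec_only, limit_in_pages,
 * seg_not_present, useable, lm) and the two 32-bit halves of the x86
 * segment descriptor are rebuilt the same way the Linux kernel does, then
 * stored into the table in target byte order.
 */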
6016 /* XXX: add locking support */
6017 static abi_long write_ldt(CPUX86State *env,
6018                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6019 {
6020     struct target_modify_ldt_ldt_s ldt_info;
6021     struct target_modify_ldt_ldt_s *target_ldt_info;
6022     int seg_32bit, contents, read_exec_only, limit_in_pages;
6023     int seg_not_present, useable, lm;
6024     uint32_t *lp, entry_1, entry_2;
6025 
6026     if (bytecount != sizeof(ldt_info))
6027         return -TARGET_EINVAL;
6028     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6029         return -TARGET_EFAULT;
6030     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6031     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6032     ldt_info.limit = tswap32(target_ldt_info->limit);
6033     ldt_info.flags = tswap32(target_ldt_info->flags);
6034     unlock_user_struct(target_ldt_info, ptr, 0);
6035 
6036     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6037         return -TARGET_EINVAL;
6038     seg_32bit = ldt_info.flags & 1;
6039     contents = (ldt_info.flags >> 1) & 3;
6040     read_exec_only = (ldt_info.flags >> 3) & 1;
6041     limit_in_pages = (ldt_info.flags >> 4) & 1;
6042     seg_not_present = (ldt_info.flags >> 5) & 1;
6043     useable = (ldt_info.flags >> 6) & 1;
6044 #ifdef TARGET_ABI32
6045     lm = 0;
6046 #else
6047     lm = (ldt_info.flags >> 7) & 1;
6048 #endif
6049     if (contents == 3) {
6050         if (oldmode)
6051             return -TARGET_EINVAL;
6052         if (seg_not_present == 0)
6053             return -TARGET_EINVAL;
6054     }
6055     /* allocate the LDT */
6056     if (!ldt_table) {
6057         env->ldt.base = target_mmap(0,
6058                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6059                                     PROT_READ|PROT_WRITE,
6060                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6061         if (env->ldt.base == -1)
6062             return -TARGET_ENOMEM;
6063         memset(g2h_untagged(env->ldt.base), 0,
6064                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6065         env->ldt.limit = 0xffff;
6066         ldt_table = g2h_untagged(env->ldt.base);
6067     }
6068 
6069     /* NOTE: same code as Linux kernel */
6070     /* Allow LDTs to be cleared by the user. */
6071     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6072         if (oldmode ||
6073             (contents == 0             &&
6074              read_exec_only == 1       &&
6075              seg_32bit == 0            &&
6076              limit_in_pages == 0       &&
6077              seg_not_present == 1      &&
6078              useable == 0 )) {
6079             entry_1 = 0;
6080             entry_2 = 0;
6081             goto install;
6082         }
6083     }
6084 
6085     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6086         (ldt_info.limit & 0x0ffff);
6087     entry_2 = (ldt_info.base_addr & 0xff000000) |
6088         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6089         (ldt_info.limit & 0xf0000) |
6090         ((read_exec_only ^ 1) << 9) |
6091         (contents << 10) |
6092         ((seg_not_present ^ 1) << 15) |
6093         (seg_32bit << 22) |
6094         (limit_in_pages << 23) |
6095         (lm << 21) |
6096         0x7000;
6097     if (!oldmode)
6098         entry_2 |= (useable << 20);
6099 
6100     /* Install the new entry ...  */
6101 install:
6102     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6103     lp[0] = tswap32(entry_1);
6104     lp[1] = tswap32(entry_2);
6105     return 0;
6106 }
6107 
6108 /* specific and weird i386 syscalls */
6109 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6110                               unsigned long bytecount)
6111 {
6112     abi_long ret;
6113 
6114     switch (func) {
6115     case 0:
6116         ret = read_ldt(ptr, bytecount);
6117         break;
6118     case 1:
6119         ret = write_ldt(env, ptr, bytecount, 1);
6120         break;
6121     case 0x11:
6122         ret = write_ldt(env, ptr, bytecount, 0);
6123         break;
6124     default:
6125         ret = -TARGET_ENOSYS;
6126         break;
6127     }
6128     return ret;
6129 }
6130 
6131 #if defined(TARGET_ABI32)
6132 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6133 {
6134     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6135     struct target_modify_ldt_ldt_s ldt_info;
6136     struct target_modify_ldt_ldt_s *target_ldt_info;
6137     int seg_32bit, contents, read_exec_only, limit_in_pages;
6138     int seg_not_present, useable, lm;
6139     uint32_t *lp, entry_1, entry_2;
6140     int i;
6141 
6142     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6143     if (!target_ldt_info)
6144         return -TARGET_EFAULT;
6145     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6146     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6147     ldt_info.limit = tswap32(target_ldt_info->limit);
6148     ldt_info.flags = tswap32(target_ldt_info->flags);
6149     if (ldt_info.entry_number == -1) {
6150         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6151             if (gdt_table[i] == 0) {
6152                 ldt_info.entry_number = i;
6153                 target_ldt_info->entry_number = tswap32(i);
6154                 break;
6155             }
6156         }
6157     }
6158     unlock_user_struct(target_ldt_info, ptr, 1);
6159 
6160     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6161         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6162            return -TARGET_EINVAL;
6163     seg_32bit = ldt_info.flags & 1;
6164     contents = (ldt_info.flags >> 1) & 3;
6165     read_exec_only = (ldt_info.flags >> 3) & 1;
6166     limit_in_pages = (ldt_info.flags >> 4) & 1;
6167     seg_not_present = (ldt_info.flags >> 5) & 1;
6168     useable = (ldt_info.flags >> 6) & 1;
6169 #ifdef TARGET_ABI32
6170     lm = 0;
6171 #else
6172     lm = (ldt_info.flags >> 7) & 1;
6173 #endif
6174 
6175     if (contents == 3) {
6176         if (seg_not_present == 0)
6177             return -TARGET_EINVAL;
6178     }
6179 
6180     /* NOTE: same code as Linux kernel */
6181     /* Allow LDTs to be cleared by the user. */
6182     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6183         if ((contents == 0             &&
6184              read_exec_only == 1       &&
6185              seg_32bit == 0            &&
6186              limit_in_pages == 0       &&
6187              seg_not_present == 1      &&
6188              useable == 0 )) {
6189             entry_1 = 0;
6190             entry_2 = 0;
6191             goto install;
6192         }
6193     }
6194 
6195     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6196         (ldt_info.limit & 0x0ffff);
6197     entry_2 = (ldt_info.base_addr & 0xff000000) |
6198         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6199         (ldt_info.limit & 0xf0000) |
6200         ((read_exec_only ^ 1) << 9) |
6201         (contents << 10) |
6202         ((seg_not_present ^ 1) << 15) |
6203         (seg_32bit << 22) |
6204         (limit_in_pages << 23) |
6205         (useable << 20) |
6206         (lm << 21) |
6207         0x7000;
6208 
6209     /* Install the new entry ...  */
6210 install:
6211     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6212     lp[0] = tswap32(entry_1);
6213     lp[1] = tswap32(entry_2);
6214     return 0;
6215 }
6216 
6217 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6218 {
6219     struct target_modify_ldt_ldt_s *target_ldt_info;
6220     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6221     uint32_t base_addr, limit, flags;
6222     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6223     int seg_not_present, useable, lm;
6224     uint32_t *lp, entry_1, entry_2;
6225 
6226     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6227     if (!target_ldt_info)
6228         return -TARGET_EFAULT;
6229     idx = tswap32(target_ldt_info->entry_number);
6230     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6231         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6232         unlock_user_struct(target_ldt_info, ptr, 1);
6233         return -TARGET_EINVAL;
6234     }
6235     lp = (uint32_t *)(gdt_table + idx);
6236     entry_1 = tswap32(lp[0]);
6237     entry_2 = tswap32(lp[1]);
6238 
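         /*
          * Unpack the descriptor words back into the modify_ldt flag
          * layout; this is the inverse of the packing done in
          * do_set_thread_area() above.
          */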
6239     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6240     contents = (entry_2 >> 10) & 3;
6241     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6242     seg_32bit = (entry_2 >> 22) & 1;
6243     limit_in_pages = (entry_2 >> 23) & 1;
6244     useable = (entry_2 >> 20) & 1;
6245 #ifdef TARGET_ABI32
6246     lm = 0;
6247 #else
6248     lm = (entry_2 >> 21) & 1;
6249 #endif
6250     flags = (seg_32bit << 0) | (contents << 1) |
6251         (read_exec_only << 3) | (limit_in_pages << 4) |
6252         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6253     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6254     base_addr = (entry_1 >> 16) |
6255         (entry_2 & 0xff000000) |
6256         ((entry_2 & 0xff) << 16);
6257     target_ldt_info->base_addr = tswapal(base_addr);
6258     target_ldt_info->limit = tswap32(limit);
6259     target_ldt_info->flags = tswap32(flags);
6260     unlock_user_struct(target_ldt_info, ptr, 1);
6261     return 0;
6262 }
6263 
6264 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6265 {
6266     return -TARGET_ENOSYS;
6267 }
6268 #else
6269 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6270 {
6271     abi_long ret = 0;
6272     abi_ulong val;
6273     int idx;
6274 
6275     switch(code) {
6276     case TARGET_ARCH_SET_GS:
6277     case TARGET_ARCH_SET_FS:
6278         if (code == TARGET_ARCH_SET_GS)
6279             idx = R_GS;
6280         else
6281             idx = R_FS;
6282         cpu_x86_load_seg(env, idx, 0);
6283         env->segs[idx].base = addr;
6284         break;
6285     case TARGET_ARCH_GET_GS:
6286     case TARGET_ARCH_GET_FS:
6287         if (code == TARGET_ARCH_GET_GS)
6288             idx = R_GS;
6289         else
6290             idx = R_FS;
6291         val = env->segs[idx].base;
6292         if (put_user(val, addr, abi_ulong))
6293             ret = -TARGET_EFAULT;
6294         break;
6295     default:
6296         ret = -TARGET_EINVAL;
6297         break;
6298     }
6299     return ret;
6300 }
6301 #endif /* defined(TARGET_ABI32) */
6302 #endif /* defined(TARGET_I386) */
6303 
6304 /*
6305  * These constants are generic.  Supply any that are missing from the host.
6306  */
6307 #ifndef PR_SET_NAME
6308 # define PR_SET_NAME    15
6309 # define PR_GET_NAME    16
6310 #endif
6311 #ifndef PR_SET_FP_MODE
6312 # define PR_SET_FP_MODE 45
6313 # define PR_GET_FP_MODE 46
6314 # define PR_FP_MODE_FR   (1 << 0)
6315 # define PR_FP_MODE_FRE  (1 << 1)
6316 #endif
6317 #ifndef PR_SVE_SET_VL
6318 # define PR_SVE_SET_VL  50
6319 # define PR_SVE_GET_VL  51
6320 # define PR_SVE_VL_LEN_MASK  0xffff
6321 # define PR_SVE_VL_INHERIT   (1 << 17)
6322 #endif
6323 #ifndef PR_PAC_RESET_KEYS
6324 # define PR_PAC_RESET_KEYS  54
6325 # define PR_PAC_APIAKEY   (1 << 0)
6326 # define PR_PAC_APIBKEY   (1 << 1)
6327 # define PR_PAC_APDAKEY   (1 << 2)
6328 # define PR_PAC_APDBKEY   (1 << 3)
6329 # define PR_PAC_APGAKEY   (1 << 4)
6330 #endif
6331 #ifndef PR_SET_TAGGED_ADDR_CTRL
6332 # define PR_SET_TAGGED_ADDR_CTRL 55
6333 # define PR_GET_TAGGED_ADDR_CTRL 56
6334 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6335 #endif
6336 #ifndef PR_SET_IO_FLUSHER
6337 # define PR_SET_IO_FLUSHER 57
6338 # define PR_GET_IO_FLUSHER 58
6339 #endif
6340 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6341 # define PR_SET_SYSCALL_USER_DISPATCH 59
6342 #endif
6343 #ifndef PR_SME_SET_VL
6344 # define PR_SME_SET_VL  63
6345 # define PR_SME_GET_VL  64
6346 # define PR_SME_VL_LEN_MASK  0xffff
6347 # define PR_SME_VL_INHERIT   (1 << 17)
6348 #endif
6349 
6350 #include "target_prctl.h"
6351 
6352 static abi_long do_prctl_inval0(CPUArchState *env)
6353 {
6354     return -TARGET_EINVAL;
6355 }
6356 
6357 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6358 {
6359     return -TARGET_EINVAL;
6360 }
6361 
6362 #ifndef do_prctl_get_fp_mode
6363 #define do_prctl_get_fp_mode do_prctl_inval0
6364 #endif
6365 #ifndef do_prctl_set_fp_mode
6366 #define do_prctl_set_fp_mode do_prctl_inval1
6367 #endif
6368 #ifndef do_prctl_sve_get_vl
6369 #define do_prctl_sve_get_vl do_prctl_inval0
6370 #endif
6371 #ifndef do_prctl_sve_set_vl
6372 #define do_prctl_sve_set_vl do_prctl_inval1
6373 #endif
6374 #ifndef do_prctl_reset_keys
6375 #define do_prctl_reset_keys do_prctl_inval1
6376 #endif
6377 #ifndef do_prctl_set_tagged_addr_ctrl
6378 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_get_tagged_addr_ctrl
6381 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6382 #endif
6383 #ifndef do_prctl_get_unalign
6384 #define do_prctl_get_unalign do_prctl_inval1
6385 #endif
6386 #ifndef do_prctl_set_unalign
6387 #define do_prctl_set_unalign do_prctl_inval1
6388 #endif
6389 #ifndef do_prctl_sme_get_vl
6390 #define do_prctl_sme_get_vl do_prctl_inval0
6391 #endif
6392 #ifndef do_prctl_sme_set_vl
6393 #define do_prctl_sme_set_vl do_prctl_inval1
6394 #endif
6395 
6396 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6397                          abi_long arg3, abi_long arg4, abi_long arg5)
6398 {
6399     abi_long ret;
6400 
6401     switch (option) {
6402     case PR_GET_PDEATHSIG:
6403         {
6404             int deathsig;
6405             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6406                                   arg3, arg4, arg5));
6407             if (!is_error(ret) &&
6408                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6409                 return -TARGET_EFAULT;
6410             }
6411             return ret;
6412         }
6413     case PR_SET_PDEATHSIG:
6414         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6415                                arg3, arg4, arg5));
6416     case PR_GET_NAME:
6417         {
6418             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6419             if (!name) {
6420                 return -TARGET_EFAULT;
6421             }
6422             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6423                                   arg3, arg4, arg5));
6424             unlock_user(name, arg2, 16);
6425             return ret;
6426         }
6427     case PR_SET_NAME:
6428         {
6429             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6430             if (!name) {
6431                 return -TARGET_EFAULT;
6432             }
6433             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6434                                   arg3, arg4, arg5));
6435             unlock_user(name, arg2, 0);
6436             return ret;
6437         }
6438     case PR_GET_FP_MODE:
6439         return do_prctl_get_fp_mode(env);
6440     case PR_SET_FP_MODE:
6441         return do_prctl_set_fp_mode(env, arg2);
6442     case PR_SVE_GET_VL:
6443         return do_prctl_sve_get_vl(env);
6444     case PR_SVE_SET_VL:
6445         return do_prctl_sve_set_vl(env, arg2);
6446     case PR_SME_GET_VL:
6447         return do_prctl_sme_get_vl(env);
6448     case PR_SME_SET_VL:
6449         return do_prctl_sme_set_vl(env, arg2);
6450     case PR_PAC_RESET_KEYS:
6451         if (arg3 || arg4 || arg5) {
6452             return -TARGET_EINVAL;
6453         }
6454         return do_prctl_reset_keys(env, arg2);
6455     case PR_SET_TAGGED_ADDR_CTRL:
6456         if (arg3 || arg4 || arg5) {
6457             return -TARGET_EINVAL;
6458         }
6459         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6460     case PR_GET_TAGGED_ADDR_CTRL:
6461         if (arg2 || arg3 || arg4 || arg5) {
6462             return -TARGET_EINVAL;
6463         }
6464         return do_prctl_get_tagged_addr_ctrl(env);
6465 
6466     case PR_GET_UNALIGN:
6467         return do_prctl_get_unalign(env, arg2);
6468     case PR_SET_UNALIGN:
6469         return do_prctl_set_unalign(env, arg2);
6470 
6471     case PR_CAP_AMBIENT:
6472     case PR_CAPBSET_READ:
6473     case PR_CAPBSET_DROP:
6474     case PR_GET_DUMPABLE:
6475     case PR_SET_DUMPABLE:
6476     case PR_GET_KEEPCAPS:
6477     case PR_SET_KEEPCAPS:
6478     case PR_GET_SECUREBITS:
6479     case PR_SET_SECUREBITS:
6480     case PR_GET_TIMING:
6481     case PR_SET_TIMING:
6482     case PR_GET_TIMERSLACK:
6483     case PR_SET_TIMERSLACK:
6484     case PR_MCE_KILL:
6485     case PR_MCE_KILL_GET:
6486     case PR_GET_NO_NEW_PRIVS:
6487     case PR_SET_NO_NEW_PRIVS:
6488     case PR_GET_IO_FLUSHER:
6489     case PR_SET_IO_FLUSHER:
6490     case PR_SET_CHILD_SUBREAPER:
6491     case PR_GET_SPECULATION_CTRL:
6492     case PR_SET_SPECULATION_CTRL:
6493         /* These prctl options have no pointer arguments; pass them on. */
6494         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6495 
6496     case PR_GET_CHILD_SUBREAPER:
6497         {
6498             int val;
6499             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6500                                   arg3, arg4, arg5));
6501             if (!is_error(ret) && put_user_s32(val, arg2)) {
6502                 return -TARGET_EFAULT;
6503             }
6504             return ret;
6505         }
6506 
6507     case PR_GET_TID_ADDRESS:
6508         {
6509             TaskState *ts = get_task_state(env_cpu(env));
6510             return put_user_ual(ts->child_tidptr, arg2);
6511         }
6512 
6513     case PR_GET_FPEXC:
6514     case PR_SET_FPEXC:
6515         /* Was used for SPE on PowerPC. */
6516         return -TARGET_EINVAL;
6517 
6518     case PR_GET_ENDIAN:
6519     case PR_SET_ENDIAN:
6520     case PR_GET_FPEMU:
6521     case PR_SET_FPEMU:
6522     case PR_SET_MM:
6523     case PR_GET_SECCOMP:
6524     case PR_SET_SECCOMP:
6525     case PR_SET_SYSCALL_USER_DISPATCH:
6526     case PR_GET_THP_DISABLE:
6527     case PR_SET_THP_DISABLE:
6528     case PR_GET_TSC:
6529     case PR_SET_TSC:
6530         /* Refuse these so the guest cannot disable things QEMU needs. */
6531         return -TARGET_EINVAL;
6532 
6533     default:
6534         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6535                       option);
6536         return -TARGET_EINVAL;
6537     }
6538 }
6539 
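     /*
      * Size of the host stack for the pthread backing a guest thread
      * created with CLONE_VM; the guest thread's own stack is whatever
      * newsp points at.
      */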
6540 #define NEW_STACK_SIZE 0x40000
6541 
6542 
6543 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6544 typedef struct {
6545     CPUArchState *env;
6546     pthread_mutex_t mutex;
6547     pthread_cond_t cond;
6548     pthread_t thread;
6549     uint32_t tid;
6550     abi_ulong child_tidptr;
6551     abi_ulong parent_tidptr;
6552     sigset_t sigmask;
6553 } new_thread_info;
6554 
6555 static void *clone_func(void *arg)
6556 {
6557     new_thread_info *info = arg;
6558     CPUArchState *env;
6559     CPUState *cpu;
6560     TaskState *ts;
6561 
6562     rcu_register_thread();
6563     tcg_register_thread();
6564     env = info->env;
6565     cpu = env_cpu(env);
6566     thread_cpu = cpu;
6567     ts = get_task_state(cpu);
6568     info->tid = sys_gettid();
6569     task_settid(ts);
6570     if (info->child_tidptr)
6571         put_user_u32(info->tid, info->child_tidptr);
6572     if (info->parent_tidptr)
6573         put_user_u32(info->tid, info->parent_tidptr);
6574     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6575     /* Enable signals.  */
6576     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6577     /* Signal to the parent that we're ready.  */
6578     pthread_mutex_lock(&info->mutex);
6579     pthread_cond_broadcast(&info->cond);
6580     pthread_mutex_unlock(&info->mutex);
6581     /* Wait until the parent has finished initializing the tls state.  */
6582     pthread_mutex_lock(&clone_lock);
6583     pthread_mutex_unlock(&clone_lock);
6584     cpu_loop(env);
6585     /* never exits */
6586     return NULL;
6587 }
6588 
6589 /* do_fork() must return host values and target errnos (unlike most
6590    do_*() functions). */
6591 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6592                    abi_ulong parent_tidptr, target_ulong newtls,
6593                    abi_ulong child_tidptr)
6594 {
6595     CPUState *cpu = env_cpu(env);
6596     int ret;
6597     TaskState *ts;
6598     CPUState *new_cpu;
6599     CPUArchState *new_env;
6600     sigset_t sigmask;
6601 
6602     flags &= ~CLONE_IGNORED_FLAGS;
6603 
6604     /* Emulate vfork() with fork() */
6605     if (flags & CLONE_VFORK)
6606         flags &= ~(CLONE_VFORK | CLONE_VM);
6607 
6608     if (flags & CLONE_VM) {
6609         TaskState *parent_ts = get_task_state(cpu);
6610         new_thread_info info;
6611         pthread_attr_t attr;
6612 
6613         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6614             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6615             return -TARGET_EINVAL;
6616         }
6617 
6618         ts = g_new0(TaskState, 1);
6619         init_task_state(ts);
6620 
6621         /* Grab a mutex so that thread setup appears atomic.  */
6622         pthread_mutex_lock(&clone_lock);
6623 
6624         /*
6625          * If this is our first additional thread, we need to ensure we
6626          * generate code for parallel execution and flush old translations.
6627          * Do this now so that the copy gets CF_PARALLEL too.
6628          */
6629         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6630             tcg_cflags_set(cpu, CF_PARALLEL);
6631             tb_flush(cpu);
6632         }
6633 
6634         /* we create a new CPU instance. */
6635         new_env = cpu_copy(env);
6636         /* Init regs that differ from the parent.  */
6637         cpu_clone_regs_child(new_env, newsp, flags);
6638         cpu_clone_regs_parent(env, flags);
6639         new_cpu = env_cpu(new_env);
6640         new_cpu->opaque = ts;
6641         ts->bprm = parent_ts->bprm;
6642         ts->info = parent_ts->info;
6643         ts->signal_mask = parent_ts->signal_mask;
6644 
6645         if (flags & CLONE_CHILD_CLEARTID) {
6646             ts->child_tidptr = child_tidptr;
6647         }
6648 
6649         if (flags & CLONE_SETTLS) {
6650             cpu_set_tls (new_env, newtls);
6651         }
6652 
6653         memset(&info, 0, sizeof(info));
6654         pthread_mutex_init(&info.mutex, NULL);
6655         pthread_mutex_lock(&info.mutex);
6656         pthread_cond_init(&info.cond, NULL);
6657         info.env = new_env;
6658         if (flags & CLONE_CHILD_SETTID) {
6659             info.child_tidptr = child_tidptr;
6660         }
6661         if (flags & CLONE_PARENT_SETTID) {
6662             info.parent_tidptr = parent_tidptr;
6663         }
6664 
6665         ret = pthread_attr_init(&attr);
6666         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6667         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6668         /* It is not safe to deliver signals until the child has finished
6669            initializing, so temporarily block all signals.  */
6670         sigfillset(&sigmask);
6671         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6672         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6673 
6674         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6675         /* TODO: Free new CPU state if thread creation failed.  */
6676 
6677         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6678         pthread_attr_destroy(&attr);
6679         if (ret == 0) {
6680             /* Wait for the child to initialize.  */
6681             pthread_cond_wait(&info.cond, &info.mutex);
6682             ret = info.tid;
6683         } else {
6684             ret = -1;
6685         }
6686         pthread_mutex_unlock(&info.mutex);
6687         pthread_cond_destroy(&info.cond);
6688         pthread_mutex_destroy(&info.mutex);
6689         pthread_mutex_unlock(&clone_lock);
6690     } else {
6691         /* if no CLONE_VM, we consider it a fork */
6692         if (flags & CLONE_INVALID_FORK_FLAGS) {
6693             return -TARGET_EINVAL;
6694         }
6695 
6696         /* We can't support custom termination signals */
6697         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6698             return -TARGET_EINVAL;
6699         }
6700 
6701 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6702         if (flags & CLONE_PIDFD) {
6703             return -TARGET_EINVAL;
6704         }
6705 #endif
6706 
6707         /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6708         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6709             return -TARGET_EINVAL;
6710         }
6711 
6712         if (block_signals()) {
6713             return -QEMU_ERESTARTSYS;
6714         }
6715 
6716         fork_start();
6717         ret = fork();
6718         if (ret == 0) {
6719             /* Child Process.  */
6720             cpu_clone_regs_child(env, newsp, flags);
6721             fork_end(ret);
6722             /* There is a race condition here.  The parent process could
6723                theoretically read the TID in the child process before the child
6724                tid is set.  This would require using either ptrace
6725                (not implemented) or having *_tidptr point at a shared memory
6726                mapping.  We can't repeat the spinlock hack used above because
6727                the child process gets its own copy of the lock.  */
6728             if (flags & CLONE_CHILD_SETTID)
6729                 put_user_u32(sys_gettid(), child_tidptr);
6730             if (flags & CLONE_PARENT_SETTID)
6731                 put_user_u32(sys_gettid(), parent_tidptr);
6732             ts = get_task_state(cpu);
6733             if (flags & CLONE_SETTLS)
6734                 cpu_set_tls (env, newtls);
6735             if (flags & CLONE_CHILD_CLEARTID)
6736                 ts->child_tidptr = child_tidptr;
6737         } else {
6738             cpu_clone_regs_parent(env, flags);
6739             if (flags & CLONE_PIDFD) {
6740                 int pid_fd = 0;
6741 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6742                 int pid_child = ret;
6743                 pid_fd = pidfd_open(pid_child, 0);
6744                 if (pid_fd >= 0) {
6745                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6746                                                | FD_CLOEXEC);
6747                 } else {
6748                         pid_fd = 0;
6749                 }
6750 #endif
6751                 put_user_u32(pid_fd, parent_tidptr);
6752             }
6753             fork_end(ret);
6754         }
6755         g_assert(!cpu_in_exclusive_context(cpu));
6756     }
6757     return ret;
6758 }
6759 
6760 /* warning: doesn't handle Linux-specific flags... */
6761 static int target_to_host_fcntl_cmd(int cmd)
6762 {
6763     int ret;
6764 
6765     switch(cmd) {
6766     case TARGET_F_DUPFD:
6767     case TARGET_F_GETFD:
6768     case TARGET_F_SETFD:
6769     case TARGET_F_GETFL:
6770     case TARGET_F_SETFL:
6771     case TARGET_F_OFD_GETLK:
6772     case TARGET_F_OFD_SETLK:
6773     case TARGET_F_OFD_SETLKW:
6774         ret = cmd;
6775         break;
6776     case TARGET_F_GETLK:
6777         ret = F_GETLK;
6778         break;
6779     case TARGET_F_SETLK:
6780         ret = F_SETLK;
6781         break;
6782     case TARGET_F_SETLKW:
6783         ret = F_SETLKW;
6784         break;
6785     case TARGET_F_GETOWN:
6786         ret = F_GETOWN;
6787         break;
6788     case TARGET_F_SETOWN:
6789         ret = F_SETOWN;
6790         break;
6791     case TARGET_F_GETSIG:
6792         ret = F_GETSIG;
6793         break;
6794     case TARGET_F_SETSIG:
6795         ret = F_SETSIG;
6796         break;
6797 #if TARGET_ABI_BITS == 32
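         /* With a 32-bit target ABI, the target's F_*LK64 lock commands are
          * mapped to the host's regular F_GETLK/F_SETLK/F_SETLKW. */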
6798     case TARGET_F_GETLK64:
6799         ret = F_GETLK;
6800         break;
6801     case TARGET_F_SETLK64:
6802         ret = F_SETLK;
6803         break;
6804     case TARGET_F_SETLKW64:
6805         ret = F_SETLKW;
6806         break;
6807 #endif
6808     case TARGET_F_SETLEASE:
6809         ret = F_SETLEASE;
6810         break;
6811     case TARGET_F_GETLEASE:
6812         ret = F_GETLEASE;
6813         break;
6814 #ifdef F_DUPFD_CLOEXEC
6815     case TARGET_F_DUPFD_CLOEXEC:
6816         ret = F_DUPFD_CLOEXEC;
6817         break;
6818 #endif
6819     case TARGET_F_NOTIFY:
6820         ret = F_NOTIFY;
6821         break;
6822 #ifdef F_GETOWN_EX
6823     case TARGET_F_GETOWN_EX:
6824         ret = F_GETOWN_EX;
6825         break;
6826 #endif
6827 #ifdef F_SETOWN_EX
6828     case TARGET_F_SETOWN_EX:
6829         ret = F_SETOWN_EX;
6830         break;
6831 #endif
6832 #ifdef F_SETPIPE_SZ
6833     case TARGET_F_SETPIPE_SZ:
6834         ret = F_SETPIPE_SZ;
6835         break;
6836     case TARGET_F_GETPIPE_SZ:
6837         ret = F_GETPIPE_SZ;
6838         break;
6839 #endif
6840 #ifdef F_ADD_SEALS
6841     case TARGET_F_ADD_SEALS:
6842         ret = F_ADD_SEALS;
6843         break;
6844     case TARGET_F_GET_SEALS:
6845         ret = F_GET_SEALS;
6846         break;
6847 #endif
6848     default:
6849         ret = -TARGET_EINVAL;
6850         break;
6851     }
6852 
6853 #if defined(__powerpc64__)
6854     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6855      * are not supported by the kernel. The glibc fcntl call actually adjusts
6856      * them to 5, 6 and 7 before making the syscall(). Since we make the
6857      * syscall directly, adjust to what is supported by the kernel.
6858      */
6859     if (ret >= F_GETLK && ret <= F_SETLKW) {
6860         ret -= F_GETLK - 5;
6861     }
6862 #endif
6863 
6864     return ret;
6865 }
6866 
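     /*
      * FLOCK_TRANSTBL expands to the case labels shared by the two
      * converters below; TRANSTBL_CONVERT is redefined before each
      * expansion to select the direction of the translation.
      */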
6867 #define FLOCK_TRANSTBL \
6868     switch (type) { \
6869     TRANSTBL_CONVERT(F_RDLCK); \
6870     TRANSTBL_CONVERT(F_WRLCK); \
6871     TRANSTBL_CONVERT(F_UNLCK); \
6872     }
6873 
6874 static int target_to_host_flock(int type)
6875 {
6876 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6877     FLOCK_TRANSTBL
6878 #undef  TRANSTBL_CONVERT
6879     return -TARGET_EINVAL;
6880 }
6881 
6882 static int host_to_target_flock(int type)
6883 {
6884 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6885     FLOCK_TRANSTBL
6886 #undef  TRANSTBL_CONVERT
6887     /* if we don't know how to convert the value coming
6888      * from the host, we copy it to the target field as-is
6889      */
6890     return type;
6891 }
6892 
6893 static inline abi_long copy_from_user_flock(struct flock *fl,
6894                                             abi_ulong target_flock_addr)
6895 {
6896     struct target_flock *target_fl;
6897     int l_type;
6898 
6899     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6900         return -TARGET_EFAULT;
6901     }
6902 
6903     __get_user(l_type, &target_fl->l_type);
6904     l_type = target_to_host_flock(l_type);
6905     if (l_type < 0) {
6906         return l_type;
6907     }
6908     fl->l_type = l_type;
6909     __get_user(fl->l_whence, &target_fl->l_whence);
6910     __get_user(fl->l_start, &target_fl->l_start);
6911     __get_user(fl->l_len, &target_fl->l_len);
6912     __get_user(fl->l_pid, &target_fl->l_pid);
6913     unlock_user_struct(target_fl, target_flock_addr, 0);
6914     return 0;
6915 }
6916 
6917 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6918                                           const struct flock *fl)
6919 {
6920     struct target_flock *target_fl;
6921     short l_type;
6922 
6923     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6924         return -TARGET_EFAULT;
6925     }
6926 
6927     l_type = host_to_target_flock(fl->l_type);
6928     __put_user(l_type, &target_fl->l_type);
6929     __put_user(fl->l_whence, &target_fl->l_whence);
6930     __put_user(fl->l_start, &target_fl->l_start);
6931     __put_user(fl->l_len, &target_fl->l_len);
6932     __put_user(fl->l_pid, &target_fl->l_pid);
6933     unlock_user_struct(target_fl, target_flock_addr, 1);
6934     return 0;
6935 }
6936 
6937 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6938 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6939 
6940 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6941 struct target_oabi_flock64 {
6942     abi_short l_type;
6943     abi_short l_whence;
6944     abi_llong l_start;
6945     abi_llong l_len;
6946     abi_int   l_pid;
6947 } QEMU_PACKED;
6948 
6949 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6950                                                    abi_ulong target_flock_addr)
6951 {
6952     struct target_oabi_flock64 *target_fl;
6953     int l_type;
6954 
6955     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6956         return -TARGET_EFAULT;
6957     }
6958 
6959     __get_user(l_type, &target_fl->l_type);
6960     l_type = target_to_host_flock(l_type);
6961     if (l_type < 0) {
6962         return l_type;
6963     }
6964     fl->l_type = l_type;
6965     __get_user(fl->l_whence, &target_fl->l_whence);
6966     __get_user(fl->l_start, &target_fl->l_start);
6967     __get_user(fl->l_len, &target_fl->l_len);
6968     __get_user(fl->l_pid, &target_fl->l_pid);
6969     unlock_user_struct(target_fl, target_flock_addr, 0);
6970     return 0;
6971 }
6972 
6973 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6974                                                  const struct flock *fl)
6975 {
6976     struct target_oabi_flock64 *target_fl;
6977     short l_type;
6978 
6979     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6980         return -TARGET_EFAULT;
6981     }
6982 
6983     l_type = host_to_target_flock(fl->l_type);
6984     __put_user(l_type, &target_fl->l_type);
6985     __put_user(fl->l_whence, &target_fl->l_whence);
6986     __put_user(fl->l_start, &target_fl->l_start);
6987     __put_user(fl->l_len, &target_fl->l_len);
6988     __put_user(fl->l_pid, &target_fl->l_pid);
6989     unlock_user_struct(target_fl, target_flock_addr, 1);
6990     return 0;
6991 }
6992 #endif
6993 
6994 static inline abi_long copy_from_user_flock64(struct flock *fl,
6995                                               abi_ulong target_flock_addr)
6996 {
6997     struct target_flock64 *target_fl;
6998     int l_type;
6999 
7000     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7001         return -TARGET_EFAULT;
7002     }
7003 
7004     __get_user(l_type, &target_fl->l_type);
7005     l_type = target_to_host_flock(l_type);
7006     if (l_type < 0) {
7007         return l_type;
7008     }
7009     fl->l_type = l_type;
7010     __get_user(fl->l_whence, &target_fl->l_whence);
7011     __get_user(fl->l_start, &target_fl->l_start);
7012     __get_user(fl->l_len, &target_fl->l_len);
7013     __get_user(fl->l_pid, &target_fl->l_pid);
7014     unlock_user_struct(target_fl, target_flock_addr, 0);
7015     return 0;
7016 }
7017 
7018 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7019                                             const struct flock *fl)
7020 {
7021     struct target_flock64 *target_fl;
7022     short l_type;
7023 
7024     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7025         return -TARGET_EFAULT;
7026     }
7027 
7028     l_type = host_to_target_flock(fl->l_type);
7029     __put_user(l_type, &target_fl->l_type);
7030     __put_user(fl->l_whence, &target_fl->l_whence);
7031     __put_user(fl->l_start, &target_fl->l_start);
7032     __put_user(fl->l_len, &target_fl->l_len);
7033     __put_user(fl->l_pid, &target_fl->l_pid);
7034     unlock_user_struct(target_fl, target_flock_addr, 1);
7035     return 0;
7036 }
7037 
7038 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7039 {
7040     struct flock fl;
7041 #ifdef F_GETOWN_EX
7042     struct f_owner_ex fox;
7043     struct target_f_owner_ex *target_fox;
7044 #endif
7045     abi_long ret;
7046     int host_cmd = target_to_host_fcntl_cmd(cmd);
7047 
7048     if (host_cmd == -TARGET_EINVAL)
7049         return host_cmd;
7050 
7051     switch(cmd) {
7052     case TARGET_F_GETLK:
7053         ret = copy_from_user_flock(&fl, arg);
7054         if (ret) {
7055             return ret;
7056         }
7057         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7058         if (ret == 0) {
7059             ret = copy_to_user_flock(arg, &fl);
7060         }
7061         break;
7062 
7063     case TARGET_F_SETLK:
7064     case TARGET_F_SETLKW:
7065         ret = copy_from_user_flock(&fl, arg);
7066         if (ret) {
7067             return ret;
7068         }
7069         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7070         break;
7071 
7072     case TARGET_F_GETLK64:
7073     case TARGET_F_OFD_GETLK:
7074         ret = copy_from_user_flock64(&fl, arg);
7075         if (ret) {
7076             return ret;
7077         }
7078         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7079         if (ret == 0) {
7080             ret = copy_to_user_flock64(arg, &fl);
7081         }
7082         break;
7083     case TARGET_F_SETLK64:
7084     case TARGET_F_SETLKW64:
7085     case TARGET_F_OFD_SETLK:
7086     case TARGET_F_OFD_SETLKW:
7087         ret = copy_from_user_flock64(&fl, arg);
7088         if (ret) {
7089             return ret;
7090         }
7091         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7092         break;
7093 
7094     case TARGET_F_GETFL:
7095         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7096         if (ret >= 0) {
7097             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7098             /* tell 32-bit guests the fd uses O_LARGEFILE on 64-bit hosts: */
7099             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7100                 ret |= TARGET_O_LARGEFILE;
7101             }
7102         }
7103         break;
7104 
7105     case TARGET_F_SETFL:
7106         ret = get_errno(safe_fcntl(fd, host_cmd,
7107                                    target_to_host_bitmask(arg,
7108                                                           fcntl_flags_tbl)));
7109         break;
7110 
7111 #ifdef F_GETOWN_EX
7112     case TARGET_F_GETOWN_EX:
7113         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7114         if (ret >= 0) {
7115             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7116                 return -TARGET_EFAULT;
7117             target_fox->type = tswap32(fox.type);
7118             target_fox->pid = tswap32(fox.pid);
7119             unlock_user_struct(target_fox, arg, 1);
7120         }
7121         break;
7122 #endif
7123 
7124 #ifdef F_SETOWN_EX
7125     case TARGET_F_SETOWN_EX:
7126         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7127             return -TARGET_EFAULT;
7128         fox.type = tswap32(target_fox->type);
7129         fox.pid = tswap32(target_fox->pid);
7130         unlock_user_struct(target_fox, arg, 0);
7131         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7132         break;
7133 #endif
7134 
7135     case TARGET_F_SETSIG:
7136         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7137         break;
7138 
7139     case TARGET_F_GETSIG:
7140         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7141         break;
7142 
7143     case TARGET_F_SETOWN:
7144     case TARGET_F_GETOWN:
7145     case TARGET_F_SETLEASE:
7146     case TARGET_F_GETLEASE:
7147     case TARGET_F_SETPIPE_SZ:
7148     case TARGET_F_GETPIPE_SZ:
7149     case TARGET_F_ADD_SEALS:
7150     case TARGET_F_GET_SEALS:
7151         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7152         break;
7153 
7154     default:
7155         ret = get_errno(safe_fcntl(fd, cmd, arg));
7156         break;
7157     }
7158     return ret;
7159 }
7160 
7161 #ifdef USE_UID16
7162 
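     /*
      * Helpers for targets with 16-bit uid_t/gid_t: IDs that do not fit
      * in 16 bits are reported as the overflow ID 65534, and a 16-bit -1
      * from the guest is widened back to the full-width -1 "unchanged"
      * sentinel.
      */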
7163 static inline int high2lowuid(int uid)
7164 {
7165     if (uid > 65535)
7166         return 65534;
7167     else
7168         return uid;
7169 }
7170 
7171 static inline int high2lowgid(int gid)
7172 {
7173     if (gid > 65535)
7174         return 65534;
7175     else
7176         return gid;
7177 }
7178 
7179 static inline int low2highuid(int uid)
7180 {
7181     if ((int16_t)uid == -1)
7182         return -1;
7183     else
7184         return uid;
7185 }
7186 
7187 static inline int low2highgid(int gid)
7188 {
7189     if ((int16_t)gid == -1)
7190         return -1;
7191     else
7192         return gid;
7193 }
7194 static inline int tswapid(int id)
7195 {
7196     return tswap16(id);
7197 }
7198 
7199 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7200 
7201 #else /* !USE_UID16 */
7202 static inline int high2lowuid(int uid)
7203 {
7204     return uid;
7205 }
7206 static inline int high2lowgid(int gid)
7207 {
7208     return gid;
7209 }
7210 static inline int low2highuid(int uid)
7211 {
7212     return uid;
7213 }
7214 static inline int low2highgid(int gid)
7215 {
7216     return gid;
7217 }
7218 static inline int tswapid(int id)
7219 {
7220     return tswap32(id);
7221 }
7222 
7223 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7224 
7225 #endif /* USE_UID16 */
7226 
7227 /* We must do direct syscalls for setting UID/GID, because we want to
7228  * implement the Linux system call semantics of "change only for this thread",
7229  * not the libc/POSIX semantics of "change for all threads in process".
7230  * (See http://ewontfix.com/17/ for more details.)
7231  * We use the 32-bit version of the syscalls if present; if it is not
7232  * then either the host architecture supports 32-bit UIDs natively with
7233  * the standard syscall, or the 16-bit UID is the best we can do.
7234  */
7235 #ifdef __NR_setuid32
7236 #define __NR_sys_setuid __NR_setuid32
7237 #else
7238 #define __NR_sys_setuid __NR_setuid
7239 #endif
7240 #ifdef __NR_setgid32
7241 #define __NR_sys_setgid __NR_setgid32
7242 #else
7243 #define __NR_sys_setgid __NR_setgid
7244 #endif
7245 #ifdef __NR_setresuid32
7246 #define __NR_sys_setresuid __NR_setresuid32
7247 #else
7248 #define __NR_sys_setresuid __NR_setresuid
7249 #endif
7250 #ifdef __NR_setresgid32
7251 #define __NR_sys_setresgid __NR_setresgid32
7252 #else
7253 #define __NR_sys_setresgid __NR_setresgid
7254 #endif
7255 #ifdef __NR_setgroups32
7256 #define __NR_sys_setgroups __NR_setgroups32
7257 #else
7258 #define __NR_sys_setgroups __NR_setgroups
7259 #endif
7260 #ifdef __NR_setreuid32
7261 #define __NR_sys_setreuid __NR_setreuid32
7262 #else
7263 #define __NR_sys_setreuid __NR_setreuid
7264 #endif
7265 #ifdef __NR_setregid32
7266 #define __NR_sys_setregid __NR_setregid32
7267 #else
7268 #define __NR_sys_setregid __NR_setregid
7269 #endif
7270 
7271 _syscall1(int, sys_setuid, uid_t, uid)
7272 _syscall1(int, sys_setgid, gid_t, gid)
7273 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7274 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7275 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7276 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7277 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7278 
7279 void syscall_init(void)
7280 {
7281     IOCTLEntry *ie;
7282     const argtype *arg_type;
7283     int size;
7284 
7285     thunk_init(STRUCT_MAX);
7286 
7287 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7288 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7289 #include "syscall_types.h"
7290 #undef STRUCT
7291 #undef STRUCT_SPECIAL
7292 
7293     /* We patch the ioctl size if necessary. We rely on the fact that
7294        no ioctl has all bits set to '1' in the size field. */
7295     ie = ioctl_entries;
7296     while (ie->target_cmd != 0) {
7297         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7298             TARGET_IOC_SIZEMASK) {
7299             arg_type = ie->arg_type;
7300             if (arg_type[0] != TYPE_PTR) {
7301                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7302                         ie->target_cmd);
7303                 exit(1);
7304             }
7305             arg_type++;
7306             size = thunk_type_size(arg_type, 0);
7307             ie->target_cmd = (ie->target_cmd &
7308                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7309                 (size << TARGET_IOC_SIZESHIFT);
7310         }
7311 
7312         /* automatic consistency check if same arch */
7313 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7314     (defined(__x86_64__) && defined(TARGET_X86_64))
7315         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7316             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7317                     ie->name, ie->target_cmd, ie->host_cmd);
7318         }
7319 #endif
7320         ie++;
7321     }
7322 }
7323 
7324 #ifdef TARGET_NR_truncate64
7325 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7326                                          abi_long arg2,
7327                                          abi_long arg3,
7328                                          abi_long arg4)
7329 {
7330     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7331         arg2 = arg3;
7332         arg3 = arg4;
7333     }
7334     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7335 }
7336 #endif
7337 
7338 #ifdef TARGET_NR_ftruncate64
7339 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7340                                           abi_long arg2,
7341                                           abi_long arg3,
7342                                           abi_long arg4)
7343 {
7344     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7345         arg2 = arg3;
7346         arg3 = arg4;
7347     }
7348     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7349 }
7350 #endif
7351 
7352 #if defined(TARGET_NR_timer_settime) || \
7353     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7354 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7355                                                  abi_ulong target_addr)
7356 {
7357     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7358                                 offsetof(struct target_itimerspec,
7359                                          it_interval)) ||
7360         target_to_host_timespec(&host_its->it_value, target_addr +
7361                                 offsetof(struct target_itimerspec,
7362                                          it_value))) {
7363         return -TARGET_EFAULT;
7364     }
7365 
7366     return 0;
7367 }
7368 #endif
7369 
7370 #if defined(TARGET_NR_timer_settime64) || \
7371     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7372 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7373                                                    abi_ulong target_addr)
7374 {
7375     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7376                                   offsetof(struct target__kernel_itimerspec,
7377                                            it_interval)) ||
7378         target_to_host_timespec64(&host_its->it_value, target_addr +
7379                                   offsetof(struct target__kernel_itimerspec,
7380                                            it_value))) {
7381         return -TARGET_EFAULT;
7382     }
7383 
7384     return 0;
7385 }
7386 #endif
7387 
7388 #if ((defined(TARGET_NR_timerfd_gettime) || \
7389       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7390       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7391 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7392                                                  struct itimerspec *host_its)
7393 {
7394     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7395                                                        it_interval),
7396                                 &host_its->it_interval) ||
7397         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7398                                                        it_value),
7399                                 &host_its->it_value)) {
7400         return -TARGET_EFAULT;
7401     }
7402     return 0;
7403 }
7404 #endif
7405 
7406 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7407       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7408       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7409 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7410                                                    struct itimerspec *host_its)
7411 {
7412     if (host_to_target_timespec64(target_addr +
7413                                   offsetof(struct target__kernel_itimerspec,
7414                                            it_interval),
7415                                   &host_its->it_interval) ||
7416         host_to_target_timespec64(target_addr +
7417                                   offsetof(struct target__kernel_itimerspec,
7418                                            it_value),
7419                                   &host_its->it_value)) {
7420         return -TARGET_EFAULT;
7421     }
7422     return 0;
7423 }
7424 #endif
7425 
7426 #if defined(TARGET_NR_adjtimex) || \
7427     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7428 static inline abi_long target_to_host_timex(struct timex *host_tx,
7429                                             abi_long target_addr)
7430 {
7431     struct target_timex *target_tx;
7432 
7433     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7434         return -TARGET_EFAULT;
7435     }
7436 
7437     __get_user(host_tx->modes, &target_tx->modes);
7438     __get_user(host_tx->offset, &target_tx->offset);
7439     __get_user(host_tx->freq, &target_tx->freq);
7440     __get_user(host_tx->maxerror, &target_tx->maxerror);
7441     __get_user(host_tx->esterror, &target_tx->esterror);
7442     __get_user(host_tx->status, &target_tx->status);
7443     __get_user(host_tx->constant, &target_tx->constant);
7444     __get_user(host_tx->precision, &target_tx->precision);
7445     __get_user(host_tx->tolerance, &target_tx->tolerance);
7446     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7447     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7448     __get_user(host_tx->tick, &target_tx->tick);
7449     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7450     __get_user(host_tx->jitter, &target_tx->jitter);
7451     __get_user(host_tx->shift, &target_tx->shift);
7452     __get_user(host_tx->stabil, &target_tx->stabil);
7453     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7454     __get_user(host_tx->calcnt, &target_tx->calcnt);
7455     __get_user(host_tx->errcnt, &target_tx->errcnt);
7456     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7457     __get_user(host_tx->tai, &target_tx->tai);
7458 
7459     unlock_user_struct(target_tx, target_addr, 0);
7460     return 0;
7461 }
7462 
7463 static inline abi_long host_to_target_timex(abi_long target_addr,
7464                                             struct timex *host_tx)
7465 {
7466     struct target_timex *target_tx;
7467 
7468     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7469         return -TARGET_EFAULT;
7470     }
7471 
7472     __put_user(host_tx->modes, &target_tx->modes);
7473     __put_user(host_tx->offset, &target_tx->offset);
7474     __put_user(host_tx->freq, &target_tx->freq);
7475     __put_user(host_tx->maxerror, &target_tx->maxerror);
7476     __put_user(host_tx->esterror, &target_tx->esterror);
7477     __put_user(host_tx->status, &target_tx->status);
7478     __put_user(host_tx->constant, &target_tx->constant);
7479     __put_user(host_tx->precision, &target_tx->precision);
7480     __put_user(host_tx->tolerance, &target_tx->tolerance);
7481     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7482     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7483     __put_user(host_tx->tick, &target_tx->tick);
7484     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7485     __put_user(host_tx->jitter, &target_tx->jitter);
7486     __put_user(host_tx->shift, &target_tx->shift);
7487     __put_user(host_tx->stabil, &target_tx->stabil);
7488     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7489     __put_user(host_tx->calcnt, &target_tx->calcnt);
7490     __put_user(host_tx->errcnt, &target_tx->errcnt);
7491     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7492     __put_user(host_tx->tai, &target_tx->tai);
7493 
7494     unlock_user_struct(target_tx, target_addr, 1);
7495     return 0;
7496 }
7497 #endif
7498 
7499 
7500 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7501 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7502                                               abi_long target_addr)
7503 {
7504     struct target__kernel_timex *target_tx;
7505 
7506     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7507                                  offsetof(struct target__kernel_timex,
7508                                           time))) {
7509         return -TARGET_EFAULT;
7510     }
7511 
7512     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7513         return -TARGET_EFAULT;
7514     }
7515 
7516     __get_user(host_tx->modes, &target_tx->modes);
7517     __get_user(host_tx->offset, &target_tx->offset);
7518     __get_user(host_tx->freq, &target_tx->freq);
7519     __get_user(host_tx->maxerror, &target_tx->maxerror);
7520     __get_user(host_tx->esterror, &target_tx->esterror);
7521     __get_user(host_tx->status, &target_tx->status);
7522     __get_user(host_tx->constant, &target_tx->constant);
7523     __get_user(host_tx->precision, &target_tx->precision);
7524     __get_user(host_tx->tolerance, &target_tx->tolerance);
7525     __get_user(host_tx->tick, &target_tx->tick);
7526     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7527     __get_user(host_tx->jitter, &target_tx->jitter);
7528     __get_user(host_tx->shift, &target_tx->shift);
7529     __get_user(host_tx->stabil, &target_tx->stabil);
7530     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7531     __get_user(host_tx->calcnt, &target_tx->calcnt);
7532     __get_user(host_tx->errcnt, &target_tx->errcnt);
7533     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7534     __get_user(host_tx->tai, &target_tx->tai);
7535 
7536     unlock_user_struct(target_tx, target_addr, 0);
7537     return 0;
7538 }
7539 
7540 static inline abi_long host_to_target_timex64(abi_long target_addr,
7541                                               struct timex *host_tx)
7542 {
7543     struct target__kernel_timex *target_tx;
7544 
7545    if (copy_to_user_timeval64(target_addr +
7546                               offsetof(struct target__kernel_timex, time),
7547                               &host_tx->time)) {
7548         return -TARGET_EFAULT;
7549     }
7550 
7551     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7552         return -TARGET_EFAULT;
7553     }
7554 
7555     __put_user(host_tx->modes, &target_tx->modes);
7556     __put_user(host_tx->offset, &target_tx->offset);
7557     __put_user(host_tx->freq, &target_tx->freq);
7558     __put_user(host_tx->maxerror, &target_tx->maxerror);
7559     __put_user(host_tx->esterror, &target_tx->esterror);
7560     __put_user(host_tx->status, &target_tx->status);
7561     __put_user(host_tx->constant, &target_tx->constant);
7562     __put_user(host_tx->precision, &target_tx->precision);
7563     __put_user(host_tx->tolerance, &target_tx->tolerance);
7564     __put_user(host_tx->tick, &target_tx->tick);
7565     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7566     __put_user(host_tx->jitter, &target_tx->jitter);
7567     __put_user(host_tx->shift, &target_tx->shift);
7568     __put_user(host_tx->stabil, &target_tx->stabil);
7569     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7570     __put_user(host_tx->calcnt, &target_tx->calcnt);
7571     __put_user(host_tx->errcnt, &target_tx->errcnt);
7572     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7573     __put_user(host_tx->tai, &target_tx->tai);
7574 
7575     unlock_user_struct(target_tx, target_addr, 1);
7576     return 0;
7577 }
7578 #endif
7579 
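     /*
      * Older libc headers do not provide the sigev_notify_thread_id
      * accessor; fall back to the underlying union member in that case.
      */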
7580 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7581 #define sigev_notify_thread_id _sigev_un._tid
7582 #endif
7583 
7584 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7585                                                abi_ulong target_addr)
7586 {
7587     struct target_sigevent *target_sevp;
7588 
7589     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7590         return -TARGET_EFAULT;
7591     }
7592 
7593     /* This union is awkward on 64-bit systems because it has a 32-bit
7594      * integer and a pointer in it; we follow the conversion approach
7595      * used for handling sigval types in signal.c, so the guest should get
7596      * the correct value back even if we did a 64-bit byteswap and it's
7597      * using the 32-bit integer.
7598      */
7599     host_sevp->sigev_value.sival_ptr =
7600         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7601     host_sevp->sigev_signo =
7602         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7603     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7604     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7605 
7606     unlock_user_struct(target_sevp, target_addr, 1);
7607     return 0;
7608 }
7609 
7610 #if defined(TARGET_NR_mlockall)
7611 static inline int target_to_host_mlockall_arg(int arg)
7612 {
7613     int result = 0;
7614 
7615     if (arg & TARGET_MCL_CURRENT) {
7616         result |= MCL_CURRENT;
7617     }
7618     if (arg & TARGET_MCL_FUTURE) {
7619         result |= MCL_FUTURE;
7620     }
7621 #ifdef MCL_ONFAULT
7622     if (arg & TARGET_MCL_ONFAULT) {
7623         result |= MCL_ONFAULT;
7624     }
7625 #endif
7626 
7627     return result;
7628 }
7629 #endif
7630 
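     /*
      * Translate guest MS_* flags to host values; unknown bits are passed
      * through unchanged.
      */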
7631 static inline int target_to_host_msync_arg(abi_long arg)
7632 {
7633     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7634            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7635            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7636            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7637 }
7638 
7639 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7640      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7641      defined(TARGET_NR_newfstatat))
7642 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7643                                              abi_ulong target_addr,
7644                                              struct stat *host_st)
7645 {
7646 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7647     if (cpu_env->eabi) {
7648         struct target_eabi_stat64 *target_st;
7649 
7650         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7651             return -TARGET_EFAULT;
7652         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7653         __put_user(host_st->st_dev, &target_st->st_dev);
7654         __put_user(host_st->st_ino, &target_st->st_ino);
7655 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7656         __put_user(host_st->st_ino, &target_st->__st_ino);
7657 #endif
7658         __put_user(host_st->st_mode, &target_st->st_mode);
7659         __put_user(host_st->st_nlink, &target_st->st_nlink);
7660         __put_user(host_st->st_uid, &target_st->st_uid);
7661         __put_user(host_st->st_gid, &target_st->st_gid);
7662         __put_user(host_st->st_rdev, &target_st->st_rdev);
7663         __put_user(host_st->st_size, &target_st->st_size);
7664         __put_user(host_st->st_blksize, &target_st->st_blksize);
7665         __put_user(host_st->st_blocks, &target_st->st_blocks);
7666         __put_user(host_st->st_atime, &target_st->target_st_atime);
7667         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7668         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7669 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7670         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7671         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7672         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7673 #endif
7674         unlock_user_struct(target_st, target_addr, 1);
7675     } else
7676 #endif
7677     {
7678 #if defined(TARGET_HAS_STRUCT_STAT64)
7679         struct target_stat64 *target_st;
7680 #else
7681         struct target_stat *target_st;
7682 #endif
7683 
7684         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7685             return -TARGET_EFAULT;
7686         memset(target_st, 0, sizeof(*target_st));
7687         __put_user(host_st->st_dev, &target_st->st_dev);
7688         __put_user(host_st->st_ino, &target_st->st_ino);
7689 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7690         __put_user(host_st->st_ino, &target_st->__st_ino);
7691 #endif
7692         __put_user(host_st->st_mode, &target_st->st_mode);
7693         __put_user(host_st->st_nlink, &target_st->st_nlink);
7694         __put_user(host_st->st_uid, &target_st->st_uid);
7695         __put_user(host_st->st_gid, &target_st->st_gid);
7696         __put_user(host_st->st_rdev, &target_st->st_rdev);
7697         /* XXX: better use of kernel struct */
7698         __put_user(host_st->st_size, &target_st->st_size);
7699         __put_user(host_st->st_blksize, &target_st->st_blksize);
7700         __put_user(host_st->st_blocks, &target_st->st_blocks);
7701         __put_user(host_st->st_atime, &target_st->target_st_atime);
7702         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7703         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7704 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7705         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7706         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7707         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7708 #endif
7709         unlock_user_struct(target_st, target_addr, 1);
7710     }
7711 
7712     return 0;
7713 }
7714 #endif
7715 
7716 #if defined(TARGET_NR_statx) && defined(__NR_statx)
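/* Copy the statx result in *host_stx into guest memory at target_addr,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */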
7717 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7718                                             abi_ulong target_addr)
7719 {
7720     struct target_statx *target_stx;
7721 
7722     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7723         return -TARGET_EFAULT;
7724     }
7725     memset(target_stx, 0, sizeof(*target_stx));
7726 
7727     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7728     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7729     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7730     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7731     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7732     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7733     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7734     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7735     __put_user(host_stx->stx_size, &target_stx->stx_size);
7736     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7737     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7738     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7739     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7740     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7741     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7742     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7743     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7744     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7745     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7746     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7747     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7748     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7749     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7750 
7751     unlock_user_struct(target_stx, target_addr, 1);
7752 
7753     return 0;
7754 }
7755 #endif
7756 
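/* Invoke the host futex syscall (do_safe_futex() below is the
 * safe_syscall/get_errno variant).  On 64-bit hosts __NR_futex already takes
 * a 64-bit timespec; on 32-bit hosts use __NR_futex_time64 when the host
 * timespec has a 64-bit tv_sec and fall back to the old __NR_futex otherwise.
 */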
7757 static int do_sys_futex(int *uaddr, int op, int val,
7758                          const struct timespec *timeout, int *uaddr2,
7759                          int val3)
7760 {
7761 #if HOST_LONG_BITS == 64
7762 #if defined(__NR_futex)
7763     /* 64-bit hosts have a 64-bit time_t and no _time64 syscall variant. */
7764     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7765 
7766 #endif
7767 #else /* HOST_LONG_BITS == 64 */
7768 #if defined(__NR_futex_time64)
7769     if (sizeof(timeout->tv_sec) == 8) {
7770         /* _time64 function on 32bit arch */
7771         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7772     }
7773 #endif
7774 #if defined(__NR_futex)
7775     /* old function on 32bit arch */
7776     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7777 #endif
7778 #endif /* HOST_LONG_BITS == 64 */
7779     g_assert_not_reached();
7780 }
7781 
7782 static int do_safe_futex(int *uaddr, int op, int val,
7783                          const struct timespec *timeout, int *uaddr2,
7784                          int val3)
7785 {
7786 #if HOST_LONG_BITS == 64
7787 #if defined(__NR_futex)
7788     /* always a 64-bit time_t, it doesn't define _time64 version  */
7789     /* 64-bit hosts have a 64-bit time_t and no _time64 syscall variant. */
7790 #endif
7791 #else /* HOST_LONG_BITS == 64 */
7792 #if defined(__NR_futex_time64)
7793     if (sizeof(timeout->tv_sec) == 8) {
7794         /* _time64 function on 32bit arch */
7795         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7796                                            val3));
7797     }
7798 #endif
7799 #if defined(__NR_futex)
7800     /* old function on 32bit arch */
7801     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7802 #endif
7803 #endif /* HOST_LONG_BITS == 64 */
7804     return -TARGET_ENOSYS;
7805 }
7806 
7807 /* ??? Using host futex calls even when target atomic operations
7808    are not really atomic probably breaks things.  However, implementing
7809    futexes locally would make futexes shared between multiple processes
7810    tricky.  In that case they're probably useless anyway, because guest
7811    atomic operations won't work either.  */
7812 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
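/*
 * Emulate the guest futex syscall: byte-swap the value(s) that the kernel
 * compares against guest memory, convert the guest timespec for operations
 * that take a timeout, and pass VAL2 through the timeout slot for the
 * requeue/wake-op operations.
 */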
7813 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7814                     int op, int val, target_ulong timeout,
7815                     target_ulong uaddr2, int val3)
7816 {
7817     struct timespec ts, *pts = NULL;
7818     void *haddr2 = NULL;
7819     int base_op;
7820 
7821     /* We assume FUTEX_* constants are the same on both host and target. */
7822 #ifdef FUTEX_CMD_MASK
7823     base_op = op & FUTEX_CMD_MASK;
7824 #else
7825     base_op = op;
7826 #endif
7827     switch (base_op) {
7828     case FUTEX_WAIT:
7829     case FUTEX_WAIT_BITSET:
7830         val = tswap32(val);
7831         break;
7832     case FUTEX_WAIT_REQUEUE_PI:
7833         val = tswap32(val);
7834         haddr2 = g2h(cpu, uaddr2);
7835         break;
7836     case FUTEX_LOCK_PI:
7837     case FUTEX_LOCK_PI2:
7838         break;
7839     case FUTEX_WAKE:
7840     case FUTEX_WAKE_BITSET:
7841     case FUTEX_TRYLOCK_PI:
7842     case FUTEX_UNLOCK_PI:
7843         timeout = 0;
7844         break;
7845     case FUTEX_FD:
7846         val = target_to_host_signal(val);
7847         timeout = 0;
7848         break;
7849     case FUTEX_CMP_REQUEUE:
7850     case FUTEX_CMP_REQUEUE_PI:
7851         val3 = tswap32(val3);
7852         /* fall through */
7853     case FUTEX_REQUEUE:
7854     case FUTEX_WAKE_OP:
7855         /*
7856          * For these, the 4th argument is not TIMEOUT, but VAL2.
7857          * But the prototype of do_safe_futex takes a pointer, so
7858          * insert casts to satisfy the compiler.  We do not need
7859          * to tswap VAL2 since it's not compared to guest memory.
7860          */
7861         pts = (struct timespec *)(uintptr_t)timeout;
7862         timeout = 0;
7863         haddr2 = g2h(cpu, uaddr2);
7864         break;
7865     default:
7866         return -TARGET_ENOSYS;
7867     }
7868     if (timeout) {
7869         pts = &ts;
7870         if (time64
7871             ? target_to_host_timespec64(pts, timeout)
7872             : target_to_host_timespec(pts, timeout)) {
7873             return -TARGET_EFAULT;
7874         }
7875     }
7876     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7877 }
7878 #endif
7879 
7880 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
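/*
 * Emulate name_to_handle_at(): read handle_bytes from the guest's
 * file_handle, perform the host syscall into a bounce buffer, then copy the
 * (opaque) handle back with handle_bytes/handle_type in guest byte order
 * and store the mount id.
 */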
7881 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7882                                      abi_long handle, abi_long mount_id,
7883                                      abi_long flags)
7884 {
7885     struct file_handle *target_fh;
7886     struct file_handle *fh;
7887     int mid = 0;
7888     abi_long ret;
7889     char *name;
7890     unsigned int size, total_size;
7891 
7892     if (get_user_s32(size, handle)) {
7893         return -TARGET_EFAULT;
7894     }
7895 
7896     name = lock_user_string(pathname);
7897     if (!name) {
7898         return -TARGET_EFAULT;
7899     }
7900 
7901     total_size = sizeof(struct file_handle) + size;
7902     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7903     if (!target_fh) {
7904         unlock_user(name, pathname, 0);
7905         return -TARGET_EFAULT;
7906     }
7907 
7908     fh = g_malloc0(total_size);
7909     fh->handle_bytes = size;
7910 
7911     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7912     unlock_user(name, pathname, 0);
7913 
7914     /* man name_to_handle_at(2):
7915      * Other than the use of the handle_bytes field, the caller should treat
7916      * the file_handle structure as an opaque data type
7917      */
7918 
7919     memcpy(target_fh, fh, total_size);
7920     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7921     target_fh->handle_type = tswap32(fh->handle_type);
7922     g_free(fh);
7923     unlock_user(target_fh, handle, total_size);
7924 
7925     if (put_user_s32(mid, mount_id)) {
7926         return -TARGET_EFAULT;
7927     }
7928 
7929     return ret;
7930 
7931 }
7932 #endif
7933 
7934 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
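/*
 * Emulate open_by_handle_at(): duplicate the guest's file_handle, fix up
 * handle_bytes/handle_type byte order, convert the open flags to host
 * values, and return the resulting host fd or errno.
 */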
7935 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7936                                      abi_long flags)
7937 {
7938     struct file_handle *target_fh;
7939     struct file_handle *fh;
7940     unsigned int size, total_size;
7941     abi_long ret;
7942 
7943     if (get_user_s32(size, handle)) {
7944         return -TARGET_EFAULT;
7945     }
7946 
7947     total_size = sizeof(struct file_handle) + size;
7948     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7949     if (!target_fh) {
7950         return -TARGET_EFAULT;
7951     }
7952 
7953     fh = g_memdup(target_fh, total_size);
7954     fh->handle_bytes = size;
7955     fh->handle_type = tswap32(target_fh->handle_type);
7956 
7957     ret = get_errno(open_by_handle_at(mount_fd, fh,
7958                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7959 
7960     g_free(fh);
7961 
7962     unlock_user(target_fh, handle, total_size);
7963 
7964     return ret;
7965 }
7966 #endif
7967 
7968 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7969 
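/*
 * Emulate signalfd4(): validate the flags, convert the guest signal mask to
 * a host sigset_t, and register a translator so data read from the new fd is
 * converted back to the target's signalfd_siginfo layout.
 */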
7970 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7971 {
7972     int host_flags;
7973     target_sigset_t *target_mask;
7974     sigset_t host_mask;
7975     abi_long ret;
7976 
7977     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7978         return -TARGET_EINVAL;
7979     }
7980     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7981         return -TARGET_EFAULT;
7982     }
7983 
7984     target_to_host_sigset(&host_mask, target_mask);
7985 
7986     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7987 
7988     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7989     if (ret >= 0) {
7990         fd_trans_register(ret, &target_signalfd_trans);
7991     }
7992 
7993     unlock_user_struct(target_mask, mask, 0);
7994 
7995     return ret;
7996 }
7997 #endif
7998 
7999 /* Map host to target signal numbers for the wait family of syscalls.
8000    Assume all other status bits are the same.  */
8001 int host_to_target_waitstatus(int status)
8002 {
8003     if (WIFSIGNALED(status)) {
8004         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8005     }
8006     if (WIFSTOPPED(status)) {
8007         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8008                | (status & 0xff);
8009     }
8010     return status;
8011 }
8012 
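/* Fake /proc/self/cmdline by writing the guest's saved argv strings,
 * NUL-separated, into fd.
 */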
8013 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8014 {
8015     CPUState *cpu = env_cpu(cpu_env);
8016     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8017     int i;
8018 
8019     for (i = 0; i < bprm->argc; i++) {
8020         size_t len = strlen(bprm->argv[i]) + 1;
8021 
8022         if (write(fd, bprm->argv[i], len) != len) {
8023             return -1;
8024         }
8025     }
8026 
8027     return 0;
8028 }
8029 
8030 struct open_self_maps_data {
8031     TaskState *ts;
8032     IntervalTreeRoot *host_maps;
8033     int fd;
8034     bool smaps;
8035 };
8036 
8037 /*
8038  * Subroutine to output one line of /proc/self/maps,
8039  * or one region of /proc/self/smaps.
8040  */
8041 
8042 #ifdef TARGET_HPPA
8043 # define test_stack(S, E, L)  (E == L)
8044 #else
8045 # define test_stack(S, E, L)  (S == L)
8046 #endif
8047 
8048 static void open_self_maps_4(const struct open_self_maps_data *d,
8049                              const MapInfo *mi, abi_ptr start,
8050                              abi_ptr end, unsigned flags)
8051 {
8052     const struct image_info *info = d->ts->info;
8053     const char *path = mi->path;
8054     uint64_t offset;
8055     int fd = d->fd;
8056     int count;
8057 
8058     if (test_stack(start, end, info->stack_limit)) {
8059         path = "[stack]";
8060     } else if (start == info->brk) {
8061         path = "[heap]";
8062     } else if (start == info->vdso) {
8063         path = "[vdso]";
8064 #ifdef TARGET_X86_64
8065     } else if (start == TARGET_VSYSCALL_PAGE) {
8066         path = "[vsyscall]";
8067 #endif
8068     }
8069 
8070     /* Except for the null device (MAP_ANON), adjust offset for this fragment. */
8071     offset = mi->offset;
8072     if (mi->dev) {
8073         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8074         offset += hstart - mi->itree.start;
8075     }
8076 
8077     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8078                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8079                     start, end,
8080                     (flags & PAGE_READ) ? 'r' : '-',
8081                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8082                     (flags & PAGE_EXEC) ? 'x' : '-',
8083                     mi->is_priv ? 'p' : 's',
8084                     offset, major(mi->dev), minor(mi->dev),
8085                     (uint64_t)mi->inode);
8086     if (path) {
8087         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8088     } else {
8089         dprintf(fd, "\n");
8090     }
8091 
8092     if (d->smaps) {
8093         unsigned long size = end - start;
8094         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8095         unsigned long size_kb = size >> 10;
8096 
8097         dprintf(fd, "Size:                  %lu kB\n"
8098                 "KernelPageSize:        %lu kB\n"
8099                 "MMUPageSize:           %lu kB\n"
8100                 "Rss:                   0 kB\n"
8101                 "Pss:                   0 kB\n"
8102                 "Pss_Dirty:             0 kB\n"
8103                 "Shared_Clean:          0 kB\n"
8104                 "Shared_Dirty:          0 kB\n"
8105                 "Private_Clean:         0 kB\n"
8106                 "Private_Dirty:         0 kB\n"
8107                 "Referenced:            0 kB\n"
8108                 "Anonymous:             %lu kB\n"
8109                 "LazyFree:              0 kB\n"
8110                 "AnonHugePages:         0 kB\n"
8111                 "ShmemPmdMapped:        0 kB\n"
8112                 "FilePmdMapped:         0 kB\n"
8113                 "Shared_Hugetlb:        0 kB\n"
8114                 "Private_Hugetlb:       0 kB\n"
8115                 "Swap:                  0 kB\n"
8116                 "SwapPss:               0 kB\n"
8117                 "Locked:                0 kB\n"
8118                 "THPeligible:    0\n"
8119                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8120                 size_kb, page_size_kb, page_size_kb,
8121                 (flags & PAGE_ANON ? size_kb : 0),
8122                 (flags & PAGE_READ) ? " rd" : "",
8123                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8124                 (flags & PAGE_EXEC) ? " ex" : "",
8125                 mi->is_priv ? "" : " sh",
8126                 (flags & PAGE_READ) ? " mr" : "",
8127                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8128                 (flags & PAGE_EXEC) ? " me" : "",
8129                 mi->is_priv ? "" : " ms");
8130     }
8131 }
8132 
8133 /*
8134  * Callback for walk_memory_regions, when read_self_maps() fails.
8135  * Proceed without the benefit of host /proc/self/maps cross-check.
8136  */
8137 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8138                             target_ulong guest_end, unsigned long flags)
8139 {
8140     static const MapInfo mi = { .is_priv = true };
8141 
8142     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8143     return 0;
8144 }
8145 
8146 /*
8147  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8148  */
8149 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8150                             target_ulong guest_end, unsigned long flags)
8151 {
8152     const struct open_self_maps_data *d = opaque;
8153     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8154     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8155 
8156 #ifdef TARGET_X86_64
8157     /*
8158      * Because of the extremely high position of the page within the guest
8159      * virtual address space, this is not backed by host memory at all.
8160      * Therefore the loop below would fail.  This is the only instance
8161      * of not having host backing memory.
8162      */
8163     if (guest_start == TARGET_VSYSCALL_PAGE) {
8164         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8165     }
8166 #endif
8167 
8168     while (1) {
8169         IntervalTreeNode *n =
8170             interval_tree_iter_first(d->host_maps, host_start, host_start);
8171         MapInfo *mi = container_of(n, MapInfo, itree);
8172         uintptr_t this_hlast = MIN(host_last, n->last);
8173         target_ulong this_gend = h2g(this_hlast) + 1;
8174 
8175         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8176 
8177         if (this_hlast == host_last) {
8178             return 0;
8179         }
8180         host_start = this_hlast + 1;
8181         guest_start = h2g(host_start);
8182     }
8183 }
8184 
8185 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8186 {
8187     struct open_self_maps_data d = {
8188         .ts = get_task_state(env_cpu(env)),
8189         .fd = fd,
8190         .smaps = smaps
8191     };
8192 
8193     mmap_lock();
8194     d.host_maps = read_self_maps();
8195     if (d.host_maps) {
8196         walk_memory_regions(&d, open_self_maps_2);
8197         free_self_maps(d.host_maps);
8198     } else {
8199         walk_memory_regions(&d, open_self_maps_3);
8200     }
8201     mmap_unlock();
8202     return 0;
8203 }
8204 
8205 static int open_self_maps(CPUArchState *cpu_env, int fd)
8206 {
8207     return open_self_maps_1(cpu_env, fd, false);
8208 }
8209 
8210 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8211 {
8212     return open_self_maps_1(cpu_env, fd, true);
8213 }
8214 
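/* Fake /proc/self/stat: emit the fields a guest is likely to consult (pid,
 * comm, state, ppid, num_threads, starttime, start of stack) and zeroes for
 * everything else.
 */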
8215 static int open_self_stat(CPUArchState *cpu_env, int fd)
8216 {
8217     CPUState *cpu = env_cpu(cpu_env);
8218     TaskState *ts = get_task_state(cpu);
8219     g_autoptr(GString) buf = g_string_new(NULL);
8220     int i;
8221 
8222     for (i = 0; i < 44; i++) {
8223         if (i == 0) {
8224             /* pid */
8225             g_string_printf(buf, FMT_pid " ", getpid());
8226         } else if (i == 1) {
8227             /* app name */
8228             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8229             bin = bin ? bin + 1 : ts->bprm->argv[0];
8230             g_string_printf(buf, "(%.15s) ", bin);
8231         } else if (i == 2) {
8232             /* task state */
8233             g_string_assign(buf, "R "); /* we are running right now */
8234         } else if (i == 3) {
8235             /* ppid */
8236             g_string_printf(buf, FMT_pid " ", getppid());
8237         } else if (i == 19) {
8238             /* num_threads */
8239             int cpus = 0;
8240             WITH_RCU_READ_LOCK_GUARD() {
8241                 CPUState *cpu_iter;
8242                 CPU_FOREACH(cpu_iter) {
8243                     cpus++;
8244                 }
8245             }
8246             g_string_printf(buf, "%d ", cpus);
8247         } else if (i == 21) {
8248             /* starttime */
8249             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8250         } else if (i == 27) {
8251             /* stack bottom */
8252             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8253         } else {
8254             /* for the rest, there is MasterCard */
8255             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8256         }
8257 
8258         if (write(fd, buf->str, buf->len) != buf->len) {
8259             return -1;
8260         }
8261     }
8262 
8263     return 0;
8264 }
8265 
8266 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8267 {
8268     CPUState *cpu = env_cpu(cpu_env);
8269     TaskState *ts = get_task_state(cpu);
8270     abi_ulong auxv = ts->info->saved_auxv;
8271     abi_ulong len = ts->info->auxv_len;
8272     char *ptr;
8273 
8274     /*
8275      * The auxiliary vector is stored on the target process stack.
8276      * Read the whole auxv vector and copy it to the file.
8277      */
8278     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8279     if (ptr != NULL) {
8280         while (len > 0) {
8281             ssize_t r;
8282             r = write(fd, ptr, len);
8283             if (r <= 0) {
8284                 break;
8285             }
8286             len -= r;
8287             ptr += r;
8288         }
8289         lseek(fd, 0, SEEK_SET);
8290         unlock_user(ptr, auxv, len);
8291     }
8292 
8293     return 0;
8294 }
8295 
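/* Return nonzero if filename names the given entry under /proc/self/ or
 * /proc/<our pid>/, i.e. a procfs file describing this process.
 */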
8296 static int is_proc_myself(const char *filename, const char *entry)
8297 {
8298     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8299         filename += strlen("/proc/");
8300         if (!strncmp(filename, "self/", strlen("self/"))) {
8301             filename += strlen("self/");
8302         } else if (*filename >= '1' && *filename <= '9') {
8303             char myself[80];
8304             snprintf(myself, sizeof(myself), "%d/", getpid());
8305             if (!strncmp(filename, myself, strlen(myself))) {
8306                 filename += strlen(myself);
8307             } else {
8308                 return 0;
8309             }
8310         } else {
8311             return 0;
8312         }
8313         if (!strcmp(filename, entry)) {
8314             return 1;
8315         }
8316     }
8317     return 0;
8318 }
8319 
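/* Write an exception report (message, failing executable path, CPU state
 * and the guest memory map) to logfile, if it is non-NULL.
 */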
8320 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8321                       const char *fmt, int code)
8322 {
8323     if (logfile) {
8324         CPUState *cs = env_cpu(env);
8325 
8326         fprintf(logfile, fmt, code);
8327         fprintf(logfile, "Failing executable: %s\n", exec_path);
8328         cpu_dump_state(cs, logfile, 0);
8329         open_self_maps(env, fileno(logfile));
8330     }
8331 }
8332 
8333 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8334 {
8335     /* dump to console */
8336     excp_dump_file(stderr, env, fmt, code);
8337 
8338     /* dump to log file */
8339     if (qemu_log_separate()) {
8340         FILE *logfile = qemu_log_trylock();
8341 
8342         excp_dump_file(logfile, env, fmt, code);
8343         qemu_log_unlock(logfile);
8344     }
8345 }
8346 
8347 #include "target_proc.h"
8348 
8349 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8350     defined(HAVE_ARCH_PROC_CPUINFO) || \
8351     defined(HAVE_ARCH_PROC_HARDWARE)
8352 static int is_proc(const char *filename, const char *entry)
8353 {
8354     return strcmp(filename, entry) == 0;
8355 }
8356 #endif
8357 
8358 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
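/* Fake /proc/net/route when host and target endianness differ: re-emit each
 * route with the hex address fields (destination, gateway, mask) swapped
 * into target byte order.
 */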
8359 static int open_net_route(CPUArchState *cpu_env, int fd)
8360 {
8361     FILE *fp;
8362     char *line = NULL;
8363     size_t len = 0;
8364     ssize_t read;
8365 
8366     fp = fopen("/proc/net/route", "r");
8367     if (fp == NULL) {
8368         return -1;
8369     }
8370 
8371     /* read header */
8372 
8373     read = getline(&line, &len, fp);
8374     dprintf(fd, "%s", line);
8375 
8376     /* read routes */
8377 
8378     while ((read = getline(&line, &len, fp)) != -1) {
8379         char iface[16];
8380         uint32_t dest, gw, mask;
8381         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8382         int fields;
8383 
8384         fields = sscanf(line,
8385                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8386                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8387                         &mask, &mtu, &window, &irtt);
8388         if (fields != 11) {
8389             continue;
8390         }
8391         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8392                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8393                 metric, tswap32(mask), mtu, window, irtt);
8394     }
8395 
8396     free(line);
8397     fclose(fp);
8398 
8399     return 0;
8400 }
8401 #endif
8402 
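/*
 * Intercept opens of procfs paths that need guest-specific contents
 * (maps, stat, auxv, cmdline, ...).  Matching files are materialised in a
 * memfd (or an unlinked temp file), /proc/self/exe is redirected to the real
 * executable, and -2 is returned for anything that should fall through to a
 * normal host open.
 */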
8403 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8404                               const char *fname, int flags, mode_t mode,
8405                               int openat2_resolve, bool safe)
8406 {
8407     g_autofree char *proc_name = NULL;
8408     const char *pathname;
8409     struct fake_open {
8410         const char *filename;
8411         int (*fill)(CPUArchState *cpu_env, int fd);
8412         int (*cmp)(const char *s1, const char *s2);
8413     };
8414     const struct fake_open *fake_open;
8415     static const struct fake_open fakes[] = {
8416         { "maps", open_self_maps, is_proc_myself },
8417         { "smaps", open_self_smaps, is_proc_myself },
8418         { "stat", open_self_stat, is_proc_myself },
8419         { "auxv", open_self_auxv, is_proc_myself },
8420         { "cmdline", open_self_cmdline, is_proc_myself },
8421 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8422         { "/proc/net/route", open_net_route, is_proc },
8423 #endif
8424 #if defined(HAVE_ARCH_PROC_CPUINFO)
8425         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8426 #endif
8427 #if defined(HAVE_ARCH_PROC_HARDWARE)
8428         { "/proc/hardware", open_hardware, is_proc },
8429 #endif
8430         { NULL, NULL, NULL }
8431     };
8432 
8433     /* if this is a file from the /proc/ filesystem, expand to its full name */
8434     proc_name = realpath(fname, NULL);
8435     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8436         pathname = proc_name;
8437     } else {
8438         pathname = fname;
8439     }
8440 
8441     if (is_proc_myself(pathname, "exe")) {
8442         /* Honor openat2 resolve flags */
8443         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8444             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8445             errno = ELOOP;
8446             return -1;
8447         }
8448         if (safe) {
8449             return safe_openat(dirfd, exec_path, flags, mode);
8450         } else {
8451             return openat(dirfd, exec_path, flags, mode);
8452         }
8453     }
8454 
8455     for (fake_open = fakes; fake_open->filename; fake_open++) {
8456         if (fake_open->cmp(pathname, fake_open->filename)) {
8457             break;
8458         }
8459     }
8460 
8461     if (fake_open->filename) {
8462         const char *tmpdir;
8463         char filename[PATH_MAX];
8464         int fd, r;
8465 
8466         fd = memfd_create("qemu-open", 0);
8467         if (fd < 0) {
8468             if (errno != ENOSYS) {
8469                 return fd;
8470             }
8471             /* create temporary file to map stat to */
8472             tmpdir = getenv("TMPDIR");
8473             if (!tmpdir)
8474                 tmpdir = "/tmp";
8475             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8476             fd = mkstemp(filename);
8477             if (fd < 0) {
8478                 return fd;
8479             }
8480             unlink(filename);
8481         }
8482 
8483         if ((r = fake_open->fill(cpu_env, fd))) {
8484             int e = errno;
8485             close(fd);
8486             errno = e;
8487             return r;
8488         }
8489         lseek(fd, 0, SEEK_SET);
8490 
8491         return fd;
8492     }
8493 
8494     return -2;
8495 }
8496 
8497 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8498                     int flags, mode_t mode, bool safe)
8499 {
8500     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8501     if (fd > -2) {
8502         return fd;
8503     }
8504 
8505     if (safe) {
8506         return safe_openat(dirfd, path(pathname), flags, mode);
8507     } else {
8508         return openat(dirfd, path(pathname), flags, mode);
8509     }
8510 }
8511 
8512 
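/*
 * Emulate openat2(): copy the guest's struct open_how (rejecting sizes
 * smaller than the version-0 struct), convert flags/mode/resolve to host
 * values, and try the faked /proc files before issuing the real syscall.
 */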
8513 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8514                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8515                       abi_ulong guest_size)
8516 {
8517     struct open_how_ver0 how = {0};
8518     char *pathname;
8519     int ret;
8520 
8521     if (guest_size < sizeof(struct target_open_how_ver0)) {
8522         return -TARGET_EINVAL;
8523     }
8524     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8525     if (ret) {
8526         if (ret == -TARGET_E2BIG) {
8527             qemu_log_mask(LOG_UNIMP,
8528                           "Unimplemented openat2 open_how size: "
8529                           TARGET_ABI_FMT_lu "\n", guest_size);
8530         }
8531         return ret;
8532     }
8533     pathname = lock_user_string(guest_pathname);
8534     if (!pathname) {
8535         return -TARGET_EFAULT;
8536     }
8537 
8538     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8539     how.mode = tswap64(how.mode);
8540     how.resolve = tswap64(how.resolve);
8541     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8542                                 how.resolve, true);
8543     if (fd > -2) {
8544         ret = get_errno(fd);
8545     } else {
8546         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8547                                      sizeof(struct open_how_ver0)));
8548     }
8549 
8550     fd_trans_unregister(ret);
8551     unlock_user(pathname, guest_pathname, 0);
8552     return ret;
8553 }
8554 
8555 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
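/* Resolve a guest readlink(): /proc/self/exe is answered with the real
 * executable path (not NUL-terminated, per readlink semantics); everything
 * else is passed to the host.
 */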
8556 {
8557     ssize_t ret;
8558 
8559     if (!pathname || !buf) {
8560         errno = EFAULT;
8561         return -1;
8562     }
8563 
8564     if (!bufsiz) {
8565         /* Short circuit this for the magic exe check. */
8566         errno = EINVAL;
8567         return -1;
8568     }
8569 
8570     if (is_proc_myself((const char *)pathname, "exe")) {
8571         /*
8572          * Don't worry about sign mismatch as earlier mapping
8573          * logic would have thrown a bad address error.
8574          */
8575         ret = MIN(strlen(exec_path), bufsiz);
8576         /* We cannot NUL terminate the string. */
8577         memcpy(buf, exec_path, ret);
8578     } else {
8579         ret = readlink(path(pathname), buf, bufsiz);
8580     }
8581 
8582     return ret;
8583 }
8584 
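/*
 * Common implementation of execve and execveat: count and lock the guest's
 * argv/envp string arrays, build host pointer arrays, and invoke the safe
 * exec wrapper (see the comment below on why safe_syscall matters here).
 */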
8585 static int do_execv(CPUArchState *cpu_env, int dirfd,
8586                     abi_long pathname, abi_long guest_argp,
8587                     abi_long guest_envp, int flags, bool is_execveat)
8588 {
8589     int ret;
8590     char **argp, **envp;
8591     int argc, envc;
8592     abi_ulong gp;
8593     abi_ulong addr;
8594     char **q;
8595     void *p;
8596 
8597     argc = 0;
8598 
8599     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8600         if (get_user_ual(addr, gp)) {
8601             return -TARGET_EFAULT;
8602         }
8603         if (!addr) {
8604             break;
8605         }
8606         argc++;
8607     }
8608     envc = 0;
8609     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8610         if (get_user_ual(addr, gp)) {
8611             return -TARGET_EFAULT;
8612         }
8613         if (!addr) {
8614             break;
8615         }
8616         envc++;
8617     }
8618 
8619     argp = g_new0(char *, argc + 1);
8620     envp = g_new0(char *, envc + 1);
8621 
8622     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8623         if (get_user_ual(addr, gp)) {
8624             goto execve_efault;
8625         }
8626         if (!addr) {
8627             break;
8628         }
8629         *q = lock_user_string(addr);
8630         if (!*q) {
8631             goto execve_efault;
8632         }
8633     }
8634     *q = NULL;
8635 
8636     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8637         if (get_user_ual(addr, gp)) {
8638             goto execve_efault;
8639         }
8640         if (!addr) {
8641             break;
8642         }
8643         *q = lock_user_string(addr);
8644         if (!*q) {
8645             goto execve_efault;
8646         }
8647     }
8648     *q = NULL;
8649 
8650     /*
8651      * Although execve() is not an interruptible syscall it is
8652      * a special case where we must use the safe_syscall wrapper:
8653      * if we allow a signal to happen before we make the host
8654      * syscall then we will 'lose' it, because at the point of
8655      * execve the process leaves QEMU's control. So we use the
8656      * safe syscall wrapper to ensure that we either take the
8657      * signal as a guest signal, or else it does not happen
8658      * before the execve completes and makes it the other
8659      * program's problem.
8660      */
8661     p = lock_user_string(pathname);
8662     if (!p) {
8663         goto execve_efault;
8664     }
8665 
8666     const char *exe = p;
8667     if (is_proc_myself(p, "exe")) {
8668         exe = exec_path;
8669     }
8670     ret = is_execveat
8671         ? safe_execveat(dirfd, exe, argp, envp, flags)
8672         : safe_execve(exe, argp, envp);
8673     ret = get_errno(ret);
8674 
8675     unlock_user(p, pathname, 0);
8676 
8677     goto execve_end;
8678 
8679 execve_efault:
8680     ret = -TARGET_EFAULT;
8681 
8682 execve_end:
8683     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8684         if (get_user_ual(addr, gp) || !addr) {
8685             break;
8686         }
8687         unlock_user(*q, addr, 0);
8688     }
8689     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8690         if (get_user_ual(addr, gp) || !addr) {
8691             break;
8692         }
8693         unlock_user(*q, addr, 0);
8694     }
8695 
8696     g_free(argp);
8697     g_free(envp);
8698     return ret;
8699 }
8700 
8701 #define TIMER_MAGIC 0x0caf0000
8702 #define TIMER_MAGIC_MASK 0xffff0000
8703 
8704 /* Convert QEMU provided timer ID back to internal 16bit index format */
8705 static target_timer_t get_timer_id(abi_long arg)
8706 {
8707     target_timer_t timerid = arg;
8708 
8709     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8710         return -TARGET_EINVAL;
8711     }
8712 
8713     timerid &= 0xffff;
8714 
8715     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8716         return -TARGET_EINVAL;
8717     }
8718 
8719     return timerid;
8720 }
8721 
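/* Convert a guest CPU affinity mask (an array of abi_ulong in guest byte
 * order) into the host's unsigned long bitmap, bit by bit, since word size
 * and endianness may differ.
 */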
8722 static int target_to_host_cpu_mask(unsigned long *host_mask,
8723                                    size_t host_size,
8724                                    abi_ulong target_addr,
8725                                    size_t target_size)
8726 {
8727     unsigned target_bits = sizeof(abi_ulong) * 8;
8728     unsigned host_bits = sizeof(*host_mask) * 8;
8729     abi_ulong *target_mask;
8730     unsigned i, j;
8731 
8732     assert(host_size >= target_size);
8733 
8734     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8735     if (!target_mask) {
8736         return -TARGET_EFAULT;
8737     }
8738     memset(host_mask, 0, host_size);
8739 
8740     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8741         unsigned bit = i * target_bits;
8742         abi_ulong val;
8743 
8744         __get_user(val, &target_mask[i]);
8745         for (j = 0; j < target_bits; j++, bit++) {
8746             if (val & (1UL << j)) {
8747                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8748             }
8749         }
8750     }
8751 
8752     unlock_user(target_mask, target_addr, 0);
8753     return 0;
8754 }
8755 
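/* Inverse of target_to_host_cpu_mask(): repack the host CPU bitmap into
 * guest abi_ulong words.
 */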
8756 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8757                                    size_t host_size,
8758                                    abi_ulong target_addr,
8759                                    size_t target_size)
8760 {
8761     unsigned target_bits = sizeof(abi_ulong) * 8;
8762     unsigned host_bits = sizeof(*host_mask) * 8;
8763     abi_ulong *target_mask;
8764     unsigned i, j;
8765 
8766     assert(host_size >= target_size);
8767 
8768     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8769     if (!target_mask) {
8770         return -TARGET_EFAULT;
8771     }
8772 
8773     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8774         unsigned bit = i * target_bits;
8775         abi_ulong val = 0;
8776 
8777         for (j = 0; j < target_bits; j++, bit++) {
8778             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8779                 val |= 1UL << j;
8780             }
8781         }
8782         __put_user(val, &target_mask[i]);
8783     }
8784 
8785     unlock_user(target_mask, target_addr, target_size);
8786     return 0;
8787 }
8788 
8789 #ifdef TARGET_NR_getdents
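/*
 * Emulate getdents by reading host dirents into a bounce buffer and
 * repacking them into the target's dirent layout, since record sizes and
 * alignment may differ between host and target.
 */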
8790 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8791 {
8792     g_autofree void *hdirp = NULL;
8793     void *tdirp;
8794     int hlen, hoff, toff;
8795     int hreclen, treclen;
8796     off_t prev_diroff = 0;
8797 
8798     hdirp = g_try_malloc(count);
8799     if (!hdirp) {
8800         return -TARGET_ENOMEM;
8801     }
8802 
8803 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8804     hlen = sys_getdents(dirfd, hdirp, count);
8805 #else
8806     hlen = sys_getdents64(dirfd, hdirp, count);
8807 #endif
8808 
8809     hlen = get_errno(hlen);
8810     if (is_error(hlen)) {
8811         return hlen;
8812     }
8813 
8814     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8815     if (!tdirp) {
8816         return -TARGET_EFAULT;
8817     }
8818 
8819     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8820 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8821         struct linux_dirent *hde = hdirp + hoff;
8822 #else
8823         struct linux_dirent64 *hde = hdirp + hoff;
8824 #endif
8825         struct target_dirent *tde = tdirp + toff;
8826         int namelen;
8827         uint8_t type;
8828 
8829         namelen = strlen(hde->d_name);
8830         hreclen = hde->d_reclen;
8831         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8832         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8833 
8834         if (toff + treclen > count) {
8835             /*
8836              * If the host struct is smaller than the target struct, or
8837              * requires less alignment and thus packs into less space,
8838              * then the host can return more entries than we can pass
8839              * on to the guest.
8840              */
8841             if (toff == 0) {
8842                 toff = -TARGET_EINVAL; /* result buffer is too small */
8843                 break;
8844             }
8845             /*
8846              * Return what we have, resetting the file pointer to the
8847              * location of the first record not returned.
8848              */
8849             lseek(dirfd, prev_diroff, SEEK_SET);
8850             break;
8851         }
8852 
8853         prev_diroff = hde->d_off;
8854         tde->d_ino = tswapal(hde->d_ino);
8855         tde->d_off = tswapal(hde->d_off);
8856         tde->d_reclen = tswap16(treclen);
8857         memcpy(tde->d_name, hde->d_name, namelen + 1);
8858 
8859         /*
8860          * The getdents type is in what was formerly a padding byte at the
8861          * end of the structure.
8862          */
8863 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8864         type = *((uint8_t *)hde + hreclen - 1);
8865 #else
8866         type = hde->d_type;
8867 #endif
8868         *((uint8_t *)tde + treclen - 1) = type;
8869     }
8870 
8871     unlock_user(tdirp, arg2, toff);
8872     return toff;
8873 }
8874 #endif /* TARGET_NR_getdents */
8875 
8876 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
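/* As do_getdents() above, but producing the target's 64-bit dirent64
 * records.
 */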
8877 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8878 {
8879     g_autofree void *hdirp = NULL;
8880     void *tdirp;
8881     int hlen, hoff, toff;
8882     int hreclen, treclen;
8883     off_t prev_diroff = 0;
8884 
8885     hdirp = g_try_malloc(count);
8886     if (!hdirp) {
8887         return -TARGET_ENOMEM;
8888     }
8889 
8890     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8891     if (is_error(hlen)) {
8892         return hlen;
8893     }
8894 
8895     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8896     if (!tdirp) {
8897         return -TARGET_EFAULT;
8898     }
8899 
8900     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8901         struct linux_dirent64 *hde = hdirp + hoff;
8902         struct target_dirent64 *tde = tdirp + toff;
8903         int namelen;
8904 
8905         namelen = strlen(hde->d_name) + 1;
8906         hreclen = hde->d_reclen;
8907         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8908         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8909 
8910         if (toff + treclen > count) {
8911             /*
8912              * If the host struct is smaller than the target struct, or
8913              * requires less alignment and thus packs into less space,
8914              * then the host can return more entries than we can pass
8915              * on to the guest.
8916              */
8917             if (toff == 0) {
8918                 toff = -TARGET_EINVAL; /* result buffer is too small */
8919                 break;
8920             }
8921             /*
8922              * Return what we have, resetting the file pointer to the
8923              * location of the first record not returned.
8924              */
8925             lseek(dirfd, prev_diroff, SEEK_SET);
8926             break;
8927         }
8928 
8929         prev_diroff = hde->d_off;
8930         tde->d_ino = tswap64(hde->d_ino);
8931         tde->d_off = tswap64(hde->d_off);
8932         tde->d_reclen = tswap16(treclen);
8933         tde->d_type = hde->d_type;
8934         memcpy(tde->d_name, hde->d_name, namelen);
8935     }
8936 
8937     unlock_user(tdirp, arg2, toff);
8938     return toff;
8939 }
8940 #endif /* TARGET_NR_getdents64 */
8941 
8942 #if defined(TARGET_NR_riscv_hwprobe)
8943 
8944 #define RISCV_HWPROBE_KEY_MVENDORID     0
8945 #define RISCV_HWPROBE_KEY_MARCHID       1
8946 #define RISCV_HWPROBE_KEY_MIMPID        2
8947 
8948 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8949 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8950 
8951 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8952 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8953 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8954 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8955 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8956 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8957 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8958 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8959 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8960 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8961 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8962 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8963 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8964 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8965 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8966 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8967 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8968 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8969 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8970 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8971 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8972 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8973 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8974 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8975 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8976 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8977 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8978 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8979 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8980 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8981 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8982 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8983 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8984 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8985 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8986 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8987 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8988 
8989 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8990 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8991 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8992 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8993 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8994 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8995 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8996 
8997 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8998 
8999 struct riscv_hwprobe {
9000     abi_llong  key;
9001     abi_ullong value;
9002 };
9003 
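/* Answer each requested riscv_hwprobe key from the CPU configuration;
 * unrecognised keys have their key field set to -1, as the kernel does.
 */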
9004 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9005                                     struct riscv_hwprobe *pair,
9006                                     size_t pair_count)
9007 {
9008     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9009 
9010     for (; pair_count > 0; pair_count--, pair++) {
9011         abi_llong key;
9012         abi_ullong value;
9013         __put_user(0, &pair->value);
9014         __get_user(key, &pair->key);
9015         switch (key) {
9016         case RISCV_HWPROBE_KEY_MVENDORID:
9017             __put_user(cfg->mvendorid, &pair->value);
9018             break;
9019         case RISCV_HWPROBE_KEY_MARCHID:
9020             __put_user(cfg->marchid, &pair->value);
9021             break;
9022         case RISCV_HWPROBE_KEY_MIMPID:
9023             __put_user(cfg->mimpid, &pair->value);
9024             break;
9025         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9026             value = riscv_has_ext(env, RVI) &&
9027                     riscv_has_ext(env, RVM) &&
9028                     riscv_has_ext(env, RVA) ?
9029                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9030             __put_user(value, &pair->value);
9031             break;
9032         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9033             value = riscv_has_ext(env, RVF) &&
9034                     riscv_has_ext(env, RVD) ?
9035                     RISCV_HWPROBE_IMA_FD : 0;
9036             value |= riscv_has_ext(env, RVC) ?
9037                      RISCV_HWPROBE_IMA_C : 0;
9038             value |= riscv_has_ext(env, RVV) ?
9039                      RISCV_HWPROBE_IMA_V : 0;
9040             value |= cfg->ext_zba ?
9041                      RISCV_HWPROBE_EXT_ZBA : 0;
9042             value |= cfg->ext_zbb ?
9043                      RISCV_HWPROBE_EXT_ZBB : 0;
9044             value |= cfg->ext_zbs ?
9045                      RISCV_HWPROBE_EXT_ZBS : 0;
9046             value |= cfg->ext_zicboz ?
9047                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9048             value |= cfg->ext_zbc ?
9049                      RISCV_HWPROBE_EXT_ZBC : 0;
9050             value |= cfg->ext_zbkb ?
9051                      RISCV_HWPROBE_EXT_ZBKB : 0;
9052             value |= cfg->ext_zbkc ?
9053                      RISCV_HWPROBE_EXT_ZBKC : 0;
9054             value |= cfg->ext_zbkx ?
9055                      RISCV_HWPROBE_EXT_ZBKX : 0;
9056             value |= cfg->ext_zknd ?
9057                      RISCV_HWPROBE_EXT_ZKND : 0;
9058             value |= cfg->ext_zkne ?
9059                      RISCV_HWPROBE_EXT_ZKNE : 0;
9060             value |= cfg->ext_zknh ?
9061                      RISCV_HWPROBE_EXT_ZKNH : 0;
9062             value |= cfg->ext_zksed ?
9063                      RISCV_HWPROBE_EXT_ZKSED : 0;
9064             value |= cfg->ext_zksh ?
9065                      RISCV_HWPROBE_EXT_ZKSH : 0;
9066             value |= cfg->ext_zkt ?
9067                      RISCV_HWPROBE_EXT_ZKT : 0;
9068             value |= cfg->ext_zvbb ?
9069                      RISCV_HWPROBE_EXT_ZVBB : 0;
9070             value |= cfg->ext_zvbc ?
9071                      RISCV_HWPROBE_EXT_ZVBC : 0;
9072             value |= cfg->ext_zvkb ?
9073                      RISCV_HWPROBE_EXT_ZVKB : 0;
9074             value |= cfg->ext_zvkg ?
9075                      RISCV_HWPROBE_EXT_ZVKG : 0;
9076             value |= cfg->ext_zvkned ?
9077                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9078             value |= cfg->ext_zvknha ?
9079                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9080             value |= cfg->ext_zvknhb ?
9081                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9082             value |= cfg->ext_zvksed ?
9083                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9084             value |= cfg->ext_zvksh ?
9085                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9086             value |= cfg->ext_zvkt ?
9087                      RISCV_HWPROBE_EXT_ZVKT : 0;
9088             value |= cfg->ext_zfh ?
9089                      RISCV_HWPROBE_EXT_ZFH : 0;
9090             value |= cfg->ext_zfhmin ?
9091                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9092             value |= cfg->ext_zihintntl ?
9093                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9094             value |= cfg->ext_zvfh ?
9095                      RISCV_HWPROBE_EXT_ZVFH : 0;
9096             value |= cfg->ext_zvfhmin ?
9097                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9098             value |= cfg->ext_zfa ?
9099                      RISCV_HWPROBE_EXT_ZFA : 0;
9100             value |= cfg->ext_ztso ?
9101                      RISCV_HWPROBE_EXT_ZTSO : 0;
9102             value |= cfg->ext_zacas ?
9103                      RISCV_HWPROBE_EXT_ZACAS : 0;
9104             value |= cfg->ext_zicond ?
9105                      RISCV_HWPROBE_EXT_ZICOND : 0;
9106             __put_user(value, &pair->value);
9107             break;
9108         case RISCV_HWPROBE_KEY_CPUPERF_0:
9109             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9110             break;
9111         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9112             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9113             __put_user(value, &pair->value);
9114             break;
9115         default:
9116             __put_user(-1, &pair->key);
9117             break;
9118         }
9119     }
9120 }
9121 
9122 /*
9123  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9124  * If the cpumask_t has no bits set: -EINVAL.
9125  * Otherwise the cpumask_t contains some bit set: 0.
9126  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9127  * nor bound the search by cpumask_size().
9128  */
9129 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9130 {
9131     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9132     int ret = -TARGET_EFAULT;
9133 
9134     if (p) {
9135         ret = -TARGET_EINVAL;
9136         /*
9137          * Since we only care about the empty/non-empty state of the cpumask_t
9138          * not the individual bits, we do not need to repartition the bits
9139          * from target abi_ulong to host unsigned long.
9140          *
9141          * Note that the kernel does not round up cpusetsize to a multiple of
9142          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9143          * it copies exactly cpusetsize bytes into a zeroed buffer.
9144          */
9145         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9146             if (p[i]) {
9147                 ret = 0;
9148                 break;
9149             }
9150         }
9151         unlock_user(p, target_cpus, 0);
9152     }
9153     return ret;
9154 }
9155 
9156 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9157                                  abi_long arg2, abi_long arg3,
9158                                  abi_long arg4, abi_long arg5)
9159 {
9160     int ret;
9161     struct riscv_hwprobe *host_pairs;
9162 
9163     /* flags must be 0 */
9164     if (arg5 != 0) {
9165         return -TARGET_EINVAL;
9166     }
9167 
9168     /* check cpu_set */
9169     if (arg3 != 0) {
9170         ret = nonempty_cpu_set(arg3, arg4);
9171         if (ret != 0) {
9172             return ret;
9173         }
9174     } else if (arg4 != 0) {
9175         return -TARGET_EINVAL;
9176     }
9177 
9178     /* no pairs */
9179     if (arg2 == 0) {
9180         return 0;
9181     }
9182 
9183     host_pairs = lock_user(VERIFY_WRITE, arg1,
9184                            sizeof(*host_pairs) * (size_t)arg2, 0);
9185     if (host_pairs == NULL) {
9186         return -TARGET_EFAULT;
9187     }
9188     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9189     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9190     return 0;
9191 }
9192 #endif /* TARGET_NR_riscv_hwprobe */
9193 
9194 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9195 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9196 #endif
9197 
9198 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9199 #define __NR_sys_open_tree __NR_open_tree
9200 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9201           unsigned int, __flags)
9202 #endif
9203 
9204 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9205 #define __NR_sys_move_mount __NR_move_mount
9206 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9207            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9208 #endif
9209 
9210 /* This is an internal helper for do_syscall so that it is easier
9211  * to have a single return point, so that actions, such as logging
9212  * of syscall results, can be performed.
9213  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9214  */
9215 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9216                             abi_long arg2, abi_long arg3, abi_long arg4,
9217                             abi_long arg5, abi_long arg6, abi_long arg7,
9218                             abi_long arg8)
9219 {
9220     CPUState *cpu = env_cpu(cpu_env);
9221     abi_long ret;
9222 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9223     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9224     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9225     || defined(TARGET_NR_statx)
9226     struct stat st;
9227 #endif
9228 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9229     || defined(TARGET_NR_fstatfs)
9230     struct statfs stfs;
9231 #endif
9232     void *p;
9233 
9234     switch(num) {
9235     case TARGET_NR_exit:
9236         /* In old applications this may be used to implement _exit(2).
9237            However, in threaded applications it is used for thread termination,
9238            and _exit_group is used for application termination.
9239            Do thread termination if we have more than one thread.  */
9240 
9241         if (block_signals()) {
9242             return -QEMU_ERESTARTSYS;
9243         }
9244 
9245         pthread_mutex_lock(&clone_lock);
9246 
9247         if (CPU_NEXT(first_cpu)) {
9248             TaskState *ts = get_task_state(cpu);
9249 
9250             if (ts->child_tidptr) {
9251                 put_user_u32(0, ts->child_tidptr);
9252                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9253                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9254             }
9255 
9256             object_unparent(OBJECT(cpu));
9257             object_unref(OBJECT(cpu));
9258             /*
9259              * At this point the CPU should be unrealized and removed
9260              * from cpu lists. We can clean up the rest of the thread
9261              * data without the lock held.
9262              */
9263 
9264             pthread_mutex_unlock(&clone_lock);
9265 
9266             thread_cpu = NULL;
9267             g_free(ts);
9268             rcu_unregister_thread();
9269             pthread_exit(NULL);
9270         }
9271 
9272         pthread_mutex_unlock(&clone_lock);
9273         preexit_cleanup(cpu_env, arg1);
9274         _exit(arg1);
9275         return 0; /* avoid warning */
9276     case TARGET_NR_read:
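        /*
         * A zero-length read from a NULL buffer is passed straight to the
         * host without locking any guest memory.
         */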
9277         if (arg2 == 0 && arg3 == 0) {
9278             return get_errno(safe_read(arg1, 0, 0));
9279         } else {
9280             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9281                 return -TARGET_EFAULT;
9282             ret = get_errno(safe_read(arg1, p, arg3));
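            /*
             * If a data translator is registered for this fd (e.g. for
             * netlink sockets), convert the data read from the host into
             * the target's layout in place.
             */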
9283             if (ret >= 0 &&
9284                 fd_trans_host_to_target_data(arg1)) {
9285                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9286             }
9287             unlock_user(p, arg2, ret);
9288         }
9289         return ret;
9290     case TARGET_NR_write:
9291         if (arg2 == 0 && arg3 == 0) {
9292             return get_errno(safe_write(arg1, 0, 0));
9293         }
9294         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9295             return -TARGET_EFAULT;
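        /*
         * If a data translator is registered for this fd, run it on a
         * scratch copy so the guest's buffer is left unmodified.
         */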
9296         if (fd_trans_target_to_host_data(arg1)) {
9297             void *copy = g_malloc(arg3);
9298             memcpy(copy, p, arg3);
9299             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9300             if (ret >= 0) {
9301                 ret = get_errno(safe_write(arg1, copy, ret));
9302             }
9303             g_free(copy);
9304         } else {
9305             ret = get_errno(safe_write(arg1, p, arg3));
9306         }
9307         unlock_user(p, arg2, 0);
9308         return ret;
9309 
9310 #ifdef TARGET_NR_open
9311     case TARGET_NR_open:
9312         if (!(p = lock_user_string(arg1)))
9313             return -TARGET_EFAULT;
9314         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9315                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9316                                   arg3, true));
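        /* A freshly opened fd has no translator; drop any stale entry
           left behind for this fd number. */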
9317         fd_trans_unregister(ret);
9318         unlock_user(p, arg1, 0);
9319         return ret;
9320 #endif
9321     case TARGET_NR_openat:
9322         if (!(p = lock_user_string(arg2)))
9323             return -TARGET_EFAULT;
9324         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9325                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9326                                   arg4, true));
9327         fd_trans_unregister(ret);
9328         unlock_user(p, arg2, 0);
9329         return ret;
9330     case TARGET_NR_openat2:
9331         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9332         return ret;
9333 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9334     case TARGET_NR_name_to_handle_at:
9335         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9336         return ret;
9337 #endif
9338 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9339     case TARGET_NR_open_by_handle_at:
9340         ret = do_open_by_handle_at(arg1, arg2, arg3);
9341         fd_trans_unregister(ret);
9342         return ret;
9343 #endif
9344 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9345     case TARGET_NR_pidfd_open:
9346         return get_errno(pidfd_open(arg1, arg2));
9347 #endif
9348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9349     case TARGET_NR_pidfd_send_signal:
9350         {
9351             siginfo_t uinfo, *puinfo;
9352 
9353             if (arg3) {
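            /* Convert the optional guest siginfo to host layout first. */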
9354                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9355                 if (!p) {
9356                     return -TARGET_EFAULT;
9357                 }
9358                 target_to_host_siginfo(&uinfo, p);
9359                 unlock_user(p, arg3, 0);
9360                 puinfo = &uinfo;
9361             } else {
9362                 puinfo = NULL;
9363             }
9364             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9365                                               puinfo, arg4));
9366         }
9367         return ret;
9368 #endif
9369 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9370     case TARGET_NR_pidfd_getfd:
9371         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9372 #endif
9373     case TARGET_NR_close:
9374         fd_trans_unregister(arg1);
9375         return get_errno(close(arg1));
9376 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9377     case TARGET_NR_close_range:
9378         ret = get_errno(sys_close_range(arg1, arg2, arg3));
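        /*
         * Unless the fds were only marked close-on-exec, they are gone now,
         * so drop any fd translators registered in the closed range.
         */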
9379         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9380             abi_long fd, maxfd;
9381             maxfd = MIN(arg2, target_fd_max);
9382             for (fd = arg1; fd < maxfd; fd++) {
9383                 fd_trans_unregister(fd);
9384             }
9385         }
9386         return ret;
9387 #endif
9388 
9389     case TARGET_NR_brk:
9390         return do_brk(arg1);
9391 #ifdef TARGET_NR_fork
9392     case TARGET_NR_fork:
9393         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9394 #endif
9395 #ifdef TARGET_NR_waitpid
9396     case TARGET_NR_waitpid:
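        /* waitpid is implemented via wait4 without rusage reporting. */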
9397         {
9398             int status;
9399             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9400             if (!is_error(ret) && arg2 && ret
9401                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9402                 return -TARGET_EFAULT;
9403         }
9404         return ret;
9405 #endif
9406 #ifdef TARGET_NR_waitid
9407     case TARGET_NR_waitid:
9408         {
9409             struct rusage ru;
9410             siginfo_t info;
9411 
9412             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9413                                         arg4, (arg5 ? &ru : NULL)));
9414             if (!is_error(ret)) {
9415                 if (arg3) {
9416                     p = lock_user(VERIFY_WRITE, arg3,
9417                                   sizeof(target_siginfo_t), 0);
9418                     if (!p) {
9419                         return -TARGET_EFAULT;
9420                     }
9421                     host_to_target_siginfo(p, &info);
9422                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9423                 }
9424                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9425                     return -TARGET_EFAULT;
9426                 }
9427             }
9428         }
9429         return ret;
9430 #endif
9431 #ifdef TARGET_NR_creat /* not on alpha */
9432     case TARGET_NR_creat:
9433         if (!(p = lock_user_string(arg1)))
9434             return -TARGET_EFAULT;
9435         ret = get_errno(creat(p, arg2));
9436         fd_trans_unregister(ret);
9437         unlock_user(p, arg1, 0);
9438         return ret;
9439 #endif
9440 #ifdef TARGET_NR_link
9441     case TARGET_NR_link:
9442         {
9443             void * p2;
9444             p = lock_user_string(arg1);
9445             p2 = lock_user_string(arg2);
9446             if (!p || !p2)
9447                 ret = -TARGET_EFAULT;
9448             else
9449                 ret = get_errno(link(p, p2));
9450             unlock_user(p2, arg2, 0);
9451             unlock_user(p, arg1, 0);
9452         }
9453         return ret;
9454 #endif
9455 #if defined(TARGET_NR_linkat)
9456     case TARGET_NR_linkat:
9457         {
9458             void * p2 = NULL;
9459             if (!arg2 || !arg4)
9460                 return -TARGET_EFAULT;
9461             p  = lock_user_string(arg2);
9462             p2 = lock_user_string(arg4);
9463             if (!p || !p2)
9464                 ret = -TARGET_EFAULT;
9465             else
9466                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9467             unlock_user(p, arg2, 0);
9468             unlock_user(p2, arg4, 0);
9469         }
9470         return ret;
9471 #endif
9472 #ifdef TARGET_NR_unlink
9473     case TARGET_NR_unlink:
9474         if (!(p = lock_user_string(arg1)))
9475             return -TARGET_EFAULT;
9476         ret = get_errno(unlink(p));
9477         unlock_user(p, arg1, 0);
9478         return ret;
9479 #endif
9480 #if defined(TARGET_NR_unlinkat)
9481     case TARGET_NR_unlinkat:
9482         if (!(p = lock_user_string(arg2)))
9483             return -TARGET_EFAULT;
9484         ret = get_errno(unlinkat(arg1, p, arg3));
9485         unlock_user(p, arg2, 0);
9486         return ret;
9487 #endif
9488     case TARGET_NR_execveat:
9489         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9490     case TARGET_NR_execve:
9491         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9492     case TARGET_NR_chdir:
9493         if (!(p = lock_user_string(arg1)))
9494             return -TARGET_EFAULT;
9495         ret = get_errno(chdir(p));
9496         unlock_user(p, arg1, 0);
9497         return ret;
9498 #ifdef TARGET_NR_time
9499     case TARGET_NR_time:
9500         {
9501             time_t host_time;
9502             ret = get_errno(time(&host_time));
9503             if (!is_error(ret)
9504                 && arg1
9505                 && put_user_sal(host_time, arg1))
9506                 return -TARGET_EFAULT;
9507         }
9508         return ret;
9509 #endif
9510 #ifdef TARGET_NR_mknod
9511     case TARGET_NR_mknod:
9512         if (!(p = lock_user_string(arg1)))
9513             return -TARGET_EFAULT;
9514         ret = get_errno(mknod(p, arg2, arg3));
9515         unlock_user(p, arg1, 0);
9516         return ret;
9517 #endif
9518 #if defined(TARGET_NR_mknodat)
9519     case TARGET_NR_mknodat:
9520         if (!(p = lock_user_string(arg2)))
9521             return -TARGET_EFAULT;
9522         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9523         unlock_user(p, arg2, 0);
9524         return ret;
9525 #endif
9526 #ifdef TARGET_NR_chmod
9527     case TARGET_NR_chmod:
9528         if (!(p = lock_user_string(arg1)))
9529             return -TARGET_EFAULT;
9530         ret = get_errno(chmod(p, arg2));
9531         unlock_user(p, arg1, 0);
9532         return ret;
9533 #endif
9534 #ifdef TARGET_NR_lseek
9535     case TARGET_NR_lseek:
9536         return get_errno(lseek(arg1, arg2, arg3));
9537 #endif
9538 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9539     /* Alpha specific */
9540     case TARGET_NR_getxpid:
9541         cpu_env->ir[IR_A4] = getppid();
9542         return get_errno(getpid());
9543 #endif
9544 #ifdef TARGET_NR_getpid
9545     case TARGET_NR_getpid:
9546         return get_errno(getpid());
9547 #endif
9548     case TARGET_NR_mount:
9549         {
9550             /* need to look at the data field */
9551             void *p2, *p3;
9552 
9553             if (arg1) {
9554                 p = lock_user_string(arg1);
9555                 if (!p) {
9556                     return -TARGET_EFAULT;
9557                 }
9558             } else {
9559                 p = NULL;
9560             }
9561 
9562             p2 = lock_user_string(arg2);
9563             if (!p2) {
9564                 if (arg1) {
9565                     unlock_user(p, arg1, 0);
9566                 }
9567                 return -TARGET_EFAULT;
9568             }
9569 
9570             if (arg3) {
9571                 p3 = lock_user_string(arg3);
9572                 if (!p3) {
9573                     if (arg1) {
9574                         unlock_user(p, arg1, 0);
9575                     }
9576                     unlock_user(p2, arg2, 0);
9577                     return -TARGET_EFAULT;
9578                 }
9579             } else {
9580                 p3 = NULL;
9581             }
9582 
9583             /* FIXME - arg5 should be locked, but it isn't clear how to
9584              * do that since it's not guaranteed to be a NULL-terminated
9585              * string.
9586              */
9587             if (!arg5) {
9588                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9589             } else {
9590                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9591             }
9592             ret = get_errno(ret);
9593 
9594             if (arg1) {
9595                 unlock_user(p, arg1, 0);
9596             }
9597             unlock_user(p2, arg2, 0);
9598             if (arg3) {
9599                 unlock_user(p3, arg3, 0);
9600             }
9601         }
9602         return ret;
9603 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9604 #if defined(TARGET_NR_umount)
9605     case TARGET_NR_umount:
9606 #endif
9607 #if defined(TARGET_NR_oldumount)
9608     case TARGET_NR_oldumount:
9609 #endif
9610         if (!(p = lock_user_string(arg1)))
9611             return -TARGET_EFAULT;
9612         ret = get_errno(umount(p));
9613         unlock_user(p, arg1, 0);
9614         return ret;
9615 #endif
9616 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9617     case TARGET_NR_move_mount:
9618         {
9619             void *p2, *p4;
9620 
9621             if (!arg2 || !arg4) {
9622                 return -TARGET_EFAULT;
9623             }
9624 
9625             p2 = lock_user_string(arg2);
9626             if (!p2) {
9627                 return -TARGET_EFAULT;
9628             }
9629 
9630             p4 = lock_user_string(arg4);
9631             if (!p4) {
9632                 unlock_user(p2, arg2, 0);
9633                 return -TARGET_EFAULT;
9634             }
9635             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9636 
9637             unlock_user(p2, arg2, 0);
9638             unlock_user(p4, arg4, 0);
9639 
9640             return ret;
9641         }
9642 #endif
9643 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9644     case TARGET_NR_open_tree:
9645         {
9646             void *p2;
9647             int host_flags;
9648 
9649             if (!arg2) {
9650                 return -TARGET_EFAULT;
9651             }
9652 
9653             p2 = lock_user_string(arg2);
9654             if (!p2) {
9655                 return -TARGET_EFAULT;
9656             }
9657 
9658             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9659             if (arg3 & TARGET_O_CLOEXEC) {
9660                 host_flags |= O_CLOEXEC;
9661             }
9662 
9663             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9664 
9665             unlock_user(p2, arg2, 0);
9666 
9667             return ret;
9668         }
9669 #endif
9670 #ifdef TARGET_NR_stime /* not on alpha */
9671     case TARGET_NR_stime:
9672         {
9673             struct timespec ts;
9674             ts.tv_nsec = 0;
9675             if (get_user_sal(ts.tv_sec, arg1)) {
9676                 return -TARGET_EFAULT;
9677             }
9678             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9679         }
9680 #endif
9681 #ifdef TARGET_NR_alarm /* not on alpha */
9682     case TARGET_NR_alarm:
9683         return alarm(arg1);
9684 #endif
9685 #ifdef TARGET_NR_pause /* not on alpha */
9686     case TARGET_NR_pause:
9687         if (!block_signals()) {
9688             sigsuspend(&get_task_state(cpu)->signal_mask);
9689         }
9690         return -TARGET_EINTR;
9691 #endif
9692 #ifdef TARGET_NR_utime
9693     case TARGET_NR_utime:
9694         {
9695             struct utimbuf tbuf, *host_tbuf;
9696             struct target_utimbuf *target_tbuf;
9697             if (arg2) {
9698                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9699                     return -TARGET_EFAULT;
9700                 tbuf.actime = tswapal(target_tbuf->actime);
9701                 tbuf.modtime = tswapal(target_tbuf->modtime);
9702                 unlock_user_struct(target_tbuf, arg2, 0);
9703                 host_tbuf = &tbuf;
9704             } else {
9705                 host_tbuf = NULL;
9706             }
9707             if (!(p = lock_user_string(arg1)))
9708                 return -TARGET_EFAULT;
9709             ret = get_errno(utime(p, host_tbuf));
9710             unlock_user(p, arg1, 0);
9711         }
9712         return ret;
9713 #endif
9714 #ifdef TARGET_NR_utimes
9715     case TARGET_NR_utimes:
9716         {
9717             struct timeval *tvp, tv[2];
9718             if (arg2) {
9719                 if (copy_from_user_timeval(&tv[0], arg2)
9720                     || copy_from_user_timeval(&tv[1],
9721                                               arg2 + sizeof(struct target_timeval)))
9722                     return -TARGET_EFAULT;
9723                 tvp = tv;
9724             } else {
9725                 tvp = NULL;
9726             }
9727             if (!(p = lock_user_string(arg1)))
9728                 return -TARGET_EFAULT;
9729             ret = get_errno(utimes(p, tvp));
9730             unlock_user(p, arg1, 0);
9731         }
9732         return ret;
9733 #endif
9734 #if defined(TARGET_NR_futimesat)
9735     case TARGET_NR_futimesat:
9736         {
9737             struct timeval *tvp, tv[2];
9738             if (arg3) {
9739                 if (copy_from_user_timeval(&tv[0], arg3)
9740                     || copy_from_user_timeval(&tv[1],
9741                                               arg3 + sizeof(struct target_timeval)))
9742                     return -TARGET_EFAULT;
9743                 tvp = tv;
9744             } else {
9745                 tvp = NULL;
9746             }
9747             if (!(p = lock_user_string(arg2))) {
9748                 return -TARGET_EFAULT;
9749             }
9750             ret = get_errno(futimesat(arg1, path(p), tvp));
9751             unlock_user(p, arg2, 0);
9752         }
9753         return ret;
9754 #endif
9755 #ifdef TARGET_NR_access
9756     case TARGET_NR_access:
9757         if (!(p = lock_user_string(arg1))) {
9758             return -TARGET_EFAULT;
9759         }
9760         ret = get_errno(access(path(p), arg2));
9761         unlock_user(p, arg1, 0);
9762         return ret;
9763 #endif
9764 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9765     case TARGET_NR_faccessat:
9766         if (!(p = lock_user_string(arg2))) {
9767             return -TARGET_EFAULT;
9768         }
9769         ret = get_errno(faccessat(arg1, p, arg3, 0));
9770         unlock_user(p, arg2, 0);
9771         return ret;
9772 #endif
9773 #if defined(TARGET_NR_faccessat2)
9774     case TARGET_NR_faccessat2:
9775         if (!(p = lock_user_string(arg2))) {
9776             return -TARGET_EFAULT;
9777         }
9778         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9779         unlock_user(p, arg2, 0);
9780         return ret;
9781 #endif
9782 #ifdef TARGET_NR_nice /* not on alpha */
9783     case TARGET_NR_nice:
9784         return get_errno(nice(arg1));
9785 #endif
9786     case TARGET_NR_sync:
9787         sync();
9788         return 0;
9789 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9790     case TARGET_NR_syncfs:
9791         return get_errno(syncfs(arg1));
9792 #endif
9793     case TARGET_NR_kill:
9794         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9795 #ifdef TARGET_NR_rename
9796     case TARGET_NR_rename:
9797         {
9798             void *p2;
9799             p = lock_user_string(arg1);
9800             p2 = lock_user_string(arg2);
9801             if (!p || !p2)
9802                 ret = -TARGET_EFAULT;
9803             else
9804                 ret = get_errno(rename(p, p2));
9805             unlock_user(p2, arg2, 0);
9806             unlock_user(p, arg1, 0);
9807         }
9808         return ret;
9809 #endif
9810 #if defined(TARGET_NR_renameat)
9811     case TARGET_NR_renameat:
9812         {
9813             void *p2;
9814             p  = lock_user_string(arg2);
9815             p2 = lock_user_string(arg4);
9816             if (!p || !p2)
9817                 ret = -TARGET_EFAULT;
9818             else
9819                 ret = get_errno(renameat(arg1, p, arg3, p2));
9820             unlock_user(p2, arg4, 0);
9821             unlock_user(p, arg2, 0);
9822         }
9823         return ret;
9824 #endif
9825 #if defined(TARGET_NR_renameat2)
9826     case TARGET_NR_renameat2:
9827         {
9828             void *p2;
9829             p  = lock_user_string(arg2);
9830             p2 = lock_user_string(arg4);
9831             if (!p || !p2) {
9832                 ret = -TARGET_EFAULT;
9833             } else {
9834                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9835             }
9836             unlock_user(p2, arg4, 0);
9837             unlock_user(p, arg2, 0);
9838         }
9839         return ret;
9840 #endif
9841 #ifdef TARGET_NR_mkdir
9842     case TARGET_NR_mkdir:
9843         if (!(p = lock_user_string(arg1)))
9844             return -TARGET_EFAULT;
9845         ret = get_errno(mkdir(p, arg2));
9846         unlock_user(p, arg1, 0);
9847         return ret;
9848 #endif
9849 #if defined(TARGET_NR_mkdirat)
9850     case TARGET_NR_mkdirat:
9851         if (!(p = lock_user_string(arg2)))
9852             return -TARGET_EFAULT;
9853         ret = get_errno(mkdirat(arg1, p, arg3));
9854         unlock_user(p, arg2, 0);
9855         return ret;
9856 #endif
9857 #ifdef TARGET_NR_rmdir
9858     case TARGET_NR_rmdir:
9859         if (!(p = lock_user_string(arg1)))
9860             return -TARGET_EFAULT;
9861         ret = get_errno(rmdir(p));
9862         unlock_user(p, arg1, 0);
9863         return ret;
9864 #endif
9865     case TARGET_NR_dup:
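        /* On success, copy any fd translator from the old fd to the new one. */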
9866         ret = get_errno(dup(arg1));
9867         if (ret >= 0) {
9868             fd_trans_dup(arg1, ret);
9869         }
9870         return ret;
9871 #ifdef TARGET_NR_pipe
9872     case TARGET_NR_pipe:
9873         return do_pipe(cpu_env, arg1, 0, 0);
9874 #endif
9875 #ifdef TARGET_NR_pipe2
9876     case TARGET_NR_pipe2:
9877         return do_pipe(cpu_env, arg1,
9878                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9879 #endif
9880     case TARGET_NR_times:
9881         {
9882             struct target_tms *tmsp;
9883             struct tms tms;
9884             ret = get_errno(times(&tms));
9885             if (arg1) {
9886                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9887                 if (!tmsp)
9888                     return -TARGET_EFAULT;
9889                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9890                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9891                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9892                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9893             }
9894             if (!is_error(ret))
9895                 ret = host_to_target_clock_t(ret);
9896         }
9897         return ret;
9898     case TARGET_NR_acct:
9899         if (arg1 == 0) {
9900             ret = get_errno(acct(NULL));
9901         } else {
9902             if (!(p = lock_user_string(arg1))) {
9903                 return -TARGET_EFAULT;
9904             }
9905             ret = get_errno(acct(path(p)));
9906             unlock_user(p, arg1, 0);
9907         }
9908         return ret;
9909 #ifdef TARGET_NR_umount2
9910     case TARGET_NR_umount2:
9911         if (!(p = lock_user_string(arg1)))
9912             return -TARGET_EFAULT;
9913         ret = get_errno(umount2(p, arg2));
9914         unlock_user(p, arg1, 0);
9915         return ret;
9916 #endif
9917     case TARGET_NR_ioctl:
9918         return do_ioctl(arg1, arg2, arg3);
9919 #ifdef TARGET_NR_fcntl
9920     case TARGET_NR_fcntl:
9921         return do_fcntl(arg1, arg2, arg3);
9922 #endif
9923     case TARGET_NR_setpgid:
9924         return get_errno(setpgid(arg1, arg2));
9925     case TARGET_NR_umask:
9926         return get_errno(umask(arg1));
9927     case TARGET_NR_chroot:
9928         if (!(p = lock_user_string(arg1)))
9929             return -TARGET_EFAULT;
9930         ret = get_errno(chroot(p));
9931         unlock_user(p, arg1, 0);
9932         return ret;
9933 #ifdef TARGET_NR_dup2
9934     case TARGET_NR_dup2:
9935         ret = get_errno(dup2(arg1, arg2));
9936         if (ret >= 0) {
9937             fd_trans_dup(arg1, arg2);
9938         }
9939         return ret;
9940 #endif
9941 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9942     case TARGET_NR_dup3:
9943     {
9944         int host_flags;
9945 
9946         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9947             return -TARGET_EINVAL;
9948         }
9949         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9950         ret = get_errno(dup3(arg1, arg2, host_flags));
9951         if (ret >= 0) {
9952             fd_trans_dup(arg1, arg2);
9953         }
9954         return ret;
9955     }
9956 #endif
9957 #ifdef TARGET_NR_getppid /* not on alpha */
9958     case TARGET_NR_getppid:
9959         return get_errno(getppid());
9960 #endif
9961 #ifdef TARGET_NR_getpgrp
9962     case TARGET_NR_getpgrp:
9963         return get_errno(getpgrp());
9964 #endif
9965     case TARGET_NR_setsid:
9966         return get_errno(setsid());
9967 #ifdef TARGET_NR_sigaction
9968     case TARGET_NR_sigaction:
9969         {
9970 #if defined(TARGET_MIPS)
9971             struct target_sigaction act, oact, *pact, *old_act;
9972 
9973             if (arg2) {
9974                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9975                     return -TARGET_EFAULT;
9976                 act._sa_handler = old_act->_sa_handler;
9977                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9978                 act.sa_flags = old_act->sa_flags;
9979                 unlock_user_struct(old_act, arg2, 0);
9980                 pact = &act;
9981             } else {
9982                 pact = NULL;
9983             }
9984 
9985             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9986 
9987             if (!is_error(ret) && arg3) {
9988                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9989                     return -TARGET_EFAULT;
9990                 old_act->_sa_handler = oact._sa_handler;
9991                 old_act->sa_flags = oact.sa_flags;
9992                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9993                 old_act->sa_mask.sig[1] = 0;
9994                 old_act->sa_mask.sig[2] = 0;
9995                 old_act->sa_mask.sig[3] = 0;
9996                 unlock_user_struct(old_act, arg3, 1);
9997             }
9998 #else
9999             struct target_old_sigaction *old_act;
10000             struct target_sigaction act, oact, *pact;
10001             if (arg2) {
10002                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10003                     return -TARGET_EFAULT;
10004                 act._sa_handler = old_act->_sa_handler;
10005                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10006                 act.sa_flags = old_act->sa_flags;
10007 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10008                 act.sa_restorer = old_act->sa_restorer;
10009 #endif
10010                 unlock_user_struct(old_act, arg2, 0);
10011                 pact = &act;
10012             } else {
10013                 pact = NULL;
10014             }
10015             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10016             if (!is_error(ret) && arg3) {
10017                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10018                     return -TARGET_EFAULT;
10019                 old_act->_sa_handler = oact._sa_handler;
10020                 old_act->sa_mask = oact.sa_mask.sig[0];
10021                 old_act->sa_flags = oact.sa_flags;
10022 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10023                 old_act->sa_restorer = oact.sa_restorer;
10024 #endif
10025                 unlock_user_struct(old_act, arg3, 1);
10026             }
10027 #endif
10028         }
10029         return ret;
10030 #endif
10031     case TARGET_NR_rt_sigaction:
10032         {
10033             /*
10034              * For Alpha and SPARC this is a 5 argument syscall, with
10035              * a 'restorer' parameter which must be copied into the
10036              * sa_restorer field of the sigaction struct.
10037              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10038              * and arg5 is the sigsetsize.
10039              */
10040 #if defined(TARGET_ALPHA)
10041             target_ulong sigsetsize = arg4;
10042             target_ulong restorer = arg5;
10043 #elif defined(TARGET_SPARC)
10044             target_ulong restorer = arg4;
10045             target_ulong sigsetsize = arg5;
10046 #else
10047             target_ulong sigsetsize = arg4;
10048             target_ulong restorer = 0;
10049 #endif
10050             struct target_sigaction *act = NULL;
10051             struct target_sigaction *oact = NULL;
10052 
10053             if (sigsetsize != sizeof(target_sigset_t)) {
10054                 return -TARGET_EINVAL;
10055             }
10056             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10057                 return -TARGET_EFAULT;
10058             }
10059             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10060                 ret = -TARGET_EFAULT;
10061             } else {
10062                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10063                 if (oact) {
10064                     unlock_user_struct(oact, arg3, 1);
10065                 }
10066             }
10067             if (act) {
10068                 unlock_user_struct(act, arg2, 0);
10069             }
10070         }
10071         return ret;
10072 #ifdef TARGET_NR_sgetmask /* not on alpha */
10073     case TARGET_NR_sgetmask:
10074         {
10075             sigset_t cur_set;
10076             abi_ulong target_set;
10077             ret = do_sigprocmask(0, NULL, &cur_set);
10078             if (!ret) {
10079                 host_to_target_old_sigset(&target_set, &cur_set);
10080                 ret = target_set;
10081             }
10082         }
10083         return ret;
10084 #endif
10085 #ifdef TARGET_NR_ssetmask /* not on alpha */
10086     case TARGET_NR_ssetmask:
10087         {
10088             sigset_t set, oset;
10089             abi_ulong target_set = arg1;
10090             target_to_host_old_sigset(&set, &target_set);
10091             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10092             if (!ret) {
10093                 host_to_target_old_sigset(&target_set, &oset);
10094                 ret = target_set;
10095             }
10096         }
10097         return ret;
10098 #endif
10099 #ifdef TARGET_NR_sigprocmask
10100     case TARGET_NR_sigprocmask:
10101         {
10102 #if defined(TARGET_ALPHA)
10103             sigset_t set, oldset;
10104             abi_ulong mask;
10105             int how;
10106 
10107             switch (arg1) {
10108             case TARGET_SIG_BLOCK:
10109                 how = SIG_BLOCK;
10110                 break;
10111             case TARGET_SIG_UNBLOCK:
10112                 how = SIG_UNBLOCK;
10113                 break;
10114             case TARGET_SIG_SETMASK:
10115                 how = SIG_SETMASK;
10116                 break;
10117             default:
10118                 return -TARGET_EINVAL;
10119             }
10120             mask = arg2;
10121             target_to_host_old_sigset(&set, &mask);
10122 
10123             ret = do_sigprocmask(how, &set, &oldset);
10124             if (!is_error(ret)) {
10125                 host_to_target_old_sigset(&mask, &oldset);
10126                 ret = mask;
10127                 cpu_env->ir[IR_V0] = 0; /* force no error */
10128             }
10129 #else
10130             sigset_t set, oldset, *set_ptr;
10131             int how;
10132 
10133             if (arg2) {
10134                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10135                 if (!p) {
10136                     return -TARGET_EFAULT;
10137                 }
10138                 target_to_host_old_sigset(&set, p);
10139                 unlock_user(p, arg2, 0);
10140                 set_ptr = &set;
10141                 switch (arg1) {
10142                 case TARGET_SIG_BLOCK:
10143                     how = SIG_BLOCK;
10144                     break;
10145                 case TARGET_SIG_UNBLOCK:
10146                     how = SIG_UNBLOCK;
10147                     break;
10148                 case TARGET_SIG_SETMASK:
10149                     how = SIG_SETMASK;
10150                     break;
10151                 default:
10152                     return -TARGET_EINVAL;
10153                 }
10154             } else {
10155                 how = 0;
10156                 set_ptr = NULL;
10157             }
10158             ret = do_sigprocmask(how, set_ptr, &oldset);
10159             if (!is_error(ret) && arg3) {
10160                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10161                     return -TARGET_EFAULT;
10162                 host_to_target_old_sigset(p, &oldset);
10163                 unlock_user(p, arg3, sizeof(target_sigset_t));
10164             }
10165 #endif
10166         }
10167         return ret;
10168 #endif
10169     case TARGET_NR_rt_sigprocmask:
10170         {
10171             int how = arg1;
10172             sigset_t set, oldset, *set_ptr;
10173 
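            /* Only the native sigset size is accepted, matching the kernel. */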
10174             if (arg4 != sizeof(target_sigset_t)) {
10175                 return -TARGET_EINVAL;
10176             }
10177 
10178             if (arg2) {
10179                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10180                 if (!p) {
10181                     return -TARGET_EFAULT;
10182                 }
10183                 target_to_host_sigset(&set, p);
10184                 unlock_user(p, arg2, 0);
10185                 set_ptr = &set;
10186                 switch(how) {
10187                 case TARGET_SIG_BLOCK:
10188                     how = SIG_BLOCK;
10189                     break;
10190                 case TARGET_SIG_UNBLOCK:
10191                     how = SIG_UNBLOCK;
10192                     break;
10193                 case TARGET_SIG_SETMASK:
10194                     how = SIG_SETMASK;
10195                     break;
10196                 default:
10197                     return -TARGET_EINVAL;
10198                 }
10199             } else {
10200                 how = 0;
10201                 set_ptr = NULL;
10202             }
10203             ret = do_sigprocmask(how, set_ptr, &oldset);
10204             if (!is_error(ret) && arg3) {
10205                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10206                     return -TARGET_EFAULT;
10207                 host_to_target_sigset(p, &oldset);
10208                 unlock_user(p, arg3, sizeof(target_sigset_t));
10209             }
10210         }
10211         return ret;
10212 #ifdef TARGET_NR_sigpending
10213     case TARGET_NR_sigpending:
10214         {
10215             sigset_t set;
10216             ret = get_errno(sigpending(&set));
10217             if (!is_error(ret)) {
10218                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10219                     return -TARGET_EFAULT;
10220                 host_to_target_old_sigset(p, &set);
10221                 unlock_user(p, arg1, sizeof(target_sigset_t));
10222             }
10223         }
10224         return ret;
10225 #endif
10226     case TARGET_NR_rt_sigpending:
10227         {
10228             sigset_t set;
10229 
10230             /* Yes, this check is >, not != like most. We follow the
10231              * kernel's logic here: it implements NR_sigpending through
10232              * the same code path, and in that case the old_sigset_t is
10233              * smaller in size.
10234              */
10235             if (arg2 > sizeof(target_sigset_t)) {
10236                 return -TARGET_EINVAL;
10237             }
10238 
10239             ret = get_errno(sigpending(&set));
10240             if (!is_error(ret)) {
10241                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10242                     return -TARGET_EFAULT;
10243                 host_to_target_sigset(p, &set);
10244                 unlock_user(p, arg1, sizeof(target_sigset_t));
10245             }
10246         }
10247         return ret;
10248 #ifdef TARGET_NR_sigsuspend
10249     case TARGET_NR_sigsuspend:
10250         {
10251             sigset_t *set;
10252 
10253 #if defined(TARGET_ALPHA)
10254             TaskState *ts = get_task_state(cpu);
10255             /* target_to_host_old_sigset will bswap back */
10256             abi_ulong mask = tswapal(arg1);
10257             set = &ts->sigsuspend_mask;
10258             target_to_host_old_sigset(set, &mask);
10259 #else
10260             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10261             if (ret != 0) {
10262                 return ret;
10263             }
10264 #endif
10265             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10266             finish_sigsuspend_mask(ret);
10267         }
10268         return ret;
10269 #endif
10270     case TARGET_NR_rt_sigsuspend:
10271         {
10272             sigset_t *set;
10273 
10274             ret = process_sigsuspend_mask(&set, arg1, arg2);
10275             if (ret != 0) {
10276                 return ret;
10277             }
10278             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10279             finish_sigsuspend_mask(ret);
10280         }
10281         return ret;
10282 #ifdef TARGET_NR_rt_sigtimedwait
10283     case TARGET_NR_rt_sigtimedwait:
10284         {
10285             sigset_t set;
10286             struct timespec uts, *puts;
10287             siginfo_t uinfo;
10288 
10289             if (arg4 != sizeof(target_sigset_t)) {
10290                 return -TARGET_EINVAL;
10291             }
10292 
10293             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10294                 return -TARGET_EFAULT;
10295             target_to_host_sigset(&set, p);
10296             unlock_user(p, arg1, 0);
10297             if (arg3) {
10298                 puts = &uts;
10299                 if (target_to_host_timespec(puts, arg3)) {
10300                     return -TARGET_EFAULT;
10301                 }
10302             } else {
10303                 puts = NULL;
10304             }
10305             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10306                                                  SIGSET_T_SIZE));
10307             if (!is_error(ret)) {
10308                 if (arg2) {
10309                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10310                                   0);
10311                     if (!p) {
10312                         return -TARGET_EFAULT;
10313                     }
10314                     host_to_target_siginfo(p, &uinfo);
10315                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10316                 }
10317                 ret = host_to_target_signal(ret);
10318             }
10319         }
10320         return ret;
10321 #endif
10322 #ifdef TARGET_NR_rt_sigtimedwait_time64
10323     case TARGET_NR_rt_sigtimedwait_time64:
10324         {
10325             sigset_t set;
10326             struct timespec uts, *puts;
10327             siginfo_t uinfo;
10328 
10329             if (arg4 != sizeof(target_sigset_t)) {
10330                 return -TARGET_EINVAL;
10331             }
10332 
10333             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10334             if (!p) {
10335                 return -TARGET_EFAULT;
10336             }
10337             target_to_host_sigset(&set, p);
10338             unlock_user(p, arg1, 0);
10339             if (arg3) {
10340                 puts = &uts;
10341                 if (target_to_host_timespec64(puts, arg3)) {
10342                     return -TARGET_EFAULT;
10343                 }
10344             } else {
10345                 puts = NULL;
10346             }
10347             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10348                                                  SIGSET_T_SIZE));
10349             if (!is_error(ret)) {
10350                 if (arg2) {
10351                     p = lock_user(VERIFY_WRITE, arg2,
10352                                   sizeof(target_siginfo_t), 0);
10353                     if (!p) {
10354                         return -TARGET_EFAULT;
10355                     }
10356                     host_to_target_siginfo(p, &uinfo);
10357                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10358                 }
10359                 ret = host_to_target_signal(ret);
10360             }
10361         }
10362         return ret;
10363 #endif
10364     case TARGET_NR_rt_sigqueueinfo:
10365         {
10366             siginfo_t uinfo;
10367 
10368             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10369             if (!p) {
10370                 return -TARGET_EFAULT;
10371             }
10372             target_to_host_siginfo(&uinfo, p);
10373             unlock_user(p, arg3, 0);
10374             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10375         }
10376         return ret;
10377     case TARGET_NR_rt_tgsigqueueinfo:
10378         {
10379             siginfo_t uinfo;
10380 
10381             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10382             if (!p) {
10383                 return -TARGET_EFAULT;
10384             }
10385             target_to_host_siginfo(&uinfo, p);
10386             unlock_user(p, arg4, 0);
10387             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10388         }
10389         return ret;
10390 #ifdef TARGET_NR_sigreturn
10391     case TARGET_NR_sigreturn:
10392         if (block_signals()) {
10393             return -QEMU_ERESTARTSYS;
10394         }
10395         return do_sigreturn(cpu_env);
10396 #endif
10397     case TARGET_NR_rt_sigreturn:
10398         if (block_signals()) {
10399             return -QEMU_ERESTARTSYS;
10400         }
10401         return do_rt_sigreturn(cpu_env);
10402     case TARGET_NR_sethostname:
10403         if (!(p = lock_user_string(arg1)))
10404             return -TARGET_EFAULT;
10405         ret = get_errno(sethostname(p, arg2));
10406         unlock_user(p, arg1, 0);
10407         return ret;
10408 #ifdef TARGET_NR_setrlimit
10409     case TARGET_NR_setrlimit:
10410         {
10411             int resource = target_to_host_resource(arg1);
10412             struct target_rlimit *target_rlim;
10413             struct rlimit rlim;
10414             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10415                 return -TARGET_EFAULT;
10416             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10417             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10418             unlock_user_struct(target_rlim, arg2, 0);
10419             /*
10420              * If we just passed through resource limit settings for memory then
10421              * they would also apply to QEMU's own allocations, and QEMU will
10422              * crash or hang or die if its allocations fail. Ideally we would
10423              * track the guest allocations in QEMU and apply the limits ourselves.
10424              * For now, just tell the guest the call succeeded but don't actually
10425              * limit anything.
10426              */
10427             if (resource != RLIMIT_AS &&
10428                 resource != RLIMIT_DATA &&
10429                 resource != RLIMIT_STACK) {
10430                 return get_errno(setrlimit(resource, &rlim));
10431             } else {
10432                 return 0;
10433             }
10434         }
10435 #endif
10436 #ifdef TARGET_NR_getrlimit
10437     case TARGET_NR_getrlimit:
10438         {
10439             int resource = target_to_host_resource(arg1);
10440             struct target_rlimit *target_rlim;
10441             struct rlimit rlim;
10442 
10443             ret = get_errno(getrlimit(resource, &rlim));
10444             if (!is_error(ret)) {
10445                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10446                     return -TARGET_EFAULT;
10447                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10448                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10449                 unlock_user_struct(target_rlim, arg2, 1);
10450             }
10451         }
10452         return ret;
10453 #endif
10454     case TARGET_NR_getrusage:
10455         {
10456             struct rusage rusage;
10457             ret = get_errno(getrusage(arg1, &rusage));
10458             if (!is_error(ret)) {
10459                 ret = host_to_target_rusage(arg2, &rusage);
10460             }
10461         }
10462         return ret;
10463 #if defined(TARGET_NR_gettimeofday)
10464     case TARGET_NR_gettimeofday:
10465         {
10466             struct timeval tv;
10467             struct timezone tz;
10468 
10469             ret = get_errno(gettimeofday(&tv, &tz));
10470             if (!is_error(ret)) {
10471                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10472                     return -TARGET_EFAULT;
10473                 }
10474                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10475                     return -TARGET_EFAULT;
10476                 }
10477             }
10478         }
10479         return ret;
10480 #endif
10481 #if defined(TARGET_NR_settimeofday)
10482     case TARGET_NR_settimeofday:
10483         {
10484             struct timeval tv, *ptv = NULL;
10485             struct timezone tz, *ptz = NULL;
10486 
10487             if (arg1) {
10488                 if (copy_from_user_timeval(&tv, arg1)) {
10489                     return -TARGET_EFAULT;
10490                 }
10491                 ptv = &tv;
10492             }
10493 
10494             if (arg2) {
10495                 if (copy_from_user_timezone(&tz, arg2)) {
10496                     return -TARGET_EFAULT;
10497                 }
10498                 ptz = &tz;
10499             }
10500 
10501             return get_errno(settimeofday(ptv, ptz));
10502         }
10503 #endif
10504 #if defined(TARGET_NR_select)
10505     case TARGET_NR_select:
10506 #if defined(TARGET_WANT_NI_OLD_SELECT)
10507         /* Some architectures used to have old_select here
10508          * but now return -ENOSYS for it.
10509          */
10510         ret = -TARGET_ENOSYS;
10511 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10512         ret = do_old_select(arg1);
10513 #else
10514         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10515 #endif
10516         return ret;
10517 #endif
10518 #ifdef TARGET_NR_pselect6
10519     case TARGET_NR_pselect6:
10520         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10521 #endif
10522 #ifdef TARGET_NR_pselect6_time64
10523     case TARGET_NR_pselect6_time64:
10524         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10525 #endif
10526 #ifdef TARGET_NR_symlink
10527     case TARGET_NR_symlink:
10528         {
10529             void *p2;
10530             p = lock_user_string(arg1);
10531             p2 = lock_user_string(arg2);
10532             if (!p || !p2)
10533                 ret = -TARGET_EFAULT;
10534             else
10535                 ret = get_errno(symlink(p, p2));
10536             unlock_user(p2, arg2, 0);
10537             unlock_user(p, arg1, 0);
10538         }
10539         return ret;
10540 #endif
10541 #if defined(TARGET_NR_symlinkat)
10542     case TARGET_NR_symlinkat:
10543         {
10544             void *p2;
10545             p  = lock_user_string(arg1);
10546             p2 = lock_user_string(arg3);
10547             if (!p || !p2)
10548                 ret = -TARGET_EFAULT;
10549             else
10550                 ret = get_errno(symlinkat(p, arg2, p2));
10551             unlock_user(p2, arg3, 0);
10552             unlock_user(p, arg1, 0);
10553         }
10554         return ret;
10555 #endif
10556 #ifdef TARGET_NR_readlink
10557     case TARGET_NR_readlink:
10558         {
10559             void *p2;
10560             p = lock_user_string(arg1);
10561             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10562             ret = get_errno(do_guest_readlink(p, p2, arg3));
10563             unlock_user(p2, arg2, ret);
10564             unlock_user(p, arg1, 0);
10565         }
10566         return ret;
10567 #endif
10568 #if defined(TARGET_NR_readlinkat)
10569     case TARGET_NR_readlinkat:
10570         {
10571             void *p2;
10572             p  = lock_user_string(arg2);
10573             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10574             if (!p || !p2) {
10575                 ret = -TARGET_EFAULT;
10576             } else if (!arg4) {
10577                 /* Short circuit this for the magic exe check. */
10578                 ret = -TARGET_EINVAL;
10579             } else if (is_proc_myself((const char *)p, "exe")) {
10580                 /*
10581                  * Don't worry about sign mismatch as earlier mapping
10582                  * logic would have thrown a bad address error.
10583                  */
10584                 ret = MIN(strlen(exec_path), arg4);
10585                 /* We cannot NUL terminate the string. */
10586                 memcpy(p2, exec_path, ret);
10587             } else {
10588                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10589             }
10590             unlock_user(p2, arg3, ret);
10591             unlock_user(p, arg2, 0);
10592         }
10593         return ret;
10594 #endif
10595 #ifdef TARGET_NR_swapon
10596     case TARGET_NR_swapon:
10597         if (!(p = lock_user_string(arg1)))
10598             return -TARGET_EFAULT;
10599         ret = get_errno(swapon(p, arg2));
10600         unlock_user(p, arg1, 0);
10601         return ret;
10602 #endif
10603     case TARGET_NR_reboot:
10604         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10605            /* arg4 is only used here; it must be ignored in all other cases */
10606            p = lock_user_string(arg4);
10607            if (!p) {
10608                return -TARGET_EFAULT;
10609            }
10610            ret = get_errno(reboot(arg1, arg2, arg3, p));
10611            unlock_user(p, arg4, 0);
10612         } else {
10613            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10614         }
10615         return ret;
10616 #ifdef TARGET_NR_mmap
10617     case TARGET_NR_mmap:
10618 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
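        /*
         * The old mmap ABI passes a single guest pointer to a block of six
         * arguments; fetch and byte-swap them before calling do_mmap().
         */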
10619         {
10620             abi_ulong *v;
10621             abi_ulong v1, v2, v3, v4, v5, v6;
10622             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10623                 return -TARGET_EFAULT;
10624             v1 = tswapal(v[0]);
10625             v2 = tswapal(v[1]);
10626             v3 = tswapal(v[2]);
10627             v4 = tswapal(v[3]);
10628             v5 = tswapal(v[4]);
10629             v6 = tswapal(v[5]);
10630             unlock_user(v, arg1, 0);
10631             return do_mmap(v1, v2, v3, v4, v5, v6);
10632         }
10633 #else
10634         /* mmap pointers are always untagged */
10635         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10636 #endif
10637 #endif
10638 #ifdef TARGET_NR_mmap2
10639     case TARGET_NR_mmap2:
10640 #ifndef MMAP_SHIFT
10641 #define MMAP_SHIFT 12
10642 #endif
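        /* The mmap2 offset argument is in units of (1 << MMAP_SHIFT) bytes. */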
10643         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10644                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10645 #endif
10646     case TARGET_NR_munmap:
10647         arg1 = cpu_untagged_addr(cpu, arg1);
10648         return get_errno(target_munmap(arg1, arg2));
10649     case TARGET_NR_mprotect:
10650         arg1 = cpu_untagged_addr(cpu, arg1);
10651         {
10652             TaskState *ts = get_task_state(cpu);
10653             /* Special hack to detect libc making the stack executable.  */
10654             if ((arg3 & PROT_GROWSDOWN)
10655                 && arg1 >= ts->info->stack_limit
10656                 && arg1 <= ts->info->start_stack) {
10657                 arg3 &= ~PROT_GROWSDOWN;
10658                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10659                 arg1 = ts->info->stack_limit;
10660             }
10661         }
10662         return get_errno(target_mprotect(arg1, arg2, arg3));
10663 #ifdef TARGET_NR_mremap
10664     case TARGET_NR_mremap:
10665         arg1 = cpu_untagged_addr(cpu, arg1);
10666         /* mremap new_addr (arg5) is always untagged */
10667         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10668 #endif
10669         /* ??? msync/mlock/munlock are broken for softmmu.  */
10670 #ifdef TARGET_NR_msync
10671     case TARGET_NR_msync:
10672         return get_errno(msync(g2h(cpu, arg1), arg2,
10673                                target_to_host_msync_arg(arg3)));
10674 #endif
10675 #ifdef TARGET_NR_mlock
10676     case TARGET_NR_mlock:
10677         return get_errno(mlock(g2h(cpu, arg1), arg2));
10678 #endif
10679 #ifdef TARGET_NR_munlock
10680     case TARGET_NR_munlock:
10681         return get_errno(munlock(g2h(cpu, arg1), arg2));
10682 #endif
10683 #ifdef TARGET_NR_mlockall
10684     case TARGET_NR_mlockall:
10685         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10686 #endif
10687 #ifdef TARGET_NR_munlockall
10688     case TARGET_NR_munlockall:
10689         return get_errno(munlockall());
10690 #endif
10691 #ifdef TARGET_NR_truncate
10692     case TARGET_NR_truncate:
10693         if (!(p = lock_user_string(arg1)))
10694             return -TARGET_EFAULT;
10695         ret = get_errno(truncate(p, arg2));
10696         unlock_user(p, arg1, 0);
10697         return ret;
10698 #endif
10699 #ifdef TARGET_NR_ftruncate
10700     case TARGET_NR_ftruncate:
10701         return get_errno(ftruncate(arg1, arg2));
10702 #endif
10703     case TARGET_NR_fchmod:
10704         return get_errno(fchmod(arg1, arg2));
10705 #if defined(TARGET_NR_fchmodat)
10706     case TARGET_NR_fchmodat:
10707         if (!(p = lock_user_string(arg2)))
10708             return -TARGET_EFAULT;
10709         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10710         unlock_user(p, arg2, 0);
10711         return ret;
10712 #endif
10713     case TARGET_NR_getpriority:
10714         /* Note that negative values are valid for getpriority, so we must
10715            differentiate based on errno settings.  */
10716         errno = 0;
10717         ret = getpriority(arg1, arg2);
10718         if (ret == -1 && errno != 0) {
10719             return -host_to_target_errno(errno);
10720         }
10721 #ifdef TARGET_ALPHA
10722         /* Return value is the unbiased priority.  Signal no error.  */
10723         cpu_env->ir[IR_V0] = 0;
10724 #else
10725         /* Return value is a biased priority to avoid negative numbers.  */
10726         ret = 20 - ret;
10727 #endif
10728         return ret;
10729     case TARGET_NR_setpriority:
10730         return get_errno(setpriority(arg1, arg2, arg3));
10731 #ifdef TARGET_NR_statfs
10732     case TARGET_NR_statfs:
10733         if (!(p = lock_user_string(arg1))) {
10734             return -TARGET_EFAULT;
10735         }
10736         ret = get_errno(statfs(path(p), &stfs));
10737         unlock_user(p, arg1, 0);
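          /* TARGET_NR_fstatfs jumps here to reuse this conversion into the target statfs layout. */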
10738     convert_statfs:
10739         if (!is_error(ret)) {
10740             struct target_statfs *target_stfs;
10741 
10742             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10743                 return -TARGET_EFAULT;
10744             __put_user(stfs.f_type, &target_stfs->f_type);
10745             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10746             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10747             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10748             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10749             __put_user(stfs.f_files, &target_stfs->f_files);
10750             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10751             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10752             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10753             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10754             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10755 #ifdef _STATFS_F_FLAGS
10756             __put_user(stfs.f_flags, &target_stfs->f_flags);
10757 #else
10758             __put_user(0, &target_stfs->f_flags);
10759 #endif
10760             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10761             unlock_user_struct(target_stfs, arg2, 1);
10762         }
10763         return ret;
10764 #endif
10765 #ifdef TARGET_NR_fstatfs
10766     case TARGET_NR_fstatfs:
10767         ret = get_errno(fstatfs(arg1, &stfs));
10768         goto convert_statfs;
10769 #endif
10770 #ifdef TARGET_NR_statfs64
10771     case TARGET_NR_statfs64:
10772         if (!(p = lock_user_string(arg1))) {
10773             return -TARGET_EFAULT;
10774         }
10775         ret = get_errno(statfs(path(p), &stfs));
10776         unlock_user(p, arg1, 0);
10777     convert_statfs64:
10778         if (!is_error(ret)) {
10779             struct target_statfs64 *target_stfs;
10780 
10781             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10782                 return -TARGET_EFAULT;
10783             __put_user(stfs.f_type, &target_stfs->f_type);
10784             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10785             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10786             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10787             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10788             __put_user(stfs.f_files, &target_stfs->f_files);
10789             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10790             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10791             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10792             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10793             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10794 #ifdef _STATFS_F_FLAGS
10795             __put_user(stfs.f_flags, &target_stfs->f_flags);
10796 #else
10797             __put_user(0, &target_stfs->f_flags);
10798 #endif
10799             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10800             unlock_user_struct(target_stfs, arg3, 1);
10801         }
10802         return ret;
10803     case TARGET_NR_fstatfs64:
10804         ret = get_errno(fstatfs(arg1, &stfs));
10805         goto convert_statfs64;
10806 #endif
10807 #ifdef TARGET_NR_socketcall
10808     case TARGET_NR_socketcall:
10809         return do_socketcall(arg1, arg2);
10810 #endif
10811 #ifdef TARGET_NR_accept
10812     case TARGET_NR_accept:
10813         return do_accept4(arg1, arg2, arg3, 0);
10814 #endif
10815 #ifdef TARGET_NR_accept4
10816     case TARGET_NR_accept4:
10817         return do_accept4(arg1, arg2, arg3, arg4);
10818 #endif
10819 #ifdef TARGET_NR_bind
10820     case TARGET_NR_bind:
10821         return do_bind(arg1, arg2, arg3);
10822 #endif
10823 #ifdef TARGET_NR_connect
10824     case TARGET_NR_connect:
10825         return do_connect(arg1, arg2, arg3);
10826 #endif
10827 #ifdef TARGET_NR_getpeername
10828     case TARGET_NR_getpeername:
10829         return do_getpeername(arg1, arg2, arg3);
10830 #endif
10831 #ifdef TARGET_NR_getsockname
10832     case TARGET_NR_getsockname:
10833         return do_getsockname(arg1, arg2, arg3);
10834 #endif
10835 #ifdef TARGET_NR_getsockopt
10836     case TARGET_NR_getsockopt:
10837         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10838 #endif
10839 #ifdef TARGET_NR_listen
10840     case TARGET_NR_listen:
10841         return get_errno(listen(arg1, arg2));
10842 #endif
10843 #ifdef TARGET_NR_recv
10844     case TARGET_NR_recv:
10845         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10846 #endif
10847 #ifdef TARGET_NR_recvfrom
10848     case TARGET_NR_recvfrom:
10849         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10850 #endif
10851 #ifdef TARGET_NR_recvmsg
10852     case TARGET_NR_recvmsg:
10853         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10854 #endif
10855 #ifdef TARGET_NR_send
10856     case TARGET_NR_send:
10857         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10858 #endif
10859 #ifdef TARGET_NR_sendmsg
10860     case TARGET_NR_sendmsg:
10861         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10862 #endif
10863 #ifdef TARGET_NR_sendmmsg
10864     case TARGET_NR_sendmmsg:
10865         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10866 #endif
10867 #ifdef TARGET_NR_recvmmsg
10868     case TARGET_NR_recvmmsg:
10869         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10870 #endif
10871 #ifdef TARGET_NR_sendto
10872     case TARGET_NR_sendto:
10873         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10874 #endif
10875 #ifdef TARGET_NR_shutdown
10876     case TARGET_NR_shutdown:
10877         return get_errno(shutdown(arg1, arg2));
10878 #endif
10879 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10880     case TARGET_NR_getrandom:
10881         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10882         if (!p) {
10883             return -TARGET_EFAULT;
10884         }
10885         ret = get_errno(getrandom(p, arg2, arg3));
10886         unlock_user(p, arg1, ret);
10887         return ret;
10888 #endif
10889 #ifdef TARGET_NR_socket
10890     case TARGET_NR_socket:
10891         return do_socket(arg1, arg2, arg3);
10892 #endif
10893 #ifdef TARGET_NR_socketpair
10894     case TARGET_NR_socketpair:
10895         return do_socketpair(arg1, arg2, arg3, arg4);
10896 #endif
10897 #ifdef TARGET_NR_setsockopt
10898     case TARGET_NR_setsockopt:
10899         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10900 #endif
10901 #if defined(TARGET_NR_syslog)
10902     case TARGET_NR_syslog:
10903         {
10904             int len = arg3;
10905 
10906             switch (arg1) {
10907             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10908             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10909             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10910             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10911             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10912             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10913             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10914             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10915                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10916             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10917             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10918             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10919                 {
10920                     if (len < 0) {
10921                         return -TARGET_EINVAL;
10922                     }
10923                     if (len == 0) {
10924                         return 0;
10925                     }
10926                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10927                     if (!p) {
10928                         return -TARGET_EFAULT;
10929                     }
10930                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10931                     unlock_user(p, arg2, arg3);
10932                 }
10933                 return ret;
10934             default:
10935                 return -TARGET_EINVAL;
10936             }
10937         }
10938         break;
10939 #endif
10940     case TARGET_NR_setitimer:
10941         {
10942             struct itimerval value, ovalue, *pvalue;
10943 
10944             if (arg2) {
10945                 pvalue = &value;
10946                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10947                     || copy_from_user_timeval(&pvalue->it_value,
10948                                               arg2 + sizeof(struct target_timeval)))
10949                     return -TARGET_EFAULT;
10950             } else {
10951                 pvalue = NULL;
10952             }
10953             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10954             if (!is_error(ret) && arg3) {
10955                 if (copy_to_user_timeval(arg3,
10956                                          &ovalue.it_interval)
10957                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10958                                             &ovalue.it_value))
10959                     return -TARGET_EFAULT;
10960             }
10961         }
10962         return ret;
10963     case TARGET_NR_getitimer:
10964         {
10965             struct itimerval value;
10966 
10967             ret = get_errno(getitimer(arg1, &value));
10968             if (!is_error(ret) && arg2) {
10969                 if (copy_to_user_timeval(arg2,
10970                                          &value.it_interval)
10971                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10972                                             &value.it_value))
10973                     return -TARGET_EFAULT;
10974             }
10975         }
10976         return ret;
10977 #ifdef TARGET_NR_stat
10978     case TARGET_NR_stat:
10979         if (!(p = lock_user_string(arg1))) {
10980             return -TARGET_EFAULT;
10981         }
10982         ret = get_errno(stat(path(p), &st));
10983         unlock_user(p, arg1, 0);
10984         goto do_stat;
10985 #endif
10986 #ifdef TARGET_NR_lstat
10987     case TARGET_NR_lstat:
10988         if (!(p = lock_user_string(arg1))) {
10989             return -TARGET_EFAULT;
10990         }
10991         ret = get_errno(lstat(path(p), &st));
10992         unlock_user(p, arg1, 0);
10993         goto do_stat;
10994 #endif
10995 #ifdef TARGET_NR_fstat
10996     case TARGET_NR_fstat:
10997         {
10998             ret = get_errno(fstat(arg1, &st));
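              /* stat and lstat jump to do_stat below to share this host-to-target struct stat conversion. */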
10999 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
11000         do_stat:
11001 #endif
11002             if (!is_error(ret)) {
11003                 struct target_stat *target_st;
11004 
11005                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11006                     return -TARGET_EFAULT;
11007                 memset(target_st, 0, sizeof(*target_st));
11008                 __put_user(st.st_dev, &target_st->st_dev);
11009                 __put_user(st.st_ino, &target_st->st_ino);
11010                 __put_user(st.st_mode, &target_st->st_mode);
11011                 __put_user(st.st_uid, &target_st->st_uid);
11012                 __put_user(st.st_gid, &target_st->st_gid);
11013                 __put_user(st.st_nlink, &target_st->st_nlink);
11014                 __put_user(st.st_rdev, &target_st->st_rdev);
11015                 __put_user(st.st_size, &target_st->st_size);
11016                 __put_user(st.st_blksize, &target_st->st_blksize);
11017                 __put_user(st.st_blocks, &target_st->st_blocks);
11018                 __put_user(st.st_atime, &target_st->target_st_atime);
11019                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11020                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11021 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11022                 __put_user(st.st_atim.tv_nsec,
11023                            &target_st->target_st_atime_nsec);
11024                 __put_user(st.st_mtim.tv_nsec,
11025                            &target_st->target_st_mtime_nsec);
11026                 __put_user(st.st_ctim.tv_nsec,
11027                            &target_st->target_st_ctime_nsec);
11028 #endif
11029                 unlock_user_struct(target_st, arg2, 1);
11030             }
11031         }
11032         return ret;
11033 #endif
11034     case TARGET_NR_vhangup:
11035         return get_errno(vhangup());
11036 #ifdef TARGET_NR_syscall
11037     case TARGET_NR_syscall:
11038         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11039                           arg6, arg7, arg8, 0);
11040 #endif
11041 #if defined(TARGET_NR_wait4)
11042     case TARGET_NR_wait4:
11043         {
11044             int status;
11045             abi_long status_ptr = arg2;
11046             struct rusage rusage, *rusage_ptr;
11047             abi_ulong target_rusage = arg4;
11048             abi_long rusage_err;
11049             if (target_rusage)
11050                 rusage_ptr = &rusage;
11051             else
11052                 rusage_ptr = NULL;
11053             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11054             if (!is_error(ret)) {
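                      /* ret is the pid that was reaped; only copy the status back when a child was actually collected. */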
11055                 if (status_ptr && ret) {
11056                     status = host_to_target_waitstatus(status);
11057                     if (put_user_s32(status, status_ptr))
11058                         return -TARGET_EFAULT;
11059                 }
11060                 if (target_rusage) {
11061                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11062                     if (rusage_err) {
11063                         ret = rusage_err;
11064                     }
11065                 }
11066             }
11067         }
11068         return ret;
11069 #endif
11070 #ifdef TARGET_NR_swapoff
11071     case TARGET_NR_swapoff:
11072         if (!(p = lock_user_string(arg1)))
11073             return -TARGET_EFAULT;
11074         ret = get_errno(swapoff(p));
11075         unlock_user(p, arg1, 0);
11076         return ret;
11077 #endif
11078     case TARGET_NR_sysinfo:
11079         {
11080             struct target_sysinfo *target_value;
11081             struct sysinfo value;
11082             ret = get_errno(sysinfo(&value));
11083             if (!is_error(ret) && arg1)
11084             {
11085                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11086                     return -TARGET_EFAULT;
11087                 __put_user(value.uptime, &target_value->uptime);
11088                 __put_user(value.loads[0], &target_value->loads[0]);
11089                 __put_user(value.loads[1], &target_value->loads[1]);
11090                 __put_user(value.loads[2], &target_value->loads[2]);
11091                 __put_user(value.totalram, &target_value->totalram);
11092                 __put_user(value.freeram, &target_value->freeram);
11093                 __put_user(value.sharedram, &target_value->sharedram);
11094                 __put_user(value.bufferram, &target_value->bufferram);
11095                 __put_user(value.totalswap, &target_value->totalswap);
11096                 __put_user(value.freeswap, &target_value->freeswap);
11097                 __put_user(value.procs, &target_value->procs);
11098                 __put_user(value.totalhigh, &target_value->totalhigh);
11099                 __put_user(value.freehigh, &target_value->freehigh);
11100                 __put_user(value.mem_unit, &target_value->mem_unit);
11101                 unlock_user_struct(target_value, arg1, 1);
11102             }
11103         }
11104         return ret;
11105 #ifdef TARGET_NR_ipc
11106     case TARGET_NR_ipc:
11107         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11108 #endif
11109 #ifdef TARGET_NR_semget
11110     case TARGET_NR_semget:
11111         return get_errno(semget(arg1, arg2, arg3));
11112 #endif
11113 #ifdef TARGET_NR_semop
11114     case TARGET_NR_semop:
11115         return do_semtimedop(arg1, arg2, arg3, 0, false);
11116 #endif
11117 #ifdef TARGET_NR_semtimedop
11118     case TARGET_NR_semtimedop:
11119         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11120 #endif
11121 #ifdef TARGET_NR_semtimedop_time64
11122     case TARGET_NR_semtimedop_time64:
11123         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11124 #endif
11125 #ifdef TARGET_NR_semctl
11126     case TARGET_NR_semctl:
11127         return do_semctl(arg1, arg2, arg3, arg4);
11128 #endif
11129 #ifdef TARGET_NR_msgctl
11130     case TARGET_NR_msgctl:
11131         return do_msgctl(arg1, arg2, arg3);
11132 #endif
11133 #ifdef TARGET_NR_msgget
11134     case TARGET_NR_msgget:
11135         return get_errno(msgget(arg1, arg2));
11136 #endif
11137 #ifdef TARGET_NR_msgrcv
11138     case TARGET_NR_msgrcv:
11139         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11140 #endif
11141 #ifdef TARGET_NR_msgsnd
11142     case TARGET_NR_msgsnd:
11143         return do_msgsnd(arg1, arg2, arg3, arg4);
11144 #endif
11145 #ifdef TARGET_NR_shmget
11146     case TARGET_NR_shmget:
11147         return get_errno(shmget(arg1, arg2, arg3));
11148 #endif
11149 #ifdef TARGET_NR_shmctl
11150     case TARGET_NR_shmctl:
11151         return do_shmctl(arg1, arg2, arg3);
11152 #endif
11153 #ifdef TARGET_NR_shmat
11154     case TARGET_NR_shmat:
11155         return target_shmat(cpu_env, arg1, arg2, arg3);
11156 #endif
11157 #ifdef TARGET_NR_shmdt
11158     case TARGET_NR_shmdt:
11159         return target_shmdt(arg1);
11160 #endif
11161     case TARGET_NR_fsync:
11162         return get_errno(fsync(arg1));
11163     case TARGET_NR_clone:
11164         /* Linux manages to have three different orderings for its
11165          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11166          * match the kernel's CONFIG_CLONE_* settings.
11167          * Microblaze is further special in that it uses a sixth
11168          * implicit argument to clone for the TLS pointer.
11169          */
11170 #if defined(TARGET_MICROBLAZE)
11171         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11172 #elif defined(TARGET_CLONE_BACKWARDS)
11173         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11174 #elif defined(TARGET_CLONE_BACKWARDS2)
11175         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11176 #else
11177         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11178 #endif
11179         return ret;
11180 #ifdef __NR_exit_group
11181         /* new thread calls */
11182     case TARGET_NR_exit_group:
11183         preexit_cleanup(cpu_env, arg1);
11184         return get_errno(exit_group(arg1));
11185 #endif
11186     case TARGET_NR_setdomainname:
11187         if (!(p = lock_user_string(arg1)))
11188             return -TARGET_EFAULT;
11189         ret = get_errno(setdomainname(p, arg2));
11190         unlock_user(p, arg1, 0);
11191         return ret;
11192     case TARGET_NR_uname:
11193         /* no need to transcode because we use the linux syscall */
11194         {
11195             struct new_utsname * buf;
11196 
11197             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11198                 return -TARGET_EFAULT;
11199             ret = get_errno(sys_uname(buf));
11200             if (!is_error(ret)) {
11201                 /* Overwrite the native machine name with whatever is being
11202                    emulated. */
11203                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11204                           sizeof(buf->machine));
11205                 /* Allow the user to override the reported release.  */
11206                 if (qemu_uname_release && *qemu_uname_release) {
11207                     g_strlcpy(buf->release, qemu_uname_release,
11208                               sizeof(buf->release));
11209                 }
11210             }
11211             unlock_user_struct(buf, arg1, 1);
11212         }
11213         return ret;
11214 #ifdef TARGET_I386
11215     case TARGET_NR_modify_ldt:
11216         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11217 #if !defined(TARGET_X86_64)
11218     case TARGET_NR_vm86:
11219         return do_vm86(cpu_env, arg1, arg2);
11220 #endif
11221 #endif
11222 #if defined(TARGET_NR_adjtimex)
11223     case TARGET_NR_adjtimex:
11224         {
11225             struct timex host_buf;
11226 
11227             if (target_to_host_timex(&host_buf, arg1) != 0) {
11228                 return -TARGET_EFAULT;
11229             }
11230             ret = get_errno(adjtimex(&host_buf));
11231             if (!is_error(ret)) {
11232                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11233                     return -TARGET_EFAULT;
11234                 }
11235             }
11236         }
11237         return ret;
11238 #endif
11239 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11240     case TARGET_NR_clock_adjtime:
11241         {
11242             struct timex htx;
11243 
11244             if (target_to_host_timex(&htx, arg2) != 0) {
11245                 return -TARGET_EFAULT;
11246             }
11247             ret = get_errno(clock_adjtime(arg1, &htx));
11248             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11249                 return -TARGET_EFAULT;
11250             }
11251         }
11252         return ret;
11253 #endif
11254 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11255     case TARGET_NR_clock_adjtime64:
11256         {
11257             struct timex htx;
11258 
11259             if (target_to_host_timex64(&htx, arg2) != 0) {
11260                 return -TARGET_EFAULT;
11261             }
11262             ret = get_errno(clock_adjtime(arg1, &htx));
11263             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11264                     return -TARGET_EFAULT;
11265             }
11266         }
11267         return ret;
11268 #endif
11269     case TARGET_NR_getpgid:
11270         return get_errno(getpgid(arg1));
11271     case TARGET_NR_fchdir:
11272         return get_errno(fchdir(arg1));
11273     case TARGET_NR_personality:
11274         return get_errno(personality(arg1));
11275 #ifdef TARGET_NR__llseek /* Not on alpha */
11276     case TARGET_NR__llseek:
11277         {
11278             int64_t res;
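                  /* On hosts without __NR_llseek (64-bit hosts), combine the two 32-bit halves and call lseek() directly. */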
11279 #if !defined(__NR_llseek)
11280             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11281             if (res == -1) {
11282                 ret = get_errno(res);
11283             } else {
11284                 ret = 0;
11285             }
11286 #else
11287             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11288 #endif
11289             if ((ret == 0) && put_user_s64(res, arg4)) {
11290                 return -TARGET_EFAULT;
11291             }
11292         }
11293         return ret;
11294 #endif
11295 #ifdef TARGET_NR_getdents
11296     case TARGET_NR_getdents:
11297         return do_getdents(arg1, arg2, arg3);
11298 #endif /* TARGET_NR_getdents */
11299 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11300     case TARGET_NR_getdents64:
11301         return do_getdents64(arg1, arg2, arg3);
11302 #endif /* TARGET_NR_getdents64 */
11303 #if defined(TARGET_NR__newselect)
11304     case TARGET_NR__newselect:
11305         return do_select(arg1, arg2, arg3, arg4, arg5);
11306 #endif
11307 #ifdef TARGET_NR_poll
11308     case TARGET_NR_poll:
11309         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11310 #endif
11311 #ifdef TARGET_NR_ppoll
11312     case TARGET_NR_ppoll:
11313         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11314 #endif
11315 #ifdef TARGET_NR_ppoll_time64
11316     case TARGET_NR_ppoll_time64:
11317         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11318 #endif
11319     case TARGET_NR_flock:
11320         /* NOTE: the flock constant seems to be the same for every
11321            Linux platform */
11322         return get_errno(safe_flock(arg1, arg2));
11323     case TARGET_NR_readv:
11324         {
11325             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11326             if (vec != NULL) {
11327                 ret = get_errno(safe_readv(arg1, vec, arg3));
11328                 unlock_iovec(vec, arg2, arg3, 1);
11329             } else {
11330                 ret = -host_to_target_errno(errno);
11331             }
11332         }
11333         return ret;
11334     case TARGET_NR_writev:
11335         {
11336             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11337             if (vec != NULL) {
11338                 ret = get_errno(safe_writev(arg1, vec, arg3));
11339                 unlock_iovec(vec, arg2, arg3, 0);
11340             } else {
11341                 ret = -host_to_target_errno(errno);
11342             }
11343         }
11344         return ret;
11345 #if defined(TARGET_NR_preadv)
11346     case TARGET_NR_preadv:
11347         {
11348             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11349             if (vec != NULL) {
11350                 unsigned long low, high;
11351 
11352                 target_to_host_low_high(arg4, arg5, &low, &high);
11353                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11354                 unlock_iovec(vec, arg2, arg3, 1);
11355             } else {
11356                 ret = -host_to_target_errno(errno);
11357             }
11358         }
11359         return ret;
11360 #endif
11361 #if defined(TARGET_NR_pwritev)
11362     case TARGET_NR_pwritev:
11363         {
11364             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11365             if (vec != NULL) {
11366                 unsigned long low, high;
11367 
11368                 target_to_host_low_high(arg4, arg5, &low, &high);
11369                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11370                 unlock_iovec(vec, arg2, arg3, 0);
11371             } else {
11372                 ret = -host_to_target_errno(errno);
11373             }
11374         }
11375         return ret;
11376 #endif
11377     case TARGET_NR_getsid:
11378         return get_errno(getsid(arg1));
11379 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11380     case TARGET_NR_fdatasync:
11381         return get_errno(fdatasync(arg1));
11382 #endif
11383     case TARGET_NR_sched_getaffinity:
11384         {
11385             unsigned int mask_size;
11386             unsigned long *mask;
11387 
11388             /*
11389              * sched_getaffinity needs multiples of ulong, so need to take
11390              * care of mismatches between target ulong and host ulong sizes.
11391              */
11392             if (arg2 & (sizeof(abi_ulong) - 1)) {
11393                 return -TARGET_EINVAL;
11394             }
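                  /* Round the request up to a whole number of host 'unsigned long' words. */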
11395             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11396 
11397             mask = alloca(mask_size);
11398             memset(mask, 0, mask_size);
11399             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11400 
11401             if (!is_error(ret)) {
11402                 if (ret > arg2) {
11403                     /* More data returned than the caller's buffer will fit.
11404                      * This only happens if sizeof(abi_long) < sizeof(long)
11405                      * and the caller passed us a buffer holding an odd number
11406                      * of abi_longs. If the host kernel is actually using the
11407                      * extra 4 bytes then fail EINVAL; otherwise we can just
11408                      * ignore them and only copy the interesting part.
11409                      */
11410                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11411                     if (numcpus > arg2 * 8) {
11412                         return -TARGET_EINVAL;
11413                     }
11414                     ret = arg2;
11415                 }
11416 
11417                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11418                     return -TARGET_EFAULT;
11419                 }
11420             }
11421         }
11422         return ret;
11423     case TARGET_NR_sched_setaffinity:
11424         {
11425             unsigned int mask_size;
11426             unsigned long *mask;
11427 
11428             /*
11429              * sched_setaffinity needs multiples of ulong, so need to take
11430              * care of mismatches between target ulong and host ulong sizes.
11431              */
11432             if (arg2 & (sizeof(abi_ulong) - 1)) {
11433                 return -TARGET_EINVAL;
11434             }
11435             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11436             mask = alloca(mask_size);
11437 
11438             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11439             if (ret) {
11440                 return ret;
11441             }
11442 
11443             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11444         }
11445     case TARGET_NR_getcpu:
11446         {
11447             unsigned cpuid, node;
11448             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11449                                        arg2 ? &node : NULL,
11450                                        NULL));
11451             if (is_error(ret)) {
11452                 return ret;
11453             }
11454             if (arg1 && put_user_u32(cpuid, arg1)) {
11455                 return -TARGET_EFAULT;
11456             }
11457             if (arg2 && put_user_u32(node, arg2)) {
11458                 return -TARGET_EFAULT;
11459             }
11460         }
11461         return ret;
11462     case TARGET_NR_sched_setparam:
11463         {
11464             struct target_sched_param *target_schp;
11465             struct sched_param schp;
11466 
11467             if (arg2 == 0) {
11468                 return -TARGET_EINVAL;
11469             }
11470             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11471                 return -TARGET_EFAULT;
11472             }
11473             schp.sched_priority = tswap32(target_schp->sched_priority);
11474             unlock_user_struct(target_schp, arg2, 0);
11475             return get_errno(sys_sched_setparam(arg1, &schp));
11476         }
11477     case TARGET_NR_sched_getparam:
11478         {
11479             struct target_sched_param *target_schp;
11480             struct sched_param schp;
11481 
11482             if (arg2 == 0) {
11483                 return -TARGET_EINVAL;
11484             }
11485             ret = get_errno(sys_sched_getparam(arg1, &schp));
11486             if (!is_error(ret)) {
11487                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11488                     return -TARGET_EFAULT;
11489                 }
11490                 target_schp->sched_priority = tswap32(schp.sched_priority);
11491                 unlock_user_struct(target_schp, arg2, 1);
11492             }
11493         }
11494         return ret;
11495     case TARGET_NR_sched_setscheduler:
11496         {
11497             struct target_sched_param *target_schp;
11498             struct sched_param schp;
11499             if (arg3 == 0) {
11500                 return -TARGET_EINVAL;
11501             }
11502             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11503                 return -TARGET_EFAULT;
11504             }
11505             schp.sched_priority = tswap32(target_schp->sched_priority);
11506             unlock_user_struct(target_schp, arg3, 0);
11507             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11508         }
11509     case TARGET_NR_sched_getscheduler:
11510         return get_errno(sys_sched_getscheduler(arg1));
11511     case TARGET_NR_sched_getattr:
11512         {
11513             struct target_sched_attr *target_scha;
11514             struct sched_attr scha;
11515             if (arg2 == 0) {
11516                 return -TARGET_EINVAL;
11517             }
11518             if (arg3 > sizeof(scha)) {
11519                 arg3 = sizeof(scha);
11520             }
11521             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11522             if (!is_error(ret)) {
11523                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11524                 if (!target_scha) {
11525                     return -TARGET_EFAULT;
11526                 }
11527                 target_scha->size = tswap32(scha.size);
11528                 target_scha->sched_policy = tswap32(scha.sched_policy);
11529                 target_scha->sched_flags = tswap64(scha.sched_flags);
11530                 target_scha->sched_nice = tswap32(scha.sched_nice);
11531                 target_scha->sched_priority = tswap32(scha.sched_priority);
11532                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11533                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11534                 target_scha->sched_period = tswap64(scha.sched_period);
11535                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11536                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11537                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11538                 }
11539                 unlock_user(target_scha, arg2, arg3);
11540             }
11541             return ret;
11542         }
11543     case TARGET_NR_sched_setattr:
11544         {
11545             struct target_sched_attr *target_scha;
11546             struct sched_attr scha;
11547             uint32_t size;
11548             int zeroed;
11549             if (arg2 == 0) {
11550                 return -TARGET_EINVAL;
11551             }
11552             if (get_user_u32(size, arg2)) {
11553                 return -TARGET_EFAULT;
11554             }
11555             if (!size) {
11556                 size = offsetof(struct target_sched_attr, sched_util_min);
11557             }
11558             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11559                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11560                     return -TARGET_EFAULT;
11561                 }
11562                 return -TARGET_E2BIG;
11563             }
11564 
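                  /* If the guest passes a larger sched_attr than we know about, the extra bytes must be zero,
                   * matching the kernel's rule for extended ABI structs. */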
11565             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11566             if (zeroed < 0) {
11567                 return zeroed;
11568             } else if (zeroed == 0) {
11569                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11570                     return -TARGET_EFAULT;
11571                 }
11572                 return -TARGET_E2BIG;
11573             }
11574             if (size > sizeof(struct target_sched_attr)) {
11575                 size = sizeof(struct target_sched_attr);
11576             }
11577 
11578             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11579             if (!target_scha) {
11580                 return -TARGET_EFAULT;
11581             }
11582             scha.size = size;
11583             scha.sched_policy = tswap32(target_scha->sched_policy);
11584             scha.sched_flags = tswap64(target_scha->sched_flags);
11585             scha.sched_nice = tswap32(target_scha->sched_nice);
11586             scha.sched_priority = tswap32(target_scha->sched_priority);
11587             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11588             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11589             scha.sched_period = tswap64(target_scha->sched_period);
11590             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11591                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11592                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11593             }
11594             unlock_user(target_scha, arg2, 0);
11595             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11596         }
11597     case TARGET_NR_sched_yield:
11598         return get_errno(sched_yield());
11599     case TARGET_NR_sched_get_priority_max:
11600         return get_errno(sched_get_priority_max(arg1));
11601     case TARGET_NR_sched_get_priority_min:
11602         return get_errno(sched_get_priority_min(arg1));
11603 #ifdef TARGET_NR_sched_rr_get_interval
11604     case TARGET_NR_sched_rr_get_interval:
11605         {
11606             struct timespec ts;
11607             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11608             if (!is_error(ret)) {
11609                 ret = host_to_target_timespec(arg2, &ts);
11610             }
11611         }
11612         return ret;
11613 #endif
11614 #ifdef TARGET_NR_sched_rr_get_interval_time64
11615     case TARGET_NR_sched_rr_get_interval_time64:
11616         {
11617             struct timespec ts;
11618             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11619             if (!is_error(ret)) {
11620                 ret = host_to_target_timespec64(arg2, &ts);
11621             }
11622         }
11623         return ret;
11624 #endif
11625 #if defined(TARGET_NR_nanosleep)
11626     case TARGET_NR_nanosleep:
11627         {
11628             struct timespec req, rem;
11629             target_to_host_timespec(&req, arg1);
11630             ret = get_errno(safe_nanosleep(&req, &rem));
11631             if (is_error(ret) && arg2) {
11632                 host_to_target_timespec(arg2, &rem);
11633             }
11634         }
11635         return ret;
11636 #endif
11637     case TARGET_NR_prctl:
11638         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11640 #ifdef TARGET_NR_arch_prctl
11641     case TARGET_NR_arch_prctl:
11642         return do_arch_prctl(cpu_env, arg1, arg2);
11643 #endif
11644 #ifdef TARGET_NR_pread64
11645     case TARGET_NR_pread64:
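              /* Some ABIs pass the 64-bit offset in an aligned register pair, leaving a gap before it;
               * shift the arguments to compensate. */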
11646         if (regpairs_aligned(cpu_env, num)) {
11647             arg4 = arg5;
11648             arg5 = arg6;
11649         }
11650         if (arg2 == 0 && arg3 == 0) {
11651             /* Special-case NULL buffer and zero length, which should succeed */
11652             p = 0;
11653         } else {
11654             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11655             if (!p) {
11656                 return -TARGET_EFAULT;
11657             }
11658         }
11659         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11660         unlock_user(p, arg2, ret);
11661         return ret;
11662     case TARGET_NR_pwrite64:
11663         if (regpairs_aligned(cpu_env, num)) {
11664             arg4 = arg5;
11665             arg5 = arg6;
11666         }
11667         if (arg2 == 0 && arg3 == 0) {
11668             /* Special-case NULL buffer and zero length, which should succeed */
11669             p = 0;
11670         } else {
11671             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11672             if (!p) {
11673                 return -TARGET_EFAULT;
11674             }
11675         }
11676         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11677         unlock_user(p, arg2, 0);
11678         return ret;
11679 #endif
11680     case TARGET_NR_getcwd:
11681         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11682             return -TARGET_EFAULT;
11683         ret = get_errno(sys_getcwd1(p, arg2));
11684         unlock_user(p, arg1, ret);
11685         return ret;
11686     case TARGET_NR_capget:
11687     case TARGET_NR_capset:
11688     {
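              /* capget and capset share this code; 'num' distinguishes the two below. */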
11689         struct target_user_cap_header *target_header;
11690         struct target_user_cap_data *target_data = NULL;
11691         struct __user_cap_header_struct header;
11692         struct __user_cap_data_struct data[2];
11693         struct __user_cap_data_struct *dataptr = NULL;
11694         int i, target_datalen;
11695         int data_items = 1;
11696 
11697         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11698             return -TARGET_EFAULT;
11699         }
11700         header.version = tswap32(target_header->version);
11701         header.pid = tswap32(target_header->pid);
11702 
11703         if (header.version != _LINUX_CAPABILITY_VERSION) {
11704             /* Version 2 and up takes pointer to two user_data structs */
11705             data_items = 2;
11706         }
11707 
11708         target_datalen = sizeof(*target_data) * data_items;
11709 
11710         if (arg2) {
11711             if (num == TARGET_NR_capget) {
11712                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11713             } else {
11714                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11715             }
11716             if (!target_data) {
11717                 unlock_user_struct(target_header, arg1, 0);
11718                 return -TARGET_EFAULT;
11719             }
11720 
11721             if (num == TARGET_NR_capset) {
11722                 for (i = 0; i < data_items; i++) {
11723                     data[i].effective = tswap32(target_data[i].effective);
11724                     data[i].permitted = tswap32(target_data[i].permitted);
11725                     data[i].inheritable = tswap32(target_data[i].inheritable);
11726                 }
11727             }
11728 
11729             dataptr = data;
11730         }
11731 
11732         if (num == TARGET_NR_capget) {
11733             ret = get_errno(capget(&header, dataptr));
11734         } else {
11735             ret = get_errno(capset(&header, dataptr));
11736         }
11737 
11738         /* The kernel always updates version for both capget and capset */
11739         target_header->version = tswap32(header.version);
11740         unlock_user_struct(target_header, arg1, 1);
11741 
11742         if (arg2) {
11743             if (num == TARGET_NR_capget) {
11744                 for (i = 0; i < data_items; i++) {
11745                     target_data[i].effective = tswap32(data[i].effective);
11746                     target_data[i].permitted = tswap32(data[i].permitted);
11747                     target_data[i].inheritable = tswap32(data[i].inheritable);
11748                 }
11749                 unlock_user(target_data, arg2, target_datalen);
11750             } else {
11751                 unlock_user(target_data, arg2, 0);
11752             }
11753         }
11754         return ret;
11755     }
11756     case TARGET_NR_sigaltstack:
11757         return do_sigaltstack(arg1, arg2, cpu_env);
11758 
11759 #ifdef CONFIG_SENDFILE
11760 #ifdef TARGET_NR_sendfile
11761     case TARGET_NR_sendfile:
11762     {
11763         off_t *offp = NULL;
11764         off_t off;
11765         if (arg3) {
11766             ret = get_user_sal(off, arg3);
11767             if (is_error(ret)) {
11768                 return ret;
11769             }
11770             offp = &off;
11771         }
11772         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11773         if (!is_error(ret) && arg3) {
11774             abi_long ret2 = put_user_sal(off, arg3);
11775             if (is_error(ret2)) {
11776                 ret = ret2;
11777             }
11778         }
11779         return ret;
11780     }
11781 #endif
11782 #ifdef TARGET_NR_sendfile64
11783     case TARGET_NR_sendfile64:
11784     {
11785         off_t *offp = NULL;
11786         off_t off;
11787         if (arg3) {
11788             ret = get_user_s64(off, arg3);
11789             if (is_error(ret)) {
11790                 return ret;
11791             }
11792             offp = &off;
11793         }
11794         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11795         if (!is_error(ret) && arg3) {
11796             abi_long ret2 = put_user_s64(off, arg3);
11797             if (is_error(ret2)) {
11798                 ret = ret2;
11799             }
11800         }
11801         return ret;
11802     }
11803 #endif
11804 #endif
11805 #ifdef TARGET_NR_vfork
11806     case TARGET_NR_vfork:
11807         return get_errno(do_fork(cpu_env,
11808                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11809                          0, 0, 0, 0));
11810 #endif
11811 #ifdef TARGET_NR_ugetrlimit
11812     case TARGET_NR_ugetrlimit:
11813     {
11814         struct rlimit rlim;
11815         int resource = target_to_host_resource(arg1);
11816         ret = get_errno(getrlimit(resource, &rlim));
11817         if (!is_error(ret)) {
11818             struct target_rlimit *target_rlim;
11819             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11820                 return -TARGET_EFAULT;
11821             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11822             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11823             unlock_user_struct(target_rlim, arg2, 1);
11824         }
11825         return ret;
11826     }
11827 #endif
11828 #ifdef TARGET_NR_truncate64
11829     case TARGET_NR_truncate64:
11830         if (!(p = lock_user_string(arg1)))
11831             return -TARGET_EFAULT;
11832         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11833         unlock_user(p, arg1, 0);
11834         return ret;
11835 #endif
11836 #ifdef TARGET_NR_ftruncate64
11837     case TARGET_NR_ftruncate64:
11838         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11839 #endif
11840 #ifdef TARGET_NR_stat64
11841     case TARGET_NR_stat64:
11842         if (!(p = lock_user_string(arg1))) {
11843             return -TARGET_EFAULT;
11844         }
11845         ret = get_errno(stat(path(p), &st));
11846         unlock_user(p, arg1, 0);
11847         if (!is_error(ret))
11848             ret = host_to_target_stat64(cpu_env, arg2, &st);
11849         return ret;
11850 #endif
11851 #ifdef TARGET_NR_lstat64
11852     case TARGET_NR_lstat64:
11853         if (!(p = lock_user_string(arg1))) {
11854             return -TARGET_EFAULT;
11855         }
11856         ret = get_errno(lstat(path(p), &st));
11857         unlock_user(p, arg1, 0);
11858         if (!is_error(ret))
11859             ret = host_to_target_stat64(cpu_env, arg2, &st);
11860         return ret;
11861 #endif
11862 #ifdef TARGET_NR_fstat64
11863     case TARGET_NR_fstat64:
11864         ret = get_errno(fstat(arg1, &st));
11865         if (!is_error(ret))
11866             ret = host_to_target_stat64(cpu_env, arg2, &st);
11867         return ret;
11868 #endif
11869 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11870 #ifdef TARGET_NR_fstatat64
11871     case TARGET_NR_fstatat64:
11872 #endif
11873 #ifdef TARGET_NR_newfstatat
11874     case TARGET_NR_newfstatat:
11875 #endif
11876         if (!(p = lock_user_string(arg2))) {
11877             return -TARGET_EFAULT;
11878         }
11879         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11880         unlock_user(p, arg2, 0);
11881         if (!is_error(ret))
11882             ret = host_to_target_stat64(cpu_env, arg3, &st);
11883         return ret;
11884 #endif
11885 #if defined(TARGET_NR_statx)
11886     case TARGET_NR_statx:
11887         {
11888             struct target_statx *target_stx;
11889             int dirfd = arg1;
11890             int flags = arg3;
11891 
11892             p = lock_user_string(arg2);
11893             if (p == NULL) {
11894                 return -TARGET_EFAULT;
11895             }
11896 #if defined(__NR_statx)
11897             {
11898                 /*
11899                  * It is assumed that struct statx is architecture independent.
11900                  */
11901                 struct target_statx host_stx;
11902                 int mask = arg4;
11903 
11904                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11905                 if (!is_error(ret)) {
11906                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11907                         unlock_user(p, arg2, 0);
11908                         return -TARGET_EFAULT;
11909                     }
11910                 }
11911 
11912                 if (ret != -TARGET_ENOSYS) {
11913                     unlock_user(p, arg2, 0);
11914                     return ret;
11915                 }
11916             }
11917 #endif
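                  /* No usable host statx(): fall back to fstatat() and fill in the statx fields
                   * that can be recovered from struct stat. */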
11918             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11919             unlock_user(p, arg2, 0);
11920 
11921             if (!is_error(ret)) {
11922                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11923                     return -TARGET_EFAULT;
11924                 }
11925                 memset(target_stx, 0, sizeof(*target_stx));
11926                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11927                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11928                 __put_user(st.st_ino, &target_stx->stx_ino);
11929                 __put_user(st.st_mode, &target_stx->stx_mode);
11930                 __put_user(st.st_uid, &target_stx->stx_uid);
11931                 __put_user(st.st_gid, &target_stx->stx_gid);
11932                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11933                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11934                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11935                 __put_user(st.st_size, &target_stx->stx_size);
11936                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11937                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11938                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11939                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11940                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11941                 unlock_user_struct(target_stx, arg5, 1);
11942             }
11943         }
11944         return ret;
11945 #endif
11946 #ifdef TARGET_NR_lchown
11947     case TARGET_NR_lchown:
11948         if (!(p = lock_user_string(arg1)))
11949             return -TARGET_EFAULT;
11950         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11951         unlock_user(p, arg1, 0);
11952         return ret;
11953 #endif
11954 #ifdef TARGET_NR_getuid
11955     case TARGET_NR_getuid:
11956         return get_errno(high2lowuid(getuid()));
11957 #endif
11958 #ifdef TARGET_NR_getgid
11959     case TARGET_NR_getgid:
11960         return get_errno(high2lowgid(getgid()));
11961 #endif
11962 #ifdef TARGET_NR_geteuid
11963     case TARGET_NR_geteuid:
11964         return get_errno(high2lowuid(geteuid()));
11965 #endif
11966 #ifdef TARGET_NR_getegid
11967     case TARGET_NR_getegid:
11968         return get_errno(high2lowgid(getegid()));
11969 #endif
11970     case TARGET_NR_setreuid:
11971         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11972     case TARGET_NR_setregid:
11973         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11974     case TARGET_NR_getgroups:
11975         { /* the same code as for TARGET_NR_getgroups32 */
11976             int gidsetsize = arg1;
11977             target_id *target_grouplist;
11978             g_autofree gid_t *grouplist = NULL;
11979             int i;
11980 
11981             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11982                 return -TARGET_EINVAL;
11983             }
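                  /* gidsetsize == 0 only queries the number of groups, so no buffer is needed. */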
11984             if (gidsetsize > 0) {
11985                 grouplist = g_try_new(gid_t, gidsetsize);
11986                 if (!grouplist) {
11987                     return -TARGET_ENOMEM;
11988                 }
11989             }
11990             ret = get_errno(getgroups(gidsetsize, grouplist));
11991             if (!is_error(ret) && gidsetsize > 0) {
11992                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11993                                              gidsetsize * sizeof(target_id), 0);
11994                 if (!target_grouplist) {
11995                     return -TARGET_EFAULT;
11996                 }
11997                 for (i = 0; i < ret; i++) {
11998                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11999                 }
12000                 unlock_user(target_grouplist, arg2,
12001                             gidsetsize * sizeof(target_id));
12002             }
12003             return ret;
12004         }
12005     case TARGET_NR_setgroups:
12006         { /* the same code as for TARGET_NR_setgroups32 */
12007             int gidsetsize = arg1;
12008             target_id *target_grouplist;
12009             g_autofree gid_t *grouplist = NULL;
12010             int i;
12011 
12012             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12013                 return -TARGET_EINVAL;
12014             }
12015             if (gidsetsize > 0) {
12016                 grouplist = g_try_new(gid_t, gidsetsize);
12017                 if (!grouplist) {
12018                     return -TARGET_ENOMEM;
12019                 }
12020                 target_grouplist = lock_user(VERIFY_READ, arg2,
12021                                              gidsetsize * sizeof(target_id), 1);
12022                 if (!target_grouplist) {
12023                     return -TARGET_EFAULT;
12024                 }
12025                 for (i = 0; i < gidsetsize; i++) {
12026                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12027                 }
12028                 unlock_user(target_grouplist, arg2,
12029                             gidsetsize * sizeof(target_id));
12030             }
12031             return get_errno(sys_setgroups(gidsetsize, grouplist));
12032         }
12033     case TARGET_NR_fchown:
12034         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12035 #if defined(TARGET_NR_fchownat)
12036     case TARGET_NR_fchownat:
12037         if (!(p = lock_user_string(arg2)))
12038             return -TARGET_EFAULT;
12039         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12040                                  low2highgid(arg4), arg5));
12041         unlock_user(p, arg2, 0);
12042         return ret;
12043 #endif
12044 #ifdef TARGET_NR_setresuid
12045     case TARGET_NR_setresuid:
12046         return get_errno(sys_setresuid(low2highuid(arg1),
12047                                        low2highuid(arg2),
12048                                        low2highuid(arg3)));
12049 #endif
12050 #ifdef TARGET_NR_getresuid
12051     case TARGET_NR_getresuid:
12052         {
12053             uid_t ruid, euid, suid;
12054             ret = get_errno(getresuid(&ruid, &euid, &suid));
12055             if (!is_error(ret)) {
12056                 if (put_user_id(high2lowuid(ruid), arg1)
12057                     || put_user_id(high2lowuid(euid), arg2)
12058                     || put_user_id(high2lowuid(suid), arg3))
12059                     return -TARGET_EFAULT;
12060             }
12061         }
12062         return ret;
12063 #endif
12064 #ifdef TARGET_NR_setresgid
12065     case TARGET_NR_setresgid:
12066         return get_errno(sys_setresgid(low2highgid(arg1),
12067                                        low2highgid(arg2),
12068                                        low2highgid(arg3)));
12069 #endif
12070 #ifdef TARGET_NR_getresgid
12071     case TARGET_NR_getresgid:
12072         {
12073             gid_t rgid, egid, sgid;
12074             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12075             if (!is_error(ret)) {
12076                 if (put_user_id(high2lowgid(rgid), arg1)
12077                     || put_user_id(high2lowgid(egid), arg2)
12078                     || put_user_id(high2lowgid(sgid), arg3))
12079                     return -TARGET_EFAULT;
12080             }
12081         }
12082         return ret;
12083 #endif
12084 #ifdef TARGET_NR_chown
12085     case TARGET_NR_chown:
12086         if (!(p = lock_user_string(arg1)))
12087             return -TARGET_EFAULT;
12088         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12089         unlock_user(p, arg1, 0);
12090         return ret;
12091 #endif
12092     case TARGET_NR_setuid:
12093         return get_errno(sys_setuid(low2highuid(arg1)));
12094     case TARGET_NR_setgid:
12095         return get_errno(sys_setgid(low2highgid(arg1)));
12096     case TARGET_NR_setfsuid:
12097         return get_errno(setfsuid(arg1));
12098     case TARGET_NR_setfsgid:
12099         return get_errno(setfsgid(arg1));
12100 
12101 #ifdef TARGET_NR_lchown32
12102     case TARGET_NR_lchown32:
12103         if (!(p = lock_user_string(arg1)))
12104             return -TARGET_EFAULT;
12105         ret = get_errno(lchown(p, arg2, arg3));
12106         unlock_user(p, arg1, 0);
12107         return ret;
12108 #endif
12109 #ifdef TARGET_NR_getuid32
12110     case TARGET_NR_getuid32:
12111         return get_errno(getuid());
12112 #endif
12113 
12114 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12115    /* Alpha specific */
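    /*
     * getxuid returns a pair of values: the real uid as the normal
     * syscall result and the effective uid in the a4 register, which
     * is why IR_A4 is written by hand here (getxgid below does the
     * same for the gids).
     */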
12116     case TARGET_NR_getxuid:
12117          {
12118             uid_t euid;
12119             euid = geteuid();
12120             cpu_env->ir[IR_A4] = euid;
12121          }
12122         return get_errno(getuid());
12123 #endif
12124 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12125    /* Alpha specific */
12126     case TARGET_NR_getxgid:
12127          {
12128             gid_t egid;
12129             egid = getegid();
12130             cpu_env->ir[IR_A4] = egid;
12131          }
12132         return get_errno(getgid());
12133 #endif
12134 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12135     /* Alpha specific */
12136     case TARGET_NR_osf_getsysinfo:
12137         ret = -TARGET_EOPNOTSUPP;
12138         switch (arg1) {
12139           case TARGET_GSI_IEEE_FP_CONTROL:
12140             {
12141                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12142                 uint64_t swcr = cpu_env->swcr;
12143 
12144                 swcr &= ~SWCR_STATUS_MASK;
12145                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12146 
12147                 if (put_user_u64(swcr, arg2))
12148                     return -TARGET_EFAULT;
12149                 ret = 0;
12150             }
12151             break;
12152 
12153           /* case GSI_IEEE_STATE_AT_SIGNAL:
12154              -- Not implemented in linux kernel.
12155              case GSI_UACPROC:
12156              -- Retrieves current unaligned access state; not much used.
12157              case GSI_PROC_TYPE:
12158              -- Retrieves implver information; surely not used.
12159              case GSI_GET_HWRPB:
12160              -- Grabs a copy of the HWRPB; surely not used.
12161           */
12162         }
12163         return ret;
12164 #endif
12165 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12166     /* Alpha specific */
12167     case TARGET_NR_osf_setsysinfo:
12168         ret = -TARGET_EOPNOTSUPP;
12169         switch (arg1) {
12170           case TARGET_SSI_IEEE_FP_CONTROL:
12171             {
12172                 uint64_t swcr, fpcr;
12173 
12174                 if (get_user_u64(swcr, arg2)) {
12175                     return -TARGET_EFAULT;
12176                 }
12177 
12178                 /*
12179                  * The kernel calls swcr_update_status to update the
12180                  * status bits from the fpcr at every point that it
12181                  * could be queried.  Therefore, we store the status
12182                  * bits only in FPCR.
12183                  */
12184                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12185 
12186                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12187                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12188                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12189                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12190                 ret = 0;
12191             }
12192             break;
12193 
12194           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12195             {
12196                 uint64_t exc, fpcr, fex;
12197 
12198                 if (get_user_u64(exc, arg2)) {
12199                     return -TARGET_EFAULT;
12200                 }
12201                 exc &= SWCR_STATUS_MASK;
12202                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12203 
12204                 /* Old exceptions are not signaled.  */
12205                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12206                 fex = exc & ~fex;
12207                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12208                 fex &= (cpu_env)->swcr;
12209 
12210                 /* Update the hardware fpcr.  */
12211                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12212                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12213 
12214                 if (fex) {
12215                     int si_code = TARGET_FPE_FLTUNK;
12216                     target_siginfo_t info;
12217 
12218                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12219                         si_code = TARGET_FPE_FLTUND;
12220                     }
12221                     if (fex & SWCR_TRAP_ENABLE_INE) {
12222                         si_code = TARGET_FPE_FLTRES;
12223                     }
12224                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12225                         si_code = TARGET_FPE_FLTUND;
12226                     }
12227                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12228                         si_code = TARGET_FPE_FLTOVF;
12229                     }
12230                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12231                         si_code = TARGET_FPE_FLTDIV;
12232                     }
12233                     if (fex & SWCR_TRAP_ENABLE_INV) {
12234                         si_code = TARGET_FPE_FLTINV;
12235                     }
12236 
12237                     info.si_signo = SIGFPE;
12238                     info.si_errno = 0;
12239                     info.si_code = si_code;
12240                     info._sifields._sigfault._addr = (cpu_env)->pc;
12241                     queue_signal(cpu_env, info.si_signo,
12242                                  QEMU_SI_FAULT, &info);
12243                 }
12244                 ret = 0;
12245             }
12246             break;
12247 
12248           /* case SSI_NVPAIRS:
12249              -- Used with SSIN_UACPROC to enable unaligned accesses.
12250              case SSI_IEEE_STATE_AT_SIGNAL:
12251              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12252              -- Not implemented in linux kernel
12253           */
12254         }
12255         return ret;
12256 #endif
12257 #ifdef TARGET_NR_osf_sigprocmask
12258     /* Alpha specific.  */
12259     case TARGET_NR_osf_sigprocmask:
12260         {
12261             abi_ulong mask;
12262             int how;
12263             sigset_t set, oldset;
12264 
12265             switch(arg1) {
12266             case TARGET_SIG_BLOCK:
12267                 how = SIG_BLOCK;
12268                 break;
12269             case TARGET_SIG_UNBLOCK:
12270                 how = SIG_UNBLOCK;
12271                 break;
12272             case TARGET_SIG_SETMASK:
12273                 how = SIG_SETMASK;
12274                 break;
12275             default:
12276                 return -TARGET_EINVAL;
12277             }
12278             mask = arg2;
12279             target_to_host_old_sigset(&set, &mask);
12280             ret = do_sigprocmask(how, &set, &oldset);
12281             if (!ret) {
12282                 host_to_target_old_sigset(&mask, &oldset);
12283                 ret = mask;
12284             }
12285         }
12286         return ret;
12287 #endif
12288 
12289 #ifdef TARGET_NR_getgid32
12290     case TARGET_NR_getgid32:
12291         return get_errno(getgid());
12292 #endif
12293 #ifdef TARGET_NR_geteuid32
12294     case TARGET_NR_geteuid32:
12295         return get_errno(geteuid());
12296 #endif
12297 #ifdef TARGET_NR_getegid32
12298     case TARGET_NR_getegid32:
12299         return get_errno(getegid());
12300 #endif
12301 #ifdef TARGET_NR_setreuid32
12302     case TARGET_NR_setreuid32:
12303         return get_errno(sys_setreuid(arg1, arg2));
12304 #endif
12305 #ifdef TARGET_NR_setregid32
12306     case TARGET_NR_setregid32:
12307         return get_errno(sys_setregid(arg1, arg2));
12308 #endif
12309 #ifdef TARGET_NR_getgroups32
12310     case TARGET_NR_getgroups32:
12311         { /* the same code as for TARGET_NR_getgroups */
12312             int gidsetsize = arg1;
12313             uint32_t *target_grouplist;
12314             g_autofree gid_t *grouplist = NULL;
12315             int i;
12316 
12317             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12318                 return -TARGET_EINVAL;
12319             }
12320             if (gidsetsize > 0) {
12321                 grouplist = g_try_new(gid_t, gidsetsize);
12322                 if (!grouplist) {
12323                     return -TARGET_ENOMEM;
12324                 }
12325             }
12326             ret = get_errno(getgroups(gidsetsize, grouplist));
12327             if (!is_error(ret) && gidsetsize > 0) {
12328                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12329                                              gidsetsize * 4, 0);
12330                 if (!target_grouplist) {
12331                     return -TARGET_EFAULT;
12332                 }
12333                 for (i = 0; i < ret; i++) {
12334                     target_grouplist[i] = tswap32(grouplist[i]);
12335                 }
12336                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12337             }
12338             return ret;
12339         }
12340 #endif
12341 #ifdef TARGET_NR_setgroups32
12342     case TARGET_NR_setgroups32:
12343         { /* the same code as for TARGET_NR_setgroups */
12344             int gidsetsize = arg1;
12345             uint32_t *target_grouplist;
12346             g_autofree gid_t *grouplist = NULL;
12347             int i;
12348 
12349             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12350                 return -TARGET_EINVAL;
12351             }
12352             if (gidsetsize > 0) {
12353                 grouplist = g_try_new(gid_t, gidsetsize);
12354                 if (!grouplist) {
12355                     return -TARGET_ENOMEM;
12356                 }
12357                 target_grouplist = lock_user(VERIFY_READ, arg2,
12358                                              gidsetsize * 4, 1);
12359                 if (!target_grouplist) {
12360                     return -TARGET_EFAULT;
12361                 }
12362                 for (i = 0; i < gidsetsize; i++) {
12363                     grouplist[i] = tswap32(target_grouplist[i]);
12364                 }
12365                 unlock_user(target_grouplist, arg2, 0);
12366             }
12367             return get_errno(sys_setgroups(gidsetsize, grouplist));
12368         }
12369 #endif
12370 #ifdef TARGET_NR_fchown32
12371     case TARGET_NR_fchown32:
12372         return get_errno(fchown(arg1, arg2, arg3));
12373 #endif
12374 #ifdef TARGET_NR_setresuid32
12375     case TARGET_NR_setresuid32:
12376         return get_errno(sys_setresuid(arg1, arg2, arg3));
12377 #endif
12378 #ifdef TARGET_NR_getresuid32
12379     case TARGET_NR_getresuid32:
12380         {
12381             uid_t ruid, euid, suid;
12382             ret = get_errno(getresuid(&ruid, &euid, &suid));
12383             if (!is_error(ret)) {
12384                 if (put_user_u32(ruid, arg1)
12385                     || put_user_u32(euid, arg2)
12386                     || put_user_u32(suid, arg3))
12387                     return -TARGET_EFAULT;
12388             }
12389         }
12390         return ret;
12391 #endif
12392 #ifdef TARGET_NR_setresgid32
12393     case TARGET_NR_setresgid32:
12394         return get_errno(sys_setresgid(arg1, arg2, arg3));
12395 #endif
12396 #ifdef TARGET_NR_getresgid32
12397     case TARGET_NR_getresgid32:
12398         {
12399             gid_t rgid, egid, sgid;
12400             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12401             if (!is_error(ret)) {
12402                 if (put_user_u32(rgid, arg1)
12403                     || put_user_u32(egid, arg2)
12404                     || put_user_u32(sgid, arg3))
12405                     return -TARGET_EFAULT;
12406             }
12407         }
12408         return ret;
12409 #endif
12410 #ifdef TARGET_NR_chown32
12411     case TARGET_NR_chown32:
12412         if (!(p = lock_user_string(arg1)))
12413             return -TARGET_EFAULT;
12414         ret = get_errno(chown(p, arg2, arg3));
12415         unlock_user(p, arg1, 0);
12416         return ret;
12417 #endif
12418 #ifdef TARGET_NR_setuid32
12419     case TARGET_NR_setuid32:
12420         return get_errno(sys_setuid(arg1));
12421 #endif
12422 #ifdef TARGET_NR_setgid32
12423     case TARGET_NR_setgid32:
12424         return get_errno(sys_setgid(arg1));
12425 #endif
12426 #ifdef TARGET_NR_setfsuid32
12427     case TARGET_NR_setfsuid32:
12428         return get_errno(setfsuid(arg1));
12429 #endif
12430 #ifdef TARGET_NR_setfsgid32
12431     case TARGET_NR_setfsgid32:
12432         return get_errno(setfsgid(arg1));
12433 #endif
12434 #ifdef TARGET_NR_mincore
12435     case TARGET_NR_mincore:
12436         {
12437             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12438             if (!a) {
12439                 return -TARGET_ENOMEM;
12440             }
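            /*
             * mincore(2) writes one status byte per page of the checked
             * range, so the guest vector at arg3 is expected to cover
             * (arg2 + pagesize - 1) / pagesize bytes.
             */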
12441             p = lock_user_string(arg3);
12442             if (!p) {
12443                 ret = -TARGET_EFAULT;
12444             } else {
12445                 ret = get_errno(mincore(a, arg2, p));
12446                 unlock_user(p, arg3, ret);
12447             }
12448             unlock_user(a, arg1, 0);
12449         }
12450         return ret;
12451 #endif
12452 #ifdef TARGET_NR_arm_fadvise64_64
12453     case TARGET_NR_arm_fadvise64_64:
12454         /* arm_fadvise64_64 looks like fadvise64_64 but
12455          * with different argument order: fd, advice, offset, len
12456          * rather than the usual fd, offset, len, advice.
12457          * Note that offset and len are both 64-bit so appear as
12458          * pairs of 32-bit registers.
12459          */
12460         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12461                             target_offset64(arg5, arg6), arg2);
12462         return -host_to_target_errno(ret);
12463 #endif
12464 
12465 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12466 
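    /*
     * On 32-bit ABIs the 64-bit offset and length arrive split across
     * two 32-bit argument slots each.  Some ABIs additionally require a
     * 64-bit value to start in an even-numbered register, inserting a
     * padding slot; regpairs_aligned() detects this, which is why the
     * arguments are shifted down by one below before target_offset64()
     * reassembles the halves.
     */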
12467 #ifdef TARGET_NR_fadvise64_64
12468     case TARGET_NR_fadvise64_64:
12469 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12470         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12471         ret = arg2;
12472         arg2 = arg3;
12473         arg3 = arg4;
12474         arg4 = arg5;
12475         arg5 = arg6;
12476         arg6 = ret;
12477 #else
12478         /* 6 args: fd, offset (high, low), len (high, low), advice */
12479         if (regpairs_aligned(cpu_env, num)) {
12480             /* offset is in (3,4), len in (5,6) and advice in 7 */
12481             arg2 = arg3;
12482             arg3 = arg4;
12483             arg4 = arg5;
12484             arg5 = arg6;
12485             arg6 = arg7;
12486         }
12487 #endif
12488         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12489                             target_offset64(arg4, arg5), arg6);
12490         return -host_to_target_errno(ret);
12491 #endif
12492 
12493 #ifdef TARGET_NR_fadvise64
12494     case TARGET_NR_fadvise64:
12495         /* 5 args: fd, offset (high, low), len, advice */
12496         if (regpairs_aligned(cpu_env, num)) {
12497             /* offset is in (3,4), len in 5 and advice in 6 */
12498             arg2 = arg3;
12499             arg3 = arg4;
12500             arg4 = arg5;
12501             arg5 = arg6;
12502         }
12503         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12504         return -host_to_target_errno(ret);
12505 #endif
12506 
12507 #else /* not a 32-bit ABI */
12508 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12509 #ifdef TARGET_NR_fadvise64_64
12510     case TARGET_NR_fadvise64_64:
12511 #endif
12512 #ifdef TARGET_NR_fadvise64
12513     case TARGET_NR_fadvise64:
12514 #endif
12515 #ifdef TARGET_S390X
12516         switch (arg4) {
12517         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12518         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12519         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12520         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12521         default: break;
12522         }
12523 #endif
12524         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12525 #endif
12526 #endif /* end of 64-bit ABI fadvise handling */
12527 
12528 #ifdef TARGET_NR_madvise
12529     case TARGET_NR_madvise:
12530         return target_madvise(arg1, arg2, arg3);
12531 #endif
12532 #ifdef TARGET_NR_fcntl64
12533     case TARGET_NR_fcntl64:
12534     {
12535         int cmd;
12536         struct flock fl;
12537         from_flock64_fn *copyfrom = copy_from_user_flock64;
12538         to_flock64_fn *copyto = copy_to_user_flock64;
12539 
12540 #ifdef TARGET_ARM
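        /*
         * ARM OABI aligns 64-bit members on 4-byte boundaries, so its
         * struct flock64 layout differs from the EABI one and needs
         * separate copy helpers.
         */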
12541         if (!cpu_env->eabi) {
12542             copyfrom = copy_from_user_oabi_flock64;
12543             copyto = copy_to_user_oabi_flock64;
12544         }
12545 #endif
12546 
12547         cmd = target_to_host_fcntl_cmd(arg2);
12548         if (cmd == -TARGET_EINVAL) {
12549             return cmd;
12550         }
12551 
12552         switch(arg2) {
12553         case TARGET_F_GETLK64:
12554             ret = copyfrom(&fl, arg3);
12555             if (ret) {
12556                 break;
12557             }
12558             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12559             if (ret == 0) {
12560                 ret = copyto(arg3, &fl);
12561             }
12562             break;
12563 
12564         case TARGET_F_SETLK64:
12565         case TARGET_F_SETLKW64:
12566             ret = copyfrom(&fl, arg3);
12567             if (ret) {
12568                 break;
12569             }
12570             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12571             break;
12572         default:
12573             ret = do_fcntl(arg1, arg2, arg3);
12574             break;
12575         }
12576         return ret;
12577     }
12578 #endif
12579 #ifdef TARGET_NR_cacheflush
12580     case TARGET_NR_cacheflush:
12581         /* self-modifying code is handled automatically, so nothing needed */
12582         return 0;
12583 #endif
12584 #ifdef TARGET_NR_getpagesize
12585     case TARGET_NR_getpagesize:
12586         return TARGET_PAGE_SIZE;
12587 #endif
12588     case TARGET_NR_gettid:
12589         return get_errno(sys_gettid());
12590 #ifdef TARGET_NR_readahead
12591     case TARGET_NR_readahead:
12592 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12593         if (regpairs_aligned(cpu_env, num)) {
12594             arg2 = arg3;
12595             arg3 = arg4;
12596             arg4 = arg5;
12597         }
12598         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12599 #else
12600         ret = get_errno(readahead(arg1, arg2, arg3));
12601 #endif
12602         return ret;
12603 #endif
12604 #ifdef CONFIG_ATTR
12605 #ifdef TARGET_NR_setxattr
12606     case TARGET_NR_listxattr:
12607     case TARGET_NR_llistxattr:
12608     {
12609         void *b = 0;
12610         if (arg2) {
12611             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12612             if (!b) {
12613                 return -TARGET_EFAULT;
12614             }
12615         }
12616         p = lock_user_string(arg1);
12617         if (p) {
12618             if (num == TARGET_NR_listxattr) {
12619                 ret = get_errno(listxattr(p, b, arg3));
12620             } else {
12621                 ret = get_errno(llistxattr(p, b, arg3));
12622             }
12623         } else {
12624             ret = -TARGET_EFAULT;
12625         }
12626         unlock_user(p, arg1, 0);
12627         unlock_user(b, arg2, arg3);
12628         return ret;
12629     }
12630     case TARGET_NR_flistxattr:
12631     {
12632         void *b = 0;
12633         if (arg2) {
12634             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12635             if (!b) {
12636                 return -TARGET_EFAULT;
12637             }
12638         }
12639         ret = get_errno(flistxattr(arg1, b, arg3));
12640         unlock_user(b, arg2, arg3);
12641         return ret;
12642     }
12643     case TARGET_NR_setxattr:
12644     case TARGET_NR_lsetxattr:
12645         {
12646             void *n, *v = 0;
12647             if (arg3) {
12648                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12649                 if (!v) {
12650                     return -TARGET_EFAULT;
12651                 }
12652             }
12653             p = lock_user_string(arg1);
12654             n = lock_user_string(arg2);
12655             if (p && n) {
12656                 if (num == TARGET_NR_setxattr) {
12657                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12658                 } else {
12659                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12660                 }
12661             } else {
12662                 ret = -TARGET_EFAULT;
12663             }
12664             unlock_user(p, arg1, 0);
12665             unlock_user(n, arg2, 0);
12666             unlock_user(v, arg3, 0);
12667         }
12668         return ret;
12669     case TARGET_NR_fsetxattr:
12670         {
12671             void *n, *v = 0;
12672             if (arg3) {
12673                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12674                 if (!v) {
12675                     return -TARGET_EFAULT;
12676                 }
12677             }
12678             n = lock_user_string(arg2);
12679             if (n) {
12680                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12681             } else {
12682                 ret = -TARGET_EFAULT;
12683             }
12684             unlock_user(n, arg2, 0);
12685             unlock_user(v, arg3, 0);
12686         }
12687         return ret;
12688     case TARGET_NR_getxattr:
12689     case TARGET_NR_lgetxattr:
12690         {
12691             void *n, *v = 0;
12692             if (arg3) {
12693                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12694                 if (!v) {
12695                     return -TARGET_EFAULT;
12696                 }
12697             }
12698             p = lock_user_string(arg1);
12699             n = lock_user_string(arg2);
12700             if (p && n) {
12701                 if (num == TARGET_NR_getxattr) {
12702                     ret = get_errno(getxattr(p, n, v, arg4));
12703                 } else {
12704                     ret = get_errno(lgetxattr(p, n, v, arg4));
12705                 }
12706             } else {
12707                 ret = -TARGET_EFAULT;
12708             }
12709             unlock_user(p, arg1, 0);
12710             unlock_user(n, arg2, 0);
12711             unlock_user(v, arg3, arg4);
12712         }
12713         return ret;
12714     case TARGET_NR_fgetxattr:
12715         {
12716             void *n, *v = 0;
12717             if (arg3) {
12718                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12719                 if (!v) {
12720                     return -TARGET_EFAULT;
12721                 }
12722             }
12723             n = lock_user_string(arg2);
12724             if (n) {
12725                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12726             } else {
12727                 ret = -TARGET_EFAULT;
12728             }
12729             unlock_user(n, arg2, 0);
12730             unlock_user(v, arg3, arg4);
12731         }
12732         return ret;
12733     case TARGET_NR_removexattr:
12734     case TARGET_NR_lremovexattr:
12735         {
12736             void *n;
12737             p = lock_user_string(arg1);
12738             n = lock_user_string(arg2);
12739             if (p && n) {
12740                 if (num == TARGET_NR_removexattr) {
12741                     ret = get_errno(removexattr(p, n));
12742                 } else {
12743                     ret = get_errno(lremovexattr(p, n));
12744                 }
12745             } else {
12746                 ret = -TARGET_EFAULT;
12747             }
12748             unlock_user(p, arg1, 0);
12749             unlock_user(n, arg2, 0);
12750         }
12751         return ret;
12752     case TARGET_NR_fremovexattr:
12753         {
12754             void *n;
12755             n = lock_user_string(arg2);
12756             if (n) {
12757                 ret = get_errno(fremovexattr(arg1, n));
12758             } else {
12759                 ret = -TARGET_EFAULT;
12760             }
12761             unlock_user(n, arg2, 0);
12762         }
12763         return ret;
12764 #endif
12765 #endif /* CONFIG_ATTR */
12766 #ifdef TARGET_NR_set_thread_area
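    /*
     * For MIPS and m68k the "thread area" is just a cached thread
     * pointer (CP0_UserLocal / ts->tp_value); 32-bit x86 manages real
     * TLS descriptors via do_set_thread_area().  Other targets report
     * ENOSYS.
     */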
12767     case TARGET_NR_set_thread_area:
12768 #if defined(TARGET_MIPS)
12769       cpu_env->active_tc.CP0_UserLocal = arg1;
12770       return 0;
12771 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12772       return do_set_thread_area(cpu_env, arg1);
12773 #elif defined(TARGET_M68K)
12774       {
12775           TaskState *ts = get_task_state(cpu);
12776           ts->tp_value = arg1;
12777           return 0;
12778       }
12779 #else
12780       return -TARGET_ENOSYS;
12781 #endif
12782 #endif
12783 #ifdef TARGET_NR_get_thread_area
12784     case TARGET_NR_get_thread_area:
12785 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12786         return do_get_thread_area(cpu_env, arg1);
12787 #elif defined(TARGET_M68K)
12788         {
12789             TaskState *ts = get_task_state(cpu);
12790             return ts->tp_value;
12791         }
12792 #else
12793         return -TARGET_ENOSYS;
12794 #endif
12795 #endif
12796 #ifdef TARGET_NR_getdomainname
12797     case TARGET_NR_getdomainname:
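        /*
         * Not implemented here; guest libcs typically fall back to
         * deriving the domain name from uname(2) when this syscall is
         * unavailable.
         */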
12798         return -TARGET_ENOSYS;
12799 #endif
12800 
12801 #ifdef TARGET_NR_clock_settime
12802     case TARGET_NR_clock_settime:
12803     {
12804         struct timespec ts;
12805 
12806         ret = target_to_host_timespec(&ts, arg2);
12807         if (!is_error(ret)) {
12808             ret = get_errno(clock_settime(arg1, &ts));
12809         }
12810         return ret;
12811     }
12812 #endif
12813 #ifdef TARGET_NR_clock_settime64
12814     case TARGET_NR_clock_settime64:
12815     {
12816         struct timespec ts;
12817 
12818         ret = target_to_host_timespec64(&ts, arg2);
12819         if (!is_error(ret)) {
12820             ret = get_errno(clock_settime(arg1, &ts));
12821         }
12822         return ret;
12823     }
12824 #endif
12825 #ifdef TARGET_NR_clock_gettime
12826     case TARGET_NR_clock_gettime:
12827     {
12828         struct timespec ts;
12829         ret = get_errno(clock_gettime(arg1, &ts));
12830         if (!is_error(ret)) {
12831             ret = host_to_target_timespec(arg2, &ts);
12832         }
12833         return ret;
12834     }
12835 #endif
12836 #ifdef TARGET_NR_clock_gettime64
12837     case TARGET_NR_clock_gettime64:
12838     {
12839         struct timespec ts;
12840         ret = get_errno(clock_gettime(arg1, &ts));
12841         if (!is_error(ret)) {
12842             ret = host_to_target_timespec64(arg2, &ts);
12843         }
12844         return ret;
12845     }
12846 #endif
12847 #ifdef TARGET_NR_clock_getres
12848     case TARGET_NR_clock_getres:
12849     {
12850         struct timespec ts;
12851         ret = get_errno(clock_getres(arg1, &ts));
12852         if (!is_error(ret)) {
12853             host_to_target_timespec(arg2, &ts);
12854         }
12855         return ret;
12856     }
12857 #endif
12858 #ifdef TARGET_NR_clock_getres_time64
12859     case TARGET_NR_clock_getres_time64:
12860     {
12861         struct timespec ts;
12862         ret = get_errno(clock_getres(arg1, &ts));
12863         if (!is_error(ret)) {
12864             host_to_target_timespec64(arg2, &ts);
12865         }
12866         return ret;
12867     }
12868 #endif
12869 #ifdef TARGET_NR_clock_nanosleep
12870     case TARGET_NR_clock_nanosleep:
12871     {
12872         struct timespec ts;
12873         if (target_to_host_timespec(&ts, arg3)) {
12874             return -TARGET_EFAULT;
12875         }
12876         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12877                                              &ts, arg4 ? &ts : NULL));
12878         /*
12879          * If the call is interrupted by a signal handler, it fails with
12880          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is not
12881          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12882          */
12883         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12884             host_to_target_timespec(arg4, &ts)) {
12885             return -TARGET_EFAULT;
12886         }
12887 
12888         return ret;
12889     }
12890 #endif
12891 #ifdef TARGET_NR_clock_nanosleep_time64
12892     case TARGET_NR_clock_nanosleep_time64:
12893     {
12894         struct timespec ts;
12895 
12896         if (target_to_host_timespec64(&ts, arg3)) {
12897             return -TARGET_EFAULT;
12898         }
12899 
12900         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12901                                              &ts, arg4 ? &ts : NULL));
12902 
12903         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12904             host_to_target_timespec64(arg4, &ts)) {
12905             return -TARGET_EFAULT;
12906         }
12907         return ret;
12908     }
12909 #endif
12910 
12911 #if defined(TARGET_NR_set_tid_address)
12912     case TARGET_NR_set_tid_address:
12913     {
12914         TaskState *ts = get_task_state(cpu);
12915         ts->child_tidptr = arg1;
12916         /* do not call the host set_tid_address() syscall; just return the tid */
12917         return get_errno(sys_gettid());
12918     }
12919 #endif
12920 
12921     case TARGET_NR_tkill:
12922         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12923 
12924     case TARGET_NR_tgkill:
12925         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12926                          target_to_host_signal(arg3)));
12927 
12928 #ifdef TARGET_NR_set_robust_list
12929     case TARGET_NR_set_robust_list:
12930     case TARGET_NR_get_robust_list:
12931         /* The ABI for supporting robust futexes has userspace pass
12932          * the kernel a pointer to a linked list which is updated by
12933          * userspace after the syscall; the list is walked by the kernel
12934          * when the thread exits. Since the linked list in QEMU guest
12935          * memory isn't a valid linked list for the host and we have
12936          * no way to reliably intercept the thread-death event, we can't
12937          * support these. Silently return ENOSYS so that guest userspace
12938          * falls back to a non-robust futex implementation (which should
12939          * be OK except in the corner case of the guest crashing while
12940          * holding a mutex that is shared with another process via
12941          * shared memory).
12942          */
12943         return -TARGET_ENOSYS;
12944 #endif
12945 
12946 #if defined(TARGET_NR_utimensat)
12947     case TARGET_NR_utimensat:
12948         {
12949             struct timespec *tsp, ts[2];
12950             if (!arg3) {
12951                 tsp = NULL;
12952             } else {
12953                 if (target_to_host_timespec(ts, arg3)) {
12954                     return -TARGET_EFAULT;
12955                 }
12956                 if (target_to_host_timespec(ts + 1, arg3 +
12957                                             sizeof(struct target_timespec))) {
12958                     return -TARGET_EFAULT;
12959                 }
12960                 tsp = ts;
12961             }
12962             if (!arg2) {
12963                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12964             } else {
12965                 if (!(p = lock_user_string(arg2))) {
12966                     return -TARGET_EFAULT;
12967                 }
12968                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12969                 unlock_user(p, arg2, 0);
12970             }
12971         }
12972         return ret;
12973 #endif
12974 #ifdef TARGET_NR_utimensat_time64
12975     case TARGET_NR_utimensat_time64:
12976         {
12977             struct timespec *tsp, ts[2];
12978             if (!arg3) {
12979                 tsp = NULL;
12980             } else {
12981                 if (target_to_host_timespec64(ts, arg3)) {
12982                     return -TARGET_EFAULT;
12983                 }
12984                 if (target_to_host_timespec64(ts + 1, arg3 +
12985                                      sizeof(struct target__kernel_timespec))) {
12986                     return -TARGET_EFAULT;
12987                 }
12988                 tsp = ts;
12989             }
12990             if (!arg2) {
12991                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12992             } else {
12993                 p = lock_user_string(arg2);
12994                 if (!p) {
12995                     return -TARGET_EFAULT;
12996                 }
12997                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12998                 unlock_user(p, arg2, 0);
12999             }
13000         }
13001         return ret;
13002 #endif
13003 #ifdef TARGET_NR_futex
13004     case TARGET_NR_futex:
13005         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13006 #endif
13007 #ifdef TARGET_NR_futex_time64
13008     case TARGET_NR_futex_time64:
13009         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13010 #endif
13011 #ifdef CONFIG_INOTIFY
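    /*
     * The fd translator registered for inotify descriptors converts the
     * struct inotify_event records the guest read()s from them back to
     * the guest's byte order.
     */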
13012 #if defined(TARGET_NR_inotify_init)
13013     case TARGET_NR_inotify_init:
13014         ret = get_errno(inotify_init());
13015         if (ret >= 0) {
13016             fd_trans_register(ret, &target_inotify_trans);
13017         }
13018         return ret;
13019 #endif
13020 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13021     case TARGET_NR_inotify_init1:
13022         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13023                                           fcntl_flags_tbl)));
13024         if (ret >= 0) {
13025             fd_trans_register(ret, &target_inotify_trans);
13026         }
13027         return ret;
13028 #endif
13029 #if defined(TARGET_NR_inotify_add_watch)
13030     case TARGET_NR_inotify_add_watch:
13031         p = lock_user_string(arg2);
13032         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13033         unlock_user(p, arg2, 0);
13034         return ret;
13035 #endif
13036 #if defined(TARGET_NR_inotify_rm_watch)
13037     case TARGET_NR_inotify_rm_watch:
13038         return get_errno(inotify_rm_watch(arg1, arg2));
13039 #endif
13040 #endif
13041 
13042 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13043     case TARGET_NR_mq_open:
13044         {
13045             struct mq_attr posix_mq_attr;
13046             struct mq_attr *pposix_mq_attr;
13047             int host_flags;
13048 
13049             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13050             pposix_mq_attr = NULL;
13051             if (arg4) {
13052                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13053                     return -TARGET_EFAULT;
13054                 }
13055                 pposix_mq_attr = &posix_mq_attr;
13056             }
13057             p = lock_user_string(arg1 - 1);
13058             if (!p) {
13059                 return -TARGET_EFAULT;
13060             }
13061             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13062             unlock_user (p, arg1, 0);
13063         }
13064         return ret;
13065 
13066     case TARGET_NR_mq_unlink:
13067         p = lock_user_string(arg1 - 1);
13068         if (!p) {
13069             return -TARGET_EFAULT;
13070         }
13071         ret = get_errno(mq_unlink(p));
13072         unlock_user (p, arg1, 0);
13073         return ret;
13074 
13075 #ifdef TARGET_NR_mq_timedsend
13076     case TARGET_NR_mq_timedsend:
13077         {
13078             struct timespec ts;
13079 
13080             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13081             if (arg5 != 0) {
13082                 if (target_to_host_timespec(&ts, arg5)) {
13083                     return -TARGET_EFAULT;
13084                 }
13085                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13086                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13087                     return -TARGET_EFAULT;
13088                 }
13089             } else {
13090                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13091             }
13092             unlock_user (p, arg2, arg3);
13093         }
13094         return ret;
13095 #endif
13096 #ifdef TARGET_NR_mq_timedsend_time64
13097     case TARGET_NR_mq_timedsend_time64:
13098         {
13099             struct timespec ts;
13100 
13101             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13102             if (arg5 != 0) {
13103                 if (target_to_host_timespec64(&ts, arg5)) {
13104                     return -TARGET_EFAULT;
13105                 }
13106                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13107                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13108                     return -TARGET_EFAULT;
13109                 }
13110             } else {
13111                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13112             }
13113             unlock_user(p, arg2, arg3);
13114         }
13115         return ret;
13116 #endif
13117 
13118 #ifdef TARGET_NR_mq_timedreceive
13119     case TARGET_NR_mq_timedreceive:
13120         {
13121             struct timespec ts;
13122             unsigned int prio;
13123 
13124             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13125             if (arg5 != 0) {
13126                 if (target_to_host_timespec(&ts, arg5)) {
13127                     return -TARGET_EFAULT;
13128                 }
13129                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13130                                                      &prio, &ts));
13131                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13132                     return -TARGET_EFAULT;
13133                 }
13134             } else {
13135                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13136                                                      &prio, NULL));
13137             }
13138             unlock_user (p, arg2, arg3);
13139             if (arg4 != 0)
13140                 put_user_u32(prio, arg4);
13141         }
13142         return ret;
13143 #endif
13144 #ifdef TARGET_NR_mq_timedreceive_time64
13145     case TARGET_NR_mq_timedreceive_time64:
13146         {
13147             struct timespec ts;
13148             unsigned int prio;
13149 
13150             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13151             if (arg5 != 0) {
13152                 if (target_to_host_timespec64(&ts, arg5)) {
13153                     return -TARGET_EFAULT;
13154                 }
13155                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13156                                                      &prio, &ts));
13157                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13158                     return -TARGET_EFAULT;
13159                 }
13160             } else {
13161                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13162                                                      &prio, NULL));
13163             }
13164             unlock_user(p, arg2, arg3);
13165             if (arg4 != 0) {
13166                 put_user_u32(prio, arg4);
13167             }
13168         }
13169         return ret;
13170 #endif
13171 
13172     /* Not implemented for now... */
13173 /*     case TARGET_NR_mq_notify: */
13174 /*         break; */
13175 
13176     case TARGET_NR_mq_getsetattr:
13177         {
13178             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13179             ret = 0;
13180             if (arg2 != 0) {
13181                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13182                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13183                                            &posix_mq_attr_out));
13184             } else if (arg3 != 0) {
13185                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13186             }
13187             if (ret == 0 && arg3 != 0) {
13188                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13189             }
13190         }
13191         return ret;
13192 #endif
13193 
13194 #ifdef CONFIG_SPLICE
13195 #ifdef TARGET_NR_tee
13196     case TARGET_NR_tee:
13197         {
13198             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13199         }
13200         return ret;
13201 #endif
13202 #ifdef TARGET_NR_splice
13203     case TARGET_NR_splice:
13204         {
13205             loff_t loff_in, loff_out;
13206             loff_t *ploff_in = NULL, *ploff_out = NULL;
13207             if (arg2) {
13208                 if (get_user_u64(loff_in, arg2)) {
13209                     return -TARGET_EFAULT;
13210                 }
13211                 ploff_in = &loff_in;
13212             }
13213             if (arg4) {
13214                 if (get_user_u64(loff_out, arg4)) {
13215                     return -TARGET_EFAULT;
13216                 }
13217                 ploff_out = &loff_out;
13218             }
13219             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13220             if (arg2) {
13221                 if (put_user_u64(loff_in, arg2)) {
13222                     return -TARGET_EFAULT;
13223                 }
13224             }
13225             if (arg4) {
13226                 if (put_user_u64(loff_out, arg4)) {
13227                     return -TARGET_EFAULT;
13228                 }
13229             }
13230         }
13231         return ret;
13232 #endif
13233 #ifdef TARGET_NR_vmsplice
13234     case TARGET_NR_vmsplice:
13235         {
13236             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13237             if (vec != NULL) {
13238                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13239                 unlock_iovec(vec, arg2, arg3, 0);
13240             } else {
13241                 ret = -host_to_target_errno(errno);
13242             }
13243         }
13244         return ret;
13245 #endif
13246 #endif /* CONFIG_SPLICE */
13247 #ifdef CONFIG_EVENTFD
13248 #if defined(TARGET_NR_eventfd)
13249     case TARGET_NR_eventfd:
13250         ret = get_errno(eventfd(arg1, 0));
13251         if (ret >= 0) {
13252             fd_trans_register(ret, &target_eventfd_trans);
13253         }
13254         return ret;
13255 #endif
13256 #if defined(TARGET_NR_eventfd2)
13257     case TARGET_NR_eventfd2:
13258     {
13259         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13260         if (arg2 & TARGET_O_NONBLOCK) {
13261             host_flags |= O_NONBLOCK;
13262         }
13263         if (arg2 & TARGET_O_CLOEXEC) {
13264             host_flags |= O_CLOEXEC;
13265         }
13266         ret = get_errno(eventfd(arg1, host_flags));
13267         if (ret >= 0) {
13268             fd_trans_register(ret, &target_eventfd_trans);
13269         }
13270         return ret;
13271     }
13272 #endif
13273 #endif /* CONFIG_EVENTFD  */
13274 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13275     case TARGET_NR_fallocate:
13276 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13277         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13278                                   target_offset64(arg5, arg6)));
13279 #else
13280         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13281 #endif
13282         return ret;
13283 #endif
13284 #if defined(CONFIG_SYNC_FILE_RANGE)
13285 #if defined(TARGET_NR_sync_file_range)
13286     case TARGET_NR_sync_file_range:
13287 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13288 #if defined(TARGET_MIPS)
13289         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13290                                         target_offset64(arg5, arg6), arg7));
13291 #else
13292         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13293                                         target_offset64(arg4, arg5), arg6));
13294 #endif /* !TARGET_MIPS */
13295 #else
13296         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13297 #endif
13298         return ret;
13299 #endif
13300 #if defined(TARGET_NR_sync_file_range2) || \
13301     defined(TARGET_NR_arm_sync_file_range)
13302 #if defined(TARGET_NR_sync_file_range2)
13303     case TARGET_NR_sync_file_range2:
13304 #endif
13305 #if defined(TARGET_NR_arm_sync_file_range)
13306     case TARGET_NR_arm_sync_file_range:
13307 #endif
13308         /* This is like sync_file_range but the arguments are reordered: fd, flags, offset, nbytes */
13309 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13310         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13311                                         target_offset64(arg5, arg6), arg2));
13312 #else
13313         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13314 #endif
13315         return ret;
13316 #endif
13317 #endif
13318 #if defined(TARGET_NR_signalfd4)
13319     case TARGET_NR_signalfd4:
13320         return do_signalfd4(arg1, arg2, arg4);
13321 #endif
13322 #if defined(TARGET_NR_signalfd)
13323     case TARGET_NR_signalfd:
13324         return do_signalfd4(arg1, arg2, 0);
13325 #endif
13326 #if defined(CONFIG_EPOLL)
13327 #if defined(TARGET_NR_epoll_create)
13328     case TARGET_NR_epoll_create:
13329         return get_errno(epoll_create(arg1));
13330 #endif
13331 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13332     case TARGET_NR_epoll_create1:
13333         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13334 #endif
13335 #if defined(TARGET_NR_epoll_ctl)
13336     case TARGET_NR_epoll_ctl:
13337     {
13338         struct epoll_event ep;
13339         struct epoll_event *epp = 0;
13340         if (arg4) {
13341             if (arg2 != EPOLL_CTL_DEL) {
13342                 struct target_epoll_event *target_ep;
13343                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13344                     return -TARGET_EFAULT;
13345                 }
13346                 ep.events = tswap32(target_ep->events);
13347                 /*
13348                  * The epoll_data_t union is just opaque data to the kernel,
13349                  * so we transfer all 64 bits across and need not worry what
13350                  * actual data type it is.
13351                  */
13352                 ep.data.u64 = tswap64(target_ep->data.u64);
13353                 unlock_user_struct(target_ep, arg4, 0);
13354             }
13355             /*
13356              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13357              * non-null pointer, even though this argument is ignored.
13358              *
13359              */
13360             epp = &ep;
13361         }
13362         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13363     }
13364 #endif
13365 
13366 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13367 #if defined(TARGET_NR_epoll_wait)
13368     case TARGET_NR_epoll_wait:
13369 #endif
13370 #if defined(TARGET_NR_epoll_pwait)
13371     case TARGET_NR_epoll_pwait:
13372 #endif
13373     {
13374         struct target_epoll_event *target_ep;
13375         struct epoll_event *ep;
13376         int epfd = arg1;
13377         int maxevents = arg3;
13378         int timeout = arg4;
13379 
13380         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13381             return -TARGET_EINVAL;
13382         }
13383 
13384         target_ep = lock_user(VERIFY_WRITE, arg2,
13385                               maxevents * sizeof(struct target_epoll_event), 1);
13386         if (!target_ep) {
13387             return -TARGET_EFAULT;
13388         }
13389 
13390         ep = g_try_new(struct epoll_event, maxevents);
13391         if (!ep) {
13392             unlock_user(target_ep, arg2, 0);
13393             return -TARGET_ENOMEM;
13394         }
13395 
13396         switch (num) {
13397 #if defined(TARGET_NR_epoll_pwait)
13398         case TARGET_NR_epoll_pwait:
13399         {
13400             sigset_t *set = NULL;
13401 
13402             if (arg5) {
13403                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13404                 if (ret != 0) {
13405                     break;
13406                 }
13407             }
13408 
13409             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13410                                              set, SIGSET_T_SIZE));
13411 
13412             if (set) {
13413                 finish_sigsuspend_mask(ret);
13414             }
13415             break;
13416         }
13417 #endif
13418 #if defined(TARGET_NR_epoll_wait)
13419         case TARGET_NR_epoll_wait:
13420             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13421                                              NULL, 0));
13422             break;
13423 #endif
13424         default:
13425             ret = -TARGET_ENOSYS;
13426         }
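        /*
         * Host and target struct epoll_event layouts may differ (on
         * x86-64 the structure is packed), so results are staged in a
         * host-side array and converted field by field on the way out.
         */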
13427         if (!is_error(ret)) {
13428             int i;
13429             for (i = 0; i < ret; i++) {
13430                 target_ep[i].events = tswap32(ep[i].events);
13431                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13432             }
13433             unlock_user(target_ep, arg2,
13434                         ret * sizeof(struct target_epoll_event));
13435         } else {
13436             unlock_user(target_ep, arg2, 0);
13437         }
13438         g_free(ep);
13439         return ret;
13440     }
13441 #endif
13442 #endif
13443 #ifdef TARGET_NR_prlimit64
13444     case TARGET_NR_prlimit64:
13445     {
13446         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13447         struct target_rlimit64 *target_rnew, *target_rold;
13448         struct host_rlimit64 rnew, rold, *rnewp = 0;
13449         int resource = target_to_host_resource(arg2);
13450 
13451         if (arg3 && (resource != RLIMIT_AS &&
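        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
         * deliberately not forwarded to the host: they would constrain
         * QEMU's own address space rather than just the guest's.
         */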
13452                      resource != RLIMIT_DATA &&
13453                      resource != RLIMIT_STACK)) {
13454             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13455                 return -TARGET_EFAULT;
13456             }
13457             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13458             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13459             unlock_user_struct(target_rnew, arg3, 0);
13460             rnewp = &rnew;
13461         }
13462 
13463         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13464         if (!is_error(ret) && arg4) {
13465             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13466                 return -TARGET_EFAULT;
13467             }
13468             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13469             __put_user(rold.rlim_max, &target_rold->rlim_max);
13470             unlock_user_struct(target_rold, arg4, 1);
13471         }
13472         return ret;
13473     }
13474 #endif
13475 #ifdef TARGET_NR_gethostname
13476     case TARGET_NR_gethostname:
13477     {
13478         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13479         if (name) {
13480             ret = get_errno(gethostname(name, arg2));
13481             unlock_user(name, arg1, arg2);
13482         } else {
13483             ret = -TARGET_EFAULT;
13484         }
13485         return ret;
13486     }
13487 #endif
13488 #ifdef TARGET_NR_atomic_cmpxchg_32
13489     case TARGET_NR_atomic_cmpxchg_32:
13490     {
13491         /* should use start_exclusive from main.c */
13492         abi_ulong mem_value;
13493         if (get_user_u32(mem_value, arg6)) {
13494             target_siginfo_t info;
13495             info.si_signo = SIGSEGV;
13496             info.si_errno = 0;
13497             info.si_code = TARGET_SEGV_MAPERR;
13498             info._sifields._sigfault._addr = arg6;
13499             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13500             ret = 0xdeadbeef;
13501 
13502         }
13503         if (mem_value == arg2)
13504             put_user_u32(arg1, arg6);
13505         return mem_value;
13506     }
13507 #endif
13508 #ifdef TARGET_NR_atomic_barrier
13509     case TARGET_NR_atomic_barrier:
13510         /* Like the kernel implementation and the qemu arm barrier,
13511            treat this as a no-op. */
13512         return 0;
13513 #endif
13514 
13515 #ifdef TARGET_NR_timer_create
13516     case TARGET_NR_timer_create:
13517     {
13518         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
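        /*
         * The timer id handed back to the guest is TIMER_MAGIC | index
         * of the slot claimed in g_posix_timers[]; get_timer_id()
         * validates the magic and recovers the index in the other
         * timer_* syscalls below.
         */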
13519 
13520         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13521 
13522         int clkid = arg1;
13523         int timer_index = next_free_host_timer();
13524 
13525         if (timer_index < 0) {
13526             ret = -TARGET_EAGAIN;
13527         } else {
13528             timer_t *phtimer = g_posix_timers + timer_index;
13529 
13530             if (arg2) {
13531                 phost_sevp = &host_sevp;
13532                 ret = target_to_host_sigevent(phost_sevp, arg2);
13533                 if (ret != 0) {
13534                     free_host_timer_slot(timer_index);
13535                     return ret;
13536                 }
13537             }
13538 
13539             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13540             if (ret) {
13541                 free_host_timer_slot(timer_index);
13542             } else {
13543                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13544                     timer_delete(*phtimer);
13545                     free_host_timer_slot(timer_index);
13546                     return -TARGET_EFAULT;
13547                 }
13548             }
13549         }
13550         return ret;
13551     }
13552 #endif
13553 
13554 #ifdef TARGET_NR_timer_settime
13555     case TARGET_NR_timer_settime:
13556     {
13557         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13558          * struct itimerspec * old_value */
13559         target_timer_t timerid = get_timer_id(arg1);
13560 
13561         if (timerid < 0) {
13562             ret = timerid;
13563         } else if (arg3 == 0) {
13564             ret = -TARGET_EINVAL;
13565         } else {
13566             timer_t htimer = g_posix_timers[timerid];
13567             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13568 
13569             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13570                 return -TARGET_EFAULT;
13571             }
13572             ret = get_errno(
13573                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13574             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13575                 return -TARGET_EFAULT;
13576             }
13577         }
13578         return ret;
13579     }
13580 #endif
13581 
13582 #ifdef TARGET_NR_timer_settime64
13583     case TARGET_NR_timer_settime64:
13584     {
13585         target_timer_t timerid = get_timer_id(arg1);
13586 
13587         if (timerid < 0) {
13588             ret = timerid;
13589         } else if (arg3 == 0) {
13590             ret = -TARGET_EINVAL;
13591         } else {
13592             timer_t htimer = g_posix_timers[timerid];
13593             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13594 
13595             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13596                 return -TARGET_EFAULT;
13597             }
13598             ret = get_errno(
13599                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13600             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13601                 return -TARGET_EFAULT;
13602             }
13603         }
13604         return ret;
13605     }
13606 #endif
13607 
13608 #ifdef TARGET_NR_timer_gettime
13609     case TARGET_NR_timer_gettime:
13610     {
13611         /* args: timer_t timerid, struct itimerspec *curr_value */
13612         target_timer_t timerid = get_timer_id(arg1);
13613 
13614         if (timerid < 0) {
13615             ret = timerid;
13616         } else if (!arg2) {
13617             ret = -TARGET_EFAULT;
13618         } else {
13619             timer_t htimer = g_posix_timers[timerid];
13620             struct itimerspec hspec;
13621             ret = get_errno(timer_gettime(htimer, &hspec));
13622 
13623             if (host_to_target_itimerspec(arg2, &hspec)) {
13624                 ret = -TARGET_EFAULT;
13625             }
13626         }
13627         return ret;
13628     }
13629 #endif
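    /*
     * timer_gettime reports the time remaining until the next expiration
     * and the current interval; the host result is converted back into the
     * guest's itimerspec layout (or the 64-bit layout in the _gettime64
     * case below).  Guest-side, a call such as
     *
     *     struct itimerspec cur;
     *     timer_gettime(tid, &cur);
     *
     * leaves the time left until the next expiry in cur.it_value
     * (illustrative only; tid as in the earlier sketches).
     */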
13630 
13631 #ifdef TARGET_NR_timer_gettime64
13632     case TARGET_NR_timer_gettime64:
13633     {
13634         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13635         target_timer_t timerid = get_timer_id(arg1);
13636 
13637         if (timerid < 0) {
13638             ret = timerid;
13639         } else if (!arg2) {
13640             ret = -TARGET_EFAULT;
13641         } else {
13642             timer_t htimer = g_posix_timers[timerid];
13643             struct itimerspec hspec;
13644             ret = get_errno(timer_gettime(htimer, &hspec));
13645 
13646             if (host_to_target_itimerspec64(arg2, &hspec)) {
13647                 ret = -TARGET_EFAULT;
13648             }
13649         }
13650         return ret;
13651     }
13652 #endif
13653 
13654 #ifdef TARGET_NR_timer_getoverrun
13655     case TARGET_NR_timer_getoverrun:
13656     {
13657         /* args: timer_t timerid */
13658         target_timer_t timerid = get_timer_id(arg1);
13659 
13660         if (timerid < 0) {
13661             ret = timerid;
13662         } else {
13663             timer_t htimer = g_posix_timers[timerid];
13664             ret = get_errno(timer_getoverrun(htimer));
13665         }
13666         return ret;
13667     }
13668 #endif
13669 
13670 #ifdef TARGET_NR_timer_delete
13671     case TARGET_NR_timer_delete:
13672     {
13673         /* args: timer_t timerid */
13674         target_timer_t timerid = get_timer_id(arg1);
13675 
13676         if (timerid < 0) {
13677             ret = timerid;
13678         } else {
13679             timer_t htimer = g_posix_timers[timerid];
13680             ret = get_errno(timer_delete(htimer));
13681             free_host_timer_slot(timerid);
13682         }
13683         return ret;
13684     }
13685 #endif
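    /*
     * timer_delete disarms the host timer and releases the slot in
     * g_posix_timers, so the guest-visible id (TIMER_MAGIC | index) may be
     * handed out again by a later timer_create.
     */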
13686 
13687 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13688     case TARGET_NR_timerfd_create:
13689         ret = get_errno(timerfd_create(arg1,
13690                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13691         if (ret >= 0) {
13692             fd_trans_register(ret, &target_timerfd_trans);
13693         }
13694         return ret;
13695 #endif
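    /*
     * timerfd_create translates the guest's TFD_* flag bits through
     * fcntl_flags_tbl and registers target_timerfd_trans so that the
     * 8-byte expiration count read from the descriptor can be converted to
     * the guest's byte order.  A hypothetical guest-side sequence
     * (illustrative sketch only, not part of QEMU):
     *
     *     int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
     *     struct itimerspec its = { .it_value = { .tv_sec = 2 } };
     *     timerfd_settime(tfd, 0, &its, NULL);
     *     uint64_t expirations;
     *     read(tfd, &expirations, sizeof(expirations));
     *
     * where the read blocks until the timer has expired at least once.
     */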
13696 
13697 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13698     case TARGET_NR_timerfd_gettime:
13699         {
13700             struct itimerspec its_curr;
13701 
13702             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13703 
13704             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13705                 return -TARGET_EFAULT;
13706             }
13707         }
13708         return ret;
13709 #endif
13710 
13711 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13712     case TARGET_NR_timerfd_gettime64:
13713         {
13714             struct itimerspec its_curr;
13715 
13716             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13717 
13718             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13719                 return -TARGET_EFAULT;
13720             }
13721         }
13722         return ret;
13723 #endif
13724 
13725 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13726     case TARGET_NR_timerfd_settime:
13727         {
13728             struct itimerspec its_new, its_old, *p_new;
13729 
13730             if (arg3) {
13731                 if (target_to_host_itimerspec(&its_new, arg3)) {
13732                     return -TARGET_EFAULT;
13733                 }
13734                 p_new = &its_new;
13735             } else {
13736                 p_new = NULL;
13737             }
13738 
13739             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13740 
13741             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13742                 return -TARGET_EFAULT;
13743             }
13744         }
13745         return ret;
13746 #endif
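    /*
     * timerfd_settime mirrors timer_settime: the guest's new setting is
     * converted to the host itimerspec layout before the call, and the
     * previous setting is copied back only when the guest passed a
     * non-NULL old_value pointer.  The _settime64 case below differs only
     * in the conversion helpers it uses.
     */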
13747 
13748 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13749     case TARGET_NR_timerfd_settime64:
13750         {
13751             struct itimerspec its_new, its_old, *p_new;
13752 
13753             if (arg3) {
13754                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13755                     return -TARGET_EFAULT;
13756                 }
13757                 p_new = &its_new;
13758             } else {
13759                 p_new = NULL;
13760             }
13761 
13762             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13763 
13764             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13765                 return -TARGET_EFAULT;
13766             }
13767         }
13768         return ret;
13769 #endif
13770 
13771 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13772     case TARGET_NR_ioprio_get:
13773         return get_errno(ioprio_get(arg1, arg2));
13774 #endif
13775 
13776 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13777     case TARGET_NR_ioprio_set:
13778         return get_errno(ioprio_set(arg1, arg2, arg3));
13779 #endif
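    /*
     * ioprio_get/ioprio_set are passed straight through: the which/who
     * arguments and the class/priority encoding are plain integers with
     * the same meaning on host and guest, so no translation is required.
     */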
13780 
13781 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13782     case TARGET_NR_setns:
13783         return get_errno(setns(arg1, arg2));
13784 #endif
13785 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13786     case TARGET_NR_unshare:
13787         return get_errno(unshare(arg1));
13788 #endif
13789 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13790     case TARGET_NR_kcmp:
13791         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13792 #endif
13793 #ifdef TARGET_NR_swapcontext
13794     case TARGET_NR_swapcontext:
13795         /* PowerPC specific.  */
13796         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13797 #endif
13798 #ifdef TARGET_NR_memfd_create
13799     case TARGET_NR_memfd_create:
13800         p = lock_user_string(arg1);
13801         if (!p) {
13802             return -TARGET_EFAULT;
13803         }
13804         ret = get_errno(memfd_create(p, arg2));
13805         fd_trans_unregister(ret);
13806         unlock_user(p, arg1, 0);
13807         return ret;
13808 #endif
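    /*
     * memfd_create takes the name string from guest memory; the resulting
     * descriptor is an ordinary host memfd, so no fd translator is needed
     * and any stale registration for that fd number is dropped.
     * Illustrative guest-side usage (a sketch, not part of QEMU):
     *
     *     int fd = memfd_create("scratch", MFD_CLOEXEC);
     *     ftruncate(fd, 4096);
     *     void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
     *                    MAP_SHARED, fd, 0);
     */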
13809 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13810     case TARGET_NR_membarrier:
13811         return get_errno(membarrier(arg1, arg2));
13812 #endif
13813 
13814 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13815     case TARGET_NR_copy_file_range:
13816         {
13817             loff_t inoff, outoff;
13818             loff_t *pinoff = NULL, *poutoff = NULL;
13819 
13820             if (arg2) {
13821                 if (get_user_u64(inoff, arg2)) {
13822                     return -TARGET_EFAULT;
13823                 }
13824                 pinoff = &inoff;
13825             }
13826             if (arg4) {
13827                 if (get_user_u64(outoff, arg4)) {
13828                     return -TARGET_EFAULT;
13829                 }
13830                 poutoff = &outoff;
13831             }
13832             /* Do not sign-extend the count parameter. */
13833             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13834                                                  (abi_ulong)arg5, arg6));
13835             if (!is_error(ret) && ret > 0) {
13836                 if (arg2) {
13837                     if (put_user_u64(inoff, arg2)) {
13838                         return -TARGET_EFAULT;
13839                     }
13840                 }
13841                 if (arg4) {
13842                     if (put_user_u64(outoff, arg4)) {
13843                         return -TARGET_EFAULT;
13844                     }
13845                 }
13846             }
13847         }
13848         return ret;
13849 #endif
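    /*
     * copy_file_range reads the optional in/out offsets from guest memory,
     * passes NULL through when the guest did, and writes the updated
     * offsets back only after a successful copy of at least one byte.  The
     * length argument is zero-extended (abi_ulong) so that a large guest
     * size_t is not sign-extended on the host.  Hypothetical guest-side
     * usage (illustrative only; src_fd/dst_fd are placeholders):
     *
     *     off_t in_off = 0, out_off = 0;
     *     ssize_t n = copy_file_range(src_fd, &in_off, dst_fd, &out_off,
     *                                 65536, 0);
     */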
13850 
13851 #if defined(TARGET_NR_pivot_root)
13852     case TARGET_NR_pivot_root:
13853         {
13854             void *p2;
13855             p = lock_user_string(arg1); /* new_root */
13856             p2 = lock_user_string(arg2); /* put_old */
13857             if (!p || !p2) {
13858                 ret = -TARGET_EFAULT;
13859             } else {
13860                 ret = get_errno(pivot_root(p, p2));
13861             }
13862             unlock_user(p2, arg2, 0);
13863             unlock_user(p, arg1, 0);
13864         }
13865         return ret;
13866 #endif
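    /*
     * pivot_root needs both path strings locked in guest memory at once;
     * if either lock fails the syscall returns EFAULT, and both strings
     * are unlocked before returning in every case.
     */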
13867 
13868 #if defined(TARGET_NR_riscv_hwprobe)
13869     case TARGET_NR_riscv_hwprobe:
13870         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13871 #endif
13872 
13873     default:
13874         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13875         return -TARGET_ENOSYS;
13876     }
13877     return ret;
13878 }
13879 
13880 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13881                     abi_long arg2, abi_long arg3, abi_long arg4,
13882                     abi_long arg5, abi_long arg6, abi_long arg7,
13883                     abi_long arg8)
13884 {
13885     CPUState *cpu = env_cpu(cpu_env);
13886     abi_long ret;
13887 
13888 #ifdef DEBUG_ERESTARTSYS
13889     /* Debug-only code for exercising the syscall-restart code paths
13890      * in the per-architecture cpu main loops: restart every syscall
13891      * the guest makes once before letting it through.
13892      */
13893     {
13894         static bool flag;
13895         flag = !flag;
13896         if (flag) {
13897             return -QEMU_ERESTARTSYS;
13898         }
13899     }
13900 #endif
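    /*
     * From here the flow is: record the syscall entry for plugins, print
     * an strace-style line when -strace logging is enabled, dispatch to
     * do_syscall1() above, then print the return value and record the
     * syscall exit for plugins.
     */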
13901 
13902     record_syscall_start(cpu, num, arg1,
13903                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13904 
13905     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13906         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13907     }
13908 
13909     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13910                       arg5, arg6, arg7, arg8);
13911 
13912     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13913         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13914                           arg3, arg4, arg5, arg6);
13915     }
13916 
13917     record_syscall_return(cpu, num, ret);
13918     return ret;
13919 }
13920