xref: /qemu/linux-user/syscall.c (revision 1405d7e60d8c98a28b29885f70da4f2e4407fbc6)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/tb-flush.h"
30 #include "exec/translation-block.h"
31 #include <elf.h>
32 #include <endian.h>
33 #include <grp.h>
34 #include <sys/ipc.h>
35 #include <sys/msg.h>
36 #include <sys/wait.h>
37 #include <sys/mount.h>
38 #include <sys/file.h>
39 #include <sys/fsuid.h>
40 #include <sys/personality.h>
41 #include <sys/prctl.h>
42 #include <sys/resource.h>
43 #include <sys/swap.h>
44 #include <linux/capability.h>
45 #include <sched.h>
46 #include <sys/timex.h>
47 #include <sys/socket.h>
48 #include <linux/sockios.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/signalfd.h>
59 #include <netinet/in.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 #include <linux/wireless.h>
64 #include <linux/icmp.h>
65 #include <linux/icmpv6.h>
66 #include <linux/if_tun.h>
67 #include <linux/in6.h>
68 #include <linux/errqueue.h>
69 #include <linux/random.h>
70 #ifdef CONFIG_TIMERFD
71 #include <sys/timerfd.h>
72 #endif
73 #ifdef CONFIG_EVENTFD
74 #include <sys/eventfd.h>
75 #endif
76 #ifdef CONFIG_EPOLL
77 #include <sys/epoll.h>
78 #endif
79 #ifdef CONFIG_ATTR
80 #include "qemu/xattr.h"
81 #endif
82 #ifdef CONFIG_SENDFILE
83 #include <sys/sendfile.h>
84 #endif
85 #ifdef HAVE_SYS_KCOV_H
86 #include <sys/kcov.h>
87 #endif
88 
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
95 
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #include <linux/fd.h>
105 #if defined(CONFIG_FIEMAP)
106 #include <linux/fiemap.h>
107 #endif
108 #include <linux/fb.h>
109 #if defined(CONFIG_USBFS)
110 #include <linux/usbdevice_fs.h>
111 #include <linux/usb/ch9.h>
112 #endif
113 #include <linux/vt.h>
114 #include <linux/dm-ioctl.h>
115 #include <linux/reboot.h>
116 #include <linux/route.h>
117 #include <linux/filter.h>
118 #include <linux/blkpg.h>
119 #include <netpacket/packet.h>
120 #include <linux/netlink.h>
121 #include <linux/if_alg.h>
122 #include <linux/rtc.h>
123 #include <sound/asound.h>
124 #ifdef HAVE_BTRFS_H
125 #include <linux/btrfs.h>
126 #endif
127 #ifdef HAVE_DRM_H
128 #include <libdrm/drm.h>
129 #include <libdrm/i915_drm.h>
130 #endif
131 #include "linux_loop.h"
132 #include "uname.h"
133 
134 #include "qemu.h"
135 #include "user-internals.h"
136 #include "strace.h"
137 #include "signal-common.h"
138 #include "loader.h"
139 #include "user-mmap.h"
140 #include "user/page-protection.h"
141 #include "user/safe-syscall.h"
142 #include "user/signal.h"
143 #include "qemu/guest-random.h"
144 #include "qemu/selfmap.h"
145 #include "user/syscall-trace.h"
146 #include "special-errno.h"
147 #include "qapi/error.h"
148 #include "fd-trans.h"
149 #include "user/cpu_loop.h"
150 
151 #ifndef CLONE_IO
152 #define CLONE_IO                0x80000000      /* Clone io context */
153 #endif
154 
155 /* We can't directly call the host clone syscall, because this will
156  * badly confuse libc (breaking mutexes, for example). So we must
157  * divide clone flags into:
158  *  * flag combinations that look like pthread_create()
159  *  * flag combinations that look like fork()
160  *  * flags we can implement within QEMU itself
161  *  * flags we can't support and will return an error for
162  */
163 /* For thread creation, all these flags must be present; for
164  * fork, none must be present.
165  */
166 #define CLONE_THREAD_FLAGS                              \
167     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
168      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
169 
170 /* These flags are ignored:
171  * CLONE_DETACHED is now ignored by the kernel;
172  * CLONE_IO is just an optimisation hint to the I/O scheduler
173  */
174 #define CLONE_IGNORED_FLAGS                     \
175     (CLONE_DETACHED | CLONE_IO)
176 
177 #ifndef CLONE_PIDFD
178 # define CLONE_PIDFD 0x00001000
179 #endif
180 
181 /* Flags for fork which we can implement within QEMU itself */
182 #define CLONE_OPTIONAL_FORK_FLAGS               \
183     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
184      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
185 
186 /* Flags for thread creation which we can implement within QEMU itself */
187 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
188     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
189      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
190 
191 #define CLONE_INVALID_FORK_FLAGS                                        \
192     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
193 
194 #define CLONE_INVALID_THREAD_FLAGS                                      \
195     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
196        CLONE_IGNORED_FLAGS))
197 
198 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
199  * have almost all been allocated. We cannot support any of
200  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
201  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
202  * The checks against the invalid thread masks above will catch these.
203  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
204  */
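/*
 * As an illustration: glibc's pthread_create() typically passes
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM together with CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID, i.e. all of CLONE_THREAD_FLAGS plus only bits
 * from CLONE_OPTIONAL_THREAD_FLAGS, so it is accepted by the
 * pthread_create()-like path.
 */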
205 
206 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
207  * once. This exercises the codepaths for restart.
208  */
209 //#define DEBUG_ERESTARTSYS
210 
211 //#include <linux/msdos_fs.h>
212 #define VFAT_IOCTL_READDIR_BOTH \
213     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
214 #define VFAT_IOCTL_READDIR_SHORT \
215     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
216 
217 #undef _syscall0
218 #undef _syscall1
219 #undef _syscall2
220 #undef _syscall3
221 #undef _syscall4
222 #undef _syscall5
223 #undef _syscall6
224 
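/*
 * Helper macros defining thin wrappers that invoke a host syscall directly
 * by number via syscall(2), for host syscalls the libc may not wrap (or may
 * wrap with different semantics).
 */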
225 #define _syscall0(type,name)		\
226 static type name (void)			\
227 {					\
228 	return syscall(__NR_##name);	\
229 }
230 
231 #define _syscall1(type,name,type1,arg1)		\
232 static type name (type1 arg1)			\
233 {						\
234 	return syscall(__NR_##name, arg1);	\
235 }
236 
237 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
238 static type name (type1 arg1,type2 arg2)		\
239 {							\
240 	return syscall(__NR_##name, arg1, arg2);	\
241 }
242 
243 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
244 static type name (type1 arg1,type2 arg2,type3 arg3)		\
245 {								\
246 	return syscall(__NR_##name, arg1, arg2, arg3);		\
247 }
248 
249 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
250 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
251 {										\
252 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
253 }
254 
255 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
256 		  type5,arg5)							\
257 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
258 {										\
259 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
260 }
261 
262 
263 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
264 		  type5,arg5,type6,arg6)					\
265 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
266                   type6 arg6)							\
267 {										\
268 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
269 }
270 
271 
272 #define __NR_sys_uname __NR_uname
273 #define __NR_sys_getcwd1 __NR_getcwd
274 #define __NR_sys_getdents __NR_getdents
275 #define __NR_sys_getdents64 __NR_getdents64
276 #define __NR_sys_getpriority __NR_getpriority
277 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
278 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
279 #define __NR_sys_syslog __NR_syslog
280 #if defined(__NR_futex)
281 # define __NR_sys_futex __NR_futex
282 #endif
283 #if defined(__NR_futex_time64)
284 # define __NR_sys_futex_time64 __NR_futex_time64
285 #endif
286 #define __NR_sys_statx __NR_statx
287 
288 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
289 #define __NR__llseek __NR_lseek
290 #endif
291 
292 /* Newer kernel ports have llseek() instead of _llseek() */
293 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
294 #define TARGET_NR__llseek TARGET_NR_llseek
295 #endif
296 
297 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
298 #ifndef TARGET_O_NONBLOCK_MASK
299 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
300 #endif
301 
302 #define __NR_sys_gettid __NR_gettid
303 _syscall0(int, sys_gettid)
304 
305 /* For the 64-bit guest on 32-bit host case we must emulate
306  * getdents using getdents64, because otherwise the host
307  * might hand us back more dirent records than we can fit
308  * into the guest buffer after structure format conversion.
309  * In all other cases we emulate getdents with the host's getdents, if it has one.
310  */
311 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
312 #define EMULATE_GETDENTS_WITH_GETDENTS
313 #endif
314 
315 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
316 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
317 #endif
318 #if (defined(TARGET_NR_getdents) && \
319       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
320     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
321 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
322 #endif
323 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
324 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
325           loff_t *, res, unsigned int, wh);
326 #endif
327 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
328 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
329           siginfo_t *, uinfo)
330 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
331 #ifdef __NR_exit_group
332 _syscall1(int,exit_group,int,error_code)
333 #endif
334 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
335 #define __NR_sys_close_range __NR_close_range
336 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
337 #ifndef CLOSE_RANGE_CLOEXEC
338 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
339 #endif
340 #endif
341 #if defined(__NR_futex)
342 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_futex_time64)
346 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
347           const struct timespec *,timeout,int *,uaddr2,int,val3)
348 #endif
349 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
350 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
353 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
354                              unsigned int, flags);
355 #endif
356 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
357 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
358 #endif
359 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
360 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
361           unsigned long *, user_mask_ptr);
362 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
363 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
364           unsigned long *, user_mask_ptr);
365 /* sched_attr is not defined in glibc < 2.41 */
366 #ifndef SCHED_ATTR_SIZE_VER0
367 struct sched_attr {
368     uint32_t size;
369     uint32_t sched_policy;
370     uint64_t sched_flags;
371     int32_t sched_nice;
372     uint32_t sched_priority;
373     uint64_t sched_runtime;
374     uint64_t sched_deadline;
375     uint64_t sched_period;
376     uint32_t sched_util_min;
377     uint32_t sched_util_max;
378 };
379 #endif
380 #define __NR_sys_sched_getattr __NR_sched_getattr
381 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
382           unsigned int, size, unsigned int, flags);
383 #define __NR_sys_sched_setattr __NR_sched_setattr
384 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
385           unsigned int, flags);
386 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
387 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
388 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
389 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
390           const struct sched_param *, param);
391 #define __NR_sys_sched_getparam __NR_sched_getparam
392 _syscall2(int, sys_sched_getparam, pid_t, pid,
393           struct sched_param *, param);
394 #define __NR_sys_sched_setparam __NR_sched_setparam
395 _syscall2(int, sys_sched_setparam, pid_t, pid,
396           const struct sched_param *, param);
397 #define __NR_sys_getcpu __NR_getcpu
398 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
399 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
400           void *, arg);
401 _syscall2(int, capget, struct __user_cap_header_struct *, header,
402           struct __user_cap_data_struct *, data);
403 _syscall2(int, capset, struct __user_cap_header_struct *, header,
404           struct __user_cap_data_struct *, data);
405 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
406 _syscall2(int, ioprio_get, int, which, int, who)
407 #endif
408 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
409 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
410 #endif
411 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
412 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
413 #endif
414 
415 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
416 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
417           unsigned long, idx1, unsigned long, idx2)
418 #endif
419 
420 /*
421  * It is assumed that struct statx is architecture independent.
422  */
423 #if defined(TARGET_NR_statx) && defined(__NR_statx)
424 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
425           unsigned int, mask, struct target_statx *, statxbuf)
426 #endif
427 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
428 _syscall2(int, membarrier, int, cmd, int, flags)
429 #endif
430 
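/*
 * Translation table for open(2)/fcntl(2) file flags: each entry gives the
 * target mask/bits and the corresponding host mask/bits, and is applied in
 * either direction by the bitmask translation helpers.
 */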
431 static const bitmask_transtbl fcntl_flags_tbl[] = {
432   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
433   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
434   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
435   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
436   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
437   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
438   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
439   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
440   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
441   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
442   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
443   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
444   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
445 #if defined(O_DIRECT)
446   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
447 #endif
448 #if defined(O_NOATIME)
449   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
450 #endif
451 #if defined(O_CLOEXEC)
452   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
453 #endif
454 #if defined(O_PATH)
455   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
456 #endif
457 #if defined(O_TMPFILE)
458   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
459 #endif
460   /* Don't terminate the list prematurely on 64-bit host+guest.  */
461 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
462   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
463 #endif
464 };
465 
466 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
467 
468 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
469 #if defined(__NR_utimensat)
470 #define __NR_sys_utimensat __NR_utimensat
471 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
472           const struct timespec *,tsp,int,flags)
473 #else
474 static int sys_utimensat(int dirfd, const char *pathname,
475                          const struct timespec times[2], int flags)
476 {
477     errno = ENOSYS;
478     return -1;
479 }
480 #endif
481 #endif /* TARGET_NR_utimensat */
482 
483 #ifdef TARGET_NR_renameat2
484 #if defined(__NR_renameat2)
485 #define __NR_sys_renameat2 __NR_renameat2
486 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
487           const char *, new, unsigned int, flags)
488 #else
489 static int sys_renameat2(int oldfd, const char *old,
490                          int newfd, const char *new, int flags)
491 {
492     if (flags == 0) {
493         return renameat(oldfd, old, newfd, new);
494     }
495     errno = ENOSYS;
496     return -1;
497 }
498 #endif
499 #endif /* TARGET_NR_renameat2 */
500 
501 #ifdef CONFIG_INOTIFY
502 #include <sys/inotify.h>
503 #else
504 /* Userspace can usually survive at runtime without inotify */
505 #undef TARGET_NR_inotify_init
506 #undef TARGET_NR_inotify_init1
507 #undef TARGET_NR_inotify_add_watch
508 #undef TARGET_NR_inotify_rm_watch
509 #endif /* CONFIG_INOTIFY  */
510 
511 #if defined(TARGET_NR_prlimit64)
512 #ifndef __NR_prlimit64
513 # define __NR_prlimit64 -1
514 #endif
515 #define __NR_sys_prlimit64 __NR_prlimit64
516 /* The glibc rlimit structure may not be that used by the underlying syscall */
517 struct host_rlimit64 {
518     uint64_t rlim_cur;
519     uint64_t rlim_max;
520 };
521 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
522           const struct host_rlimit64 *, new_limit,
523           struct host_rlimit64 *, old_limit)
524 #endif
525 
526 
527 #if defined(TARGET_NR_timer_create)
528 /* Maximum of 32 active POSIX timers allowed at any one time. */
529 #define GUEST_TIMER_MAX 32
530 static timer_t g_posix_timers[GUEST_TIMER_MAX];
531 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
532 
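/*
 * Claim a free guest POSIX timer slot by atomically setting its 'allocated'
 * flag; returns the slot index, or -1 if all GUEST_TIMER_MAX slots are in
 * use.
 */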
533 static inline int next_free_host_timer(void)
534 {
535     int k;
536     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
537         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
538             return k;
539         }
540     }
541     return -1;
542 }
543 
544 static inline void free_host_timer_slot(int id)
545 {
546     qatomic_store_release(g_posix_timer_allocated + id, 0);
547 }
548 #endif
549 
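/*
 * Errno values differ between some host and target ABIs; translate them
 * using the E(X) mappings generated from errnos.c.inc, passing through any
 * value without an explicit mapping.
 */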
550 static inline int host_to_target_errno(int host_errno)
551 {
552     switch (host_errno) {
553 #define E(X)  case X: return TARGET_##X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return host_errno;
558     }
559 }
560 
561 static inline int target_to_host_errno(int target_errno)
562 {
563     switch (target_errno) {
564 #define E(X)  case TARGET_##X: return X;
565 #include "errnos.c.inc"
566 #undef E
567     default:
568         return target_errno;
569     }
570 }
571 
572 abi_long get_errno(abi_long ret)
573 {
574     if (ret == -1)
575         return -host_to_target_errno(errno);
576     else
577         return ret;
578 }
579 
580 const char *target_strerror(int err)
581 {
582     if (err == QEMU_ERESTARTSYS) {
583         return "To be restarted";
584     }
585     if (err == QEMU_ESIGRETURN) {
586         return "Successful exit from sigreturn";
587     }
588 
589     return strerror(target_to_host_errno(err));
590 }
591 
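/*
 * Check that the tail of a guest struct beyond the size the emulation knows
 * about (ksize) is all zeroes: returns 1 if so (or if there is no tail),
 * 0 if a non-zero byte is found, and -TARGET_EFAULT on access failure.
 */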
592 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
593 {
594     int i;
595     uint8_t b;
596     if (usize <= ksize) {
597         return 1;
598     }
599     for (i = ksize; i < usize; i++) {
600         if (get_user_u8(b, addr + i)) {
601             return -TARGET_EFAULT;
602         }
603         if (b != 0) {
604             return 0;
605         }
606     }
607     return 1;
608 }
609 
610 /*
611  * Copies a target struct to a host struct, in a way that guarantees
612  * backwards-compatibility for struct syscall arguments.
613  *
614  * Similar to kernels uaccess.h:copy_struct_from_user()
615  */
616 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
617 {
618     size_t size = MIN(ksize, usize);
619     size_t rest = MAX(ksize, usize) - size;
620 
621     /* Deal with trailing bytes. */
622     if (usize < ksize) {
623         memset(dst + size, 0, rest);
624     } else if (usize > ksize) {
625         int ret = check_zeroed_user(src, ksize, usize);
626         if (ret <= 0) {
627             return ret ?: -TARGET_E2BIG;
628         }
629     }
630     /* Copy the interoperable parts of the struct. */
631     if (copy_from_user(dst, src, size)) {
632         return -TARGET_EFAULT;
633     }
634     return 0;
635 }
636 
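/*
 * The safe_syscallN() wrappers route potentially-blocking host syscalls
 * through safe_syscall(), which cooperates with the guest signal handling
 * so that a signal arriving around the call is neither lost nor breaks the
 * syscall restart logic; see user/safe-syscall.h for the details.
 */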
637 #define safe_syscall0(type, name) \
638 static type safe_##name(void) \
639 { \
640     return safe_syscall(__NR_##name); \
641 }
642 
643 #define safe_syscall1(type, name, type1, arg1) \
644 static type safe_##name(type1 arg1) \
645 { \
646     return safe_syscall(__NR_##name, arg1); \
647 }
648 
649 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
650 static type safe_##name(type1 arg1, type2 arg2) \
651 { \
652     return safe_syscall(__NR_##name, arg1, arg2); \
653 }
654 
655 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
656 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
657 { \
658     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
659 }
660 
661 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
662     type4, arg4) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
664 { \
665     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
666 }
667 
668 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
669     type4, arg4, type5, arg5) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
671     type5 arg5) \
672 { \
673     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
674 }
675 
676 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
677     type4, arg4, type5, arg5, type6, arg6) \
678 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
679     type5 arg5, type6 arg6) \
680 { \
681     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
682 }
683 
684 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
685 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
686 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
687               int, flags, mode_t, mode)
688 
689 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
690               const struct open_how_ver0 *, how, size_t, size)
691 
692 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694               struct rusage *, rusage)
695 #endif
696 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
697               int, options, struct rusage *, rusage)
698 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
699 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
700               char **, argv, char **, envp, int, flags)
701 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
702     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
703 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
704               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
705 #endif
706 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
707 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
708               struct timespec *, tsp, const sigset_t *, sigmask,
709               size_t, sigsetsize)
710 #endif
711 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
712               int, maxevents, int, timeout, const sigset_t *, sigmask,
713               size_t, sigsetsize)
714 #if defined(__NR_futex)
715 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
716               const struct timespec *,timeout,int *,uaddr2,int,val3)
717 #endif
718 #if defined(__NR_futex_time64)
719 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
720               const struct timespec *,timeout,int *,uaddr2,int,val3)
721 #endif
722 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
723 safe_syscall2(int, kill, pid_t, pid, int, sig)
724 safe_syscall2(int, tkill, int, tid, int, sig)
725 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
726 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
727 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
729               unsigned long, pos_l, unsigned long, pos_h)
730 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
731               unsigned long, pos_l, unsigned long, pos_h)
732 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
733               socklen_t, addrlen)
734 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
735               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
736 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
737               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
738 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
739 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
740 safe_syscall2(int, flock, int, fd, int, operation)
741 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
742 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
743               const struct timespec *, uts, size_t, sigsetsize)
744 #endif
745 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
746               int, flags)
747 #if defined(TARGET_NR_nanosleep)
748 safe_syscall2(int, nanosleep, const struct timespec *, req,
749               struct timespec *, rem)
750 #endif
751 #if defined(TARGET_NR_clock_nanosleep) || \
752     defined(TARGET_NR_clock_nanosleep_time64)
753 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
754               const struct timespec *, req, struct timespec *, rem)
755 #endif
756 #ifdef __NR_ipc
757 #ifdef __s390x__
758 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
759               void *, ptr)
760 #else
761 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
762               void *, ptr, long, fifth)
763 #endif
764 #endif
765 #ifdef __NR_msgsnd
766 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
767               int, flags)
768 #endif
769 #ifdef __NR_msgrcv
770 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
771               long, msgtype, int, flags)
772 #endif
773 #ifdef __NR_semtimedop
774 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
775               unsigned, nsops, const struct timespec *, timeout)
776 #endif
777 #if defined(TARGET_NR_mq_timedsend) || \
778     defined(TARGET_NR_mq_timedsend_time64)
779 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
780               size_t, len, unsigned, prio, const struct timespec *, timeout)
781 #endif
782 #if defined(TARGET_NR_mq_timedreceive) || \
783     defined(TARGET_NR_mq_timedreceive_time64)
784 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
785               size_t, len, unsigned *, prio, const struct timespec *, timeout)
786 #endif
787 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
788 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
789               int, outfd, loff_t *, poutoff, size_t, length,
790               unsigned int, flags)
791 #endif
792 
793 /* We do ioctl like this rather than via safe_syscall3 to preserve the
794  * "third argument might be integer or pointer or not present" behaviour of
795  * the libc function.
796  */
797 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
798 /* Similarly for fcntl. Since we always build with LFS enabled,
799  * we should be using the 64-bit structures automatically.
800  */
801 #ifdef __NR_fcntl64
802 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
803 #else
804 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
805 #endif
806 
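/*
 * Convert a host socket type back to the target encoding: the base type
 * (SOCK_STREAM, SOCK_DGRAM, ...) plus the SOCK_CLOEXEC/SOCK_NONBLOCK
 * modifier bits where the host defines them.
 */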
807 static inline int host_to_target_sock_type(int host_type)
808 {
809     int target_type;
810 
811     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
812     case SOCK_DGRAM:
813         target_type = TARGET_SOCK_DGRAM;
814         break;
815     case SOCK_STREAM:
816         target_type = TARGET_SOCK_STREAM;
817         break;
818     default:
819         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
820         break;
821     }
822 
823 #if defined(SOCK_CLOEXEC)
824     if (host_type & SOCK_CLOEXEC) {
825         target_type |= TARGET_SOCK_CLOEXEC;
826     }
827 #endif
828 
829 #if defined(SOCK_NONBLOCK)
830     if (host_type & SOCK_NONBLOCK) {
831         target_type |= TARGET_SOCK_NONBLOCK;
832     }
833 #endif
834 
835     return target_type;
836 }
837 
838 static abi_ulong target_brk, initial_target_brk;
839 
840 void target_set_brk(abi_ulong new_brk)
841 {
842     target_brk = TARGET_PAGE_ALIGN(new_brk);
843     initial_target_brk = target_brk;
844 }
845 
846 /* do_brk() must return target values and target errnos. */
847 abi_long do_brk(abi_ulong brk_val)
848 {
849     abi_long mapped_addr;
850     abi_ulong new_brk;
851     abi_ulong old_brk;
852 
853     /* brk pointers are always untagged */
854 
855     /* do not allow shrinking below the initial brk value */
856     if (brk_val < initial_target_brk) {
857         return target_brk;
858     }
859 
860     new_brk = TARGET_PAGE_ALIGN(brk_val);
861     old_brk = TARGET_PAGE_ALIGN(target_brk);
862 
863     /* new and old target_brk might be on the same page */
864     if (new_brk == old_brk) {
865         target_brk = brk_val;
866         return target_brk;
867     }
868 
869     /* Release heap if necessary */
870     if (new_brk < old_brk) {
871         target_munmap(new_brk, old_brk - new_brk);
872 
873         target_brk = brk_val;
874         return target_brk;
875     }
876 
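    /*
     * Grow the heap: map the new pages in place. MAP_FIXED_NOREPLACE makes
     * the mapping fail rather than clobber an existing mapping, in which
     * case the break is reported as unchanged below.
     */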
877     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
878                               PROT_READ | PROT_WRITE,
879                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
880                               -1, 0);
881 
882     if (mapped_addr == old_brk) {
883         target_brk = brk_val;
884         return target_brk;
885     }
886 
887 #if defined(TARGET_ALPHA)
888     /* We (partially) emulate OSF/1 on Alpha, which requires we
889        return a proper errno, not an unchanged brk value.  */
890     return -TARGET_ENOMEM;
891 #endif
892     /* For everything else, return the previous break. */
893     return target_brk;
894 }
895 
896 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
897     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
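/*
 * A target fd_set is an array of abi_ulong bitmask words in guest byte
 * order; these helpers convert it to and from the host fd_set one bit at
 * a time.
 */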
898 static inline abi_long copy_from_user_fdset(fd_set *fds,
899                                             abi_ulong target_fds_addr,
900                                             int n)
901 {
902     int i, nw, j, k;
903     abi_ulong b, *target_fds;
904 
905     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
906     if (!(target_fds = lock_user(VERIFY_READ,
907                                  target_fds_addr,
908                                  sizeof(abi_ulong) * nw,
909                                  1)))
910         return -TARGET_EFAULT;
911 
912     FD_ZERO(fds);
913     k = 0;
914     for (i = 0; i < nw; i++) {
915         /* grab the abi_ulong */
916         __get_user(b, &target_fds[i]);
917         for (j = 0; j < TARGET_ABI_BITS; j++) {
918             /* check the bit inside the abi_ulong */
919             if ((b >> j) & 1)
920                 FD_SET(k, fds);
921             k++;
922         }
923     }
924 
925     unlock_user(target_fds, target_fds_addr, 0);
926 
927     return 0;
928 }
929 
930 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
931                                                  abi_ulong target_fds_addr,
932                                                  int n)
933 {
934     if (target_fds_addr) {
935         if (copy_from_user_fdset(fds, target_fds_addr, n))
936             return -TARGET_EFAULT;
937         *fds_ptr = fds;
938     } else {
939         *fds_ptr = NULL;
940     }
941     return 0;
942 }
943 
944 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
945                                           const fd_set *fds,
946                                           int n)
947 {
948     int i, nw, j, k;
949     abi_long v;
950     abi_ulong *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_WRITE,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  0)))
957         return -TARGET_EFAULT;
958 
959     k = 0;
960     for (i = 0; i < nw; i++) {
961         v = 0;
962         for (j = 0; j < TARGET_ABI_BITS; j++) {
963             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
964             k++;
965         }
966         __put_user(v, &target_fds[i]);
967     }
968 
969     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
970 
971     return 0;
972 }
973 #endif
974 
975 #if defined(__alpha__)
976 #define HOST_HZ 1024
977 #else
978 #define HOST_HZ 100
979 #endif
980 
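/* Rescale clock_t tick counts when the host and target HZ values differ. */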
981 static inline abi_long host_to_target_clock_t(long ticks)
982 {
983 #if HOST_HZ == TARGET_HZ
984     return ticks;
985 #else
986     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
987 #endif
988 }
989 
990 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
991                                              const struct rusage *rusage)
992 {
993     struct target_rusage *target_rusage;
994 
995     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
996         return -TARGET_EFAULT;
997     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
998     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
999     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1000     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1001     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1002     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1003     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1004     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1005     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1006     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1007     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1008     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1009     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1010     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1011     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1012     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1013     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1014     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1015     unlock_user_struct(target_rusage, target_addr, 1);
1016 
1017     return 0;
1018 }
1019 
1020 #ifdef TARGET_NR_setrlimit
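/*
 * Convert a target rlimit value to the host type, mapping
 * TARGET_RLIM_INFINITY (and anything that does not fit in the host rlim_t)
 * to RLIM_INFINITY.
 */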
1021 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1022 {
1023     abi_ulong target_rlim_swap;
1024     rlim_t result;
1025 
1026     target_rlim_swap = tswapal(target_rlim);
1027     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1028         return RLIM_INFINITY;
1029 
1030     result = target_rlim_swap;
1031     if (target_rlim_swap != (rlim_t)result)
1032         return RLIM_INFINITY;
1033 
1034     return result;
1035 }
1036 #endif
1037 
1038 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1039 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1040 {
1041     abi_ulong target_rlim_swap;
1042     abi_ulong result;
1043 
1044     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1045         target_rlim_swap = TARGET_RLIM_INFINITY;
1046     else
1047         target_rlim_swap = rlim;
1048     result = tswapal(target_rlim_swap);
1049 
1050     return result;
1051 }
1052 #endif
1053 
1054 static inline int target_to_host_resource(int code)
1055 {
1056     switch (code) {
1057     case TARGET_RLIMIT_AS:
1058         return RLIMIT_AS;
1059     case TARGET_RLIMIT_CORE:
1060         return RLIMIT_CORE;
1061     case TARGET_RLIMIT_CPU:
1062         return RLIMIT_CPU;
1063     case TARGET_RLIMIT_DATA:
1064         return RLIMIT_DATA;
1065     case TARGET_RLIMIT_FSIZE:
1066         return RLIMIT_FSIZE;
1067     case TARGET_RLIMIT_LOCKS:
1068         return RLIMIT_LOCKS;
1069     case TARGET_RLIMIT_MEMLOCK:
1070         return RLIMIT_MEMLOCK;
1071     case TARGET_RLIMIT_MSGQUEUE:
1072         return RLIMIT_MSGQUEUE;
1073     case TARGET_RLIMIT_NICE:
1074         return RLIMIT_NICE;
1075     case TARGET_RLIMIT_NOFILE:
1076         return RLIMIT_NOFILE;
1077     case TARGET_RLIMIT_NPROC:
1078         return RLIMIT_NPROC;
1079     case TARGET_RLIMIT_RSS:
1080         return RLIMIT_RSS;
1081     case TARGET_RLIMIT_RTPRIO:
1082         return RLIMIT_RTPRIO;
1083 #ifdef RLIMIT_RTTIME
1084     case TARGET_RLIMIT_RTTIME:
1085         return RLIMIT_RTTIME;
1086 #endif
1087     case TARGET_RLIMIT_SIGPENDING:
1088         return RLIMIT_SIGPENDING;
1089     case TARGET_RLIMIT_STACK:
1090         return RLIMIT_STACK;
1091     default:
1092         return code;
1093     }
1094 }
1095 
1096 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1097                                               abi_ulong target_tv_addr)
1098 {
1099     struct target_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __get_user(tv->tv_sec, &target_tv->tv_sec);
1106     __get_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 0);
1109 
1110     return 0;
1111 }
1112 
1113 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1114                                             const struct timeval *tv)
1115 {
1116     struct target_timeval *target_tv;
1117 
1118     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1119         return -TARGET_EFAULT;
1120     }
1121 
1122     __put_user(tv->tv_sec, &target_tv->tv_sec);
1123     __put_user(tv->tv_usec, &target_tv->tv_usec);
1124 
1125     unlock_user_struct(target_tv, target_tv_addr, 1);
1126 
1127     return 0;
1128 }
1129 
1130 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1131 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1132                                                 abi_ulong target_tv_addr)
1133 {
1134     struct target__kernel_sock_timeval *target_tv;
1135 
1136     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1137         return -TARGET_EFAULT;
1138     }
1139 
1140     __get_user(tv->tv_sec, &target_tv->tv_sec);
1141     __get_user(tv->tv_usec, &target_tv->tv_usec);
1142 
1143     unlock_user_struct(target_tv, target_tv_addr, 0);
1144 
1145     return 0;
1146 }
1147 #endif
1148 
1149 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1150                                               const struct timeval *tv)
1151 {
1152     struct target__kernel_sock_timeval *target_tv;
1153 
1154     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1155         return -TARGET_EFAULT;
1156     }
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 #if defined(TARGET_NR_futex) || \
1167     defined(TARGET_NR_pselect6) || \
1168     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1169     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1170     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1171     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1172     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1173     defined(TARGET_NR_timer_settime) || \
1174     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1175 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1176                                                abi_ulong target_addr)
1177 {
1178     struct target_timespec *target_ts;
1179 
1180     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1181         return -TARGET_EFAULT;
1182     }
1183     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1184     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1185     unlock_user_struct(target_ts, target_addr, 0);
1186     return 0;
1187 }
1188 #endif
1189 
1190 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1191     defined(TARGET_NR_timer_settime64) || \
1192     defined(TARGET_NR_mq_timedsend_time64) || \
1193     defined(TARGET_NR_mq_timedreceive_time64) || \
1194     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1195     defined(TARGET_NR_clock_nanosleep_time64) || \
1196     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1197     defined(TARGET_NR_utimensat) || \
1198     defined(TARGET_NR_utimensat_time64) || \
1199     defined(TARGET_NR_semtimedop_time64) || \
1200     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1201 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1202                                                  abi_ulong target_addr)
1203 {
1204     struct target__kernel_timespec *target_ts;
1205 
1206     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1207         return -TARGET_EFAULT;
1208     }
1209     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1210     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1211     /* in 32-bit mode, this drops the padding */
1212     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1213     unlock_user_struct(target_ts, target_addr, 0);
1214     return 0;
1215 }
1216 #endif
1217 
1218 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1219                                                struct timespec *host_ts)
1220 {
1221     struct target_timespec *target_ts;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1227     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1228     unlock_user_struct(target_ts, target_addr, 1);
1229     return 0;
1230 }
1231 
1232 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1233                                                  struct timespec *host_ts)
1234 {
1235     struct target__kernel_timespec *target_ts;
1236 
1237     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1238         return -TARGET_EFAULT;
1239     }
1240     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1241     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1242     unlock_user_struct(target_ts, target_addr, 1);
1243     return 0;
1244 }
1245 
1246 #if defined(TARGET_NR_gettimeofday)
1247 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1248                                              struct timezone *tz)
1249 {
1250     struct target_timezone *target_tz;
1251 
1252     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1253         return -TARGET_EFAULT;
1254     }
1255 
1256     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1257     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1258 
1259     unlock_user_struct(target_tz, target_tz_addr, 1);
1260 
1261     return 0;
1262 }
1263 #endif
1264 
1265 #if defined(TARGET_NR_settimeofday)
1266 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1267                                                abi_ulong target_tz_addr)
1268 {
1269     struct target_timezone *target_tz;
1270 
1271     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1272         return -TARGET_EFAULT;
1273     }
1274 
1275     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1276     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1277 
1278     unlock_user_struct(target_tz, target_tz_addr, 0);
1279 
1280     return 0;
1281 }
1282 #endif
1283 
1284 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1285 #include <mqueue.h>
1286 
1287 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1288                                               abi_ulong target_mq_attr_addr)
1289 {
1290     struct target_mq_attr *target_mq_attr;
1291 
1292     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1293                           target_mq_attr_addr, 1))
1294         return -TARGET_EFAULT;
1295 
1296     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1297     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1298     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1299     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1300 
1301     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1302 
1303     return 0;
1304 }
1305 
1306 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1307                                             const struct mq_attr *attr)
1308 {
1309     struct target_mq_attr *target_mq_attr;
1310 
1311     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1312                           target_mq_attr_addr, 0))
1313         return -TARGET_EFAULT;
1314 
1315     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1316     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1317     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1318     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1319 
1320     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1321 
1322     return 0;
1323 }
1324 #endif
1325 
1326 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1327 /* do_select() must return target values and target errnos. */
1328 static abi_long do_select(int n,
1329                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1330                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1331 {
1332     fd_set rfds, wfds, efds;
1333     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1334     struct timeval tv;
1335     struct timespec ts, *ts_ptr;
1336     abi_long ret;
1337 
1338     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1339     if (ret) {
1340         return ret;
1341     }
1342     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1347     if (ret) {
1348         return ret;
1349     }
1350 
1351     if (target_tv_addr) {
1352         if (copy_from_user_timeval(&tv, target_tv_addr))
1353             return -TARGET_EFAULT;
1354         ts.tv_sec = tv.tv_sec;
1355         ts.tv_nsec = tv.tv_usec * 1000;
1356         ts_ptr = &ts;
1357     } else {
1358         ts_ptr = NULL;
1359     }
1360 
1361     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1362                                   ts_ptr, NULL));
1363 
1364     if (!is_error(ret)) {
1365         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1366             return -TARGET_EFAULT;
1367         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1368             return -TARGET_EFAULT;
1369         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1370             return -TARGET_EFAULT;
1371 
1372         if (target_tv_addr) {
1373             tv.tv_sec = ts.tv_sec;
1374             tv.tv_usec = ts.tv_nsec / 1000;
1375             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1376                 return -TARGET_EFAULT;
1377             }
1378         }
1379     }
1380 
1381     return ret;
1382 }
1383 
1384 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1385 static abi_long do_old_select(abi_ulong arg1)
1386 {
1387     struct target_sel_arg_struct *sel;
1388     abi_ulong inp, outp, exp, tvp;
1389     long nsel;
1390 
1391     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1392         return -TARGET_EFAULT;
1393     }
1394 
1395     nsel = tswapal(sel->n);
1396     inp = tswapal(sel->inp);
1397     outp = tswapal(sel->outp);
1398     exp = tswapal(sel->exp);
1399     tvp = tswapal(sel->tvp);
1400 
1401     unlock_user_struct(sel, arg1, 0);
1402 
1403     return do_select(nsel, inp, outp, exp, tvp);
1404 }
1405 #endif
1406 #endif
1407 
1408 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1409 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1410                             abi_long arg4, abi_long arg5, abi_long arg6,
1411                             bool time64)
1412 {
1413     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1414     fd_set rfds, wfds, efds;
1415     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1416     struct timespec ts, *ts_ptr;
1417     abi_long ret;
1418 
1419     /*
1420      * The 6th arg is actually two args smashed together,
1421      * so we cannot use the C library.
1422      */
1423     struct {
1424         sigset_t *set;
1425         size_t size;
1426     } sig, *sig_ptr;
1427 
1428     abi_ulong arg_sigset, arg_sigsize, *arg7;
1429 
1430     n = arg1;
1431     rfd_addr = arg2;
1432     wfd_addr = arg3;
1433     efd_addr = arg4;
1434     ts_addr = arg5;
1435 
1436     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1437     if (ret) {
1438         return ret;
1439     }
1440     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1445     if (ret) {
1446         return ret;
1447     }
1448 
1449     /*
1450      * This takes a timespec, and not a timeval, so we cannot
1451      * use the do_select() helper ...
1452      */
1453     if (ts_addr) {
1454         if (time64) {
1455             if (target_to_host_timespec64(&ts, ts_addr)) {
1456                 return -TARGET_EFAULT;
1457             }
1458         } else {
1459             if (target_to_host_timespec(&ts, ts_addr)) {
1460                 return -TARGET_EFAULT;
1461             }
1462         }
1463         ts_ptr = &ts;
1464     } else {
1465         ts_ptr = NULL;
1466     }
1467 
1468     /* Extract the two packed args for the sigset */
1469     sig_ptr = NULL;
1470     if (arg6) {
1471         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1472         if (!arg7) {
1473             return -TARGET_EFAULT;
1474         }
1475         arg_sigset = tswapal(arg7[0]);
1476         arg_sigsize = tswapal(arg7[1]);
1477         unlock_user(arg7, arg6, 0);
1478 
1479         if (arg_sigset) {
1480             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1481             if (ret != 0) {
1482                 return ret;
1483             }
1484             sig_ptr = &sig;
1485             sig.size = SIGSET_T_SIZE;
1486         }
1487     }
1488 
1489     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1490                                   ts_ptr, sig_ptr));
1491 
1492     if (sig_ptr) {
1493         finish_sigsuspend_mask(ret);
1494     }
1495 
1496     if (!is_error(ret)) {
1497         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1501             return -TARGET_EFAULT;
1502         }
1503         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1504             return -TARGET_EFAULT;
1505         }
1506         if (time64) {
1507             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1508                 return -TARGET_EFAULT;
1509             }
1510         } else {
1511             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1512                 return -TARGET_EFAULT;
1513             }
1514         }
1515     }
1516     return ret;
1517 }
1518 #endif
1519 
1520 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1521     defined(TARGET_NR_ppoll_time64)
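/*
 * Common implementation of poll(), ppoll() and ppoll_time64: convert the
 * target pollfd array, optional timeout and signal mask, call safe_ppoll(),
 * then copy back the revents fields and (for ppoll) the timespec.
 */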
1522 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1523                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1524 {
1525     struct target_pollfd *target_pfd;
1526     unsigned int nfds = arg2;
1527     struct pollfd *pfd;
1528     unsigned int i;
1529     abi_long ret;
1530 
1531     pfd = NULL;
1532     target_pfd = NULL;
1533     if (nfds) {
1534         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1535             return -TARGET_EINVAL;
1536         }
1537         target_pfd = lock_user(VERIFY_WRITE, arg1,
1538                                sizeof(struct target_pollfd) * nfds, 1);
1539         if (!target_pfd) {
1540             return -TARGET_EFAULT;
1541         }
1542 
1543         pfd = alloca(sizeof(struct pollfd) * nfds);
1544         for (i = 0; i < nfds; i++) {
1545             pfd[i].fd = tswap32(target_pfd[i].fd);
1546             pfd[i].events = tswap16(target_pfd[i].events);
1547         }
1548     }
1549     if (ppoll) {
1550         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1551         sigset_t *set = NULL;
1552 
1553         if (arg3) {
1554             if (time64) {
1555                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1556                     unlock_user(target_pfd, arg1, 0);
1557                     return -TARGET_EFAULT;
1558                 }
1559             } else {
1560                 if (target_to_host_timespec(timeout_ts, arg3)) {
1561                     unlock_user(target_pfd, arg1, 0);
1562                     return -TARGET_EFAULT;
1563                 }
1564             }
1565         } else {
1566             timeout_ts = NULL;
1567         }
1568 
1569         if (arg4) {
1570             ret = process_sigsuspend_mask(&set, arg4, arg5);
1571             if (ret != 0) {
1572                 unlock_user(target_pfd, arg1, 0);
1573                 return ret;
1574             }
1575         }
1576 
1577         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1578                                    set, SIGSET_T_SIZE));
1579 
1580         if (set) {
1581             finish_sigsuspend_mask(ret);
1582         }
1583         if (!is_error(ret) && arg3) {
1584             if (time64) {
1585                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1586                     return -TARGET_EFAULT;
1587                 }
1588             } else {
1589                 if (host_to_target_timespec(arg3, timeout_ts)) {
1590                     return -TARGET_EFAULT;
1591                 }
1592             }
1593         }
1594     } else {
1595         struct timespec ts, *pts;
1596 
1597         if (arg3 >= 0) {
1598             /* Convert ms to secs, ns */
1599             ts.tv_sec = arg3 / 1000;
1600             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1601             pts = &ts;
1602         } else {
1603             /* A negative poll() timeout means "infinite" */
1604             pts = NULL;
1605         }
1606         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1607     }
1608 
1609     if (!is_error(ret)) {
1610         for (i = 0; i < nfds; i++) {
1611             target_pfd[i].revents = tswap16(pfd[i].revents);
1612         }
1613     }
1614     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1615     return ret;
1616 }
1617 #endif
1618 
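/*
 * Create a host pipe and return the two descriptors to the guest,
 * covering both pipe() and pipe2().
 */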
1619 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1620                         int flags, int is_pipe2)
1621 {
1622     int host_pipe[2];
1623     abi_long ret;
1624     ret = pipe2(host_pipe, flags);
1625 
1626     if (is_error(ret))
1627         return get_errno(ret);
1628 
1629     /* Several targets have special calling conventions for the original
1630        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1631     if (!is_pipe2) {
1632 #if defined(TARGET_ALPHA)
1633         cpu_env->ir[IR_A4] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_MIPS)
1636         cpu_env->active_tc.gpr[3] = host_pipe[1];
1637         return host_pipe[0];
1638 #elif defined(TARGET_SH4)
1639         cpu_env->gregs[1] = host_pipe[1];
1640         return host_pipe[0];
1641 #elif defined(TARGET_SPARC)
1642         cpu_env->regwptr[1] = host_pipe[1];
1643         return host_pipe[0];
1644 #endif
1645     }
1646 
1647     if (put_user_s32(host_pipe[0], pipedes)
1648         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1649         return -TARGET_EFAULT;
1650     return get_errno(ret);
1651 }
1652 
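/*
 * Convert a guest sockaddr at target_addr into the host buffer 'addr',
 * byte-swapping the family and the family-specific fields, and fixing
 * up AF_UNIX lengths so that sun_path stays NUL-terminated.  A per-fd
 * translator, if registered, takes precedence.
 */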
1653 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1654                                                abi_ulong target_addr,
1655                                                socklen_t len)
1656 {
1657     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1658     sa_family_t sa_family;
1659     struct target_sockaddr *target_saddr;
1660 
1661     if (fd_trans_target_to_host_addr(fd)) {
1662         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1663     }
1664 
1665     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1666     if (!target_saddr)
1667         return -TARGET_EFAULT;
1668 
1669     sa_family = tswap16(target_saddr->sa_family);
1670 
1671     /* The caller might send an incomplete sun_path; sun_path
1672      * must be terminated by \0 (see the manual page), but
1673      * unfortunately it is quite common to specify the sockaddr_un
1674      * length as "strlen(x->sun_path)" when it should be
1675      * "strlen(...) + 1". We fix that up here if needed.
1676      * The Linux kernel has a similar fixup.
1677      */
1678 
1679     if (sa_family == AF_UNIX) {
1680         if (len < unix_maxlen && len > 0) {
1681             char *cp = (char *)target_saddr;
1682 
1683             if (cp[len - 1] && !cp[len])
1684                 len++;
1685         }
1686         if (len > unix_maxlen)
1687             len = unix_maxlen;
1688     }
1689 
1690     memcpy(addr, target_saddr, len);
1691     addr->sa_family = sa_family;
1692     if (sa_family == AF_NETLINK) {
1693         struct sockaddr_nl *nladdr;
1694 
1695         nladdr = (struct sockaddr_nl *)addr;
1696         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1697         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1698     } else if (sa_family == AF_PACKET) {
1699         struct target_sockaddr_ll *lladdr;
1700 
1701         lladdr = (struct target_sockaddr_ll *)addr;
1702         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1703         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1704     } else if (sa_family == AF_INET6) {
1705         struct sockaddr_in6 *in6addr;
1706 
1707         in6addr = (struct sockaddr_in6 *)addr;
1708         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1709     }
1710     unlock_user(target_saddr, target_addr, 0);
1711 
1712     return 0;
1713 }
1714 
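/*
 * Copy a host sockaddr back to the guest at target_addr, byte-swapping
 * the family and the netlink/packet/IPv6 specific fields that need it.
 */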
1715 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1716                                                struct sockaddr *addr,
1717                                                socklen_t len)
1718 {
1719     struct target_sockaddr *target_saddr;
1720 
1721     if (len == 0) {
1722         return 0;
1723     }
1724     assert(addr);
1725 
1726     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1727     if (!target_saddr)
1728         return -TARGET_EFAULT;
1729     memcpy(target_saddr, addr, len);
1730     if (len >= offsetof(struct target_sockaddr, sa_family) +
1731         sizeof(target_saddr->sa_family)) {
1732         target_saddr->sa_family = tswap16(addr->sa_family);
1733     }
1734     if (addr->sa_family == AF_NETLINK &&
1735         len >= sizeof(struct target_sockaddr_nl)) {
1736         struct target_sockaddr_nl *target_nl =
1737                (struct target_sockaddr_nl *)target_saddr;
1738         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1739         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1740     } else if (addr->sa_family == AF_PACKET) {
1741         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1742         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1743         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1744     } else if (addr->sa_family == AF_INET6 &&
1745                len >= sizeof(struct target_sockaddr_in6)) {
1746         struct target_sockaddr_in6 *target_in6 =
1747                (struct target_sockaddr_in6 *)target_saddr;
1748         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1749     }
1750     unlock_user(target_saddr, target_addr, len);
1751 
1752     return 0;
1753 }
1754 
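/*
 * Convert ancillary data (control messages) from guest to host layout:
 * SCM_RIGHTS descriptors, SCM_CREDENTIALS and SOL_ALG payloads are
 * converted field by field; anything else is copied through unchanged
 * with a LOG_UNIMP warning.
 */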
1755 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1756                                            struct target_msghdr *target_msgh)
1757 {
1758     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1759     abi_long msg_controllen;
1760     abi_ulong target_cmsg_addr;
1761     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1762     socklen_t space = 0;
1763 
1764     msg_controllen = tswapal(target_msgh->msg_controllen);
1765     if (msg_controllen < sizeof (struct target_cmsghdr))
1766         goto the_end;
1767     target_cmsg_addr = tswapal(target_msgh->msg_control);
1768     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1769     target_cmsg_start = target_cmsg;
1770     if (!target_cmsg)
1771         return -TARGET_EFAULT;
1772 
1773     while (cmsg && target_cmsg) {
1774         void *data = CMSG_DATA(cmsg);
1775         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1776 
1777         int len = tswapal(target_cmsg->cmsg_len)
1778             - sizeof(struct target_cmsghdr);
1779 
1780         space += CMSG_SPACE(len);
1781         if (space > msgh->msg_controllen) {
1782             space -= CMSG_SPACE(len);
1783             /* This is a QEMU bug, since we allocated the payload
1784              * area ourselves (unlike overflow in host-to-target
1785              * conversion, which is just the guest giving us a buffer
1786              * that's too small). It can't happen for the payload types
1787              * we currently support; if it becomes an issue in future
1788              * we would need to improve our allocation strategy to
1789              * something more intelligent than "twice the size of the
1790              * target buffer we're reading from".
1791              */
1792             qemu_log_mask(LOG_UNIMP,
1793                           ("Unsupported ancillary data %d/%d: "
1794                            "unhandled msg size\n"),
1795                           tswap32(target_cmsg->cmsg_level),
1796                           tswap32(target_cmsg->cmsg_type));
1797             break;
1798         }
1799 
1800         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1801             cmsg->cmsg_level = SOL_SOCKET;
1802         } else {
1803             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1804         }
1805         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1806         cmsg->cmsg_len = CMSG_LEN(len);
1807 
1808         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1809             int *fd = (int *)data;
1810             int *target_fd = (int *)target_data;
1811             int i, numfds = len / sizeof(int);
1812 
1813             for (i = 0; i < numfds; i++) {
1814                 __get_user(fd[i], target_fd + i);
1815             }
1816         } else if (cmsg->cmsg_level == SOL_SOCKET
1817                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1818             struct ucred *cred = (struct ucred *)data;
1819             struct target_ucred *target_cred =
1820                 (struct target_ucred *)target_data;
1821 
1822             __get_user(cred->pid, &target_cred->pid);
1823             __get_user(cred->uid, &target_cred->uid);
1824             __get_user(cred->gid, &target_cred->gid);
1825         } else if (cmsg->cmsg_level == SOL_ALG) {
1826             uint32_t *dst = (uint32_t *)data;
1827 
1828             memcpy(dst, target_data, len);
1829             /* fix endianness of first 32-bit word */
1830             if (len >= sizeof(uint32_t)) {
1831                 *dst = tswap32(*dst);
1832             }
1833         } else {
1834             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1835                           cmsg->cmsg_level, cmsg->cmsg_type);
1836             memcpy(data, target_data, len);
1837         }
1838 
1839         cmsg = CMSG_NXTHDR(msgh, cmsg);
1840         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1841                                          target_cmsg_start);
1842     }
1843     unlock_user(target_cmsg, target_cmsg_addr, 0);
1844  the_end:
1845     msgh->msg_controllen = space;
1846     return 0;
1847 }
1848 
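/*
 * Convert ancillary data received from the host back into guest
 * layout, truncating the payload and setting MSG_CTRUNC when the
 * guest-supplied control buffer is too small.
 */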
1849 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1850                                            struct msghdr *msgh)
1851 {
1852     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1853     abi_long msg_controllen;
1854     abi_ulong target_cmsg_addr;
1855     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1856     socklen_t space = 0;
1857 
1858     msg_controllen = tswapal(target_msgh->msg_controllen);
1859     if (msg_controllen < sizeof (struct target_cmsghdr))
1860         goto the_end;
1861     target_cmsg_addr = tswapal(target_msgh->msg_control);
1862     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1863     target_cmsg_start = target_cmsg;
1864     if (!target_cmsg)
1865         return -TARGET_EFAULT;
1866 
1867     while (cmsg && target_cmsg) {
1868         void *data = CMSG_DATA(cmsg);
1869         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1870 
1871         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1872         int tgt_len, tgt_space;
1873 
1874         /* We never copy a half-header but may copy half-data;
1875          * this is Linux's behaviour in put_cmsg(). Note that
1876          * truncation here is a guest problem (which we report
1877          * to the guest via the CTRUNC bit), unlike truncation
1878          * in target_to_host_cmsg, which is a QEMU bug.
1879          */
1880         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1881             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1882             break;
1883         }
1884 
1885         if (cmsg->cmsg_level == SOL_SOCKET) {
1886             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1887         } else {
1888             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1889         }
1890         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1891 
1892         /* Payload types which need a different size of payload on
1893          * the target must adjust tgt_len here.
1894          */
1895         tgt_len = len;
1896         switch (cmsg->cmsg_level) {
1897         case SOL_SOCKET:
1898             switch (cmsg->cmsg_type) {
1899             case SO_TIMESTAMP:
1900                 tgt_len = sizeof(struct target_timeval);
1901                 break;
1902             default:
1903                 break;
1904             }
1905             break;
1906         default:
1907             break;
1908         }
1909 
1910         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1911             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1912             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1913         }
1914 
1915         /* We must now copy-and-convert len bytes of payload
1916          * into tgt_len bytes of destination space. Bear in mind
1917          * that in both source and destination we may be dealing
1918          * with a truncated value!
1919          */
1920         switch (cmsg->cmsg_level) {
1921         case SOL_SOCKET:
1922             switch (cmsg->cmsg_type) {
1923             case SCM_RIGHTS:
1924             {
1925                 int *fd = (int *)data;
1926                 int *target_fd = (int *)target_data;
1927                 int i, numfds = tgt_len / sizeof(int);
1928 
1929                 for (i = 0; i < numfds; i++) {
1930                     __put_user(fd[i], target_fd + i);
1931                 }
1932                 break;
1933             }
1934             case SO_TIMESTAMP:
1935             {
1936                 struct timeval *tv = (struct timeval *)data;
1937                 struct target_timeval *target_tv =
1938                     (struct target_timeval *)target_data;
1939 
1940                 if (len != sizeof(struct timeval) ||
1941                     tgt_len != sizeof(struct target_timeval)) {
1942                     goto unimplemented;
1943                 }
1944 
1945                 /* copy struct timeval to target */
1946                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1947                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1948                 break;
1949             }
1950             case SCM_CREDENTIALS:
1951             {
1952                 struct ucred *cred = (struct ucred *)data;
1953                 struct target_ucred *target_cred =
1954                     (struct target_ucred *)target_data;
1955 
1956                 __put_user(cred->pid, &target_cred->pid);
1957                 __put_user(cred->uid, &target_cred->uid);
1958                 __put_user(cred->gid, &target_cred->gid);
1959                 break;
1960             }
1961             default:
1962                 goto unimplemented;
1963             }
1964             break;
1965 
1966         case SOL_IP:
1967             switch (cmsg->cmsg_type) {
1968             case IP_TTL:
1969             {
1970                 uint32_t *v = (uint32_t *)data;
1971                 uint32_t *t_int = (uint32_t *)target_data;
1972 
1973                 if (len != sizeof(uint32_t) ||
1974                     tgt_len != sizeof(uint32_t)) {
1975                     goto unimplemented;
1976                 }
1977                 __put_user(*v, t_int);
1978                 break;
1979             }
1980             case IP_RECVERR:
1981             {
1982                 struct errhdr_t {
1983                    struct sock_extended_err ee;
1984                    struct sockaddr_in offender;
1985                 };
1986                 struct errhdr_t *errh = (struct errhdr_t *)data;
1987                 struct errhdr_t *target_errh =
1988                     (struct errhdr_t *)target_data;
1989 
1990                 if (len != sizeof(struct errhdr_t) ||
1991                     tgt_len != sizeof(struct errhdr_t)) {
1992                     goto unimplemented;
1993                 }
1994                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1995                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1996                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1997                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1998                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1999                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2000                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2001                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2002                     (void *) &errh->offender, sizeof(errh->offender));
2003                 break;
2004             }
2005             case IP_PKTINFO:
2006             {
2007                 struct in_pktinfo *pkti = data;
2008                 struct target_in_pktinfo *target_pi = target_data;
2009 
2010                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2011                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2012                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2013                 break;
2014             }
2015             default:
2016                 goto unimplemented;
2017             }
2018             break;
2019 
2020         case SOL_IPV6:
2021             switch (cmsg->cmsg_type) {
2022             case IPV6_HOPLIMIT:
2023             {
2024                 uint32_t *v = (uint32_t *)data;
2025                 uint32_t *t_int = (uint32_t *)target_data;
2026 
2027                 if (len != sizeof(uint32_t) ||
2028                     tgt_len != sizeof(uint32_t)) {
2029                     goto unimplemented;
2030                 }
2031                 __put_user(*v, t_int);
2032                 break;
2033             }
2034             case IPV6_RECVERR:
2035             {
2036                 struct errhdr6_t {
2037                    struct sock_extended_err ee;
2038                    struct sockaddr_in6 offender;
2039                 };
2040                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2041                 struct errhdr6_t *target_errh =
2042                     (struct errhdr6_t *)target_data;
2043 
2044                 if (len != sizeof(struct errhdr6_t) ||
2045                     tgt_len != sizeof(struct errhdr6_t)) {
2046                     goto unimplemented;
2047                 }
2048                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2049                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2050                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2051                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2052                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2053                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2054                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2055                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2056                     (void *) &errh->offender, sizeof(errh->offender));
2057                 break;
2058             }
2059             default:
2060                 goto unimplemented;
2061             }
2062             break;
2063 
2064         default:
2065         unimplemented:
2066             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2067                           cmsg->cmsg_level, cmsg->cmsg_type);
2068             memcpy(target_data, data, MIN(len, tgt_len));
2069             if (tgt_len > len) {
2070                 memset(target_data + len, 0, tgt_len - len);
2071             }
2072         }
2073 
2074         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2075         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2076         if (msg_controllen < tgt_space) {
2077             tgt_space = msg_controllen;
2078         }
2079         msg_controllen -= tgt_space;
2080         space += tgt_space;
2081         cmsg = CMSG_NXTHDR(msgh, cmsg);
2082         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2083                                          target_cmsg_start);
2084     }
2085     unlock_user(target_cmsg, target_cmsg_addr, space);
2086  the_end:
2087     target_msgh->msg_controllen = tswapal(space);
2088     return 0;
2089 }
2090 
2091 /* do_setsockopt() must return target values and target errnos. */
2092 static abi_long do_setsockopt(int sockfd, int level, int optname,
2093                               abi_ulong optval_addr, socklen_t optlen)
2094 {
2095     abi_long ret;
2096     int val;
2097 
2098     switch(level) {
2099     case SOL_TCP:
2100     case SOL_UDP:
2101         /* TCP and UDP options all take an 'int' value.  */
2102         if (optlen < sizeof(uint32_t))
2103             return -TARGET_EINVAL;
2104 
2105         if (get_user_u32(val, optval_addr))
2106             return -TARGET_EFAULT;
2107         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2108         break;
2109     case SOL_IP:
2110         switch(optname) {
2111         case IP_TOS:
2112         case IP_TTL:
2113         case IP_HDRINCL:
2114         case IP_ROUTER_ALERT:
2115         case IP_RECVOPTS:
2116         case IP_RETOPTS:
2117         case IP_PKTINFO:
2118         case IP_MTU_DISCOVER:
2119         case IP_RECVERR:
2120         case IP_RECVTTL:
2121         case IP_RECVTOS:
2122 #ifdef IP_FREEBIND
2123         case IP_FREEBIND:
2124 #endif
2125         case IP_MULTICAST_TTL:
2126         case IP_MULTICAST_LOOP:
2127             val = 0;
2128             if (optlen >= sizeof(uint32_t)) {
2129                 if (get_user_u32(val, optval_addr))
2130                     return -TARGET_EFAULT;
2131             } else if (optlen >= 1) {
2132                 if (get_user_u8(val, optval_addr))
2133                     return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2136             break;
2137         case IP_MULTICAST_IF:
2138         case IP_ADD_MEMBERSHIP:
2139         case IP_DROP_MEMBERSHIP:
2140         {
2141             struct ip_mreqn ip_mreq;
2142             struct target_ip_mreqn *target_smreqn;
2143             int min_size;
2144 
2145             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2146                               sizeof(struct target_ip_mreq));
2147 
2148             if (optname == IP_MULTICAST_IF) {
2149                 min_size = sizeof(struct in_addr);
2150             } else {
2151                 min_size = sizeof(struct target_ip_mreq);
2152             }
2153             if (optlen < min_size ||
2154                 optlen > sizeof (struct target_ip_mreqn)) {
2155                 return -TARGET_EINVAL;
2156             }
2157 
2158             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2159             if (!target_smreqn) {
2160                 return -TARGET_EFAULT;
2161             }
2162             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2163             if (optlen >= sizeof(struct target_ip_mreq)) {
2164                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2165                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2166                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2167                     optlen = sizeof(struct ip_mreqn);
2168                 }
2169             }
2170             unlock_user(target_smreqn, optval_addr, 0);
2171             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2172             break;
2173         }
2174         case IP_BLOCK_SOURCE:
2175         case IP_UNBLOCK_SOURCE:
2176         case IP_ADD_SOURCE_MEMBERSHIP:
2177         case IP_DROP_SOURCE_MEMBERSHIP:
2178         {
2179             struct ip_mreq_source *ip_mreq_source;
2180 
2181             if (optlen != sizeof (struct target_ip_mreq_source))
2182                 return -TARGET_EINVAL;
2183 
2184             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2185             if (!ip_mreq_source) {
2186                 return -TARGET_EFAULT;
2187             }
2188             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2189             unlock_user (ip_mreq_source, optval_addr, 0);
2190             break;
2191         }
2192         default:
2193             goto unimplemented;
2194         }
2195         break;
2196     case SOL_IPV6:
2197         switch (optname) {
2198         case IPV6_MTU_DISCOVER:
2199         case IPV6_MTU:
2200         case IPV6_V6ONLY:
2201         case IPV6_RECVPKTINFO:
2202         case IPV6_UNICAST_HOPS:
2203         case IPV6_MULTICAST_HOPS:
2204         case IPV6_MULTICAST_LOOP:
2205         case IPV6_RECVERR:
2206         case IPV6_RECVHOPLIMIT:
2207         case IPV6_2292HOPLIMIT:
2208         case IPV6_CHECKSUM:
2209         case IPV6_ADDRFORM:
2210         case IPV6_2292PKTINFO:
2211         case IPV6_RECVTCLASS:
2212         case IPV6_RECVRTHDR:
2213         case IPV6_2292RTHDR:
2214         case IPV6_RECVHOPOPTS:
2215         case IPV6_2292HOPOPTS:
2216         case IPV6_RECVDSTOPTS:
2217         case IPV6_2292DSTOPTS:
2218         case IPV6_TCLASS:
2219         case IPV6_ADDR_PREFERENCES:
2220 #ifdef IPV6_RECVPATHMTU
2221         case IPV6_RECVPATHMTU:
2222 #endif
2223 #ifdef IPV6_TRANSPARENT
2224         case IPV6_TRANSPARENT:
2225 #endif
2226 #ifdef IPV6_FREEBIND
2227         case IPV6_FREEBIND:
2228 #endif
2229 #ifdef IPV6_RECVORIGDSTADDR
2230         case IPV6_RECVORIGDSTADDR:
2231 #endif
2232             val = 0;
2233             if (optlen < sizeof(uint32_t)) {
2234                 return -TARGET_EINVAL;
2235             }
2236             if (get_user_u32(val, optval_addr)) {
2237                 return -TARGET_EFAULT;
2238             }
2239             ret = get_errno(setsockopt(sockfd, level, optname,
2240                                        &val, sizeof(val)));
2241             break;
2242         case IPV6_PKTINFO:
2243         {
2244             struct in6_pktinfo pki;
2245 
2246             if (optlen < sizeof(pki)) {
2247                 return -TARGET_EINVAL;
2248             }
2249 
2250             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2251                 return -TARGET_EFAULT;
2252             }
2253 
2254             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2255 
2256             ret = get_errno(setsockopt(sockfd, level, optname,
2257                                        &pki, sizeof(pki)));
2258             break;
2259         }
2260         case IPV6_ADD_MEMBERSHIP:
2261         case IPV6_DROP_MEMBERSHIP:
2262         {
2263             struct ipv6_mreq ipv6mreq;
2264 
2265             if (optlen < sizeof(ipv6mreq)) {
2266                 return -TARGET_EINVAL;
2267             }
2268 
2269             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2270                 return -TARGET_EFAULT;
2271             }
2272 
2273             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2274 
2275             ret = get_errno(setsockopt(sockfd, level, optname,
2276                                        &ipv6mreq, sizeof(ipv6mreq)));
2277             break;
2278         }
2279         default:
2280             goto unimplemented;
2281         }
2282         break;
2283     case SOL_ICMPV6:
2284         switch (optname) {
2285         case ICMPV6_FILTER:
2286         {
2287             struct icmp6_filter icmp6f;
2288 
2289             if (optlen > sizeof(icmp6f)) {
2290                 optlen = sizeof(icmp6f);
2291             }
2292 
2293             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2294                 return -TARGET_EFAULT;
2295             }
2296 
2297             for (val = 0; val < 8; val++) {
2298                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2299             }
2300 
2301             ret = get_errno(setsockopt(sockfd, level, optname,
2302                                        &icmp6f, optlen));
2303             break;
2304         }
2305         default:
2306             goto unimplemented;
2307         }
2308         break;
2309     case SOL_RAW:
2310         switch (optname) {
2311         case ICMP_FILTER:
2312         case IPV6_CHECKSUM:
2313             /* These take a u32 value */
2314             if (optlen < sizeof(uint32_t)) {
2315                 return -TARGET_EINVAL;
2316             }
2317 
2318             if (get_user_u32(val, optval_addr)) {
2319                 return -TARGET_EFAULT;
2320             }
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        &val, sizeof(val)));
2323             break;
2324 
2325         default:
2326             goto unimplemented;
2327         }
2328         break;
2329 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2330     case SOL_ALG:
2331         switch (optname) {
2332         case ALG_SET_KEY:
2333         {
2334             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2335             if (!alg_key) {
2336                 return -TARGET_EFAULT;
2337             }
2338             ret = get_errno(setsockopt(sockfd, level, optname,
2339                                        alg_key, optlen));
2340             unlock_user(alg_key, optval_addr, optlen);
2341             break;
2342         }
2343         case ALG_SET_AEAD_AUTHSIZE:
2344         {
2345             ret = get_errno(setsockopt(sockfd, level, optname,
2346                                        NULL, optlen));
2347             break;
2348         }
2349         default:
2350             goto unimplemented;
2351         }
2352         break;
2353 #endif
2354     case TARGET_SOL_SOCKET:
2355         switch (optname) {
2356         case TARGET_SO_RCVTIMEO:
2357         case TARGET_SO_SNDTIMEO:
2358         {
2359                 struct timeval tv;
2360 
2361                 if (optlen != sizeof(struct target_timeval)) {
2362                     return -TARGET_EINVAL;
2363                 }
2364 
2365                 if (copy_from_user_timeval(&tv, optval_addr)) {
2366                     return -TARGET_EFAULT;
2367                 }
2368 
2369                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2370                                 optname == TARGET_SO_RCVTIMEO ?
2371                                     SO_RCVTIMEO : SO_SNDTIMEO,
2372                                 &tv, sizeof(tv)));
2373                 return ret;
2374         }
2375         case TARGET_SO_ATTACH_FILTER:
2376         {
2377                 struct target_sock_fprog *tfprog;
2378                 struct target_sock_filter *tfilter;
2379                 struct sock_fprog fprog;
2380                 struct sock_filter *filter;
2381                 int i;
2382 
2383                 if (optlen != sizeof(*tfprog)) {
2384                     return -TARGET_EINVAL;
2385                 }
2386                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2387                     return -TARGET_EFAULT;
2388                 }
2389                 if (!lock_user_struct(VERIFY_READ, tfilter,
2390                                       tswapal(tfprog->filter), 0)) {
2391                     unlock_user_struct(tfprog, optval_addr, 1);
2392                     return -TARGET_EFAULT;
2393                 }
2394 
2395                 fprog.len = tswap16(tfprog->len);
2396                 filter = g_try_new(struct sock_filter, fprog.len);
2397                 if (filter == NULL) {
2398                     unlock_user_struct(tfilter, tfprog->filter, 1);
2399                     unlock_user_struct(tfprog, optval_addr, 1);
2400                     return -TARGET_ENOMEM;
2401                 }
2402                 for (i = 0; i < fprog.len; i++) {
2403                     filter[i].code = tswap16(tfilter[i].code);
2404                     filter[i].jt = tfilter[i].jt;
2405                     filter[i].jf = tfilter[i].jf;
2406                     filter[i].k = tswap32(tfilter[i].k);
2407                 }
2408                 fprog.filter = filter;
2409 
2410                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2411                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2412                 g_free(filter);
2413 
2414                 unlock_user_struct(tfilter, tfprog->filter, 1);
2415                 unlock_user_struct(tfprog, optval_addr, 1);
2416                 return ret;
2417         }
2418         case TARGET_SO_BINDTODEVICE:
2419         {
2420                 char *dev_ifname, *addr_ifname;
2421 
2422                 if (optlen > IFNAMSIZ - 1) {
2423                     optlen = IFNAMSIZ - 1;
2424                 }
2425                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2426                 if (!dev_ifname) {
2427                     return -TARGET_EFAULT;
2428                 }
2429                 optname = SO_BINDTODEVICE;
2430                 addr_ifname = alloca(IFNAMSIZ);
2431                 memcpy(addr_ifname, dev_ifname, optlen);
2432                 addr_ifname[optlen] = 0;
2433                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2434                                            addr_ifname, optlen));
2435                 unlock_user(dev_ifname, optval_addr, 0);
2436                 return ret;
2437         }
2438         case TARGET_SO_LINGER:
2439         {
2440                 struct linger lg;
2441                 struct target_linger *tlg;
2442 
2443                 if (optlen != sizeof(struct target_linger)) {
2444                     return -TARGET_EINVAL;
2445                 }
2446                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2447                     return -TARGET_EFAULT;
2448                 }
2449                 __get_user(lg.l_onoff, &tlg->l_onoff);
2450                 __get_user(lg.l_linger, &tlg->l_linger);
2451                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2452                                 &lg, sizeof(lg)));
2453                 unlock_user_struct(tlg, optval_addr, 0);
2454                 return ret;
2455         }
2456         /* Options with 'int' argument.  */
2457         case TARGET_SO_DEBUG:
2458                 optname = SO_DEBUG;
2459                 break;
2460         case TARGET_SO_REUSEADDR:
2461                 optname = SO_REUSEADDR;
2462                 break;
2463 #ifdef SO_REUSEPORT
2464         case TARGET_SO_REUSEPORT:
2465                 optname = SO_REUSEPORT;
2466                 break;
2467 #endif
2468         case TARGET_SO_TYPE:
2469                 optname = SO_TYPE;
2470                 break;
2471         case TARGET_SO_ERROR:
2472                 optname = SO_ERROR;
2473                 break;
2474         case TARGET_SO_DONTROUTE:
2475                 optname = SO_DONTROUTE;
2476                 break;
2477         case TARGET_SO_BROADCAST:
2478                 optname = SO_BROADCAST;
2479                 break;
2480         case TARGET_SO_SNDBUF:
2481                 optname = SO_SNDBUF;
2482                 break;
2483         case TARGET_SO_SNDBUFFORCE:
2484                 optname = SO_SNDBUFFORCE;
2485                 break;
2486         case TARGET_SO_RCVBUF:
2487                 optname = SO_RCVBUF;
2488                 break;
2489         case TARGET_SO_RCVBUFFORCE:
2490                 optname = SO_RCVBUFFORCE;
2491                 break;
2492         case TARGET_SO_KEEPALIVE:
2493                 optname = SO_KEEPALIVE;
2494                 break;
2495         case TARGET_SO_OOBINLINE:
2496                 optname = SO_OOBINLINE;
2497                 break;
2498         case TARGET_SO_NO_CHECK:
2499                 optname = SO_NO_CHECK;
2500                 break;
2501         case TARGET_SO_PRIORITY:
2502                 optname = SO_PRIORITY;
2503                 break;
2504 #ifdef SO_BSDCOMPAT
2505         case TARGET_SO_BSDCOMPAT:
2506                 optname = SO_BSDCOMPAT;
2507                 break;
2508 #endif
2509         case TARGET_SO_PASSCRED:
2510                 optname = SO_PASSCRED;
2511                 break;
2512         case TARGET_SO_PASSSEC:
2513                 optname = SO_PASSSEC;
2514                 break;
2515         case TARGET_SO_TIMESTAMP:
2516                 optname = SO_TIMESTAMP;
2517                 break;
2518         case TARGET_SO_RCVLOWAT:
2519                 optname = SO_RCVLOWAT;
2520                 break;
2521         default:
2522             goto unimplemented;
2523         }
2524         if (optlen < sizeof(uint32_t))
2525             return -TARGET_EINVAL;
2526 
2527         if (get_user_u32(val, optval_addr))
2528             return -TARGET_EFAULT;
2529         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2530         break;
2531 #ifdef SOL_NETLINK
2532     case SOL_NETLINK:
2533         switch (optname) {
2534         case NETLINK_PKTINFO:
2535         case NETLINK_ADD_MEMBERSHIP:
2536         case NETLINK_DROP_MEMBERSHIP:
2537         case NETLINK_BROADCAST_ERROR:
2538         case NETLINK_NO_ENOBUFS:
2539 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2540         case NETLINK_LISTEN_ALL_NSID:
2541         case NETLINK_CAP_ACK:
2542 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2543 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2544         case NETLINK_EXT_ACK:
2545 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2546 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2547         case NETLINK_GET_STRICT_CHK:
2548 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2549             break;
2550         default:
2551             goto unimplemented;
2552         }
2553         val = 0;
2554         if (optlen < sizeof(uint32_t)) {
2555             return -TARGET_EINVAL;
2556         }
2557         if (get_user_u32(val, optval_addr)) {
2558             return -TARGET_EFAULT;
2559         }
2560         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2561                                    sizeof(val)));
2562         break;
2563 #endif /* SOL_NETLINK */
2564     default:
2565     unimplemented:
2566         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2567                       level, optname);
2568         ret = -TARGET_ENOPROTOOPT;
2569     }
2570     return ret;
2571 }
2572 
2573 /* do_getsockopt() must return target values and target errnos. */
2574 static abi_long do_getsockopt(int sockfd, int level, int optname,
2575                               abi_ulong optval_addr, abi_ulong optlen)
2576 {
2577     abi_long ret;
2578     int len, val;
2579     socklen_t lv;
2580 
2581     switch(level) {
2582     case TARGET_SOL_SOCKET:
2583         level = SOL_SOCKET;
2584         switch (optname) {
2585         /* These don't just return a single integer */
2586         case TARGET_SO_PEERNAME:
2587             goto unimplemented;
2588         case TARGET_SO_RCVTIMEO: {
2589             struct timeval tv;
2590             socklen_t tvlen;
2591 
2592             optname = SO_RCVTIMEO;
2593 
2594 get_timeout:
2595             if (get_user_u32(len, optlen)) {
2596                 return -TARGET_EFAULT;
2597             }
2598             if (len < 0) {
2599                 return -TARGET_EINVAL;
2600             }
2601 
2602             tvlen = sizeof(tv);
2603             ret = get_errno(getsockopt(sockfd, level, optname,
2604                                        &tv, &tvlen));
2605             if (ret < 0) {
2606                 return ret;
2607             }
2608             if (len > sizeof(struct target_timeval)) {
2609                 len = sizeof(struct target_timeval);
2610             }
2611             if (copy_to_user_timeval(optval_addr, &tv)) {
2612                 return -TARGET_EFAULT;
2613             }
2614             if (put_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             break;
2618         }
2619         case TARGET_SO_SNDTIMEO:
2620             optname = SO_SNDTIMEO;
2621             goto get_timeout;
2622         case TARGET_SO_PEERCRED: {
2623             struct ucred cr;
2624             socklen_t crlen;
2625             struct target_ucred *tcr;
2626 
2627             if (get_user_u32(len, optlen)) {
2628                 return -TARGET_EFAULT;
2629             }
2630             if (len < 0) {
2631                 return -TARGET_EINVAL;
2632             }
2633 
2634             crlen = sizeof(cr);
2635             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2636                                        &cr, &crlen));
2637             if (ret < 0) {
2638                 return ret;
2639             }
2640             if (len > crlen) {
2641                 len = crlen;
2642             }
2643             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2644                 return -TARGET_EFAULT;
2645             }
2646             __put_user(cr.pid, &tcr->pid);
2647             __put_user(cr.uid, &tcr->uid);
2648             __put_user(cr.gid, &tcr->gid);
2649             unlock_user_struct(tcr, optval_addr, 1);
2650             if (put_user_u32(len, optlen)) {
2651                 return -TARGET_EFAULT;
2652             }
2653             break;
2654         }
2655         case TARGET_SO_PEERSEC: {
2656             char *name;
2657 
2658             if (get_user_u32(len, optlen)) {
2659                 return -TARGET_EFAULT;
2660             }
2661             if (len < 0) {
2662                 return -TARGET_EINVAL;
2663             }
2664             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2665             if (!name) {
2666                 return -TARGET_EFAULT;
2667             }
2668             lv = len;
2669             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2670                                        name, &lv));
2671             if (put_user_u32(lv, optlen)) {
2672                 ret = -TARGET_EFAULT;
2673             }
2674             unlock_user(name, optval_addr, lv);
2675             break;
2676         }
2677         case TARGET_SO_LINGER:
2678         {
2679             struct linger lg;
2680             socklen_t lglen;
2681             struct target_linger *tlg;
2682 
2683             if (get_user_u32(len, optlen)) {
2684                 return -TARGET_EFAULT;
2685             }
2686             if (len < 0) {
2687                 return -TARGET_EINVAL;
2688             }
2689 
2690             lglen = sizeof(lg);
2691             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2692                                        &lg, &lglen));
2693             if (ret < 0) {
2694                 return ret;
2695             }
2696             if (len > lglen) {
2697                 len = lglen;
2698             }
2699             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2700                 return -TARGET_EFAULT;
2701             }
2702             __put_user(lg.l_onoff, &tlg->l_onoff);
2703             __put_user(lg.l_linger, &tlg->l_linger);
2704             unlock_user_struct(tlg, optval_addr, 1);
2705             if (put_user_u32(len, optlen)) {
2706                 return -TARGET_EFAULT;
2707             }
2708             break;
2709         }
2710         /* Options with 'int' argument.  */
2711         case TARGET_SO_DEBUG:
2712             optname = SO_DEBUG;
2713             goto int_case;
2714         case TARGET_SO_REUSEADDR:
2715             optname = SO_REUSEADDR;
2716             goto int_case;
2717 #ifdef SO_REUSEPORT
2718         case TARGET_SO_REUSEPORT:
2719             optname = SO_REUSEPORT;
2720             goto int_case;
2721 #endif
2722         case TARGET_SO_TYPE:
2723             optname = SO_TYPE;
2724             goto int_case;
2725         case TARGET_SO_ERROR:
2726             optname = SO_ERROR;
2727             goto int_case;
2728         case TARGET_SO_DONTROUTE:
2729             optname = SO_DONTROUTE;
2730             goto int_case;
2731         case TARGET_SO_BROADCAST:
2732             optname = SO_BROADCAST;
2733             goto int_case;
2734         case TARGET_SO_SNDBUF:
2735             optname = SO_SNDBUF;
2736             goto int_case;
2737         case TARGET_SO_RCVBUF:
2738             optname = SO_RCVBUF;
2739             goto int_case;
2740         case TARGET_SO_KEEPALIVE:
2741             optname = SO_KEEPALIVE;
2742             goto int_case;
2743         case TARGET_SO_OOBINLINE:
2744             optname = SO_OOBINLINE;
2745             goto int_case;
2746         case TARGET_SO_NO_CHECK:
2747             optname = SO_NO_CHECK;
2748             goto int_case;
2749         case TARGET_SO_PRIORITY:
2750             optname = SO_PRIORITY;
2751             goto int_case;
2752 #ifdef SO_BSDCOMPAT
2753         case TARGET_SO_BSDCOMPAT:
2754             optname = SO_BSDCOMPAT;
2755             goto int_case;
2756 #endif
2757         case TARGET_SO_PASSCRED:
2758             optname = SO_PASSCRED;
2759             goto int_case;
2760         case TARGET_SO_TIMESTAMP:
2761             optname = SO_TIMESTAMP;
2762             goto int_case;
2763         case TARGET_SO_RCVLOWAT:
2764             optname = SO_RCVLOWAT;
2765             goto int_case;
2766         case TARGET_SO_ACCEPTCONN:
2767             optname = SO_ACCEPTCONN;
2768             goto int_case;
2769         case TARGET_SO_PROTOCOL:
2770             optname = SO_PROTOCOL;
2771             goto int_case;
2772         case TARGET_SO_DOMAIN:
2773             optname = SO_DOMAIN;
2774             goto int_case;
2775         default:
2776             goto int_case;
2777         }
2778         break;
2779     case SOL_TCP:
2780     case SOL_UDP:
2781         /* TCP and UDP options all take an 'int' value.  */
2782     int_case:
2783         if (get_user_u32(len, optlen))
2784             return -TARGET_EFAULT;
2785         if (len < 0)
2786             return -TARGET_EINVAL;
2787         lv = sizeof(lv);
2788         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2789         if (ret < 0)
2790             return ret;
2791         switch (optname) {
2792         case SO_TYPE:
2793             val = host_to_target_sock_type(val);
2794             break;
2795         case SO_ERROR:
2796             val = host_to_target_errno(val);
2797             break;
2798         }
2799         if (len > lv)
2800             len = lv;
2801         if (len == 4) {
2802             if (put_user_u32(val, optval_addr))
2803                 return -TARGET_EFAULT;
2804         } else {
2805             if (put_user_u8(val, optval_addr))
2806                 return -TARGET_EFAULT;
2807         }
2808         if (put_user_u32(len, optlen))
2809             return -TARGET_EFAULT;
2810         break;
2811     case SOL_IP:
2812         switch(optname) {
2813         case IP_TOS:
2814         case IP_TTL:
2815         case IP_HDRINCL:
2816         case IP_ROUTER_ALERT:
2817         case IP_RECVOPTS:
2818         case IP_RETOPTS:
2819         case IP_PKTINFO:
2820         case IP_MTU_DISCOVER:
2821         case IP_RECVERR:
2822         case IP_RECVTOS:
2823 #ifdef IP_FREEBIND
2824         case IP_FREEBIND:
2825 #endif
2826         case IP_MULTICAST_TTL:
2827         case IP_MULTICAST_LOOP:
2828             if (get_user_u32(len, optlen))
2829                 return -TARGET_EFAULT;
2830             if (len < 0)
2831                 return -TARGET_EINVAL;
2832             lv = sizeof(lv);
2833             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2834             if (ret < 0)
2835                 return ret;
2836             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2837                 len = 1;
2838                 if (put_user_u32(len, optlen)
2839                     || put_user_u8(val, optval_addr))
2840                     return -TARGET_EFAULT;
2841             } else {
2842                 if (len > sizeof(int))
2843                     len = sizeof(int);
2844                 if (put_user_u32(len, optlen)
2845                     || put_user_u32(val, optval_addr))
2846                     return -TARGET_EFAULT;
2847             }
2848             break;
2849         default:
2850             ret = -TARGET_ENOPROTOOPT;
2851             break;
2852         }
2853         break;
2854     case SOL_IPV6:
2855         switch (optname) {
2856         case IPV6_MTU_DISCOVER:
2857         case IPV6_MTU:
2858         case IPV6_V6ONLY:
2859         case IPV6_RECVPKTINFO:
2860         case IPV6_UNICAST_HOPS:
2861         case IPV6_MULTICAST_HOPS:
2862         case IPV6_MULTICAST_LOOP:
2863         case IPV6_RECVERR:
2864         case IPV6_RECVHOPLIMIT:
2865         case IPV6_2292HOPLIMIT:
2866         case IPV6_CHECKSUM:
2867         case IPV6_ADDRFORM:
2868         case IPV6_2292PKTINFO:
2869         case IPV6_RECVTCLASS:
2870         case IPV6_RECVRTHDR:
2871         case IPV6_2292RTHDR:
2872         case IPV6_RECVHOPOPTS:
2873         case IPV6_2292HOPOPTS:
2874         case IPV6_RECVDSTOPTS:
2875         case IPV6_2292DSTOPTS:
2876         case IPV6_TCLASS:
2877         case IPV6_ADDR_PREFERENCES:
2878 #ifdef IPV6_RECVPATHMTU
2879         case IPV6_RECVPATHMTU:
2880 #endif
2881 #ifdef IPV6_TRANSPARENT
2882         case IPV6_TRANSPARENT:
2883 #endif
2884 #ifdef IPV6_FREEBIND
2885         case IPV6_FREEBIND:
2886 #endif
2887 #ifdef IPV6_RECVORIGDSTADDR
2888         case IPV6_RECVORIGDSTADDR:
2889 #endif
2890             if (get_user_u32(len, optlen))
2891                 return -TARGET_EFAULT;
2892             if (len < 0)
2893                 return -TARGET_EINVAL;
2894             lv = sizeof(lv);
2895             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2896             if (ret < 0)
2897                 return ret;
2898             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2899                 len = 1;
2900                 if (put_user_u32(len, optlen)
2901                     || put_user_u8(val, optval_addr))
2902                     return -TARGET_EFAULT;
2903             } else {
2904                 if (len > sizeof(int))
2905                     len = sizeof(int);
2906                 if (put_user_u32(len, optlen)
2907                     || put_user_u32(val, optval_addr))
2908                     return -TARGET_EFAULT;
2909             }
2910             break;
2911         default:
2912             ret = -TARGET_ENOPROTOOPT;
2913             break;
2914         }
2915         break;
2916 #ifdef SOL_NETLINK
2917     case SOL_NETLINK:
2918         switch (optname) {
2919         case NETLINK_PKTINFO:
2920         case NETLINK_BROADCAST_ERROR:
2921         case NETLINK_NO_ENOBUFS:
2922 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2923         case NETLINK_LISTEN_ALL_NSID:
2924         case NETLINK_CAP_ACK:
2925 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2927         case NETLINK_EXT_ACK:
2928 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2929 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2930         case NETLINK_GET_STRICT_CHK:
2931 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2932             if (get_user_u32(len, optlen)) {
2933                 return -TARGET_EFAULT;
2934             }
2935             if (len != sizeof(val)) {
2936                 return -TARGET_EINVAL;
2937             }
2938             lv = len;
2939             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2940             if (ret < 0) {
2941                 return ret;
2942             }
2943             if (put_user_u32(lv, optlen)
2944                 || put_user_u32(val, optval_addr)) {
2945                 return -TARGET_EFAULT;
2946             }
2947             break;
2948 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2949         case NETLINK_LIST_MEMBERSHIPS:
2950         {
2951             uint32_t *results;
2952             int i;
2953             if (get_user_u32(len, optlen)) {
2954                 return -TARGET_EFAULT;
2955             }
2956             if (len < 0) {
2957                 return -TARGET_EINVAL;
2958             }
2959             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2960             if (!results && len > 0) {
2961                 return -TARGET_EFAULT;
2962             }
2963             lv = len;
2964             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2965             if (ret < 0) {
2966                 unlock_user(results, optval_addr, 0);
2967                 return ret;
2968             }
2969             /* swap host endianness to target endianness. */
2970             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2971                 results[i] = tswap32(results[i]);
2972             }
2973             if (put_user_u32(lv, optlen)) {
2974                 return -TARGET_EFAULT;
2975             }
2976             unlock_user(results, optval_addr, 0);
2977             break;
2978         }
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2980         default:
2981             goto unimplemented;
2982         }
2983         break;
2984 #endif /* SOL_NETLINK */
2985     default:
2986     unimplemented:
2987         qemu_log_mask(LOG_UNIMP,
2988                       "getsockopt level=%d optname=%d not yet supported\n",
2989                       level, optname);
2990         ret = -TARGET_EOPNOTSUPP;
2991         break;
2992     }
2993     return ret;
2994 }
2995 
2996 /* Convert target low/high pair representing file offset into the host
2997  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2998  * as the kernel doesn't handle them either.
2999  */
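/*
 * For example, a 32-bit guest passing the 64-bit offset 0x123456789
 * supplies tlow = 0x23456789 and thigh = 0x1: a 64-bit host gets
 * hlow = 0x123456789 and hhigh = 0, while a 32-bit host gets the
 * original low/high pair back.
 */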
3000 static void target_to_host_low_high(abi_ulong tlow,
3001                                     abi_ulong thigh,
3002                                     unsigned long *hlow,
3003                                     unsigned long *hhigh)
3004 {
3005     uint64_t off = tlow |
3006         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3007         TARGET_LONG_BITS / 2;
3008 
3009     *hlow = off;
3010     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3011 }
3012 
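/*
 * Lock a guest iovec array of 'count' entries and build the host
 * struct iovec array for it.  Follows the kernel's rules: EINVAL for
 * more than IOV_MAX entries or a negative length, EFAULT if the first
 * buffer is inaccessible, and zero-length entries for later bad
 * buffers so the guest sees a partial transfer.  Returns NULL with
 * errno set on error (and with errno 0 for a zero count).
 */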
3013 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3014                                 abi_ulong count, int copy)
3015 {
3016     struct target_iovec *target_vec;
3017     struct iovec *vec;
3018     abi_ulong total_len, max_len;
3019     int i;
3020     int err = 0;
3021     bool bad_address = false;
3022 
3023     if (count == 0) {
3024         errno = 0;
3025         return NULL;
3026     }
3027     if (count > IOV_MAX) {
3028         errno = EINVAL;
3029         return NULL;
3030     }
3031 
3032     vec = g_try_new0(struct iovec, count);
3033     if (vec == NULL) {
3034         errno = ENOMEM;
3035         return NULL;
3036     }
3037 
3038     target_vec = lock_user(VERIFY_READ, target_addr,
3039                            count * sizeof(struct target_iovec), 1);
3040     if (target_vec == NULL) {
3041         err = EFAULT;
3042         goto fail2;
3043     }
3044 
3045     /* ??? If host page size > target page size, this will result in a
3046        value larger than what we can actually support.  */
3047     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3048     total_len = 0;
3049 
3050     for (i = 0; i < count; i++) {
3051         abi_ulong base = tswapal(target_vec[i].iov_base);
3052         abi_long len = tswapal(target_vec[i].iov_len);
3053 
3054         if (len < 0) {
3055             err = EINVAL;
3056             goto fail;
3057         } else if (len == 0) {
3058             /* Zero length pointer is ignored.  */
3059             vec[i].iov_base = 0;
3060         } else {
3061             vec[i].iov_base = lock_user(type, base, len, copy);
3062             /* If the first buffer pointer is bad, this is a fault.  But
3063              * subsequent bad buffers will result in a partial write; this
3064              * is realized by filling the vector with null pointers and
3065              * zero lengths. */
3066             if (!vec[i].iov_base) {
3067                 if (i == 0) {
3068                     err = EFAULT;
3069                     goto fail;
3070                 } else {
3071                     bad_address = true;
3072                 }
3073             }
3074             if (bad_address) {
3075                 len = 0;
3076             }
3077             if (len > max_len - total_len) {
3078                 len = max_len - total_len;
3079             }
3080         }
3081         vec[i].iov_len = len;
3082         total_len += len;
3083     }
3084 
3085     unlock_user(target_vec, target_addr, 0);
3086     return vec;
3087 
3088  fail:
3089     while (--i >= 0) {
3090         if (tswapal(target_vec[i].iov_len) > 0) {
3091             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3092         }
3093     }
3094     unlock_user(target_vec, target_addr, 0);
3095  fail2:
3096     g_free(vec);
3097     errno = err;
3098     return NULL;
3099 }
3100 
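/*
 * Undo lock_iovec(): unlock each guest buffer, copying host data back to
 * the guest when 'copy' is non-zero, and free the host iovec array.
 */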
3101 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3102                          abi_ulong count, int copy)
3103 {
3104     struct target_iovec *target_vec;
3105     int i;
3106 
3107     target_vec = lock_user(VERIFY_READ, target_addr,
3108                            count * sizeof(struct target_iovec), 1);
3109     if (target_vec) {
3110         for (i = 0; i < count; i++) {
3111             abi_ulong base = tswapal(target_vec[i].iov_base);
3112             abi_long len = tswapal(target_vec[i].iov_len);
3113             if (len < 0) {
3114                 break;
3115             }
3116             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3117         }
3118         unlock_user(target_vec, target_addr, 0);
3119     }
3120 
3121     g_free(vec);
3122 }
3123 
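/*
 * Translate a target socket type (and the TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags OR'ed into it) into the host SOCK_* values.
 * Returns -TARGET_EINVAL if a requested flag cannot be expressed on this
 * host; TARGET_SOCK_NONBLOCK without host SOCK_NONBLOCK is tolerated when
 * O_NONBLOCK exists, because sock_flags_fixup() applies it with fcntl()
 * after the socket has been created.
 */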
3124 static inline int target_to_host_sock_type(int *type)
3125 {
3126     int host_type = 0;
3127     int target_type = *type;
3128 
3129     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3130     case TARGET_SOCK_DGRAM:
3131         host_type = SOCK_DGRAM;
3132         break;
3133     case TARGET_SOCK_STREAM:
3134         host_type = SOCK_STREAM;
3135         break;
3136     default:
3137         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3138         break;
3139     }
3140     if (target_type & TARGET_SOCK_CLOEXEC) {
3141 #if defined(SOCK_CLOEXEC)
3142         host_type |= SOCK_CLOEXEC;
3143 #else
3144         return -TARGET_EINVAL;
3145 #endif
3146     }
3147     if (target_type & TARGET_SOCK_NONBLOCK) {
3148 #if defined(SOCK_NONBLOCK)
3149         host_type |= SOCK_NONBLOCK;
3150 #elif !defined(O_NONBLOCK)
3151         return -TARGET_EINVAL;
3152 #endif
3153     }
3154     *type = host_type;
3155     return 0;
3156 }
3157 
3158 /* Try to emulate socket type flags after socket creation.  */
3159 static int sock_flags_fixup(int fd, int target_type)
3160 {
3161 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3162     if (target_type & TARGET_SOCK_NONBLOCK) {
3163         int flags = fcntl(fd, F_GETFL);
3164         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3165             close(fd);
3166             return -TARGET_EINVAL;
3167         }
3168     }
3169 #endif
3170     return fd;
3171 }
3172 
3173 /* do_socket() must return target values and target errnos. */
3174 static abi_long do_socket(int domain, int type, int protocol)
3175 {
3176     int target_type = type;
3177     int ret;
3178 
3179     ret = target_to_host_sock_type(&type);
3180     if (ret) {
3181         return ret;
3182     }
3183 
3184     if (domain == PF_NETLINK && !(
3185 #ifdef CONFIG_RTNETLINK
3186          protocol == NETLINK_ROUTE ||
3187 #endif
3188          protocol == NETLINK_KOBJECT_UEVENT ||
3189          protocol == NETLINK_AUDIT)) {
3190         return -TARGET_EPROTONOSUPPORT;
3191     }
3192 
3193     if (domain == AF_PACKET ||
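    /*
     * For packet sockets the protocol is a 16-bit ethertype (e.g. the guest
     * passes htons(ETH_P_ALL)), which is presumably why only a 16-bit
     * byteswap is applied here rather than a full-word conversion.
     */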
3194         (domain == AF_INET && type == SOCK_PACKET)) {
3195         protocol = tswap16(protocol);
3196     }
3197 
3198     ret = get_errno(socket(domain, type, protocol));
3199     if (ret >= 0) {
3200         ret = sock_flags_fixup(ret, target_type);
3201         if (type == SOCK_PACKET) {
3202             /* Handle an obsolete case: if the socket type is SOCK_PACKET,
3203              * the socket is bound by device name.
3204              */
3205             fd_trans_register(ret, &target_packet_trans);
3206         } else if (domain == PF_NETLINK) {
3207             switch (protocol) {
3208 #ifdef CONFIG_RTNETLINK
3209             case NETLINK_ROUTE:
3210                 fd_trans_register(ret, &target_netlink_route_trans);
3211                 break;
3212 #endif
3213             case NETLINK_KOBJECT_UEVENT:
3214                 /* nothing to do: messages are strings */
3215                 break;
3216             case NETLINK_AUDIT:
3217                 fd_trans_register(ret, &target_netlink_audit_trans);
3218                 break;
3219             default:
3220                 g_assert_not_reached();
3221             }
3222         }
3223     }
3224     return ret;
3225 }
3226 
3227 /* do_bind() must return target values and target errnos. */
3228 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3229                         socklen_t addrlen)
3230 {
3231     void *addr;
3232     abi_long ret;
3233 
3234     if ((int)addrlen < 0) {
3235         return -TARGET_EINVAL;
3236     }
3237 
3238     addr = alloca(addrlen+1);
3239 
3240     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3241     if (ret)
3242         return ret;
3243 
3244     return get_errno(bind(sockfd, addr, addrlen));
3245 }
3246 
3247 /* do_connect() must return target values and target errnos. */
3248 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3249                            socklen_t addrlen)
3250 {
3251     void *addr;
3252     abi_long ret;
3253 
3254     if ((int)addrlen < 0) {
3255         return -TARGET_EINVAL;
3256     }
3257 
3258     addr = alloca(addrlen+1);
3259 
3260     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3261     if (ret)
3262         return ret;
3263 
3264     return get_errno(safe_connect(sockfd, addr, addrlen));
3265 }
3266 
3267 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3268 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3269                                       int flags, int send)
3270 {
3271     abi_long ret, len;
3272     struct msghdr msg;
3273     abi_ulong count;
3274     struct iovec *vec;
3275     abi_ulong target_vec;
3276 
3277     if (msgp->msg_name) {
3278         msg.msg_namelen = tswap32(msgp->msg_namelen);
3279         msg.msg_name = alloca(msg.msg_namelen+1);
3280         ret = target_to_host_sockaddr(fd, msg.msg_name,
3281                                       tswapal(msgp->msg_name),
3282                                       msg.msg_namelen);
3283         if (ret == -TARGET_EFAULT) {
3284             /* For connected sockets msg_name and msg_namelen must
3285              * be ignored, so returning EFAULT immediately is wrong.
3286              * Instead, pass a bad msg_name to the host kernel, and
3287              * let it decide whether to return EFAULT or not.
3288              */
3289             msg.msg_name = (void *)-1;
3290         } else if (ret) {
3291             goto out2;
3292         }
3293     } else {
3294         msg.msg_name = NULL;
3295         msg.msg_namelen = 0;
3296     }
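    /*
     * The host control buffer is made twice as large as the target's
     * msg_controllen, presumably to leave headroom for control messages that
     * grow when target_to_host_cmsg()/host_to_target_cmsg() convert them
     * between the two ABIs.
     */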
3297     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3298     msg.msg_control = alloca(msg.msg_controllen);
3299     memset(msg.msg_control, 0, msg.msg_controllen);
3300 
3301     msg.msg_flags = tswap32(msgp->msg_flags);
3302 
3303     count = tswapal(msgp->msg_iovlen);
3304     target_vec = tswapal(msgp->msg_iov);
3305 
3306     if (count > IOV_MAX) {
3307         /* sendmsg/recvmsg return a different errno for this condition than
3308          * readv/writev do, so we must catch it here before lock_iovec() does.
3309          */
3310         ret = -TARGET_EMSGSIZE;
3311         goto out2;
3312     }
3313 
3314     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3315                      target_vec, count, send);
3316     if (vec == NULL) {
3317         ret = -host_to_target_errno(errno);
3318         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3319         if (!send || ret) {
3320             goto out2;
3321         }
3322     }
3323     msg.msg_iovlen = count;
3324     msg.msg_iov = vec;
3325 
3326     if (send) {
3327         if (fd_trans_target_to_host_data(fd)) {
3328             void *host_msg;
3329 
3330             host_msg = g_malloc(msg.msg_iov->iov_len);
3331             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3332             ret = fd_trans_target_to_host_data(fd)(host_msg,
3333                                                    msg.msg_iov->iov_len);
3334             if (ret >= 0) {
3335                 msg.msg_iov->iov_base = host_msg;
3336                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3337             }
3338             g_free(host_msg);
3339         } else {
3340             ret = target_to_host_cmsg(&msg, msgp);
3341             if (ret == 0) {
3342                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3343             }
3344         }
3345     } else {
3346         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3347         if (!is_error(ret)) {
3348             len = ret;
3349             if (fd_trans_host_to_target_data(fd)) {
3350                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3351                                                MIN(msg.msg_iov->iov_len, len));
3352             }
3353             if (!is_error(ret)) {
3354                 ret = host_to_target_cmsg(msgp, &msg);
3355             }
3356             if (!is_error(ret)) {
3357                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3358                 msgp->msg_flags = tswap32(msg.msg_flags);
3359                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3360                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3361                                     msg.msg_name, msg.msg_namelen);
3362                     if (ret) {
3363                         goto out;
3364                     }
3365                 }
3366 
3367                 ret = len;
3368             }
3369         }
3370     }
3371 
3372 out:
3373     if (vec) {
3374         unlock_iovec(vec, target_vec, count, !send);
3375     }
3376 out2:
3377     return ret;
3378 }
3379 
3380 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3381                                int flags, int send)
3382 {
3383     abi_long ret;
3384     struct target_msghdr *msgp;
3385 
3386     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3387                           msgp,
3388                           target_msg,
3389                           send ? 1 : 0)) {
3390         return -TARGET_EFAULT;
3391     }
3392     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3393     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3394     return ret;
3395 }
3396 
3397 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3398  * so it might not have this *mmsg-specific flag either.
3399  */
3400 #ifndef MSG_WAITFORONE
3401 #define MSG_WAITFORONE 0x10000
3402 #endif
3403 
3404 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3405                                 unsigned int vlen, unsigned int flags,
3406                                 int send)
3407 {
3408     struct target_mmsghdr *mmsgp;
3409     abi_long ret = 0;
3410     int i;
3411 
3412     if (vlen > UIO_MAXIOV) {
3413         vlen = UIO_MAXIOV;
3414     }
3415 
3416     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3417     if (!mmsgp) {
3418         return -TARGET_EFAULT;
3419     }
3420 
3421     for (i = 0; i < vlen; i++) {
3422         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3423         if (is_error(ret)) {
3424             break;
3425         }
3426         mmsgp[i].msg_len = tswap32(ret);
3427         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3428         if (flags & MSG_WAITFORONE) {
3429             flags |= MSG_DONTWAIT;
3430         }
3431     }
3432 
3433     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3434 
3435     /* Return the number of datagrams sent or received if we handled any
3436      * at all; otherwise return the error.
3437      */
3438     if (i) {
3439         return i;
3440     }
3441     return ret;
3442 }
3443 
3444 /* do_accept4() must return target values and target errnos. */
3445 static abi_long do_accept4(int fd, abi_ulong target_addr,
3446                            abi_ulong target_addrlen_addr, int flags)
3447 {
3448     socklen_t addrlen, ret_addrlen;
3449     void *addr;
3450     abi_long ret;
3451     int host_flags;
3452 
3453     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3454         return -TARGET_EINVAL;
3455     }
3456 
3457     host_flags = 0;
3458     if (flags & TARGET_SOCK_NONBLOCK) {
3459         host_flags |= SOCK_NONBLOCK;
3460     }
3461     if (flags & TARGET_SOCK_CLOEXEC) {
3462         host_flags |= SOCK_CLOEXEC;
3463     }
3464 
3465     if (target_addr == 0) {
3466         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3467     }
3468 
3469     /* Linux returns EFAULT if the addrlen pointer is invalid */
3470     if (get_user_u32(addrlen, target_addrlen_addr))
3471         return -TARGET_EFAULT;
3472 
3473     if ((int)addrlen < 0) {
3474         return -TARGET_EINVAL;
3475     }
3476 
3477     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3478         return -TARGET_EFAULT;
3479     }
3480 
3481     addr = alloca(addrlen);
3482 
3483     ret_addrlen = addrlen;
3484     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3485     if (!is_error(ret)) {
3486         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3487         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3488             ret = -TARGET_EFAULT;
3489         }
3490     }
3491     return ret;
3492 }
3493 
3494 /* do_getpeername() must return target values and target errnos. */
3495 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3496                                abi_ulong target_addrlen_addr)
3497 {
3498     socklen_t addrlen, ret_addrlen;
3499     void *addr;
3500     abi_long ret;
3501 
3502     if (get_user_u32(addrlen, target_addrlen_addr))
3503         return -TARGET_EFAULT;
3504 
3505     if ((int)addrlen < 0) {
3506         return -TARGET_EINVAL;
3507     }
3508 
3509     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3510         return -TARGET_EFAULT;
3511     }
3512 
3513     addr = alloca(addrlen);
3514 
3515     ret_addrlen = addrlen;
3516     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3517     if (!is_error(ret)) {
3518         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3519         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3520             ret = -TARGET_EFAULT;
3521         }
3522     }
3523     return ret;
3524 }
3525 
3526 /* do_getsockname() must return target values and target errnos. */
3527 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3528                                abi_ulong target_addrlen_addr)
3529 {
3530     socklen_t addrlen, ret_addrlen;
3531     void *addr;
3532     abi_long ret;
3533 
3534     if (get_user_u32(addrlen, target_addrlen_addr))
3535         return -TARGET_EFAULT;
3536 
3537     if ((int)addrlen < 0) {
3538         return -TARGET_EINVAL;
3539     }
3540 
3541     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3542         return -TARGET_EFAULT;
3543     }
3544 
3545     addr = alloca(addrlen);
3546 
3547     ret_addrlen = addrlen;
3548     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3549     if (!is_error(ret)) {
3550         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3551         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3552             ret = -TARGET_EFAULT;
3553         }
3554     }
3555     return ret;
3556 }
3557 
3558 /* do_socketpair() must return target values and target errnos. */
3559 static abi_long do_socketpair(int domain, int type, int protocol,
3560                               abi_ulong target_tab_addr)
3561 {
3562     int tab[2];
3563     abi_long ret;
3564 
3565     target_to_host_sock_type(&type);
3566 
3567     ret = get_errno(socketpair(domain, type, protocol, tab));
3568     if (!is_error(ret)) {
3569         if (put_user_s32(tab[0], target_tab_addr)
3570             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3571             ret = -TARGET_EFAULT;
3572     }
3573     return ret;
3574 }
3575 
3576 /* do_sendto() must return target values and target errnos. */
3577 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3578                           abi_ulong target_addr, socklen_t addrlen)
3579 {
3580     void *addr;
3581     void *host_msg;
3582     void *copy_msg = NULL;
3583     abi_long ret;
3584 
3585     if ((int)addrlen < 0) {
3586         return -TARGET_EINVAL;
3587     }
3588 
3589     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3590     if (!host_msg)
3591         return -TARGET_EFAULT;
3592     if (fd_trans_target_to_host_data(fd)) {
3593         copy_msg = host_msg;
3594         host_msg = g_malloc(len);
3595         memcpy(host_msg, copy_msg, len);
3596         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3597         if (ret < 0) {
3598             goto fail;
3599         }
3600     }
3601     if (target_addr) {
3602         addr = alloca(addrlen+1);
3603         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3604         if (ret) {
3605             goto fail;
3606         }
3607         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3608     } else {
3609         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3610     }
3611 fail:
3612     if (copy_msg) {
3613         g_free(host_msg);
3614         host_msg = copy_msg;
3615     }
3616     unlock_user(host_msg, msg, 0);
3617     return ret;
3618 }
3619 
3620 /* do_recvfrom() must return target values and target errnos. */
3621 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3622                             abi_ulong target_addr,
3623                             abi_ulong target_addrlen)
3624 {
3625     socklen_t addrlen, ret_addrlen;
3626     void *addr;
3627     void *host_msg;
3628     abi_long ret;
3629 
3630     if (!msg) {
3631         host_msg = NULL;
3632     } else {
3633         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3634         if (!host_msg) {
3635             return -TARGET_EFAULT;
3636         }
3637     }
3638     if (target_addr) {
3639         if (get_user_u32(addrlen, target_addrlen)) {
3640             ret = -TARGET_EFAULT;
3641             goto fail;
3642         }
3643         if ((int)addrlen < 0) {
3644             ret = -TARGET_EINVAL;
3645             goto fail;
3646         }
3647         addr = alloca(addrlen);
3648         ret_addrlen = addrlen;
3649         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3650                                       addr, &ret_addrlen));
3651     } else {
3652         addr = NULL; /* To keep compiler quiet.  */
3653         addrlen = 0; /* To keep compiler quiet.  */
3654         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3655     }
3656     if (!is_error(ret)) {
3657         if (fd_trans_host_to_target_data(fd)) {
3658             abi_long trans;
3659             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3660             if (is_error(trans)) {
3661                 ret = trans;
3662                 goto fail;
3663             }
3664         }
3665         if (target_addr) {
3666             host_to_target_sockaddr(target_addr, addr,
3667                                     MIN(addrlen, ret_addrlen));
3668             if (put_user_u32(ret_addrlen, target_addrlen)) {
3669                 ret = -TARGET_EFAULT;
3670                 goto fail;
3671             }
3672         }
3673         unlock_user(host_msg, msg, len);
3674     } else {
3675 fail:
3676         unlock_user(host_msg, msg, 0);
3677     }
3678     return ret;
3679 }
3680 
3681 #ifdef TARGET_NR_socketcall
3682 /* do_socketcall() must return target values and target errnos. */
3683 static abi_long do_socketcall(int num, abi_ulong vptr)
3684 {
3685     static const unsigned nargs[] = { /* number of arguments per operation */
3686         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3687         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3688         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3689         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3690         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3691         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3692         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3693         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3694         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3695         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3696         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3697         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3698         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3699         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3700         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3701         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3702         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3703         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3704         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3705         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3706     };
3707     abi_long a[6]; /* max 6 args */
3708     unsigned i;
3709 
3710     /* check the range of the first argument num */
3711     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3712     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3713         return -TARGET_EINVAL;
3714     }
3715     /* ensure we have space for args */
3716     if (nargs[num] > ARRAY_SIZE(a)) {
3717         return -TARGET_EINVAL;
3718     }
3719     /* collect the arguments in a[] according to nargs[] */
3720     for (i = 0; i < nargs[num]; ++i) {
3721         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3722             return -TARGET_EFAULT;
3723         }
3724     }
3725     /* now that we have the args, invoke the appropriate underlying function */
3726     switch (num) {
3727     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3728         return do_socket(a[0], a[1], a[2]);
3729     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3730         return do_bind(a[0], a[1], a[2]);
3731     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3732         return do_connect(a[0], a[1], a[2]);
3733     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3734         return get_errno(listen(a[0], a[1]));
3735     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3736         return do_accept4(a[0], a[1], a[2], 0);
3737     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3738         return do_getsockname(a[0], a[1], a[2]);
3739     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3740         return do_getpeername(a[0], a[1], a[2]);
3741     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3742         return do_socketpair(a[0], a[1], a[2], a[3]);
3743     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3744         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3745     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3746         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3747     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3748         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3749     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3750         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3751     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3752         return get_errno(shutdown(a[0], a[1]));
3753     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3754         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3755     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3756         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3757     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3758         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3759     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3760         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3761     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3762         return do_accept4(a[0], a[1], a[2], a[3]);
3763     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3764         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3765     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3766         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3767     default:
3768         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3769         return -TARGET_EINVAL;
3770     }
3771 }
3772 #endif
3773 
3774 #ifndef TARGET_SEMID64_DS
3775 /* asm-generic version of this struct */
3776 struct target_semid64_ds
3777 {
3778   struct target_ipc_perm sem_perm;
3779   abi_ulong sem_otime;
3780 #if TARGET_ABI_BITS == 32
3781   abi_ulong __unused1;
3782 #endif
3783   abi_ulong sem_ctime;
3784 #if TARGET_ABI_BITS == 32
3785   abi_ulong __unused2;
3786 #endif
3787   abi_ulong sem_nsems;
3788   abi_ulong __unused3;
3789   abi_ulong __unused4;
3790 };
3791 #endif
3792 
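/*
 * Convert a struct ipc_perm embedded in a target semid64_ds between the
 * guest and host layouts.  The mode and __seq fields are 16-bit on most
 * targets, but Alpha, MIPS and PPC use a 32-bit mode and PPC also uses a
 * 32-bit __seq, hence the per-target swap widths below.
 */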
3793 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3794                                                abi_ulong target_addr)
3795 {
3796     struct target_ipc_perm *target_ip;
3797     struct target_semid64_ds *target_sd;
3798 
3799     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3800         return -TARGET_EFAULT;
3801     target_ip = &(target_sd->sem_perm);
3802     host_ip->__key = tswap32(target_ip->__key);
3803     host_ip->uid = tswap32(target_ip->uid);
3804     host_ip->gid = tswap32(target_ip->gid);
3805     host_ip->cuid = tswap32(target_ip->cuid);
3806     host_ip->cgid = tswap32(target_ip->cgid);
3807 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3808     host_ip->mode = tswap32(target_ip->mode);
3809 #else
3810     host_ip->mode = tswap16(target_ip->mode);
3811 #endif
3812 #if defined(TARGET_PPC)
3813     host_ip->__seq = tswap32(target_ip->__seq);
3814 #else
3815     host_ip->__seq = tswap16(target_ip->__seq);
3816 #endif
3817     unlock_user_struct(target_sd, target_addr, 0);
3818     return 0;
3819 }
3820 
3821 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3822                                                struct ipc_perm *host_ip)
3823 {
3824     struct target_ipc_perm *target_ip;
3825     struct target_semid64_ds *target_sd;
3826 
3827     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3828         return -TARGET_EFAULT;
3829     target_ip = &(target_sd->sem_perm);
3830     target_ip->__key = tswap32(host_ip->__key);
3831     target_ip->uid = tswap32(host_ip->uid);
3832     target_ip->gid = tswap32(host_ip->gid);
3833     target_ip->cuid = tswap32(host_ip->cuid);
3834     target_ip->cgid = tswap32(host_ip->cgid);
3835 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3836     target_ip->mode = tswap32(host_ip->mode);
3837 #else
3838     target_ip->mode = tswap16(host_ip->mode);
3839 #endif
3840 #if defined(TARGET_PPC)
3841     target_ip->__seq = tswap32(host_ip->__seq);
3842 #else
3843     target_ip->__seq = tswap16(host_ip->__seq);
3844 #endif
3845     unlock_user_struct(target_sd, target_addr, 1);
3846     return 0;
3847 }
3848 
3849 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3850                                                abi_ulong target_addr)
3851 {
3852     struct target_semid64_ds *target_sd;
3853 
3854     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3855         return -TARGET_EFAULT;
3856     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3857         return -TARGET_EFAULT;
3858     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3859     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3860     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3861     unlock_user_struct(target_sd, target_addr, 0);
3862     return 0;
3863 }
3864 
3865 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3866                                                struct semid_ds *host_sd)
3867 {
3868     struct target_semid64_ds *target_sd;
3869 
3870     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3871         return -TARGET_EFAULT;
3872     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3873         return -TARGET_EFAULT;
3874     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3875     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3876     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3877     unlock_user_struct(target_sd, target_addr, 1);
3878     return 0;
3879 }
3880 
3881 struct target_seminfo {
3882     int semmap;
3883     int semmni;
3884     int semmns;
3885     int semmnu;
3886     int semmsl;
3887     int semopm;
3888     int semume;
3889     int semusz;
3890     int semvmx;
3891     int semaem;
3892 };
3893 
3894 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3895                                               struct seminfo *host_seminfo)
3896 {
3897     struct target_seminfo *target_seminfo;
3898     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3899         return -TARGET_EFAULT;
3900     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3901     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3902     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3903     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3904     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3905     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3906     __put_user(host_seminfo->semume, &target_seminfo->semume);
3907     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3908     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3909     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3910     unlock_user_struct(target_seminfo, target_addr, 1);
3911     return 0;
3912 }
3913 
3914 union semun {
3915     int val;
3916     struct semid_ds *buf;
3917     unsigned short *array;
3918     struct seminfo *__buf;
3919 };
3920 
3921 union target_semun {
3922     int val;
3923     abi_ulong buf;
3924     abi_ulong array;
3925     abi_ulong __buf;
3926 };
3927 
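/*
 * Copy a guest semaphore value array (for SETALL) into a newly allocated
 * host array.  The number of semaphores in the set is not passed by the
 * caller, so it is queried here with an IPC_STAT on the semaphore set.
 * The companion host_to_target_semarray() writes the values back and frees
 * the host array.
 */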
3928 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3929                                                abi_ulong target_addr)
3930 {
3931     int nsems;
3932     unsigned short *array;
3933     union semun semun;
3934     struct semid_ds semid_ds;
3935     int i, ret;
3936 
3937     semun.buf = &semid_ds;
3938 
3939     ret = semctl(semid, 0, IPC_STAT, semun);
3940     if (ret == -1)
3941         return get_errno(ret);
3942 
3943     nsems = semid_ds.sem_nsems;
3944 
3945     *host_array = g_try_new(unsigned short, nsems);
3946     if (!*host_array) {
3947         return -TARGET_ENOMEM;
3948     }
3949     array = lock_user(VERIFY_READ, target_addr,
3950                       nsems*sizeof(unsigned short), 1);
3951     if (!array) {
3952         g_free(*host_array);
3953         return -TARGET_EFAULT;
3954     }
3955 
3956     for (i = 0; i < nsems; i++) {
3957         __get_user((*host_array)[i], &array[i]);
3958     }
3959     unlock_user(array, target_addr, 0);
3960 
3961     return 0;
3962 }
3963 
3964 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3965                                                unsigned short **host_array)
3966 {
3967     int nsems;
3968     unsigned short *array;
3969     union semun semun;
3970     struct semid_ds semid_ds;
3971     int i, ret;
3972 
3973     semun.buf = &semid_ds;
3974 
3975     ret = semctl(semid, 0, IPC_STAT, semun);
3976     if (ret == -1)
3977         return get_errno(ret);
3978 
3979     nsems = semid_ds.sem_nsems;
3980 
3981     array = lock_user(VERIFY_WRITE, target_addr,
3982                       nsems*sizeof(unsigned short), 0);
3983     if (!array)
3984         return -TARGET_EFAULT;
3985 
3986     for (i = 0; i < nsems; i++) {
3987         __put_user((*host_array)[i], &array[i]);
3988     }
3989     g_free(*host_array);
3990     unlock_user(array, target_addr, 1);
3991 
3992     return 0;
3993 }
3994 
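/*
 * Emulate semctl().  The command is masked with 0xff to drop flag bits such
 * as IPC_64 that the guest may OR in; the semun argument is then converted
 * according to whichever union member the command uses.
 */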
3995 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3996                                  abi_ulong target_arg)
3997 {
3998     union target_semun target_su = { .buf = target_arg };
3999     union semun arg;
4000     struct semid_ds dsarg;
4001     unsigned short *array = NULL;
4002     struct seminfo seminfo;
4003     abi_long ret = -TARGET_EINVAL;
4004     abi_long err;
4005     cmd &= 0xff;
4006 
4007     switch (cmd) {
4008         case GETVAL:
4009         case SETVAL:
4010             /* In 64 bit cross-endian situations, we will erroneously pick up
4011              * the wrong half of the union for the "val" element.  To rectify
4012              * this, the entire 8-byte structure is byteswapped, followed by
4013              * a swap of the 4 byte val field. In other cases, the data is
4014              * already in proper host byte order. */
4015             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4016                 target_su.buf = tswapal(target_su.buf);
4017                 arg.val = tswap32(target_su.val);
4018             } else {
4019                 arg.val = target_su.val;
4020             }
4021             ret = get_errno(semctl(semid, semnum, cmd, arg));
4022             break;
4023         case GETALL:
4024         case SETALL:
4025             err = target_to_host_semarray(semid, &array, target_su.array);
4026             if (err)
4027                 return err;
4028             arg.array = array;
4029             ret = get_errno(semctl(semid, semnum, cmd, arg));
4030             err = host_to_target_semarray(semid, target_su.array, &array);
4031             if (err)
4032                 return err;
4033             break;
4034         case IPC_STAT:
4035         case IPC_SET:
4036         case SEM_STAT:
4037             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4038             if (err)
4039                 return err;
4040             arg.buf = &dsarg;
4041             ret = get_errno(semctl(semid, semnum, cmd, arg));
4042             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4043             if (err)
4044                 return err;
4045             break;
4046         case IPC_INFO:
4047         case SEM_INFO:
4048             arg.__buf = &seminfo;
4049             ret = get_errno(semctl(semid, semnum, cmd, arg));
4050             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4051             if (err)
4052                 return err;
4053             break;
4054         case IPC_RMID:
4055         case GETPID:
4056         case GETNCNT:
4057         case GETZCNT:
4058             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4059             break;
4060     }
4061 
4062     return ret;
4063 }
4064 
4065 struct target_sembuf {
4066     unsigned short sem_num;
4067     short sem_op;
4068     short sem_flg;
4069 };
4070 
4071 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4072                                              abi_ulong target_addr,
4073                                              unsigned nsops)
4074 {
4075     struct target_sembuf *target_sembuf;
4076     int i;
4077 
4078     target_sembuf = lock_user(VERIFY_READ, target_addr,
4079                               nsops*sizeof(struct target_sembuf), 1);
4080     if (!target_sembuf)
4081         return -TARGET_EFAULT;
4082 
4083     for (i = 0; i < nsops; i++) {
4084         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4085         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4086         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4087     }
4088 
4089     unlock_user(target_sembuf, target_addr, 0);
4090 
4091     return 0;
4092 }
4093 
4094 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4095     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4096 
4097 /*
4098  * This macro is required to handle the s390 variant, which passes the
4099  * arguments in a different order from the default.
4100  */
4101 #ifdef __s390x__
4102 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4103   (__nsops), (__timeout), (__sops)
4104 #else
4105 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4106   (__nsops), 0, (__sops), (__timeout)
4107 #endif
4108 
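/*
 * Emulate semop()/semtimedop().  The sembuf array is converted from the
 * guest, and the optional timeout from a 32- or 64-bit target timespec
 * depending on 'time64'.  The host semtimedop syscall is preferred when
 * available, with the multiplexed ipc syscall as a fallback.
 */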
4109 static inline abi_long do_semtimedop(int semid,
4110                                      abi_long ptr,
4111                                      unsigned nsops,
4112                                      abi_long timeout, bool time64)
4113 {
4114     struct sembuf *sops;
4115     struct timespec ts, *pts = NULL;
4116     abi_long ret;
4117 
4118     if (timeout) {
4119         pts = &ts;
4120         if (time64) {
4121             if (target_to_host_timespec64(pts, timeout)) {
4122                 return -TARGET_EFAULT;
4123             }
4124         } else {
4125             if (target_to_host_timespec(pts, timeout)) {
4126                 return -TARGET_EFAULT;
4127             }
4128         }
4129     }
4130 
4131     if (nsops > TARGET_SEMOPM) {
4132         return -TARGET_E2BIG;
4133     }
4134 
4135     sops = g_new(struct sembuf, nsops);
4136 
4137     if (target_to_host_sembuf(sops, ptr, nsops)) {
4138         g_free(sops);
4139         return -TARGET_EFAULT;
4140     }
4141 
4142     ret = -TARGET_ENOSYS;
4143 #ifdef __NR_semtimedop
4144     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4145 #endif
4146 #ifdef __NR_ipc
4147     if (ret == -TARGET_ENOSYS) {
4148         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4149                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4150     }
4151 #endif
4152     g_free(sops);
4153     return ret;
4154 }
4155 #endif
4156 
4157 struct target_msqid_ds
4158 {
4159     struct target_ipc_perm msg_perm;
4160     abi_ulong msg_stime;
4161 #if TARGET_ABI_BITS == 32
4162     abi_ulong __unused1;
4163 #endif
4164     abi_ulong msg_rtime;
4165 #if TARGET_ABI_BITS == 32
4166     abi_ulong __unused2;
4167 #endif
4168     abi_ulong msg_ctime;
4169 #if TARGET_ABI_BITS == 32
4170     abi_ulong __unused3;
4171 #endif
4172     abi_ulong __msg_cbytes;
4173     abi_ulong msg_qnum;
4174     abi_ulong msg_qbytes;
4175     abi_ulong msg_lspid;
4176     abi_ulong msg_lrpid;
4177     abi_ulong __unused4;
4178     abi_ulong __unused5;
4179 };
4180 
4181 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4182                                                abi_ulong target_addr)
4183 {
4184     struct target_msqid_ds *target_md;
4185 
4186     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4187         return -TARGET_EFAULT;
4188     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4189         return -TARGET_EFAULT;
4190     host_md->msg_stime = tswapal(target_md->msg_stime);
4191     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4192     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4193     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4194     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4195     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4196     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4197     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4198     unlock_user_struct(target_md, target_addr, 0);
4199     return 0;
4200 }
4201 
4202 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4203                                                struct msqid_ds *host_md)
4204 {
4205     struct target_msqid_ds *target_md;
4206 
4207     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4208         return -TARGET_EFAULT;
4209     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4210         return -TARGET_EFAULT;
4211     target_md->msg_stime = tswapal(host_md->msg_stime);
4212     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4213     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4214     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4215     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4216     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4217     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4218     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4219     unlock_user_struct(target_md, target_addr, 1);
4220     return 0;
4221 }
4222 
4223 struct target_msginfo {
4224     int msgpool;
4225     int msgmap;
4226     int msgmax;
4227     int msgmnb;
4228     int msgmni;
4229     int msgssz;
4230     int msgtql;
4231     unsigned short int msgseg;
4232 };
4233 
4234 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4235                                               struct msginfo *host_msginfo)
4236 {
4237     struct target_msginfo *target_msginfo;
4238     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4239         return -TARGET_EFAULT;
4240     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4241     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4242     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4243     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4244     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4245     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4246     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4247     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4248     unlock_user_struct(target_msginfo, target_addr, 1);
4249     return 0;
4250 }
4251 
4252 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4253 {
4254     struct msqid_ds dsarg;
4255     struct msginfo msginfo;
4256     abi_long ret = -TARGET_EINVAL;
4257 
4258     cmd &= 0xff;
4259 
4260     switch (cmd) {
4261     case IPC_STAT:
4262     case IPC_SET:
4263     case MSG_STAT:
4264         if (target_to_host_msqid_ds(&dsarg,ptr))
4265             return -TARGET_EFAULT;
4266         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4267         if (host_to_target_msqid_ds(ptr,&dsarg))
4268             return -TARGET_EFAULT;
4269         break;
4270     case IPC_RMID:
4271         ret = get_errno(msgctl(msgid, cmd, NULL));
4272         break;
4273     case IPC_INFO:
4274     case MSG_INFO:
4275         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4276         if (host_to_target_msginfo(ptr, &msginfo))
4277             return -TARGET_EFAULT;
4278         break;
4279     }
4280 
4281     return ret;
4282 }
4283 
4284 struct target_msgbuf {
4285     abi_long mtype;
4286     char	mtext[1];
4287 };
4288 
4289 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4290                                  ssize_t msgsz, int msgflg)
4291 {
4292     struct target_msgbuf *target_mb;
4293     struct msgbuf *host_mb;
4294     abi_long ret = 0;
4295 
4296     if (msgsz < 0) {
4297         return -TARGET_EINVAL;
4298     }
4299 
4300     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4301         return -TARGET_EFAULT;
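    /* The host struct msgbuf begins with a 'long mtype' field, so allocate
     * room for that header in addition to the msgsz bytes of text. */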
4302     host_mb = g_try_malloc(msgsz + sizeof(long));
4303     if (!host_mb) {
4304         unlock_user_struct(target_mb, msgp, 0);
4305         return -TARGET_ENOMEM;
4306     }
4307     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4308     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4309     ret = -TARGET_ENOSYS;
4310 #ifdef __NR_msgsnd
4311     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4312 #endif
4313 #ifdef __NR_ipc
4314     if (ret == -TARGET_ENOSYS) {
4315 #ifdef __s390x__
4316         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4317                                  host_mb));
4318 #else
4319         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4320                                  host_mb, 0));
4321 #endif
4322     }
4323 #endif
4324     g_free(host_mb);
4325     unlock_user_struct(target_mb, msgp, 0);
4326 
4327     return ret;
4328 }
4329 
4330 #ifdef __NR_ipc
4331 #if defined(__sparc__)
4332 /* On SPARC, msgrcv does not use the kludge on the final two arguments.  */
4333 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4334 #elif defined(__s390x__)
4335 /* The s390 sys_ipc variant has only five parameters.  */
4336 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4337     ((long int[]){(long int)__msgp, __msgtyp})
4338 #else
4339 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4340     ((long int[]){(long int)__msgp, __msgtyp}), 0
4341 #endif
4342 #endif
4343 
4344 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4345                                  ssize_t msgsz, abi_long msgtyp,
4346                                  int msgflg)
4347 {
4348     struct target_msgbuf *target_mb;
4349     char *target_mtext;
4350     struct msgbuf *host_mb;
4351     abi_long ret = 0;
4352 
4353     if (msgsz < 0) {
4354         return -TARGET_EINVAL;
4355     }
4356 
4357     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4358         return -TARGET_EFAULT;
4359 
4360     host_mb = g_try_malloc(msgsz + sizeof(long));
4361     if (!host_mb) {
4362         ret = -TARGET_ENOMEM;
4363         goto end;
4364     }
4365     ret = -TARGET_ENOSYS;
4366 #ifdef __NR_msgrcv
4367     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4368 #endif
4369 #ifdef __NR_ipc
4370     if (ret == -TARGET_ENOSYS) {
4371         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4372                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4373     }
4374 #endif
4375 
4376     if (ret > 0) {
4377         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4378         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4379         if (!target_mtext) {
4380             ret = -TARGET_EFAULT;
4381             goto end;
4382         }
4383         memcpy(target_mb->mtext, host_mb->mtext, ret);
4384         unlock_user(target_mtext, target_mtext_addr, ret);
4385     }
4386 
4387     target_mb->mtype = tswapal(host_mb->mtype);
4388 
4389 end:
4390     if (target_mb)
4391         unlock_user_struct(target_mb, msgp, 1);
4392     g_free(host_mb);
4393     return ret;
4394 }
4395 
4396 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4397                                                abi_ulong target_addr)
4398 {
4399     struct target_shmid_ds *target_sd;
4400 
4401     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4402         return -TARGET_EFAULT;
4403     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4404         return -TARGET_EFAULT;
4405     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4406     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4407     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4408     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4409     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4410     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4411     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4412     unlock_user_struct(target_sd, target_addr, 0);
4413     return 0;
4414 }
4415 
4416 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4417                                                struct shmid_ds *host_sd)
4418 {
4419     struct target_shmid_ds *target_sd;
4420 
4421     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4422         return -TARGET_EFAULT;
4423     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4424         return -TARGET_EFAULT;
4425     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4426     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4427     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4428     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4429     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4430     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4431     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4432     unlock_user_struct(target_sd, target_addr, 1);
4433     return 0;
4434 }
4435 
4436 struct target_shminfo {
4437     abi_ulong shmmax;
4438     abi_ulong shmmin;
4439     abi_ulong shmmni;
4440     abi_ulong shmseg;
4441     abi_ulong shmall;
4442 };
4443 
4444 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4445                                               struct shminfo *host_shminfo)
4446 {
4447     struct target_shminfo *target_shminfo;
4448     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4451     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4452     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4453     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4454     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4455     unlock_user_struct(target_shminfo, target_addr, 1);
4456     return 0;
4457 }
4458 
4459 struct target_shm_info {
4460     int used_ids;
4461     abi_ulong shm_tot;
4462     abi_ulong shm_rss;
4463     abi_ulong shm_swp;
4464     abi_ulong swap_attempts;
4465     abi_ulong swap_successes;
4466 };
4467 
4468 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4469                                                struct shm_info *host_shm_info)
4470 {
4471     struct target_shm_info *target_shm_info;
4472     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4473         return -TARGET_EFAULT;
4474     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4475     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4476     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4477     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4478     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4479     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4480     unlock_user_struct(target_shm_info, target_addr, 1);
4481     return 0;
4482 }
4483 
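/*
 * Emulate shmctl().  As in do_semctl()/do_msgctl(), the command is masked
 * with 0xff to drop flag bits such as IPC_64; IPC_INFO and SHM_INFO return
 * shminfo/shm_info structures, which are converted separately from the
 * plain shmid_ds commands.
 */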
4484 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4485 {
4486     struct shmid_ds dsarg;
4487     struct shminfo shminfo;
4488     struct shm_info shm_info;
4489     abi_long ret = -TARGET_EINVAL;
4490 
4491     cmd &= 0xff;
4492 
4493     switch (cmd) {
4494     case IPC_STAT:
4495     case IPC_SET:
4496     case SHM_STAT:
4497         if (target_to_host_shmid_ds(&dsarg, buf))
4498             return -TARGET_EFAULT;
4499         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4500         if (host_to_target_shmid_ds(buf, &dsarg))
4501             return -TARGET_EFAULT;
4502         break;
4503     case IPC_INFO:
4504         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4505         if (host_to_target_shminfo(buf, &shminfo))
4506             return -TARGET_EFAULT;
4507         break;
4508     case SHM_INFO:
4509         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4510         if (host_to_target_shm_info(buf, &shm_info))
4511             return -TARGET_EFAULT;
4512         break;
4513     case IPC_RMID:
4514     case SHM_LOCK:
4515     case SHM_UNLOCK:
4516         ret = get_errno(shmctl(shmid, cmd, NULL));
4517         break;
4518     }
4519 
4520     return ret;
4521 }
4522 
4523 #ifdef TARGET_NR_ipc
4524 /* ??? This only works with linear mappings.  */
4525 /* do_ipc() must return target values and target errnos. */
4526 static abi_long do_ipc(CPUArchState *cpu_env,
4527                        unsigned int call, abi_long first,
4528                        abi_long second, abi_long third,
4529                        abi_long ptr, abi_long fifth)
4530 {
4531     int version;
4532     abi_long ret = 0;
4533 
4534     version = call >> 16;
4535     call &= 0xffff;
4536 
4537     switch (call) {
4538     case IPCOP_semop:
4539         ret = do_semtimedop(first, ptr, second, 0, false);
4540         break;
4541     case IPCOP_semtimedop:
4542     /*
4543      * The s390 sys_ipc variant has only five parameters instead of six
4544      * (as in the default variant); the only difference is the handling of
4545      * SEMTIMEDOP, where s390 uses the third parameter as a pointer to a
4546      * struct timespec while the generic variant uses the fifth parameter.
4547      */
4548 #if defined(TARGET_S390X)
4549         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4550 #else
4551         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4552 #endif
4553         break;
4554 
4555     case IPCOP_semget:
4556         ret = get_errno(semget(first, second, third));
4557         break;
4558 
4559     case IPCOP_semctl: {
4560         /* The semun argument to semctl is passed by value, so dereference the
4561          * ptr argument. */
4562         abi_ulong atptr;
4563         get_user_ual(atptr, ptr);
4564         ret = do_semctl(first, second, third, atptr);
4565         break;
4566     }
4567 
4568     case IPCOP_msgget:
4569         ret = get_errno(msgget(first, second));
4570         break;
4571 
4572     case IPCOP_msgsnd:
4573         ret = do_msgsnd(first, ptr, second, third);
4574         break;
4575 
4576     case IPCOP_msgctl:
4577         ret = do_msgctl(first, second, ptr);
4578         break;
4579 
4580     case IPCOP_msgrcv:
4581         switch (version) {
4582         case 0:
4583             {
4584                 struct target_ipc_kludge {
4585                     abi_long msgp;
4586                     abi_long msgtyp;
4587                 } *tmp;
4588 
4589                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4590                     ret = -TARGET_EFAULT;
4591                     break;
4592                 }
4593 
4594                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4595 
4596                 unlock_user_struct(tmp, ptr, 0);
4597                 break;
4598             }
4599         default:
4600             ret = do_msgrcv(first, ptr, second, fifth, third);
4601         }
4602         break;
4603 
4604     case IPCOP_shmat:
4605         switch (version) {
4606         default:
4607         {
4608             abi_ulong raddr;
4609             raddr = target_shmat(cpu_env, first, ptr, second);
4610             if (is_error(raddr))
4611                 return get_errno(raddr);
4612             if (put_user_ual(raddr, third))
4613                 return -TARGET_EFAULT;
4614             break;
4615         }
4616         case 1:
4617             ret = -TARGET_EINVAL;
4618             break;
4619         }
4620         break;
4621     case IPCOP_shmdt:
4622         ret = target_shmdt(ptr);
4623         break;
4624 
4625     case IPCOP_shmget:
4626         /* IPC_* flag values are the same on all Linux platforms */
4627         ret = get_errno(shmget(first, second, third));
4628         break;
4629 
4630         /* IPC_* and SHM_* command values are the same on all Linux platforms */
4631     case IPCOP_shmctl:
4632         ret = do_shmctl(first, second, ptr);
4633         break;
4634     default:
4635         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4636                       call, version);
4637         ret = -TARGET_ENOSYS;
4638         break;
4639     }
4640     return ret;
4641 }
4642 #endif
4643 
4644 /* kernel structure types definitions */
4645 
4646 #define STRUCT(name, ...) STRUCT_ ## name,
4647 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4648 enum {
4649 #include "syscall_types.h"
4650 STRUCT_MAX
4651 };
4652 #undef STRUCT
4653 #undef STRUCT_SPECIAL
4654 
4655 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4656 #define STRUCT_SPECIAL(name)
4657 #include "syscall_types.h"
4658 #undef STRUCT
4659 #undef STRUCT_SPECIAL
4660 
4661 #define MAX_STRUCT_SIZE 4096
4662 
4663 #ifdef CONFIG_FIEMAP
4664 /* So fiemap access checks don't overflow on 32 bit systems.
4665  * This is very slightly smaller than the limit imposed by
4666  * the underlying kernel.
4667  */
4668 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4669                             / sizeof(struct fiemap_extent))
4670 
4671 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4672                                        int fd, int cmd, abi_long arg)
4673 {
4674     /* The parameter for this ioctl is a struct fiemap followed
4675      * by an array of struct fiemap_extent whose size is set
4676      * in fiemap->fm_extent_count. The array is filled in by the
4677      * ioctl.
4678      */
4679     int target_size_in, target_size_out;
4680     struct fiemap *fm;
4681     const argtype *arg_type = ie->arg_type;
4682     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4683     void *argptr, *p;
4684     abi_long ret;
4685     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4686     uint32_t outbufsz;
4687     int free_fm = 0;
4688 
4689     assert(arg_type[0] == TYPE_PTR);
4690     assert(ie->access == IOC_RW);
4691     arg_type++;
4692     target_size_in = thunk_type_size(arg_type, 0);
4693     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4694     if (!argptr) {
4695         return -TARGET_EFAULT;
4696     }
4697     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4698     unlock_user(argptr, arg, 0);
4699     fm = (struct fiemap *)buf_temp;
4700     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4701         return -TARGET_EINVAL;
4702     }
4703 
4704     outbufsz = sizeof(*fm) +
4705         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4706 
4707     if (outbufsz > MAX_STRUCT_SIZE) {
4708         /* We can't fit all the extents into the fixed size buffer.
4709          * Allocate one that is large enough and use it instead.
4710          */
4711         fm = g_try_malloc(outbufsz);
4712         if (!fm) {
4713             return -TARGET_ENOMEM;
4714         }
4715         memcpy(fm, buf_temp, sizeof(struct fiemap));
4716         free_fm = 1;
4717     }
4718     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4719     if (!is_error(ret)) {
4720         target_size_out = target_size_in;
4721         /* An extent_count of 0 means we were only counting the extents
4722          * so there are no structs to copy
4723          */
4724         if (fm->fm_extent_count != 0) {
4725             target_size_out += fm->fm_mapped_extents * extent_size;
4726         }
4727         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4728         if (!argptr) {
4729             ret = -TARGET_EFAULT;
4730         } else {
4731             /* Convert the struct fiemap */
4732             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4733             if (fm->fm_extent_count != 0) {
4734                 p = argptr + target_size_in;
4735                 /* ...and then all the struct fiemap_extents */
4736                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4737                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4738                                   THUNK_TARGET);
4739                     p += extent_size;
4740                 }
4741             }
4742             unlock_user(argptr, arg, target_size_out);
4743         }
4744     }
4745     if (free_fm) {
4746         g_free(fm);
4747     }
4748     return ret;
4749 }
4750 #endif
4751 
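/*
 * SIOCGIFCONF: the target and host struct ifreq layouts can differ in size,
 * so ifc_len is rescaled between the target element size and
 * sizeof(struct ifreq), and each returned ifreq is converted individually.
 */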
4752 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4753                                 int fd, int cmd, abi_long arg)
4754 {
4755     const argtype *arg_type = ie->arg_type;
4756     int target_size;
4757     void *argptr;
4758     int ret;
4759     struct ifconf *host_ifconf;
4760     uint32_t outbufsz;
4761     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4762     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4763     int target_ifreq_size;
4764     int nb_ifreq;
4765     int free_buf = 0;
4766     int i;
4767     int target_ifc_len;
4768     abi_long target_ifc_buf;
4769     int host_ifc_len;
4770     char *host_ifc_buf;
4771 
4772     assert(arg_type[0] == TYPE_PTR);
4773     assert(ie->access == IOC_RW);
4774 
4775     arg_type++;
4776     target_size = thunk_type_size(arg_type, 0);
4777 
4778     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4779     if (!argptr)
4780         return -TARGET_EFAULT;
4781     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4782     unlock_user(argptr, arg, 0);
4783 
4784     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4785     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4786     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4787 
4788     if (target_ifc_buf != 0) {
4789         target_ifc_len = host_ifconf->ifc_len;
4790         nb_ifreq = target_ifc_len / target_ifreq_size;
4791         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4792 
4793         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4794         if (outbufsz > MAX_STRUCT_SIZE) {
4795             /*
4796              * We can't fit all the extents into the fixed size buffer.
4797              * Allocate one that is large enough and use it instead.
4798              */
4799             host_ifconf = g_try_malloc(outbufsz);
4800             if (!host_ifconf) {
4801                 return -TARGET_ENOMEM;
4802             }
4803             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4804             free_buf = 1;
4805         }
4806         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4807 
4808         host_ifconf->ifc_len = host_ifc_len;
4809     } else {
4810         host_ifc_buf = NULL;
4811     }
4812     host_ifconf->ifc_buf = host_ifc_buf;
4813 
4814     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4815     if (!is_error(ret)) {
4816         /* convert host ifc_len to target ifc_len */
4817 
4818         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4819         target_ifc_len = nb_ifreq * target_ifreq_size;
4820         host_ifconf->ifc_len = target_ifc_len;
4821 
4822         /* restore target ifc_buf */
4823 
4824         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4825 
4826         /* copy struct ifconf to target user */
4827 
4828         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4829         if (!argptr)
4830             return -TARGET_EFAULT;
4831         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4832         unlock_user(argptr, arg, target_size);
4833 
4834         if (target_ifc_buf != 0) {
4835             /* copy ifreq[] to target user */
4836             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4837             for (i = 0; i < nb_ifreq ; i++) {
4838                 thunk_convert(argptr + i * target_ifreq_size,
4839                               host_ifc_buf + i * sizeof(struct ifreq),
4840                               ifreq_arg_type, THUNK_TARGET);
4841             }
4842             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4843         }
4844     }
4845 
4846     if (free_buf) {
4847         g_free(host_ifconf);
4848     }
4849 
4850     return ret;
4851 }
4852 
4853 #if defined(CONFIG_USBFS)
4854 #if HOST_LONG_BITS > 64
4855 #error USBDEVFS thunks do not support >64 bit hosts yet.
4856 #endif
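/*
 * Each URB submitted through USBDEVFS_SUBMITURB is wrapped in a live_urb so
 * that the guest's urb address and locked data buffer can be recovered when
 * the host hands the urb pointer back via USBDEVFS_REAPURB.
 */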
4857 struct live_urb {
4858     uint64_t target_urb_adr;
4859     uint64_t target_buf_adr;
4860     char *target_buf_ptr;
4861     struct usbdevfs_urb host_urb;
4862 };
4863 
4864 static GHashTable *usbdevfs_urb_hashtable(void)
4865 {
4866     static GHashTable *urb_hashtable;
4867 
4868     if (!urb_hashtable) {
4869         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4870     }
4871     return urb_hashtable;
4872 }
4873 
4874 static void urb_hashtable_insert(struct live_urb *urb)
4875 {
4876     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4877     g_hash_table_insert(urb_hashtable, urb, urb);
4878 }
4879 
4880 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4881 {
4882     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4883     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4884 }
4885 
4886 static void urb_hashtable_remove(struct live_urb *urb)
4887 {
4888     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4889     g_hash_table_remove(urb_hashtable, urb);
4890 }
4891 
4892 static abi_long
4893 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4894                           int fd, int cmd, abi_long arg)
4895 {
4896     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4897     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4898     struct live_urb *lurb;
4899     void *argptr;
4900     uint64_t hurb;
4901     int target_size;
4902     uintptr_t target_urb_adr;
4903     abi_long ret;
4904 
4905     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4906 
4907     memset(buf_temp, 0, sizeof(uint64_t));
4908     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4909     if (is_error(ret)) {
4910         return ret;
4911     }
4912 
4913     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4914     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4915     if (!lurb->target_urb_adr) {
4916         return -TARGET_EFAULT;
4917     }
4918     urb_hashtable_remove(lurb);
4919     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4920         lurb->host_urb.buffer_length);
4921     lurb->target_buf_ptr = NULL;
4922 
4923     /* restore the guest buffer pointer */
4924     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4925 
4926     /* update the guest urb struct */
4927     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4928     if (!argptr) {
4929         g_free(lurb);
4930         return -TARGET_EFAULT;
4931     }
4932     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4933     unlock_user(argptr, lurb->target_urb_adr, target_size);
4934 
4935     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4936     /* write back the urb handle */
4937     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4938     if (!argptr) {
4939         g_free(lurb);
4940         return -TARGET_EFAULT;
4941     }
4942 
4943     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4944     target_urb_adr = lurb->target_urb_adr;
4945     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4946     unlock_user(argptr, arg, target_size);
4947 
4948     g_free(lurb);
4949     return ret;
4950 }
4951 
4952 static abi_long
4953 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4954                              uint8_t *buf_temp __attribute__((unused)),
4955                              int fd, int cmd, abi_long arg)
4956 {
4957     struct live_urb *lurb;
4958 
4959     /* map target address back to host URB with metadata. */
4960     lurb = urb_hashtable_lookup(arg);
4961     if (!lurb) {
4962         return -TARGET_EFAULT;
4963     }
4964     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4965 }
4966 
4967 static abi_long
4968 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4969                             int fd, int cmd, abi_long arg)
4970 {
4971     const argtype *arg_type = ie->arg_type;
4972     int target_size;
4973     abi_long ret;
4974     void *argptr;
4975     int rw_dir;
4976     struct live_urb *lurb;
4977 
4978     /*
4979      * each submitted URB needs to map to a unique ID for the
4980      * kernel, and that unique ID needs to be a pointer to
4981      * host memory.  Hence, we need to malloc for each URB.
4982      * Isochronous transfers have a variable length struct.
4983      */
4984     arg_type++;
4985     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4986 
4987     /* construct host copy of urb and metadata */
4988     lurb = g_try_new0(struct live_urb, 1);
4989     if (!lurb) {
4990         return -TARGET_ENOMEM;
4991     }
4992 
4993     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4994     if (!argptr) {
4995         g_free(lurb);
4996         return -TARGET_EFAULT;
4997     }
4998     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4999     unlock_user(argptr, arg, 0);
5000 
5001     lurb->target_urb_adr = arg;
5002     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5003 
5004     /* buffer space used depends on endpoint type so lock the entire buffer */
5005     /* control type urbs should check the buffer contents for true direction */
5006     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5007     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5008         lurb->host_urb.buffer_length, 1);
5009     if (lurb->target_buf_ptr == NULL) {
5010         g_free(lurb);
5011         return -TARGET_EFAULT;
5012     }
5013 
5014     /* update buffer pointer in host copy */
5015     lurb->host_urb.buffer = lurb->target_buf_ptr;
5016 
5017     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5018     if (is_error(ret)) {
5019         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5020         g_free(lurb);
5021     } else {
5022         urb_hashtable_insert(lurb);
5023     }
5024 
5025     return ret;
5026 }
5027 #endif /* CONFIG_USBFS */
5028 
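/*
 * Device-mapper ioctls share a single struct dm_ioctl header followed by a
 * command-specific, variable-length payload located at data_start within a
 * buffer of data_size bytes; both directions are converted per command.
 */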
5029 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5030                             int cmd, abi_long arg)
5031 {
5032     void *argptr;
5033     struct dm_ioctl *host_dm;
5034     abi_long guest_data;
5035     uint32_t guest_data_size;
5036     int target_size;
5037     const argtype *arg_type = ie->arg_type;
5038     abi_long ret;
5039     void *big_buf = NULL;
5040     char *host_data;
5041 
5042     arg_type++;
5043     target_size = thunk_type_size(arg_type, 0);
5044     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5045     if (!argptr) {
5046         ret = -TARGET_EFAULT;
5047         goto out;
5048     }
5049     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5050     unlock_user(argptr, arg, 0);
5051 
5052     /* buf_temp is too small, so fetch things into a bigger buffer */
5053     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5054     memcpy(big_buf, buf_temp, target_size);
5055     buf_temp = big_buf;
5056     host_dm = big_buf;
5057 
5058     guest_data = arg + host_dm->data_start;
5059     if ((guest_data - arg) < 0) {
5060         ret = -TARGET_EINVAL;
5061         goto out;
5062     }
5063     guest_data_size = host_dm->data_size - host_dm->data_start;
5064     host_data = (char*)host_dm + host_dm->data_start;
5065 
5066     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5067     if (!argptr) {
5068         ret = -TARGET_EFAULT;
5069         goto out;
5070     }
5071 
5072     switch (ie->host_cmd) {
5073     case DM_REMOVE_ALL:
5074     case DM_LIST_DEVICES:
5075     case DM_DEV_CREATE:
5076     case DM_DEV_REMOVE:
5077     case DM_DEV_SUSPEND:
5078     case DM_DEV_STATUS:
5079     case DM_DEV_WAIT:
5080     case DM_TABLE_STATUS:
5081     case DM_TABLE_CLEAR:
5082     case DM_TABLE_DEPS:
5083     case DM_LIST_VERSIONS:
5084         /* no input data */
5085         break;
5086     case DM_DEV_RENAME:
5087     case DM_DEV_SET_GEOMETRY:
5088         /* data contains only strings */
5089         memcpy(host_data, argptr, guest_data_size);
5090         break;
5091     case DM_TARGET_MSG:
5092         memcpy(host_data, argptr, guest_data_size);
5093         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5094         break;
5095     case DM_TABLE_LOAD:
5096     {
5097         void *gspec = argptr;
5098         void *cur_data = host_data;
5099         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5100         int spec_size = thunk_type_size(dm_arg_type, 0);
5101         int i;
5102 
5103         for (i = 0; i < host_dm->target_count; i++) {
5104             struct dm_target_spec *spec = cur_data;
5105             uint32_t next;
5106             int slen;
5107 
5108             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5109             slen = strlen((char*)gspec + spec_size) + 1;
5110             next = spec->next;
5111             spec->next = sizeof(*spec) + slen;
5112             strcpy((char*)&spec[1], gspec + spec_size);
5113             gspec += next;
5114             cur_data += spec->next;
5115         }
5116         break;
5117     }
5118     default:
5119         ret = -TARGET_EINVAL;
5120         unlock_user(argptr, guest_data, 0);
5121         goto out;
5122     }
5123     unlock_user(argptr, guest_data, 0);
5124 
5125     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5126     if (!is_error(ret)) {
5127         guest_data = arg + host_dm->data_start;
5128         guest_data_size = host_dm->data_size - host_dm->data_start;
5129         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5130         switch (ie->host_cmd) {
5131         case DM_REMOVE_ALL:
5132         case DM_DEV_CREATE:
5133         case DM_DEV_REMOVE:
5134         case DM_DEV_RENAME:
5135         case DM_DEV_SUSPEND:
5136         case DM_DEV_STATUS:
5137         case DM_TABLE_LOAD:
5138         case DM_TABLE_CLEAR:
5139         case DM_TARGET_MSG:
5140         case DM_DEV_SET_GEOMETRY:
5141             /* no return data */
5142             break;
5143         case DM_LIST_DEVICES:
5144         {
5145             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5146             uint32_t remaining_data = guest_data_size;
5147             void *cur_data = argptr;
5148             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5149             int nl_size = 12; /* can't use thunk_size due to alignment */
5150 
5151             while (1) {
5152                 uint32_t next = nl->next;
5153                 if (next) {
5154                     nl->next = nl_size + (strlen(nl->name) + 1);
5155                 }
5156                 if (remaining_data < nl->next) {
5157                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5158                     break;
5159                 }
5160                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5161                 strcpy(cur_data + nl_size, nl->name);
5162                 cur_data += nl->next;
5163                 remaining_data -= nl->next;
5164                 if (!next) {
5165                     break;
5166                 }
5167                 nl = (void*)nl + next;
5168             }
5169             break;
5170         }
5171         case DM_DEV_WAIT:
5172         case DM_TABLE_STATUS:
5173         {
5174             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5175             void *cur_data = argptr;
5176             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5177             int spec_size = thunk_type_size(dm_arg_type, 0);
5178             int i;
5179 
5180             for (i = 0; i < host_dm->target_count; i++) {
5181                 uint32_t next = spec->next;
5182                 int slen = strlen((char*)&spec[1]) + 1;
5183                 spec->next = (cur_data - argptr) + spec_size + slen;
5184                 if (guest_data_size < spec->next) {
5185                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5186                     break;
5187                 }
5188                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5189                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5190                 cur_data = argptr + spec->next;
5191                 spec = (void*)host_dm + host_dm->data_start + next;
5192             }
5193             break;
5194         }
5195         case DM_TABLE_DEPS:
5196         {
5197             void *hdata = (void*)host_dm + host_dm->data_start;
5198             int count = *(uint32_t*)hdata;
5199             uint64_t *hdev = hdata + 8;
5200             uint64_t *gdev = argptr + 8;
5201             int i;
5202 
5203             *(uint32_t*)argptr = tswap32(count);
5204             for (i = 0; i < count; i++) {
5205                 *gdev = tswap64(*hdev);
5206                 gdev++;
5207                 hdev++;
5208             }
5209             break;
5210         }
5211         case DM_LIST_VERSIONS:
5212         {
5213             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5214             uint32_t remaining_data = guest_data_size;
5215             void *cur_data = argptr;
5216             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5217             int vers_size = thunk_type_size(dm_arg_type, 0);
5218 
5219             while (1) {
5220                 uint32_t next = vers->next;
5221                 if (next) {
5222                     vers->next = vers_size + (strlen(vers->name) + 1);
5223                 }
5224                 if (remaining_data < vers->next) {
5225                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5226                     break;
5227                 }
5228                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5229                 strcpy(cur_data + vers_size, vers->name);
5230                 cur_data += vers->next;
5231                 remaining_data -= vers->next;
5232                 if (!next) {
5233                     break;
5234                 }
5235                 vers = (void*)vers + next;
5236             }
5237             break;
5238         }
5239         default:
5240             unlock_user(argptr, guest_data, 0);
5241             ret = -TARGET_EINVAL;
5242             goto out;
5243         }
5244         unlock_user(argptr, guest_data, guest_data_size);
5245 
5246         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5247         if (!argptr) {
5248             ret = -TARGET_EFAULT;
5249             goto out;
5250         }
5251         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5252         unlock_user(argptr, arg, target_size);
5253     }
5254 out:
5255     g_free(big_buf);
5256     return ret;
5257 }
5258 
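/*
 * BLKPG: the ioctl argument embeds a pointer to a struct blkpg_partition,
 * which is converted separately and then swizzled to a host-local copy
 * before issuing the host ioctl.
 */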
5259 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5260                                int cmd, abi_long arg)
5261 {
5262     void *argptr;
5263     int target_size;
5264     const argtype *arg_type = ie->arg_type;
5265     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5266     abi_long ret;
5267 
5268     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5269     struct blkpg_partition host_part;
5270 
5271     /* Read and convert blkpg */
5272     arg_type++;
5273     target_size = thunk_type_size(arg_type, 0);
5274     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5275     if (!argptr) {
5276         ret = -TARGET_EFAULT;
5277         goto out;
5278     }
5279     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5280     unlock_user(argptr, arg, 0);
5281 
5282     switch (host_blkpg->op) {
5283     case BLKPG_ADD_PARTITION:
5284     case BLKPG_DEL_PARTITION:
5285         /* payload is struct blkpg_partition */
5286         break;
5287     default:
5288         /* Unknown opcode */
5289         ret = -TARGET_EINVAL;
5290         goto out;
5291     }
5292 
5293     /* Read and convert blkpg->data */
5294     arg = (abi_long)(uintptr_t)host_blkpg->data;
5295     target_size = thunk_type_size(part_arg_type, 0);
5296     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5297     if (!argptr) {
5298         ret = -TARGET_EFAULT;
5299         goto out;
5300     }
5301     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5302     unlock_user(argptr, arg, 0);
5303 
5304     /* Swizzle the data pointer to our local copy and call! */
5305     host_blkpg->data = &host_part;
5306     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5307 
5308 out:
5309     return ret;
5310 }
5311 
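/*
 * Route ioctls: struct rtentry contains an rt_dev pointer to a device name
 * string in guest memory, so the fields are converted one by one and the
 * string is locked separately rather than thunk-converted as part of the
 * struct.
 */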
5312 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5313                                 int fd, int cmd, abi_long arg)
5314 {
5315     const argtype *arg_type = ie->arg_type;
5316     const StructEntry *se;
5317     const argtype *field_types;
5318     const int *dst_offsets, *src_offsets;
5319     int target_size;
5320     void *argptr;
5321     abi_ulong *target_rt_dev_ptr = NULL;
5322     unsigned long *host_rt_dev_ptr = NULL;
5323     abi_long ret;
5324     int i;
5325 
5326     assert(ie->access == IOC_W);
5327     assert(*arg_type == TYPE_PTR);
5328     arg_type++;
5329     assert(*arg_type == TYPE_STRUCT);
5330     target_size = thunk_type_size(arg_type, 0);
5331     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5332     if (!argptr) {
5333         return -TARGET_EFAULT;
5334     }
5335     arg_type++;
5336     assert(*arg_type == (int)STRUCT_rtentry);
5337     se = struct_entries + *arg_type++;
5338     assert(se->convert[0] == NULL);
5339     /* convert struct here to be able to catch rt_dev string */
5340     field_types = se->field_types;
5341     dst_offsets = se->field_offsets[THUNK_HOST];
5342     src_offsets = se->field_offsets[THUNK_TARGET];
5343     for (i = 0; i < se->nb_fields; i++) {
5344         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5345             assert(*field_types == TYPE_PTRVOID);
5346             target_rt_dev_ptr = argptr + src_offsets[i];
5347             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5348             if (*target_rt_dev_ptr != 0) {
5349                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5350                                                   tswapal(*target_rt_dev_ptr));
5351                 if (!*host_rt_dev_ptr) {
5352                     unlock_user(argptr, arg, 0);
5353                     return -TARGET_EFAULT;
5354                 }
5355             } else {
5356                 *host_rt_dev_ptr = 0;
5357             }
5358             field_types++;
5359             continue;
5360         }
5361         field_types = thunk_convert(buf_temp + dst_offsets[i],
5362                                     argptr + src_offsets[i],
5363                                     field_types, THUNK_HOST);
5364     }
5365     unlock_user(argptr, arg, 0);
5366 
5367     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5368 
5369     assert(host_rt_dev_ptr != NULL);
5370     assert(target_rt_dev_ptr != NULL);
5371     if (*host_rt_dev_ptr != 0) {
5372         unlock_user((void *)*host_rt_dev_ptr,
5373                     *target_rt_dev_ptr, 0);
5374     }
5375     return ret;
5376 }
5377 
5378 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5379                                      int fd, int cmd, abi_long arg)
5380 {
5381     int sig = target_to_host_signal(arg);
5382     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5383 }
5384 
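/*
 * SIOCGSTAMP/SIOCGSTAMPNS: the _OLD variants use the target's traditional
 * struct timeval/timespec layout, while the newer commands always use the
 * 64-bit layout, so the host result is copied out with the matching
 * converter.
 */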
5385 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5386                                     int fd, int cmd, abi_long arg)
5387 {
5388     struct timeval tv;
5389     abi_long ret;
5390 
5391     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5392     if (is_error(ret)) {
5393         return ret;
5394     }
5395 
5396     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5397         if (copy_to_user_timeval(arg, &tv)) {
5398             return -TARGET_EFAULT;
5399         }
5400     } else {
5401         if (copy_to_user_timeval64(arg, &tv)) {
5402             return -TARGET_EFAULT;
5403         }
5404     }
5405 
5406     return ret;
5407 }
5408 
5409 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5410                                       int fd, int cmd, abi_long arg)
5411 {
5412     struct timespec ts;
5413     abi_long ret;
5414 
5415     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5416     if (is_error(ret)) {
5417         return ret;
5418     }
5419 
5420     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5421         if (host_to_target_timespec(arg, &ts)) {
5422             return -TARGET_EFAULT;
5423         }
5424     } else {
5425         if (host_to_target_timespec64(arg, &ts)) {
5426             return -TARGET_EFAULT;
5427         }
5428     }
5429 
5430     return ret;
5431 }
5432 
5433 #ifdef TIOCGPTPEER
5434 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5435                                      int fd, int cmd, abi_long arg)
5436 {
5437     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5438     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5439 }
5440 #endif
5441 
5442 #ifdef HAVE_DRM_H
5443 
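/*
 * DRM_IOCTL_VERSION: the guest supplies name/date/desc buffers and their
 * lengths; those buffers are locked for writing so the host kernel can fill
 * them directly, and only the length and version fields are converted.
 */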
5444 static void unlock_drm_version(struct drm_version *host_ver,
5445                                struct target_drm_version *target_ver,
5446                                bool copy)
5447 {
5448     unlock_user(host_ver->name, target_ver->name,
5449                                 copy ? host_ver->name_len : 0);
5450     unlock_user(host_ver->date, target_ver->date,
5451                                 copy ? host_ver->date_len : 0);
5452     unlock_user(host_ver->desc, target_ver->desc,
5453                                 copy ? host_ver->desc_len : 0);
5454 }
5455 
5456 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5457                                           struct target_drm_version *target_ver)
5458 {
5459     memset(host_ver, 0, sizeof(*host_ver));
5460 
5461     __get_user(host_ver->name_len, &target_ver->name_len);
5462     if (host_ver->name_len) {
5463         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5464                                    target_ver->name_len, 0);
5465         if (!host_ver->name) {
5466             return -EFAULT;
5467         }
5468     }
5469 
5470     __get_user(host_ver->date_len, &target_ver->date_len);
5471     if (host_ver->date_len) {
5472         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5473                                    target_ver->date_len, 0);
5474         if (!host_ver->date) {
5475             goto err;
5476         }
5477     }
5478 
5479     __get_user(host_ver->desc_len, &target_ver->desc_len);
5480     if (host_ver->desc_len) {
5481         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5482                                    target_ver->desc_len, 0);
5483         if (!host_ver->desc) {
5484             goto err;
5485         }
5486     }
5487 
5488     return 0;
5489 err:
5490     unlock_drm_version(host_ver, target_ver, false);
5491     return -EFAULT;
5492 }
5493 
5494 static inline void host_to_target_drmversion(
5495                                           struct target_drm_version *target_ver,
5496                                           struct drm_version *host_ver)
5497 {
5498     __put_user(host_ver->version_major, &target_ver->version_major);
5499     __put_user(host_ver->version_minor, &target_ver->version_minor);
5500     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5501     __put_user(host_ver->name_len, &target_ver->name_len);
5502     __put_user(host_ver->date_len, &target_ver->date_len);
5503     __put_user(host_ver->desc_len, &target_ver->desc_len);
5504     unlock_drm_version(host_ver, target_ver, true);
5505 }
5506 
5507 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5508                              int fd, int cmd, abi_long arg)
5509 {
5510     struct drm_version *ver;
5511     struct target_drm_version *target_ver;
5512     abi_long ret;
5513 
5514     switch (ie->host_cmd) {
5515     case DRM_IOCTL_VERSION:
5516         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5517             return -TARGET_EFAULT;
5518         }
5519         ver = (struct drm_version *)buf_temp;
5520         ret = target_to_host_drmversion(ver, target_ver);
5521         if (!is_error(ret)) {
5522             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5523             if (is_error(ret)) {
5524                 unlock_drm_version(ver, target_ver, false);
5525             } else {
5526                 host_to_target_drmversion(target_ver, ver);
5527             }
5528         }
5529         unlock_user_struct(target_ver, arg, 0);
5530         return ret;
5531     }
5532     return -TARGET_ENOSYS;
5533 }
5534 
5535 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5536                                            struct drm_i915_getparam *gparam,
5537                                            int fd, abi_long arg)
5538 {
5539     abi_long ret;
5540     int value;
5541     struct target_drm_i915_getparam *target_gparam;
5542 
5543     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5544         return -TARGET_EFAULT;
5545     }
5546 
5547     __get_user(gparam->param, &target_gparam->param);
5548     gparam->value = &value;
5549     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5550     put_user_s32(value, target_gparam->value);
5551 
5552     unlock_user_struct(target_gparam, arg, 0);
5553     return ret;
5554 }
5555 
5556 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5557                                   int fd, int cmd, abi_long arg)
5558 {
5559     switch (ie->host_cmd) {
5560     case DRM_IOCTL_I915_GETPARAM:
5561         return do_ioctl_drm_i915_getparam(ie,
5562                                           (struct drm_i915_getparam *)buf_temp,
5563                                           fd, arg);
5564     default:
5565         return -TARGET_ENOSYS;
5566     }
5567 }
5568 
5569 #endif
5570 
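/*
 * TUNSETTXFILTER: struct tun_filter is followed by 'count' ETH_ALEN-byte
 * MAC addresses, so the flexible array part is copied manually after
 * checking that the whole thing fits in buf_temp.
 */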
5571 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5572                                         int fd, int cmd, abi_long arg)
5573 {
5574     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5575     struct tun_filter *target_filter;
5576     char *target_addr;
5577 
5578     assert(ie->access == IOC_W);
5579 
5580     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5581     if (!target_filter) {
5582         return -TARGET_EFAULT;
5583     }
5584     filter->flags = tswap16(target_filter->flags);
5585     filter->count = tswap16(target_filter->count);
5586     unlock_user(target_filter, arg, 0);
5587 
5588     if (filter->count) {
5589         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5590             MAX_STRUCT_SIZE) {
5591             return -TARGET_EFAULT;
5592         }
5593 
5594         target_addr = lock_user(VERIFY_READ,
5595                                 arg + offsetof(struct tun_filter, addr),
5596                                 filter->count * ETH_ALEN, 1);
5597         if (!target_addr) {
5598             return -TARGET_EFAULT;
5599         }
5600         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5601         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5602     }
5603 
5604     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5605 }
5606 
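/*
 * The ioctl table is generated from ioctls.h: IOCTL() entries use the
 * generic thunk conversion in do_ioctl(), IOCTL_SPECIAL() entries name a
 * custom handler, and IOCTL_IGNORE() entries leave host_cmd as 0 so the
 * request fails with -TARGET_ENOTTY.  For example, an entry such as
 * IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize))) expands to
 * { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0, { ... } }.
 */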
5607 IOCTLEntry ioctl_entries[] = {
5608 #define IOCTL(cmd, access, ...) \
5609     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5610 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5611     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5612 #define IOCTL_IGNORE(cmd) \
5613     { TARGET_ ## cmd, 0, #cmd },
5614 #include "ioctls.h"
5615     { 0, 0, },
5616 };
5617 
5618 /* ??? Implement proper locking for ioctls.  */
5619 /* do_ioctl() must return target values and target errnos. */
5620 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5621 {
5622     const IOCTLEntry *ie;
5623     const argtype *arg_type;
5624     abi_long ret;
5625     uint8_t buf_temp[MAX_STRUCT_SIZE];
5626     int target_size;
5627     void *argptr;
5628 
5629     ie = ioctl_entries;
5630     for (;;) {
5631         if (ie->target_cmd == 0) {
5632             qemu_log_mask(
5633                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5634             return -TARGET_ENOTTY;
5635         }
5636         if (ie->target_cmd == cmd)
5637             break;
5638         ie++;
5639     }
5640     arg_type = ie->arg_type;
5641     if (ie->do_ioctl) {
5642         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5643     } else if (!ie->host_cmd) {
5644         /* Some architectures define BSD ioctls in their headers
5645            that are not implemented in Linux.  */
5646         return -TARGET_ENOTTY;
5647     }
5648 
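    /*
     * Generic conversion path: arg_type describes the ioctl argument, and
     * for TYPE_PTR the IOC_R/IOC_W/IOC_RW access mode decides whether the
     * pointed-to struct is converted before the call, after it, or both.
     */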
5649     switch (arg_type[0]) {
5650     case TYPE_NULL:
5651         /* no argument */
5652         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5653         break;
5654     case TYPE_PTRVOID:
5655     case TYPE_INT:
5656     case TYPE_LONG:
5657     case TYPE_ULONG:
5658         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5659         break;
5660     case TYPE_PTR:
5661         arg_type++;
5662         target_size = thunk_type_size(arg_type, 0);
5663         switch (ie->access) {
5664         case IOC_R:
5665             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5666             if (!is_error(ret)) {
5667                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5668                 if (!argptr)
5669                     return -TARGET_EFAULT;
5670                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5671                 unlock_user(argptr, arg, target_size);
5672             }
5673             break;
5674         case IOC_W:
5675             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5676             if (!argptr)
5677                 return -TARGET_EFAULT;
5678             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5679             unlock_user(argptr, arg, 0);
5680             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5681             break;
5682         default:
5683         case IOC_RW:
5684             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5685             if (!argptr)
5686                 return -TARGET_EFAULT;
5687             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5688             unlock_user(argptr, arg, 0);
5689             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5690             if (!is_error(ret)) {
5691                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5692                 if (!argptr)
5693                     return -TARGET_EFAULT;
5694                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5695                 unlock_user(argptr, arg, target_size);
5696             }
5697             break;
5698         }
5699         break;
5700     default:
5701         qemu_log_mask(LOG_UNIMP,
5702                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5703                       (long)cmd, arg_type[0]);
5704         ret = -TARGET_ENOTTY;
5705         break;
5706     }
5707     return ret;
5708 }
5709 
5710 static const bitmask_transtbl iflag_tbl[] = {
5711         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5712         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5713         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5714         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5715         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5716         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5717         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5718         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5719         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5720         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5721         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5722         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5723         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5724         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5725         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5726 };
5727 
5728 static const bitmask_transtbl oflag_tbl[] = {
5729 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5730 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5731 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5732 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5733 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5734 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5735 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5736 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5737 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5738 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5739 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5740 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5741 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5742 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5743 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5744 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5745 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5746 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5747 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5748 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5749 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5750 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5751 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5752 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5753 };
5754 
5755 static const bitmask_transtbl cflag_tbl[] = {
5756 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5757 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5758 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5759 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5760 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5761 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5762 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5763 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5764 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5765 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5766 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5767 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5768 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5769 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5770 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5771 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5772 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5773 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5774 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5775 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5776 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5777 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5778 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5779 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5780 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5781 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5782 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5783 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5784 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5785 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5786 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5787 };
5788 
5789 static const bitmask_transtbl lflag_tbl[] = {
5790   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5791   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5792   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5793   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5794   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5795   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5796   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5797   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5798   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5799   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5800   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5801   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5802   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5803   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5804   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5805   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5806 };
5807 
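/*
 * termios conversion: the c_iflag/c_oflag/c_cflag/c_lflag words are mapped
 * through the bitmask tables above, and the c_cc[] control characters are
 * remapped index by index since target and host V* numbering can differ.
 */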
5808 static void target_to_host_termios (void *dst, const void *src)
5809 {
5810     struct host_termios *host = dst;
5811     const struct target_termios *target = src;
5812 
5813     host->c_iflag =
5814         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5815     host->c_oflag =
5816         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5817     host->c_cflag =
5818         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5819     host->c_lflag =
5820         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5821     host->c_line = target->c_line;
5822 
5823     memset(host->c_cc, 0, sizeof(host->c_cc));
5824     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5825     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5826     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5827     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5828     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5829     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5830     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5831     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5832     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5833     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5834     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5835     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5836     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5837     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5838     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5839     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5840     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5841 }
5842 
5843 static void host_to_target_termios (void *dst, const void *src)
5844 {
5845     struct target_termios *target = dst;
5846     const struct host_termios *host = src;
5847 
5848     target->c_iflag =
5849         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5850     target->c_oflag =
5851         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5852     target->c_cflag =
5853         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5854     target->c_lflag =
5855         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5856     target->c_line = host->c_line;
5857 
5858     memset(target->c_cc, 0, sizeof(target->c_cc));
5859     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5860     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5861     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5862     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5863     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5864     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5865     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5866     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5867     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5868     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5869     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5870     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5871     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5872     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5873     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5874     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5875     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5876 }
5877 
5878 static const StructEntry struct_termios_def = {
5879     .convert = { host_to_target_termios, target_to_host_termios },
5880     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5881     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5882     .print = print_termios,
5883 };
5884 
5885 /* If the host does not provide these bits, they may be safely discarded. */
5886 #ifndef MAP_SYNC
5887 #define MAP_SYNC 0
5888 #endif
5889 #ifndef MAP_UNINITIALIZED
5890 #define MAP_UNINITIALIZED 0
5891 #endif
5892 
5893 static const bitmask_transtbl mmap_flags_tbl[] = {
5894     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5895     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5896       MAP_ANONYMOUS, MAP_ANONYMOUS },
5897     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5898       MAP_GROWSDOWN, MAP_GROWSDOWN },
5899     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5900       MAP_DENYWRITE, MAP_DENYWRITE },
5901     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5902       MAP_EXECUTABLE, MAP_EXECUTABLE },
5903     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5904     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5905       MAP_NORESERVE, MAP_NORESERVE },
5906     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5907     /* MAP_STACK had been ignored by the kernel for quite some time.
5908        Recognize it for the target insofar as we do not want to pass
5909        it through to the host.  */
5910     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5911     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5912     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5913     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5914       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5915     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5916       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5917 };
5918 
5919 /*
5920  * Arrange for legacy / undefined architecture specific flags to be
5921  * ignored by mmap handling code.
5922  */
5923 #ifndef TARGET_MAP_32BIT
5924 #define TARGET_MAP_32BIT 0
5925 #endif
5926 #ifndef TARGET_MAP_HUGE_2MB
5927 #define TARGET_MAP_HUGE_2MB 0
5928 #endif
5929 #ifndef TARGET_MAP_HUGE_1GB
5930 #define TARGET_MAP_HUGE_1GB 0
5931 #endif
5932 
5933 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5934                         int target_flags, int fd, off_t offset)
5935 {
5936     /*
5937      * The historical set of flags that all mmap types implicitly support.
5938      */
5939     enum {
5940         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5941                                | TARGET_MAP_PRIVATE
5942                                | TARGET_MAP_FIXED
5943                                | TARGET_MAP_ANONYMOUS
5944                                | TARGET_MAP_DENYWRITE
5945                                | TARGET_MAP_EXECUTABLE
5946                                | TARGET_MAP_UNINITIALIZED
5947                                | TARGET_MAP_GROWSDOWN
5948                                | TARGET_MAP_LOCKED
5949                                | TARGET_MAP_NORESERVE
5950                                | TARGET_MAP_POPULATE
5951                                | TARGET_MAP_NONBLOCK
5952                                | TARGET_MAP_STACK
5953                                | TARGET_MAP_HUGETLB
5954                                | TARGET_MAP_32BIT
5955                                | TARGET_MAP_HUGE_2MB
5956                                | TARGET_MAP_HUGE_1GB
5957     };
5958     int host_flags;
5959 
5960     switch (target_flags & TARGET_MAP_TYPE) {
5961     case TARGET_MAP_PRIVATE:
5962         host_flags = MAP_PRIVATE;
5963         break;
5964     case TARGET_MAP_SHARED:
5965         host_flags = MAP_SHARED;
5966         break;
5967     case TARGET_MAP_SHARED_VALIDATE:
5968         /*
5969          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5970          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5971          */
5972         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5973             return -TARGET_EOPNOTSUPP;
5974         }
5975         host_flags = MAP_SHARED_VALIDATE;
5976         if (target_flags & TARGET_MAP_SYNC) {
5977             host_flags |= MAP_SYNC;
5978         }
5979         break;
5980     default:
5981         return -TARGET_EINVAL;
5982     }
5983     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5984 
5985     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5986 }
5987 
5988 /*
5989  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5990  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5991  */
5992 #if defined(TARGET_I386)
5993 
5994 /* NOTE: there is really one LDT for all the threads */
5995 static uint8_t *ldt_table;
5996 
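/*
 * The guest LDT is emulated with a block of descriptors mapped at
 * env->ldt.base in guest memory; read_ldt()/write_ldt() below operate on
 * that table directly, packing 8-byte entries the same way the kernel does.
 */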
5997 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5998 {
5999     int size;
6000     void *p;
6001 
6002     if (!ldt_table)
6003         return 0;
6004     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6005     if (size > bytecount)
6006         size = bytecount;
6007     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6008     if (!p)
6009         return -TARGET_EFAULT;
6010     /* ??? Should this be byteswapped?  */
6011     memcpy(p, ldt_table, size);
6012     unlock_user(p, ptr, size);
6013     return size;
6014 }
6015 
6016 /* XXX: add locking support */
6017 static abi_long write_ldt(CPUX86State *env,
6018                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6019 {
6020     struct target_modify_ldt_ldt_s ldt_info;
6021     struct target_modify_ldt_ldt_s *target_ldt_info;
6022     int seg_32bit, contents, read_exec_only, limit_in_pages;
6023     int seg_not_present, useable, lm;
6024     uint32_t *lp, entry_1, entry_2;
6025 
6026     if (bytecount != sizeof(ldt_info))
6027         return -TARGET_EINVAL;
6028     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6029         return -TARGET_EFAULT;
6030     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6031     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6032     ldt_info.limit = tswap32(target_ldt_info->limit);
6033     ldt_info.flags = tswap32(target_ldt_info->flags);
6034     unlock_user_struct(target_ldt_info, ptr, 0);
6035 
6036     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6037         return -TARGET_EINVAL;
6038     seg_32bit = ldt_info.flags & 1;
6039     contents = (ldt_info.flags >> 1) & 3;
6040     read_exec_only = (ldt_info.flags >> 3) & 1;
6041     limit_in_pages = (ldt_info.flags >> 4) & 1;
6042     seg_not_present = (ldt_info.flags >> 5) & 1;
6043     useable = (ldt_info.flags >> 6) & 1;
6044 #ifdef TARGET_ABI32
6045     lm = 0;
6046 #else
6047     lm = (ldt_info.flags >> 7) & 1;
6048 #endif
6049     if (contents == 3) {
6050         if (oldmode)
6051             return -TARGET_EINVAL;
6052         if (seg_not_present == 0)
6053             return -TARGET_EINVAL;
6054     }
6055     /* allocate the LDT */
6056     if (!ldt_table) {
6057         env->ldt.base = target_mmap(0,
6058                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6059                                     PROT_READ|PROT_WRITE,
6060                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6061         if (env->ldt.base == -1)
6062             return -TARGET_ENOMEM;
6063         memset(g2h_untagged(env->ldt.base), 0,
6064                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6065         env->ldt.limit = 0xffff;
6066         ldt_table = g2h_untagged(env->ldt.base);
6067     }
6068 
6069     /* NOTE: same code as Linux kernel */
6070     /* Allow LDTs to be cleared by the user. */
6071     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6072         if (oldmode ||
6073             (contents == 0             &&
6074              read_exec_only == 1       &&
6075              seg_32bit == 0            &&
6076              limit_in_pages == 0       &&
6077              seg_not_present == 1      &&
6078              useable == 0 )) {
6079             entry_1 = 0;
6080             entry_2 = 0;
6081             goto install;
6082         }
6083     }
6084 
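    /*
     * Pack the x86 segment descriptor: entry_1 is the low word (limit 15..0,
     * base 15..0) and entry_2 the high word (base 23..16 and 31..24,
     * limit 19..16, plus the type/present/size/granularity flag bits).
     */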
6085     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6086         (ldt_info.limit & 0x0ffff);
6087     entry_2 = (ldt_info.base_addr & 0xff000000) |
6088         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6089         (ldt_info.limit & 0xf0000) |
6090         ((read_exec_only ^ 1) << 9) |
6091         (contents << 10) |
6092         ((seg_not_present ^ 1) << 15) |
6093         (seg_32bit << 22) |
6094         (limit_in_pages << 23) |
6095         (lm << 21) |
6096         0x7000;
6097     if (!oldmode)
6098         entry_2 |= (useable << 20);
6099 
6100     /* Install the new entry ...  */
6101 install:
6102     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6103     lp[0] = tswap32(entry_1);
6104     lp[1] = tswap32(entry_2);
6105     return 0;
6106 }
6107 
6108 /* specific and weird i386 syscalls */
6109 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6110                               unsigned long bytecount)
6111 {
6112     abi_long ret;
6113 
6114     switch (func) {
6115     case 0:
6116         ret = read_ldt(ptr, bytecount);
6117         break;
6118     case 1:
6119         ret = write_ldt(env, ptr, bytecount, 1);
6120         break;
6121     case 0x11:
6122         ret = write_ldt(env, ptr, bytecount, 0);
6123         break;
6124     default:
6125         ret = -TARGET_ENOSYS;
6126         break;
6127     }
6128     return ret;
6129 }
6130 
6131 #if defined(TARGET_ABI32)
6132 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6133 {
6134     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6135     struct target_modify_ldt_ldt_s ldt_info;
6136     struct target_modify_ldt_ldt_s *target_ldt_info;
6137     int seg_32bit, contents, read_exec_only, limit_in_pages;
6138     int seg_not_present, useable, lm;
6139     uint32_t *lp, entry_1, entry_2;
6140     int i;
6141 
6142     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6143     if (!target_ldt_info)
6144         return -TARGET_EFAULT;
6145     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6146     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6147     ldt_info.limit = tswap32(target_ldt_info->limit);
6148     ldt_info.flags = tswap32(target_ldt_info->flags);
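    /* An entry_number of -1 asks us to pick the first free TLS slot in the
       GDT and report the chosen index back to the guest.  */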
6149     if (ldt_info.entry_number == -1) {
6150         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6151             if (gdt_table[i] == 0) {
6152                 ldt_info.entry_number = i;
6153                 target_ldt_info->entry_number = tswap32(i);
6154                 break;
6155             }
6156         }
6157     }
6158     unlock_user_struct(target_ldt_info, ptr, 1);
6159 
6160     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6161         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6162            return -TARGET_EINVAL;
6163     seg_32bit = ldt_info.flags & 1;
6164     contents = (ldt_info.flags >> 1) & 3;
6165     read_exec_only = (ldt_info.flags >> 3) & 1;
6166     limit_in_pages = (ldt_info.flags >> 4) & 1;
6167     seg_not_present = (ldt_info.flags >> 5) & 1;
6168     useable = (ldt_info.flags >> 6) & 1;
6169 #ifdef TARGET_ABI32
6170     lm = 0;
6171 #else
6172     lm = (ldt_info.flags >> 7) & 1;
6173 #endif
6174 
6175     if (contents == 3) {
6176         if (seg_not_present == 0)
6177             return -TARGET_EINVAL;
6178     }
6179 
6180     /* NOTE: same code as Linux kernel */
6181     /* Allow LDTs to be cleared by the user. */
6182     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6183         if ((contents == 0             &&
6184              read_exec_only == 1       &&
6185              seg_32bit == 0            &&
6186              limit_in_pages == 0       &&
6187              seg_not_present == 1      &&
6188              useable == 0 )) {
6189             entry_1 = 0;
6190             entry_2 = 0;
6191             goto install;
6192         }
6193     }
6194 
6195     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6196         (ldt_info.limit & 0x0ffff);
6197     entry_2 = (ldt_info.base_addr & 0xff000000) |
6198         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6199         (ldt_info.limit & 0xf0000) |
6200         ((read_exec_only ^ 1) << 9) |
6201         (contents << 10) |
6202         ((seg_not_present ^ 1) << 15) |
6203         (seg_32bit << 22) |
6204         (limit_in_pages << 23) |
6205         (useable << 20) |
6206         (lm << 21) |
6207         0x7000;
6208 
6209     /* Install the new entry ...  */
6210 install:
6211     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6212     lp[0] = tswap32(entry_1);
6213     lp[1] = tswap32(entry_2);
6214     return 0;
6215 }
6216 
6217 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6218 {
6219     struct target_modify_ldt_ldt_s *target_ldt_info;
6220     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6221     uint32_t base_addr, limit, flags;
6222     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6223     int seg_not_present, useable, lm;
6224     uint32_t *lp, entry_1, entry_2;
6225 
6226     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6227     if (!target_ldt_info)
6228         return -TARGET_EFAULT;
6229     idx = tswap32(target_ldt_info->entry_number);
6230     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6231         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6232         unlock_user_struct(target_ldt_info, ptr, 1);
6233         return -TARGET_EINVAL;
6234     }
6235     lp = (uint32_t *)(gdt_table + idx);
6236     entry_1 = tswap32(lp[0]);
6237     entry_2 = tswap32(lp[1]);
6238 
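    /* Decode the descriptor words back into the flag layout used by
       set_thread_area (the inverse of the encoding above).  */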
6239     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6240     contents = (entry_2 >> 10) & 3;
6241     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6242     seg_32bit = (entry_2 >> 22) & 1;
6243     limit_in_pages = (entry_2 >> 23) & 1;
6244     useable = (entry_2 >> 20) & 1;
6245 #ifdef TARGET_ABI32
6246     lm = 0;
6247 #else
6248     lm = (entry_2 >> 21) & 1;
6249 #endif
6250     flags = (seg_32bit << 0) | (contents << 1) |
6251         (read_exec_only << 3) | (limit_in_pages << 4) |
6252         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6253     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6254     base_addr = (entry_1 >> 16) |
6255         (entry_2 & 0xff000000) |
6256         ((entry_2 & 0xff) << 16);
6257     target_ldt_info->base_addr = tswapal(base_addr);
6258     target_ldt_info->limit = tswap32(limit);
6259     target_ldt_info->flags = tswap32(flags);
6260     unlock_user_struct(target_ldt_info, ptr, 1);
6261     return 0;
6262 }
6263 
6264 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6265 {
6266     return -TARGET_ENOSYS;
6267 }
6268 #else
6269 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6270 {
6271     abi_long ret = 0;
6272     abi_ulong val;
6273     int idx;
6274 
6275     switch(code) {
6276     case TARGET_ARCH_SET_GS:
6277     case TARGET_ARCH_SET_FS:
6278         if (code == TARGET_ARCH_SET_GS)
6279             idx = R_GS;
6280         else
6281             idx = R_FS;
6282         cpu_x86_load_seg(env, idx, 0);
6283         env->segs[idx].base = addr;
6284         break;
6285     case TARGET_ARCH_GET_GS:
6286     case TARGET_ARCH_GET_FS:
6287         if (code == TARGET_ARCH_GET_GS)
6288             idx = R_GS;
6289         else
6290             idx = R_FS;
6291         val = env->segs[idx].base;
6292         if (put_user(val, addr, abi_ulong))
6293             ret = -TARGET_EFAULT;
6294         break;
6295     default:
6296         ret = -TARGET_EINVAL;
6297         break;
6298     }
6299     return ret;
6300 }
6301 #endif /* defined(TARGET_ABI32) */
6302 #endif /* defined(TARGET_I386) */
6303 
6304 /*
6305  * These constants are generic.  Supply any that are missing from the host.
6306  */
6307 #ifndef PR_SET_NAME
6308 # define PR_SET_NAME    15
6309 # define PR_GET_NAME    16
6310 #endif
6311 #ifndef PR_SET_FP_MODE
6312 # define PR_SET_FP_MODE 45
6313 # define PR_GET_FP_MODE 46
6314 # define PR_FP_MODE_FR   (1 << 0)
6315 # define PR_FP_MODE_FRE  (1 << 1)
6316 #endif
6317 #ifndef PR_SVE_SET_VL
6318 # define PR_SVE_SET_VL  50
6319 # define PR_SVE_GET_VL  51
6320 # define PR_SVE_VL_LEN_MASK  0xffff
6321 # define PR_SVE_VL_INHERIT   (1 << 17)
6322 #endif
6323 #ifndef PR_PAC_RESET_KEYS
6324 # define PR_PAC_RESET_KEYS  54
6325 # define PR_PAC_APIAKEY   (1 << 0)
6326 # define PR_PAC_APIBKEY   (1 << 1)
6327 # define PR_PAC_APDAKEY   (1 << 2)
6328 # define PR_PAC_APDBKEY   (1 << 3)
6329 # define PR_PAC_APGAKEY   (1 << 4)
6330 #endif
6331 #ifndef PR_SET_TAGGED_ADDR_CTRL
6332 # define PR_SET_TAGGED_ADDR_CTRL 55
6333 # define PR_GET_TAGGED_ADDR_CTRL 56
6334 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6335 #endif
6336 #ifndef PR_SET_IO_FLUSHER
6337 # define PR_SET_IO_FLUSHER 57
6338 # define PR_GET_IO_FLUSHER 58
6339 #endif
6340 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6341 # define PR_SET_SYSCALL_USER_DISPATCH 59
6342 #endif
6343 #ifndef PR_SME_SET_VL
6344 # define PR_SME_SET_VL  63
6345 # define PR_SME_GET_VL  64
6346 # define PR_SME_VL_LEN_MASK  0xffff
6347 # define PR_SME_VL_INHERIT   (1 << 17)
6348 #endif
6349 
6350 #include "target_prctl.h"
6351 
6352 static abi_long do_prctl_inval0(CPUArchState *env)
6353 {
6354     return -TARGET_EINVAL;
6355 }
6356 
6357 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6358 {
6359     return -TARGET_EINVAL;
6360 }
6361 
6362 #ifndef do_prctl_get_fp_mode
6363 #define do_prctl_get_fp_mode do_prctl_inval0
6364 #endif
6365 #ifndef do_prctl_set_fp_mode
6366 #define do_prctl_set_fp_mode do_prctl_inval1
6367 #endif
6368 #ifndef do_prctl_sve_get_vl
6369 #define do_prctl_sve_get_vl do_prctl_inval0
6370 #endif
6371 #ifndef do_prctl_sve_set_vl
6372 #define do_prctl_sve_set_vl do_prctl_inval1
6373 #endif
6374 #ifndef do_prctl_reset_keys
6375 #define do_prctl_reset_keys do_prctl_inval1
6376 #endif
6377 #ifndef do_prctl_set_tagged_addr_ctrl
6378 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_get_tagged_addr_ctrl
6381 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6382 #endif
6383 #ifndef do_prctl_get_unalign
6384 #define do_prctl_get_unalign do_prctl_inval1
6385 #endif
6386 #ifndef do_prctl_set_unalign
6387 #define do_prctl_set_unalign do_prctl_inval1
6388 #endif
6389 #ifndef do_prctl_sme_get_vl
6390 #define do_prctl_sme_get_vl do_prctl_inval0
6391 #endif
6392 #ifndef do_prctl_sme_set_vl
6393 #define do_prctl_sme_set_vl do_prctl_inval1
6394 #endif
6395 
6396 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6397                          abi_long arg3, abi_long arg4, abi_long arg5)
6398 {
6399     abi_long ret;
6400 
6401     switch (option) {
6402     case PR_GET_PDEATHSIG:
6403         {
6404             int deathsig;
6405             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6406                                   arg3, arg4, arg5));
6407             if (!is_error(ret) &&
6408                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6409                 return -TARGET_EFAULT;
6410             }
6411             return ret;
6412         }
6413     case PR_SET_PDEATHSIG:
6414         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6415                                arg3, arg4, arg5));
6416     case PR_GET_NAME:
6417         {
6418             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6419             if (!name) {
6420                 return -TARGET_EFAULT;
6421             }
6422             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6423                                   arg3, arg4, arg5));
6424             unlock_user(name, arg2, 16);
6425             return ret;
6426         }
6427     case PR_SET_NAME:
6428         {
6429             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6430             if (!name) {
6431                 return -TARGET_EFAULT;
6432             }
6433             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6434                                   arg3, arg4, arg5));
6435             unlock_user(name, arg2, 0);
6436             return ret;
6437         }
6438     case PR_GET_FP_MODE:
6439         return do_prctl_get_fp_mode(env);
6440     case PR_SET_FP_MODE:
6441         return do_prctl_set_fp_mode(env, arg2);
6442     case PR_SVE_GET_VL:
6443         return do_prctl_sve_get_vl(env);
6444     case PR_SVE_SET_VL:
6445         return do_prctl_sve_set_vl(env, arg2);
6446     case PR_SME_GET_VL:
6447         return do_prctl_sme_get_vl(env);
6448     case PR_SME_SET_VL:
6449         return do_prctl_sme_set_vl(env, arg2);
6450     case PR_PAC_RESET_KEYS:
6451         if (arg3 || arg4 || arg5) {
6452             return -TARGET_EINVAL;
6453         }
6454         return do_prctl_reset_keys(env, arg2);
6455     case PR_SET_TAGGED_ADDR_CTRL:
6456         if (arg3 || arg4 || arg5) {
6457             return -TARGET_EINVAL;
6458         }
6459         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6460     case PR_GET_TAGGED_ADDR_CTRL:
6461         if (arg2 || arg3 || arg4 || arg5) {
6462             return -TARGET_EINVAL;
6463         }
6464         return do_prctl_get_tagged_addr_ctrl(env);
6465 
6466     case PR_GET_UNALIGN:
6467         return do_prctl_get_unalign(env, arg2);
6468     case PR_SET_UNALIGN:
6469         return do_prctl_set_unalign(env, arg2);
6470 
6471     case PR_CAP_AMBIENT:
6472     case PR_CAPBSET_READ:
6473     case PR_CAPBSET_DROP:
6474     case PR_GET_DUMPABLE:
6475     case PR_SET_DUMPABLE:
6476     case PR_GET_KEEPCAPS:
6477     case PR_SET_KEEPCAPS:
6478     case PR_GET_SECUREBITS:
6479     case PR_SET_SECUREBITS:
6480     case PR_GET_TIMING:
6481     case PR_SET_TIMING:
6482     case PR_GET_TIMERSLACK:
6483     case PR_SET_TIMERSLACK:
6484     case PR_MCE_KILL:
6485     case PR_MCE_KILL_GET:
6486     case PR_GET_NO_NEW_PRIVS:
6487     case PR_SET_NO_NEW_PRIVS:
6488     case PR_GET_IO_FLUSHER:
6489     case PR_SET_IO_FLUSHER:
6490     case PR_SET_CHILD_SUBREAPER:
6491     case PR_GET_SPECULATION_CTRL:
6492     case PR_SET_SPECULATION_CTRL:
6493         /* Some prctl options have no pointer arguments and we can pass them on. */
6494         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6495 
6496     case PR_GET_CHILD_SUBREAPER:
6497         {
6498             int val;
6499             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6500                                   arg3, arg4, arg5));
6501             if (!is_error(ret) && put_user_s32(val, arg2)) {
6502                 return -TARGET_EFAULT;
6503             }
6504             return ret;
6505         }
6506 
6507     case PR_GET_TID_ADDRESS:
6508         {
6509             TaskState *ts = get_task_state(env_cpu(env));
6510             return put_user_ual(ts->child_tidptr, arg2);
6511         }
6512 
6513     case PR_GET_FPEXC:
6514     case PR_SET_FPEXC:
6515         /* Was used for SPE on PowerPC. */
6516         return -TARGET_EINVAL;
6517 
6518     case PR_GET_ENDIAN:
6519     case PR_SET_ENDIAN:
6520     case PR_GET_FPEMU:
6521     case PR_SET_FPEMU:
6522     case PR_SET_MM:
6523     case PR_GET_SECCOMP:
6524     case PR_SET_SECCOMP:
6525     case PR_SET_SYSCALL_USER_DISPATCH:
6526     case PR_GET_THP_DISABLE:
6527     case PR_SET_THP_DISABLE:
6528     case PR_GET_TSC:
6529     case PR_SET_TSC:
6530         /* Refuse these so the target cannot disable things that QEMU needs. */
6531         return -TARGET_EINVAL;
6532 
6533     default:
6534         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6535                       option);
6536         return -TARGET_EINVAL;
6537     }
6538 }
6539 
6540 #define NEW_STACK_SIZE 0x40000
6541 
6542 
6543 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6544 typedef struct {
6545     CPUArchState *env;
6546     pthread_mutex_t mutex;
6547     pthread_cond_t cond;
6548     pthread_t thread;
6549     uint32_t tid;
6550     abi_ulong child_tidptr;
6551     abi_ulong parent_tidptr;
6552     sigset_t sigmask;
6553 } new_thread_info;
6554 
6555 static void *clone_func(void *arg)
6556 {
6557     new_thread_info *info = arg;
6558     CPUArchState *env;
6559     CPUState *cpu;
6560     TaskState *ts;
6561 
6562     rcu_register_thread();
6563     tcg_register_thread();
6564     env = info->env;
6565     cpu = env_cpu(env);
6566     thread_cpu = cpu;
6567     ts = get_task_state(cpu);
6568     info->tid = sys_gettid();
6569     task_settid(ts);
6570     if (info->child_tidptr)
6571         put_user_u32(info->tid, info->child_tidptr);
6572     if (info->parent_tidptr)
6573         put_user_u32(info->tid, info->parent_tidptr);
6574     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6575     /* Enable signals.  */
6576     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6577     /* Signal to the parent that we're ready.  */
6578     pthread_mutex_lock(&info->mutex);
6579     pthread_cond_broadcast(&info->cond);
6580     pthread_mutex_unlock(&info->mutex);
6581     /* Wait until the parent has finished initializing the tls state.  */
6582     pthread_mutex_lock(&clone_lock);
6583     pthread_mutex_unlock(&clone_lock);
6584     cpu_loop(env);
6585     /* never exits */
6586     return NULL;
6587 }
6588 
6589 /* do_fork() must return host values and target errnos (unlike most
6590    do_*() functions). */
6591 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6592                    abi_ulong parent_tidptr, target_ulong newtls,
6593                    abi_ulong child_tidptr)
6594 {
6595     CPUState *cpu = env_cpu(env);
6596     int ret;
6597     TaskState *ts;
6598     CPUState *new_cpu;
6599     CPUArchState *new_env;
6600     sigset_t sigmask;
6601 
6602     flags &= ~CLONE_IGNORED_FLAGS;
6603 
6604     /* Emulate vfork() with fork() */
6605     if (flags & CLONE_VFORK)
6606         flags &= ~(CLONE_VFORK | CLONE_VM);
6607 
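    /* CLONE_VM is emulated by starting a new host thread in this process;
       anything else falls back to a host fork() further below.  */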
6608     if (flags & CLONE_VM) {
6609         TaskState *parent_ts = get_task_state(cpu);
6610         new_thread_info info;
6611         pthread_attr_t attr;
6612 
6613         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6614             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6615             return -TARGET_EINVAL;
6616         }
6617 
6618         ts = g_new0(TaskState, 1);
6619         init_task_state(ts);
6620 
6621         /* Grab a mutex so that thread setup appears atomic.  */
6622         pthread_mutex_lock(&clone_lock);
6623 
6624         /*
6625          * If this is our first additional thread, we need to ensure we
6626          * generate code for parallel execution and flush old translations.
6627          * Do this now so that the copy gets CF_PARALLEL too.
6628          */
6629         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6630             tcg_cflags_set(cpu, CF_PARALLEL);
6631             tb_flush(cpu);
6632         }
6633 
6634         /* we create a new CPU instance. */
6635         new_env = cpu_copy(env);
6636         /* Init regs that differ from the parent.  */
6637         cpu_clone_regs_child(new_env, newsp, flags);
6638         cpu_clone_regs_parent(env, flags);
6639         new_cpu = env_cpu(new_env);
6640         new_cpu->opaque = ts;
6641         ts->bprm = parent_ts->bprm;
6642         ts->info = parent_ts->info;
6643         ts->signal_mask = parent_ts->signal_mask;
6644 
6645         if (flags & CLONE_CHILD_CLEARTID) {
6646             ts->child_tidptr = child_tidptr;
6647         }
6648 
6649         if (flags & CLONE_SETTLS) {
6650             cpu_set_tls (new_env, newtls);
6651         }
6652 
6653         memset(&info, 0, sizeof(info));
6654         pthread_mutex_init(&info.mutex, NULL);
6655         pthread_mutex_lock(&info.mutex);
6656         pthread_cond_init(&info.cond, NULL);
6657         info.env = new_env;
6658         if (flags & CLONE_CHILD_SETTID) {
6659             info.child_tidptr = child_tidptr;
6660         }
6661         if (flags & CLONE_PARENT_SETTID) {
6662             info.parent_tidptr = parent_tidptr;
6663         }
6664 
6665         ret = pthread_attr_init(&attr);
6666         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6667         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6668         /* It is not safe to deliver signals until the child has finished
6669            initializing, so temporarily block all signals.  */
6670         sigfillset(&sigmask);
6671         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6672         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6673 
6674         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6675         /* TODO: Free new CPU state if thread creation failed.  */
6676 
6677         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6678         pthread_attr_destroy(&attr);
6679         if (ret == 0) {
6680             /* Wait for the child to initialize.  */
6681             pthread_cond_wait(&info.cond, &info.mutex);
6682             ret = info.tid;
6683         } else {
6684             ret = -1;
6685         }
6686         pthread_mutex_unlock(&info.mutex);
6687         pthread_cond_destroy(&info.cond);
6688         pthread_mutex_destroy(&info.mutex);
6689         pthread_mutex_unlock(&clone_lock);
6690     } else {
6691         /* without CLONE_VM, we treat it as a fork */
6692         if (flags & CLONE_INVALID_FORK_FLAGS) {
6693             return -TARGET_EINVAL;
6694         }
6695 
6696         /* We can't support custom termination signals */
6697         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6698             return -TARGET_EINVAL;
6699         }
6700 
6701 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6702         if (flags & CLONE_PIDFD) {
6703             return -TARGET_EINVAL;
6704         }
6705 #endif
6706 
6707         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6708         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6709             return -TARGET_EINVAL;
6710         }
6711 
6712         if (block_signals()) {
6713             return -QEMU_ERESTARTSYS;
6714         }
6715 
6716         fork_start();
6717         ret = fork();
6718         if (ret == 0) {
6719             /* Child Process.  */
6720             cpu_clone_regs_child(env, newsp, flags);
6721             fork_end(ret);
6722             /* There is a race condition here.  The parent process could
6723                theoretically read the TID in the child process before the child
6724                tid is set.  This would require using either ptrace
6725                (not implemented) or having *_tidptr point at a shared memory
6726                mapping.  We can't repeat the spinlock hack used above because
6727                the child process gets its own copy of the lock.  */
6728             if (flags & CLONE_CHILD_SETTID)
6729                 put_user_u32(sys_gettid(), child_tidptr);
6730             if (flags & CLONE_PARENT_SETTID)
6731                 put_user_u32(sys_gettid(), parent_tidptr);
6732             ts = get_task_state(cpu);
6733             if (flags & CLONE_SETTLS)
6734                 cpu_set_tls (env, newtls);
6735             if (flags & CLONE_CHILD_CLEARTID)
6736                 ts->child_tidptr = child_tidptr;
6737         } else {
6738             cpu_clone_regs_parent(env, flags);
6739             if (flags & CLONE_PIDFD) {
6740                 int pid_fd = 0;
6741 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6742                 int pid_child = ret;
6743                 pid_fd = pidfd_open(pid_child, 0);
6744                 if (pid_fd >= 0) {
6745                     fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6746                                            | FD_CLOEXEC);
6747                 } else {
6748                     pid_fd = 0;
6749                 }
6750 #endif
6751                 put_user_u32(pid_fd, parent_tidptr);
6752             }
6753             fork_end(ret);
6754         }
6755         g_assert(!cpu_in_exclusive_context(cpu));
6756     }
6757     return ret;
6758 }
6759 
6760 /* warning: doesn't handle Linux-specific flags... */
6761 static int target_to_host_fcntl_cmd(int cmd)
6762 {
6763     int ret;
6764 
6765     switch(cmd) {
6766     case TARGET_F_DUPFD:
6767     case TARGET_F_GETFD:
6768     case TARGET_F_SETFD:
6769     case TARGET_F_GETFL:
6770     case TARGET_F_SETFL:
6771     case TARGET_F_OFD_GETLK:
6772     case TARGET_F_OFD_SETLK:
6773     case TARGET_F_OFD_SETLKW:
6774         ret = cmd;
6775         break;
6776     case TARGET_F_GETLK:
6777         ret = F_GETLK;
6778         break;
6779     case TARGET_F_SETLK:
6780         ret = F_SETLK;
6781         break;
6782     case TARGET_F_SETLKW:
6783         ret = F_SETLKW;
6784         break;
6785     case TARGET_F_GETOWN:
6786         ret = F_GETOWN;
6787         break;
6788     case TARGET_F_SETOWN:
6789         ret = F_SETOWN;
6790         break;
6791     case TARGET_F_GETSIG:
6792         ret = F_GETSIG;
6793         break;
6794     case TARGET_F_SETSIG:
6795         ret = F_SETSIG;
6796         break;
6797 #if TARGET_ABI_BITS == 32
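    /* On 32-bit ABIs the target's F_*LK64 commands are handled with the
       host's plain F_*LK commands.  */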
6798     case TARGET_F_GETLK64:
6799         ret = F_GETLK;
6800         break;
6801     case TARGET_F_SETLK64:
6802         ret = F_SETLK;
6803         break;
6804     case TARGET_F_SETLKW64:
6805         ret = F_SETLKW;
6806         break;
6807 #endif
6808     case TARGET_F_SETLEASE:
6809         ret = F_SETLEASE;
6810         break;
6811     case TARGET_F_GETLEASE:
6812         ret = F_GETLEASE;
6813         break;
6814 #ifdef F_DUPFD_CLOEXEC
6815     case TARGET_F_DUPFD_CLOEXEC:
6816         ret = F_DUPFD_CLOEXEC;
6817         break;
6818 #endif
6819     case TARGET_F_NOTIFY:
6820         ret = F_NOTIFY;
6821         break;
6822 #ifdef F_GETOWN_EX
6823     case TARGET_F_GETOWN_EX:
6824         ret = F_GETOWN_EX;
6825         break;
6826 #endif
6827 #ifdef F_SETOWN_EX
6828     case TARGET_F_SETOWN_EX:
6829         ret = F_SETOWN_EX;
6830         break;
6831 #endif
6832 #ifdef F_SETPIPE_SZ
6833     case TARGET_F_SETPIPE_SZ:
6834         ret = F_SETPIPE_SZ;
6835         break;
6836     case TARGET_F_GETPIPE_SZ:
6837         ret = F_GETPIPE_SZ;
6838         break;
6839 #endif
6840 #ifdef F_ADD_SEALS
6841     case TARGET_F_ADD_SEALS:
6842         ret = F_ADD_SEALS;
6843         break;
6844     case TARGET_F_GET_SEALS:
6845         ret = F_GET_SEALS;
6846         break;
6847 #endif
6848     default:
6849         ret = -TARGET_EINVAL;
6850         break;
6851     }
6852 
6853 #if defined(__powerpc64__)
6854     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6855      * that are not supported by the kernel. The glibc fcntl call actually
6856      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6857      * the syscall directly, adjust to what is supported by the kernel.
6858      */
6859     if (ret >= F_GETLK && ret <= F_SETLKW) {
6860         ret -= F_GETLK - 5;
6861     }
6862 #endif
6863 
6864     return ret;
6865 }
6866 
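/* The same switch body is used for both conversion directions by redefining
   TRANSTBL_CONVERT before each expansion below.  */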
6867 #define FLOCK_TRANSTBL \
6868     switch (type) { \
6869     TRANSTBL_CONVERT(F_RDLCK); \
6870     TRANSTBL_CONVERT(F_WRLCK); \
6871     TRANSTBL_CONVERT(F_UNLCK); \
6872     }
6873 
6874 static int target_to_host_flock(int type)
6875 {
6876 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6877     FLOCK_TRANSTBL
6878 #undef  TRANSTBL_CONVERT
6879     return -TARGET_EINVAL;
6880 }
6881 
6882 static int host_to_target_flock(int type)
6883 {
6884 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6885     FLOCK_TRANSTBL
6886 #undef  TRANSTBL_CONVERT
6887     /* if we don't know how to convert the value coming
6888      * from the host, we copy it to the target field as-is
6889      */
6890     return type;
6891 }
6892 
6893 static inline abi_long copy_from_user_flock(struct flock *fl,
6894                                             abi_ulong target_flock_addr)
6895 {
6896     struct target_flock *target_fl;
6897     int l_type;
6898 
6899     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6900         return -TARGET_EFAULT;
6901     }
6902 
6903     __get_user(l_type, &target_fl->l_type);
6904     l_type = target_to_host_flock(l_type);
6905     if (l_type < 0) {
6906         return l_type;
6907     }
6908     fl->l_type = l_type;
6909     __get_user(fl->l_whence, &target_fl->l_whence);
6910     __get_user(fl->l_start, &target_fl->l_start);
6911     __get_user(fl->l_len, &target_fl->l_len);
6912     __get_user(fl->l_pid, &target_fl->l_pid);
6913     unlock_user_struct(target_fl, target_flock_addr, 0);
6914     return 0;
6915 }
6916 
6917 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6918                                           const struct flock *fl)
6919 {
6920     struct target_flock *target_fl;
6921     short l_type;
6922 
6923     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6924         return -TARGET_EFAULT;
6925     }
6926 
6927     l_type = host_to_target_flock(fl->l_type);
6928     __put_user(l_type, &target_fl->l_type);
6929     __put_user(fl->l_whence, &target_fl->l_whence);
6930     __put_user(fl->l_start, &target_fl->l_start);
6931     __put_user(fl->l_len, &target_fl->l_len);
6932     __put_user(fl->l_pid, &target_fl->l_pid);
6933     unlock_user_struct(target_fl, target_flock_addr, 1);
6934     return 0;
6935 }
6936 
6937 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6938 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6939 
6940 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6941 struct target_oabi_flock64 {
6942     abi_short l_type;
6943     abi_short l_whence;
6944     abi_llong l_start;
6945     abi_llong l_len;
6946     abi_int   l_pid;
6947 } QEMU_PACKED;
6948 
6949 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6950                                                    abi_ulong target_flock_addr)
6951 {
6952     struct target_oabi_flock64 *target_fl;
6953     int l_type;
6954 
6955     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6956         return -TARGET_EFAULT;
6957     }
6958 
6959     __get_user(l_type, &target_fl->l_type);
6960     l_type = target_to_host_flock(l_type);
6961     if (l_type < 0) {
6962         return l_type;
6963     }
6964     fl->l_type = l_type;
6965     __get_user(fl->l_whence, &target_fl->l_whence);
6966     __get_user(fl->l_start, &target_fl->l_start);
6967     __get_user(fl->l_len, &target_fl->l_len);
6968     __get_user(fl->l_pid, &target_fl->l_pid);
6969     unlock_user_struct(target_fl, target_flock_addr, 0);
6970     return 0;
6971 }
6972 
6973 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6974                                                  const struct flock *fl)
6975 {
6976     struct target_oabi_flock64 *target_fl;
6977     short l_type;
6978 
6979     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6980         return -TARGET_EFAULT;
6981     }
6982 
6983     l_type = host_to_target_flock(fl->l_type);
6984     __put_user(l_type, &target_fl->l_type);
6985     __put_user(fl->l_whence, &target_fl->l_whence);
6986     __put_user(fl->l_start, &target_fl->l_start);
6987     __put_user(fl->l_len, &target_fl->l_len);
6988     __put_user(fl->l_pid, &target_fl->l_pid);
6989     unlock_user_struct(target_fl, target_flock_addr, 1);
6990     return 0;
6991 }
6992 #endif
6993 
6994 static inline abi_long copy_from_user_flock64(struct flock *fl,
6995                                               abi_ulong target_flock_addr)
6996 {
6997     struct target_flock64 *target_fl;
6998     int l_type;
6999 
7000     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7001         return -TARGET_EFAULT;
7002     }
7003 
7004     __get_user(l_type, &target_fl->l_type);
7005     l_type = target_to_host_flock(l_type);
7006     if (l_type < 0) {
7007         return l_type;
7008     }
7009     fl->l_type = l_type;
7010     __get_user(fl->l_whence, &target_fl->l_whence);
7011     __get_user(fl->l_start, &target_fl->l_start);
7012     __get_user(fl->l_len, &target_fl->l_len);
7013     __get_user(fl->l_pid, &target_fl->l_pid);
7014     unlock_user_struct(target_fl, target_flock_addr, 0);
7015     return 0;
7016 }
7017 
7018 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7019                                             const struct flock *fl)
7020 {
7021     struct target_flock64 *target_fl;
7022     short l_type;
7023 
7024     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7025         return -TARGET_EFAULT;
7026     }
7027 
7028     l_type = host_to_target_flock(fl->l_type);
7029     __put_user(l_type, &target_fl->l_type);
7030     __put_user(fl->l_whence, &target_fl->l_whence);
7031     __put_user(fl->l_start, &target_fl->l_start);
7032     __put_user(fl->l_len, &target_fl->l_len);
7033     __put_user(fl->l_pid, &target_fl->l_pid);
7034     unlock_user_struct(target_fl, target_flock_addr, 1);
7035     return 0;
7036 }
7037 
7038 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7039 {
7040     struct flock fl;
7041 #ifdef F_GETOWN_EX
7042     struct f_owner_ex fox;
7043     struct target_f_owner_ex *target_fox;
7044 #endif
7045     abi_long ret;
7046     int host_cmd = target_to_host_fcntl_cmd(cmd);
7047 
7048     if (host_cmd == -TARGET_EINVAL)
7049         return host_cmd;
7050 
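    /* Commands that take a struct flock need the structure converted in
       both directions; plain integer commands are passed through after any
       flag or signal translation.  */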
7051     switch(cmd) {
7052     case TARGET_F_GETLK:
7053         ret = copy_from_user_flock(&fl, arg);
7054         if (ret) {
7055             return ret;
7056         }
7057         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7058         if (ret == 0) {
7059             ret = copy_to_user_flock(arg, &fl);
7060         }
7061         break;
7062 
7063     case TARGET_F_SETLK:
7064     case TARGET_F_SETLKW:
7065         ret = copy_from_user_flock(&fl, arg);
7066         if (ret) {
7067             return ret;
7068         }
7069         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7070         break;
7071 
7072     case TARGET_F_GETLK64:
7073     case TARGET_F_OFD_GETLK:
7074         ret = copy_from_user_flock64(&fl, arg);
7075         if (ret) {
7076             return ret;
7077         }
7078         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7079         if (ret == 0) {
7080             ret = copy_to_user_flock64(arg, &fl);
7081         }
7082         break;
7083     case TARGET_F_SETLK64:
7084     case TARGET_F_SETLKW64:
7085     case TARGET_F_OFD_SETLK:
7086     case TARGET_F_OFD_SETLKW:
7087         ret = copy_from_user_flock64(&fl, arg);
7088         if (ret) {
7089             return ret;
7090         }
7091         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7092         break;
7093 
7094     case TARGET_F_GETFL:
7095         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7096         if (ret >= 0) {
7097             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7098             /* report O_LARGEFILE to 32-bit guests on 64-bit hosts: */
7099             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7100                 ret |= TARGET_O_LARGEFILE;
7101             }
7102         }
7103         break;
7104 
7105     case TARGET_F_SETFL:
7106         ret = get_errno(safe_fcntl(fd, host_cmd,
7107                                    target_to_host_bitmask(arg,
7108                                                           fcntl_flags_tbl)));
7109         break;
7110 
7111 #ifdef F_GETOWN_EX
7112     case TARGET_F_GETOWN_EX:
7113         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7114         if (ret >= 0) {
7115             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7116                 return -TARGET_EFAULT;
7117             target_fox->type = tswap32(fox.type);
7118             target_fox->pid = tswap32(fox.pid);
7119             unlock_user_struct(target_fox, arg, 1);
7120         }
7121         break;
7122 #endif
7123 
7124 #ifdef F_SETOWN_EX
7125     case TARGET_F_SETOWN_EX:
7126         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7127             return -TARGET_EFAULT;
7128         fox.type = tswap32(target_fox->type);
7129         fox.pid = tswap32(target_fox->pid);
7130         unlock_user_struct(target_fox, arg, 0);
7131         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7132         break;
7133 #endif
7134 
7135     case TARGET_F_SETSIG:
7136         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7137         break;
7138 
7139     case TARGET_F_GETSIG:
7140         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7141         break;
7142 
7143     case TARGET_F_SETOWN:
7144     case TARGET_F_GETOWN:
7145     case TARGET_F_SETLEASE:
7146     case TARGET_F_GETLEASE:
7147     case TARGET_F_SETPIPE_SZ:
7148     case TARGET_F_GETPIPE_SZ:
7149     case TARGET_F_ADD_SEALS:
7150     case TARGET_F_GET_SEALS:
7151         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7152         break;
7153 
7154     default:
7155         ret = get_errno(safe_fcntl(fd, cmd, arg));
7156         break;
7157     }
7158     return ret;
7159 }
7160 
7161 #ifdef USE_UID16
7162 
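/* With 16-bit uid syscalls, host IDs above 65535 are reported to the guest
   as the overflow ID 65534, while a guest id of -1 keeps its "unchanged"
   meaning when widened for the host.  */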
7163 static inline int high2lowuid(int uid)
7164 {
7165     if (uid > 65535)
7166         return 65534;
7167     else
7168         return uid;
7169 }
7170 
7171 static inline int high2lowgid(int gid)
7172 {
7173     if (gid > 65535)
7174         return 65534;
7175     else
7176         return gid;
7177 }
7178 
7179 static inline int low2highuid(int uid)
7180 {
7181     if ((int16_t)uid == -1)
7182         return -1;
7183     else
7184         return uid;
7185 }
7186 
7187 static inline int low2highgid(int gid)
7188 {
7189     if ((int16_t)gid == -1)
7190         return -1;
7191     else
7192         return gid;
7193 }
7194 static inline int tswapid(int id)
7195 {
7196     return tswap16(id);
7197 }
7198 
7199 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7200 
7201 #else /* !USE_UID16 */
7202 static inline int high2lowuid(int uid)
7203 {
7204     return uid;
7205 }
7206 static inline int high2lowgid(int gid)
7207 {
7208     return gid;
7209 }
7210 static inline int low2highuid(int uid)
7211 {
7212     return uid;
7213 }
7214 static inline int low2highgid(int gid)
7215 {
7216     return gid;
7217 }
7218 static inline int tswapid(int id)
7219 {
7220     return tswap32(id);
7221 }
7222 
7223 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7224 
7225 #endif /* USE_UID16 */
7226 
7227 /* We must do direct syscalls for setting UID/GID, because we want to
7228  * implement the Linux system call semantics of "change only for this thread",
7229  * not the libc/POSIX semantics of "change for all threads in process".
7230  * (See http://ewontfix.com/17/ for more details.)
7231  * We use the 32-bit version of the syscalls if present; if it is not
7232  * then either the host architecture supports 32-bit UIDs natively with
7233  * the standard syscall, or the 16-bit UID is the best we can do.
7234  */
7235 #ifdef __NR_setuid32
7236 #define __NR_sys_setuid __NR_setuid32
7237 #else
7238 #define __NR_sys_setuid __NR_setuid
7239 #endif
7240 #ifdef __NR_setgid32
7241 #define __NR_sys_setgid __NR_setgid32
7242 #else
7243 #define __NR_sys_setgid __NR_setgid
7244 #endif
7245 #ifdef __NR_setresuid32
7246 #define __NR_sys_setresuid __NR_setresuid32
7247 #else
7248 #define __NR_sys_setresuid __NR_setresuid
7249 #endif
7250 #ifdef __NR_setresgid32
7251 #define __NR_sys_setresgid __NR_setresgid32
7252 #else
7253 #define __NR_sys_setresgid __NR_setresgid
7254 #endif
7255 #ifdef __NR_setgroups32
7256 #define __NR_sys_setgroups __NR_setgroups32
7257 #else
7258 #define __NR_sys_setgroups __NR_setgroups
7259 #endif
7260 #ifdef __NR_sys_setreuid32
7261 #define __NR_sys_setreuid __NR_setreuid32
7262 #else
7263 #define __NR_sys_setreuid __NR_setreuid
7264 #endif
7265 #ifdef __NR_sys_setregid32
7266 #define __NR_sys_setregid __NR_setregid32
7267 #else
7268 #define __NR_sys_setregid __NR_setregid
7269 #endif
7270 
7271 _syscall1(int, sys_setuid, uid_t, uid)
7272 _syscall1(int, sys_setgid, gid_t, gid)
7273 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7274 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7275 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7276 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7277 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7278 
7279 void syscall_init(void)
7280 {
7281     IOCTLEntry *ie;
7282     const argtype *arg_type;
7283     int size;
7284 
7285     thunk_init(STRUCT_MAX);
7286 
7287 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7288 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7289 #include "syscall_types.h"
7290 #undef STRUCT
7291 #undef STRUCT_SPECIAL
7292 
7293     /* we patch the ioctl size if necessary. We rely on the fact that
7294        no ioctl has all the bits at '1' in the size field */
7295     ie = ioctl_entries;
7296     while (ie->target_cmd != 0) {
7297         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7298             TARGET_IOC_SIZEMASK) {
7299             arg_type = ie->arg_type;
7300             if (arg_type[0] != TYPE_PTR) {
7301                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7302                         ie->target_cmd);
7303                 exit(1);
7304             }
7305             arg_type++;
7306             size = thunk_type_size(arg_type, 0);
7307             ie->target_cmd = (ie->target_cmd &
7308                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7309                 (size << TARGET_IOC_SIZESHIFT);
7310         }
7311 
7312         /* automatic consistency check if same arch */
7313 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7314     (defined(__x86_64__) && defined(TARGET_X86_64))
7315         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7316             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7317                     ie->name, ie->target_cmd, ie->host_cmd);
7318         }
7319 #endif
7320         ie++;
7321     }
7322 }
7323 
7324 #ifdef TARGET_NR_truncate64
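/* The 64-bit offset arrives split across two 32-bit syscall arguments; on
   ABIs that align register pairs the pair starts one argument later, so
   shift arg2/arg3 before reassembling the offset.  */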
7325 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7326                                          abi_long arg2,
7327                                          abi_long arg3,
7328                                          abi_long arg4)
7329 {
7330     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7331         arg2 = arg3;
7332         arg3 = arg4;
7333     }
7334     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7335 }
7336 #endif
7337 
7338 #ifdef TARGET_NR_ftruncate64
7339 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7340                                           abi_long arg2,
7341                                           abi_long arg3,
7342                                           abi_long arg4)
7343 {
7344     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7345         arg2 = arg3;
7346         arg3 = arg4;
7347     }
7348     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7349 }
7350 #endif
7351 
7352 #if defined(TARGET_NR_timer_settime) || \
7353     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7354 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7355                                                  abi_ulong target_addr)
7356 {
7357     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7358                                 offsetof(struct target_itimerspec,
7359                                          it_interval)) ||
7360         target_to_host_timespec(&host_its->it_value, target_addr +
7361                                 offsetof(struct target_itimerspec,
7362                                          it_value))) {
7363         return -TARGET_EFAULT;
7364     }
7365 
7366     return 0;
7367 }
7368 #endif
7369 
7370 #if defined(TARGET_NR_timer_settime64) || \
7371     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7372 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7373                                                    abi_ulong target_addr)
7374 {
7375     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7376                                   offsetof(struct target__kernel_itimerspec,
7377                                            it_interval)) ||
7378         target_to_host_timespec64(&host_its->it_value, target_addr +
7379                                   offsetof(struct target__kernel_itimerspec,
7380                                            it_value))) {
7381         return -TARGET_EFAULT;
7382     }
7383 
7384     return 0;
7385 }
7386 #endif
7387 
7388 #if ((defined(TARGET_NR_timerfd_gettime) || \
7389       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7390       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7391 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7392                                                  struct itimerspec *host_its)
7393 {
7394     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7395                                                        it_interval),
7396                                 &host_its->it_interval) ||
7397         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7398                                                        it_value),
7399                                 &host_its->it_value)) {
7400         return -TARGET_EFAULT;
7401     }
7402     return 0;
7403 }
7404 #endif
7405 
7406 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7407       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7408       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7409 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7410                                                    struct itimerspec *host_its)
7411 {
7412     if (host_to_target_timespec64(target_addr +
7413                                   offsetof(struct target__kernel_itimerspec,
7414                                            it_interval),
7415                                   &host_its->it_interval) ||
7416         host_to_target_timespec64(target_addr +
7417                                   offsetof(struct target__kernel_itimerspec,
7418                                            it_value),
7419                                   &host_its->it_value)) {
7420         return -TARGET_EFAULT;
7421     }
7422     return 0;
7423 }
7424 #endif
7425 
7426 #if defined(TARGET_NR_adjtimex) || \
7427     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7428 static inline abi_long target_to_host_timex(struct timex *host_tx,
7429                                             abi_long target_addr)
7430 {
7431     struct target_timex *target_tx;
7432 
7433     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7434         return -TARGET_EFAULT;
7435     }
7436 
7437     __get_user(host_tx->modes, &target_tx->modes);
7438     __get_user(host_tx->offset, &target_tx->offset);
7439     __get_user(host_tx->freq, &target_tx->freq);
7440     __get_user(host_tx->maxerror, &target_tx->maxerror);
7441     __get_user(host_tx->esterror, &target_tx->esterror);
7442     __get_user(host_tx->status, &target_tx->status);
7443     __get_user(host_tx->constant, &target_tx->constant);
7444     __get_user(host_tx->precision, &target_tx->precision);
7445     __get_user(host_tx->tolerance, &target_tx->tolerance);
7446     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7447     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7448     __get_user(host_tx->tick, &target_tx->tick);
7449     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7450     __get_user(host_tx->jitter, &target_tx->jitter);
7451     __get_user(host_tx->shift, &target_tx->shift);
7452     __get_user(host_tx->stabil, &target_tx->stabil);
7453     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7454     __get_user(host_tx->calcnt, &target_tx->calcnt);
7455     __get_user(host_tx->errcnt, &target_tx->errcnt);
7456     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7457     __get_user(host_tx->tai, &target_tx->tai);
7458 
7459     unlock_user_struct(target_tx, target_addr, 0);
7460     return 0;
7461 }
7462 
7463 static inline abi_long host_to_target_timex(abi_long target_addr,
7464                                             struct timex *host_tx)
7465 {
7466     struct target_timex *target_tx;
7467 
7468     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7469         return -TARGET_EFAULT;
7470     }
7471 
7472     __put_user(host_tx->modes, &target_tx->modes);
7473     __put_user(host_tx->offset, &target_tx->offset);
7474     __put_user(host_tx->freq, &target_tx->freq);
7475     __put_user(host_tx->maxerror, &target_tx->maxerror);
7476     __put_user(host_tx->esterror, &target_tx->esterror);
7477     __put_user(host_tx->status, &target_tx->status);
7478     __put_user(host_tx->constant, &target_tx->constant);
7479     __put_user(host_tx->precision, &target_tx->precision);
7480     __put_user(host_tx->tolerance, &target_tx->tolerance);
7481     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7482     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7483     __put_user(host_tx->tick, &target_tx->tick);
7484     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7485     __put_user(host_tx->jitter, &target_tx->jitter);
7486     __put_user(host_tx->shift, &target_tx->shift);
7487     __put_user(host_tx->stabil, &target_tx->stabil);
7488     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7489     __put_user(host_tx->calcnt, &target_tx->calcnt);
7490     __put_user(host_tx->errcnt, &target_tx->errcnt);
7491     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7492     __put_user(host_tx->tai, &target_tx->tai);
7493 
7494     unlock_user_struct(target_tx, target_addr, 1);
7495     return 0;
7496 }
7497 #endif
7498 
7499 
7500 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7501 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7502                                               abi_long target_addr)
7503 {
7504     struct target__kernel_timex *target_tx;
7505 
7506     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7507                                  offsetof(struct target__kernel_timex,
7508                                           time))) {
7509         return -TARGET_EFAULT;
7510     }
7511 
7512     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7513         return -TARGET_EFAULT;
7514     }
7515 
7516     __get_user(host_tx->modes, &target_tx->modes);
7517     __get_user(host_tx->offset, &target_tx->offset);
7518     __get_user(host_tx->freq, &target_tx->freq);
7519     __get_user(host_tx->maxerror, &target_tx->maxerror);
7520     __get_user(host_tx->esterror, &target_tx->esterror);
7521     __get_user(host_tx->status, &target_tx->status);
7522     __get_user(host_tx->constant, &target_tx->constant);
7523     __get_user(host_tx->precision, &target_tx->precision);
7524     __get_user(host_tx->tolerance, &target_tx->tolerance);
7525     __get_user(host_tx->tick, &target_tx->tick);
7526     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7527     __get_user(host_tx->jitter, &target_tx->jitter);
7528     __get_user(host_tx->shift, &target_tx->shift);
7529     __get_user(host_tx->stabil, &target_tx->stabil);
7530     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7531     __get_user(host_tx->calcnt, &target_tx->calcnt);
7532     __get_user(host_tx->errcnt, &target_tx->errcnt);
7533     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7534     __get_user(host_tx->tai, &target_tx->tai);
7535 
7536     unlock_user_struct(target_tx, target_addr, 0);
7537     return 0;
7538 }
7539 
7540 static inline abi_long host_to_target_timex64(abi_long target_addr,
7541                                               struct timex *host_tx)
7542 {
7543     struct target__kernel_timex *target_tx;
7544 
7545     if (copy_to_user_timeval64(target_addr +
7546                                offsetof(struct target__kernel_timex, time),
7547                                &host_tx->time)) {
7548         return -TARGET_EFAULT;
7549     }
7550 
7551     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7552         return -TARGET_EFAULT;
7553     }
7554 
7555     __put_user(host_tx->modes, &target_tx->modes);
7556     __put_user(host_tx->offset, &target_tx->offset);
7557     __put_user(host_tx->freq, &target_tx->freq);
7558     __put_user(host_tx->maxerror, &target_tx->maxerror);
7559     __put_user(host_tx->esterror, &target_tx->esterror);
7560     __put_user(host_tx->status, &target_tx->status);
7561     __put_user(host_tx->constant, &target_tx->constant);
7562     __put_user(host_tx->precision, &target_tx->precision);
7563     __put_user(host_tx->tolerance, &target_tx->tolerance);
7564     __put_user(host_tx->tick, &target_tx->tick);
7565     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7566     __put_user(host_tx->jitter, &target_tx->jitter);
7567     __put_user(host_tx->shift, &target_tx->shift);
7568     __put_user(host_tx->stabil, &target_tx->stabil);
7569     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7570     __put_user(host_tx->calcnt, &target_tx->calcnt);
7571     __put_user(host_tx->errcnt, &target_tx->errcnt);
7572     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7573     __put_user(host_tx->tai, &target_tx->tai);
7574 
7575     unlock_user_struct(target_tx, target_addr, 1);
7576     return 0;
7577 }
7578 #endif
7579 
7580 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7581 #define sigev_notify_thread_id _sigev_un._tid
7582 #endif
7583 
7584 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7585                                                abi_ulong target_addr)
7586 {
7587     struct target_sigevent *target_sevp;
7588 
7589     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7590         return -TARGET_EFAULT;
7591     }
7592 
7593     /* This union is awkward on 64 bit systems because it has a 32 bit
7594      * integer and a pointer in it; we follow the conversion approach
7595      * used for handling sigval types in signal.c so the guest should get
7596      * the correct value back even if we did a 64 bit byteswap and it's
7597      * using the 32 bit integer.
7598      */
7599     host_sevp->sigev_value.sival_ptr =
7600         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7601     host_sevp->sigev_signo =
7602         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7603     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7604     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7605 
7606     unlock_user_struct(target_sevp, target_addr, 1);
7607     return 0;
7608 }
7609 
7610 #if defined(TARGET_NR_mlockall)
7611 static inline int target_to_host_mlockall_arg(int arg)
7612 {
7613     int result = 0;
7614 
7615     if (arg & TARGET_MCL_CURRENT) {
7616         result |= MCL_CURRENT;
7617     }
7618     if (arg & TARGET_MCL_FUTURE) {
7619         result |= MCL_FUTURE;
7620     }
7621 #ifdef MCL_ONFAULT
7622     if (arg & TARGET_MCL_ONFAULT) {
7623         result |= MCL_ONFAULT;
7624     }
7625 #endif
7626 
7627     return result;
7628 }
7629 #endif
7630 
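/* Translate the MS_* flag bits individually; any unknown bits are passed
   through unchanged so the host kernel can reject them.  */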
7631 static inline int target_to_host_msync_arg(abi_long arg)
7632 {
7633     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7634            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7635            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7636            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7637 }
7638 
7639 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7640      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7641      defined(TARGET_NR_newfstatat))
7642 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7643                                              abi_ulong target_addr,
7644                                              struct stat *host_st)
7645 {
7646 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
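    /* ARM EABI has its own stat64 layout; everything else uses the generic
       target_stat64 (or target_stat) branch below.  */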
7647     if (cpu_env->eabi) {
7648         struct target_eabi_stat64 *target_st;
7649 
7650         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7651             return -TARGET_EFAULT;
7652         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7653         __put_user(host_st->st_dev, &target_st->st_dev);
7654         __put_user(host_st->st_ino, &target_st->st_ino);
7655 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7656         __put_user(host_st->st_ino, &target_st->__st_ino);
7657 #endif
7658         __put_user(host_st->st_mode, &target_st->st_mode);
7659         __put_user(host_st->st_nlink, &target_st->st_nlink);
7660         __put_user(host_st->st_uid, &target_st->st_uid);
7661         __put_user(host_st->st_gid, &target_st->st_gid);
7662         __put_user(host_st->st_rdev, &target_st->st_rdev);
7663         __put_user(host_st->st_size, &target_st->st_size);
7664         __put_user(host_st->st_blksize, &target_st->st_blksize);
7665         __put_user(host_st->st_blocks, &target_st->st_blocks);
7666         __put_user(host_st->st_atime, &target_st->target_st_atime);
7667         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7668         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7669 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7670         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7671         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7672         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7673 #endif
7674         unlock_user_struct(target_st, target_addr, 1);
7675     } else
7676 #endif
7677     {
7678 #if defined(TARGET_HAS_STRUCT_STAT64)
7679         struct target_stat64 *target_st;
7680 #else
7681         struct target_stat *target_st;
7682 #endif
7683 
7684         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7685             return -TARGET_EFAULT;
7686         memset(target_st, 0, sizeof(*target_st));
7687         __put_user(host_st->st_dev, &target_st->st_dev);
7688         __put_user(host_st->st_ino, &target_st->st_ino);
7689 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7690         __put_user(host_st->st_ino, &target_st->__st_ino);
7691 #endif
7692         __put_user(host_st->st_mode, &target_st->st_mode);
7693         __put_user(host_st->st_nlink, &target_st->st_nlink);
7694         __put_user(host_st->st_uid, &target_st->st_uid);
7695         __put_user(host_st->st_gid, &target_st->st_gid);
7696         __put_user(host_st->st_rdev, &target_st->st_rdev);
7697         /* XXX: better use of kernel struct */
7698         __put_user(host_st->st_size, &target_st->st_size);
7699         __put_user(host_st->st_blksize, &target_st->st_blksize);
7700         __put_user(host_st->st_blocks, &target_st->st_blocks);
7701         __put_user(host_st->st_atime, &target_st->target_st_atime);
7702         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7703         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7704 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7705         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7706         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7707         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7708 #endif
7709         unlock_user_struct(target_st, target_addr, 1);
7710     }
7711 
7712     return 0;
7713 }
7714 #endif
7715 
7716 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7717 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7718                                             abi_ulong target_addr)
7719 {
7720     struct target_statx *target_stx;
7721 
7722     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7723         return -TARGET_EFAULT;
7724     }
7725     memset(target_stx, 0, sizeof(*target_stx));
7726 
7727     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7728     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7729     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7730     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7731     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7732     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7733     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7734     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7735     __put_user(host_stx->stx_size, &target_stx->stx_size);
7736     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7737     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7738     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7739     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7740     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7741     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7742     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7743     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7744     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7745     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7746     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7747     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7748     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7749     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7750 
7751     unlock_user_struct(target_stx, target_addr, 1);
7752 
7753     return 0;
7754 }
7755 #endif
7756 
7757 static int do_sys_futex(int *uaddr, int op, int val,
7758                          const struct timespec *timeout, int *uaddr2,
7759                          int val3)
7760 {
7761 #if HOST_LONG_BITS == 64
7762 #if defined(__NR_futex)
7763     /* on 64-bit hosts time_t is always 64 bits and no _time64 variant is defined */
7764     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7765 
7766 #endif
7767 #else /* HOST_LONG_BITS == 64 */
7768 #if defined(__NR_futex_time64)
7769     if (sizeof(timeout->tv_sec) == 8) {
7770         /* _time64 function on 32bit arch */
7771         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7772     }
7773 #endif
7774 #if defined(__NR_futex)
7775     /* old function on 32bit arch */
7776     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7777 #endif
7778 #endif /* HOST_LONG_BITS == 64 */
7779     g_assert_not_reached();
7780 }
7781 
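/*
 * do_sys_futex() above issues the raw host syscall and is used internally,
 * e.g. to wake waiters on ts->child_tidptr when a thread exits (see
 * TARGET_NR_exit below); do_safe_futex() goes through the safe_syscall
 * wrapper and converts the result with get_errno(), so it is the variant
 * used when servicing the guest's own futex syscalls.
 */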
7782 static int do_safe_futex(int *uaddr, int op, int val,
7783                          const struct timespec *timeout, int *uaddr2,
7784                          int val3)
7785 {
7786 #if HOST_LONG_BITS == 64
7787 #if defined(__NR_futex)
7788     /* on 64-bit hosts time_t is always 64 bits and no _time64 variant is defined */
7789     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7790 #endif
7791 #else /* HOST_LONG_BITS == 64 */
7792 #if defined(__NR_futex_time64)
7793     if (sizeof(timeout->tv_sec) == 8) {
7794         /* _time64 function on 32bit arch */
7795         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7796                                            val3));
7797     }
7798 #endif
7799 #if defined(__NR_futex)
7800     /* old function on 32bit arch */
7801     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7802 #endif
7803 #endif /* HOST_LONG_BITS == 64 */
7804     return -TARGET_ENOSYS;
7805 }
7806 
7807 /* ??? Using host futex calls even when target atomic operations
7808    are not really atomic probably breaks things.  However, implementing
7809    futexes locally would make futexes shared between multiple processes
7810    tricky.  Then again, they're probably useless anyway, because guest
7811    atomic operations won't work either.  */
7812 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7813 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7814                     int op, int val, target_ulong timeout,
7815                     target_ulong uaddr2, int val3)
7816 {
7817     struct timespec ts, *pts = NULL;
7818     void *haddr2 = NULL;
7819     int base_op;
7820 
7821     /* We assume FUTEX_* constants are the same on both host and target. */
7822 #ifdef FUTEX_CMD_MASK
7823     base_op = op & FUTEX_CMD_MASK;
7824 #else
7825     base_op = op;
7826 #endif
7827     switch (base_op) {
7828     case FUTEX_WAIT:
7829     case FUTEX_WAIT_BITSET:
7830         val = tswap32(val);
7831         break;
7832     case FUTEX_WAIT_REQUEUE_PI:
7833         val = tswap32(val);
7834         haddr2 = g2h(cpu, uaddr2);
7835         break;
7836     case FUTEX_LOCK_PI:
7837     case FUTEX_LOCK_PI2:
7838         break;
7839     case FUTEX_WAKE:
7840     case FUTEX_WAKE_BITSET:
7841     case FUTEX_TRYLOCK_PI:
7842     case FUTEX_UNLOCK_PI:
7843         timeout = 0;
7844         break;
7845     case FUTEX_FD:
7846         val = target_to_host_signal(val);
7847         timeout = 0;
7848         break;
7849     case FUTEX_CMP_REQUEUE:
7850     case FUTEX_CMP_REQUEUE_PI:
7851         val3 = tswap32(val3);
7852         /* fall through */
7853     case FUTEX_REQUEUE:
7854     case FUTEX_WAKE_OP:
7855         /*
7856          * For these, the 4th argument is not TIMEOUT, but VAL2.
7857          * But the prototype of do_safe_futex takes a pointer, so
7858          * insert casts to satisfy the compiler.  We do not need
7859          * to tswap VAL2 since it's not compared to guest memory.
7860           */
7861         pts = (struct timespec *)(uintptr_t)timeout;
7862         timeout = 0;
7863         haddr2 = g2h(cpu, uaddr2);
7864         break;
7865     default:
7866         return -TARGET_ENOSYS;
7867     }
7868     if (timeout) {
7869         pts = &ts;
7870         if (time64
7871             ? target_to_host_timespec64(pts, timeout)
7872             : target_to_host_timespec(pts, timeout)) {
7873             return -TARGET_EFAULT;
7874         }
7875     }
7876     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7877 }
7878 #endif
7879 
7880 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7881 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7882                                      abi_long handle, abi_long mount_id,
7883                                      abi_long flags)
7884 {
7885     struct file_handle *target_fh;
7886     struct file_handle *fh;
7887     int mid = 0;
7888     abi_long ret;
7889     char *name;
7890     unsigned int size, total_size;
7891 
7892     if (get_user_s32(size, handle)) {
7893         return -TARGET_EFAULT;
7894     }
7895 
7896     name = lock_user_string(pathname);
7897     if (!name) {
7898         return -TARGET_EFAULT;
7899     }
7900 
7901     total_size = sizeof(struct file_handle) + size;
7902     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7903     if (!target_fh) {
7904         unlock_user(name, pathname, 0);
7905         return -TARGET_EFAULT;
7906     }
7907 
7908     fh = g_malloc0(total_size);
7909     fh->handle_bytes = size;
7910 
7911     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7912     unlock_user(name, pathname, 0);
7913 
7914     /* man name_to_handle_at(2):
7915      * Other than the use of the handle_bytes field, the caller should treat
7916      * the file_handle structure as an opaque data type
7917      */
7918 
7919     memcpy(target_fh, fh, total_size);
7920     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7921     target_fh->handle_type = tswap32(fh->handle_type);
7922     g_free(fh);
7923     unlock_user(target_fh, handle, total_size);
7924 
7925     if (put_user_s32(mid, mount_id)) {
7926         return -TARGET_EFAULT;
7927     }
7928 
7929     return ret;
7930 
7931 }
7932 #endif
7933 
7934 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7935 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7936                                      abi_long flags)
7937 {
7938     struct file_handle *target_fh;
7939     struct file_handle *fh;
7940     unsigned int size, total_size;
7941     abi_long ret;
7942 
7943     if (get_user_s32(size, handle)) {
7944         return -TARGET_EFAULT;
7945     }
7946 
7947     total_size = sizeof(struct file_handle) + size;
7948     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7949     if (!target_fh) {
7950         return -TARGET_EFAULT;
7951     }
7952 
7953     fh = g_memdup(target_fh, total_size);
7954     fh->handle_bytes = size;
7955     fh->handle_type = tswap32(target_fh->handle_type);
7956 
7957     ret = get_errno(open_by_handle_at(mount_fd, fh,
7958                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7959 
7960     g_free(fh);
7961 
7962     unlock_user(target_fh, handle, total_size);
7963 
7964     return ret;
7965 }
7966 #endif
7967 
7968 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7969 
7970 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7971 {
7972     int host_flags;
7973     target_sigset_t *target_mask;
7974     sigset_t host_mask;
7975     abi_long ret;
7976 
7977     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7978         return -TARGET_EINVAL;
7979     }
7980     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7981         return -TARGET_EFAULT;
7982     }
7983 
7984     target_to_host_sigset(&host_mask, target_mask);
7985 
7986     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7987 
7988     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7989     if (ret >= 0) {
7990         fd_trans_register(ret, &target_signalfd_trans);
7991     }
7992 
7993     unlock_user_struct(target_mask, mask, 0);
7994 
7995     return ret;
7996 }
7997 #endif
7998 
7999 /* Map host to target signal numbers for the wait family of syscalls.
8000    Assume all other status bits are the same.  */
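/*
 * For example, a child killed by a signal has the low seven status bits
 * replaced with the target's number for that signal, and a stopped child
 * has bits 8-15 replaced; a plain exit status is returned unchanged.
 */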
8001 int host_to_target_waitstatus(int status)
8002 {
8003     if (WIFSIGNALED(status)) {
8004         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8005     }
8006     if (WIFSTOPPED(status)) {
8007         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8008                | (status & 0xff);
8009     }
8010     return status;
8011 }
8012 
8013 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8014 {
8015     CPUState *cpu = env_cpu(cpu_env);
8016     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8017     int i;
8018 
8019     for (i = 0; i < bprm->argc; i++) {
8020         size_t len = strlen(bprm->argv[i]) + 1;
8021 
8022         if (write(fd, bprm->argv[i], len) != len) {
8023             return -1;
8024         }
8025     }
8026 
8027     return 0;
8028 }
8029 
8030 struct open_self_maps_data {
8031     TaskState *ts;
8032     IntervalTreeRoot *host_maps;
8033     int fd;
8034     bool smaps;
8035 };
8036 
8037 /*
8038  * Subroutine to output one line of /proc/self/maps,
8039  * or one region of /proc/self/smaps.
8040  */
8041 
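/*
 * The stack mapping is identified by its start address matching the stack
 * limit, except on hppa where the stack grows upward, so there it is the
 * region's end that matches.
 */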
8042 #ifdef TARGET_HPPA
8043 # define test_stack(S, E, L)  (E == L)
8044 #else
8045 # define test_stack(S, E, L)  (S == L)
8046 #endif
8047 
8048 static void open_self_maps_4(const struct open_self_maps_data *d,
8049                              const MapInfo *mi, abi_ptr start,
8050                              abi_ptr end, unsigned flags)
8051 {
8052     const struct image_info *info = d->ts->info;
8053     const char *path = mi->path;
8054     uint64_t offset;
8055     int fd = d->fd;
8056     int count;
8057 
8058     if (test_stack(start, end, info->stack_limit)) {
8059         path = "[stack]";
8060     } else if (start == info->brk) {
8061         path = "[heap]";
8062     } else if (start == info->vdso) {
8063         path = "[vdso]";
8064 #ifdef TARGET_X86_64
8065     } else if (start == TARGET_VSYSCALL_PAGE) {
8066         path = "[vsyscall]";
8067 #endif
8068     }
8069 
8070     /* Except for the null device (MAP_ANON), adjust the offset for this fragment. */
8071     offset = mi->offset;
8072     if (mi->dev) {
8073         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8074         offset += hstart - mi->itree.start;
8075     }
8076 
8077     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8078                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8079                     start, end,
8080                     (flags & PAGE_READ) ? 'r' : '-',
8081                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8082                     (flags & PAGE_EXEC) ? 'x' : '-',
8083                     mi->is_priv ? 'p' : 's',
8084                     offset, major(mi->dev), minor(mi->dev),
8085                     (uint64_t)mi->inode);
8086     if (path) {
8087         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8088     } else {
8089         dprintf(fd, "\n");
8090     }
8091 
8092     if (d->smaps) {
8093         unsigned long size = end - start;
8094         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8095         unsigned long size_kb = size >> 10;
8096 
8097         dprintf(fd, "Size:                  %lu kB\n"
8098                 "KernelPageSize:        %lu kB\n"
8099                 "MMUPageSize:           %lu kB\n"
8100                 "Rss:                   0 kB\n"
8101                 "Pss:                   0 kB\n"
8102                 "Pss_Dirty:             0 kB\n"
8103                 "Shared_Clean:          0 kB\n"
8104                 "Shared_Dirty:          0 kB\n"
8105                 "Private_Clean:         0 kB\n"
8106                 "Private_Dirty:         0 kB\n"
8107                 "Referenced:            0 kB\n"
8108                 "Anonymous:             %lu kB\n"
8109                 "LazyFree:              0 kB\n"
8110                 "AnonHugePages:         0 kB\n"
8111                 "ShmemPmdMapped:        0 kB\n"
8112                 "FilePmdMapped:         0 kB\n"
8113                 "Shared_Hugetlb:        0 kB\n"
8114                 "Private_Hugetlb:       0 kB\n"
8115                 "Swap:                  0 kB\n"
8116                 "SwapPss:               0 kB\n"
8117                 "Locked:                0 kB\n"
8118                 "THPeligible:    0\n"
8119                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8120                 size_kb, page_size_kb, page_size_kb,
8121                 (flags & PAGE_ANON ? size_kb : 0),
8122                 (flags & PAGE_READ) ? " rd" : "",
8123                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8124                 (flags & PAGE_EXEC) ? " ex" : "",
8125                 mi->is_priv ? "" : " sh",
8126                 (flags & PAGE_READ) ? " mr" : "",
8127                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8128                 (flags & PAGE_EXEC) ? " me" : "",
8129                 mi->is_priv ? "" : " ms");
8130     }
8131 }
8132 
8133 /*
8134  * Callback for walk_memory_regions, when read_self_maps() fails.
8135  * Proceed without the benefit of host /proc/self/maps cross-check.
8136  */
8137 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8138                             target_ulong guest_end, unsigned long flags)
8139 {
8140     static const MapInfo mi = { .is_priv = true };
8141 
8142     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8143     return 0;
8144 }
8145 
8146 /*
8147  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8148  */
8149 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8150                             target_ulong guest_end, unsigned long flags)
8151 {
8152     const struct open_self_maps_data *d = opaque;
8153     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8154     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8155 
8156 #ifdef TARGET_X86_64
8157     /*
8158      * Because of the extremely high position of the page within the guest
8159      * virtual address space, this is not backed by host memory at all.
8160      * Therefore the loop below would fail.  This is the only instance
8161      * of not having host backing memory.
8162      */
8163     if (guest_start == TARGET_VSYSCALL_PAGE) {
8164         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8165     }
8166 #endif
8167 
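    /*
     * A single guest range may span several host mappings; emit one line per
     * host fragment, advancing until the whole guest range has been covered.
     */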
8168     while (1) {
8169         IntervalTreeNode *n =
8170             interval_tree_iter_first(d->host_maps, host_start, host_start);
8171         MapInfo *mi = container_of(n, MapInfo, itree);
8172         uintptr_t this_hlast = MIN(host_last, n->last);
8173         target_ulong this_gend = h2g(this_hlast) + 1;
8174 
8175         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8176 
8177         if (this_hlast == host_last) {
8178             return 0;
8179         }
8180         host_start = this_hlast + 1;
8181         guest_start = h2g(host_start);
8182     }
8183 }
8184 
8185 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8186 {
8187     struct open_self_maps_data d = {
8188         .ts = get_task_state(env_cpu(env)),
8189         .fd = fd,
8190         .smaps = smaps
8191     };
8192 
8193     mmap_lock();
8194     d.host_maps = read_self_maps();
8195     if (d.host_maps) {
8196         walk_memory_regions(&d, open_self_maps_2);
8197         free_self_maps(d.host_maps);
8198     } else {
8199         walk_memory_regions(&d, open_self_maps_3);
8200     }
8201     mmap_unlock();
8202     return 0;
8203 }
8204 
8205 static int open_self_maps(CPUArchState *cpu_env, int fd)
8206 {
8207     return open_self_maps_1(cpu_env, fd, false);
8208 }
8209 
8210 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8211 {
8212     return open_self_maps_1(cpu_env, fd, true);
8213 }
8214 
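/*
 * Synthesise /proc/self/stat.  Only the fields QEMU can report meaningfully
 * (pid, comm, state, ppid, num_threads, starttime and the stack start) are
 * filled in; every other field is written as 0.
 */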
8215 static int open_self_stat(CPUArchState *cpu_env, int fd)
8216 {
8217     CPUState *cpu = env_cpu(cpu_env);
8218     TaskState *ts = get_task_state(cpu);
8219     g_autoptr(GString) buf = g_string_new(NULL);
8220     int i;
8221 
8222     for (i = 0; i < 44; i++) {
8223         if (i == 0) {
8224             /* pid */
8225             g_string_printf(buf, FMT_pid " ", getpid());
8226         } else if (i == 1) {
8227             /* app name */
8228             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8229             bin = bin ? bin + 1 : ts->bprm->argv[0];
8230             g_string_printf(buf, "(%.15s) ", bin);
8231         } else if (i == 2) {
8232             /* task state */
8233             g_string_assign(buf, "R "); /* we are running right now */
8234         } else if (i == 3) {
8235             /* ppid */
8236             g_string_printf(buf, FMT_pid " ", getppid());
8237         } else if (i == 19) {
8238             /* num_threads */
8239             int cpus = 0;
8240             WITH_RCU_READ_LOCK_GUARD() {
8241                 CPUState *cpu_iter;
8242                 CPU_FOREACH(cpu_iter) {
8243                     cpus++;
8244                 }
8245             }
8246             g_string_printf(buf, "%d ", cpus);
8247         } else if (i == 21) {
8248             /* starttime */
8249             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8250         } else if (i == 27) {
8251             /* stack bottom */
8252             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8253         } else {
8254             /* every other field is reported as 0 */
8255             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8256         }
8257 
8258         if (write(fd, buf->str, buf->len) != buf->len) {
8259             return -1;
8260         }
8261     }
8262 
8263     return 0;
8264 }
8265 
8266 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8267 {
8268     CPUState *cpu = env_cpu(cpu_env);
8269     TaskState *ts = get_task_state(cpu);
8270     abi_ulong auxv = ts->info->saved_auxv;
8271     abi_ulong len = ts->info->auxv_len;
8272     char *ptr;
8273 
8274     /*
8275      * The auxiliary vector is stored on the target process's stack;
8276      * read the whole vector and copy it out to the file.
8277      */
8278     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8279     if (ptr != NULL) {
8280         while (len > 0) {
8281             ssize_t r;
8282             r = write(fd, ptr, len);
8283             if (r <= 0) {
8284                 break;
8285             }
8286             len -= r;
8287             ptr += r;
8288         }
8289         lseek(fd, 0, SEEK_SET);
8290         unlock_user(ptr, auxv, len);
8291     }
8292 
8293     return 0;
8294 }
8295 
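/*
 * Return 1 if filename refers to ENTRY in this process's own /proc
 * directory, whether spelled via "self" or via the numeric pid.
 * Illustrative examples, assuming getpid() returns 42:
 *   is_proc_myself("/proc/self/maps", "maps") -> 1
 *   is_proc_myself("/proc/42/maps",   "maps") -> 1
 *   is_proc_myself("/proc/41/maps",   "maps") -> 0
 */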
8296 static int is_proc_myself(const char *filename, const char *entry)
8297 {
8298     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8299         filename += strlen("/proc/");
8300         if (!strncmp(filename, "self/", strlen("self/"))) {
8301             filename += strlen("self/");
8302         } else if (*filename >= '1' && *filename <= '9') {
8303             char myself[80];
8304             snprintf(myself, sizeof(myself), "%d/", getpid());
8305             if (!strncmp(filename, myself, strlen(myself))) {
8306                 filename += strlen(myself);
8307             } else {
8308                 return 0;
8309             }
8310         } else {
8311             return 0;
8312         }
8313         if (!strcmp(filename, entry)) {
8314             return 1;
8315         }
8316     }
8317     return 0;
8318 }
8319 
8320 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8321                       const char *fmt, int code)
8322 {
8323     if (logfile) {
8324         CPUState *cs = env_cpu(env);
8325 
8326         fprintf(logfile, fmt, code);
8327         fprintf(logfile, "Failing executable: %s\n", exec_path);
8328         cpu_dump_state(cs, logfile, 0);
8329         open_self_maps(env, fileno(logfile));
8330     }
8331 }
8332 
8333 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8334 {
8335     /* dump to console */
8336     excp_dump_file(stderr, env, fmt, code);
8337 
8338     /* dump to log file */
8339     if (qemu_log_separate()) {
8340         FILE *logfile = qemu_log_trylock();
8341 
8342         excp_dump_file(logfile, env, fmt, code);
8343         qemu_log_unlock(logfile);
8344     }
8345 }
8346 
8347 #include "target_proc.h"
8348 
8349 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8350     defined(HAVE_ARCH_PROC_CPUINFO) || \
8351     defined(HAVE_ARCH_PROC_HARDWARE)
8352 static int is_proc(const char *filename, const char *entry)
8353 {
8354     return strcmp(filename, entry) == 0;
8355 }
8356 #endif
8357 
8358 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8359 static int open_net_route(CPUArchState *cpu_env, int fd)
8360 {
8361     FILE *fp;
8362     char *line = NULL;
8363     size_t len = 0;
8364     ssize_t read;
8365 
8366     fp = fopen("/proc/net/route", "r");
8367     if (fp == NULL) {
8368         return -1;
8369     }
8370 
8371     /* read header */
8372 
8373     read = getline(&line, &len, fp);
8374     dprintf(fd, "%s", read != -1 ? line : "");
8375 
8376     /* read routes */
8377 
8378     while ((read = getline(&line, &len, fp)) != -1) {
8379         char iface[16];
8380         uint32_t dest, gw, mask;
8381         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8382         int fields;
8383 
8384         fields = sscanf(line,
8385                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8386                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8387                         &mask, &mtu, &window, &irtt);
8388         if (fields != 11) {
8389             continue;
8390         }
8391         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8392                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8393                 metric, tswap32(mask), mtu, window, irtt);
8394     }
8395 
8396     free(line);
8397     fclose(fp);
8398 
8399     return 0;
8400 }
8401 #endif
8402 
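/*
 * Intercept opens of emulated /proc files.  On a match this returns a file
 * descriptor (memfd or unlinked temporary file) already filled with the
 * synthesised contents, or a negative value with errno set on failure;
 * a return of -2 tells the caller to fall through to a real host openat().
 */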
8403 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8404                               const char *fname, int flags, mode_t mode,
8405                               int openat2_resolve, bool safe)
8406 {
8407     g_autofree char *proc_name = NULL;
8408     const char *pathname;
8409     struct fake_open {
8410         const char *filename;
8411         int (*fill)(CPUArchState *cpu_env, int fd);
8412         int (*cmp)(const char *s1, const char *s2);
8413     };
8414     const struct fake_open *fake_open;
8415     static const struct fake_open fakes[] = {
8416         { "maps", open_self_maps, is_proc_myself },
8417         { "smaps", open_self_smaps, is_proc_myself },
8418         { "stat", open_self_stat, is_proc_myself },
8419         { "auxv", open_self_auxv, is_proc_myself },
8420         { "cmdline", open_self_cmdline, is_proc_myself },
8421 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8422         { "/proc/net/route", open_net_route, is_proc },
8423 #endif
8424 #if defined(HAVE_ARCH_PROC_CPUINFO)
8425         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8426 #endif
8427 #if defined(HAVE_ARCH_PROC_HARDWARE)
8428         { "/proc/hardware", open_hardware, is_proc },
8429 #endif
8430         { NULL, NULL, NULL }
8431     };
8432 
8433     /* if this is a file from the /proc filesystem, expand it to its full name */
8434     proc_name = realpath(fname, NULL);
8435     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8436         pathname = proc_name;
8437     } else {
8438         pathname = fname;
8439     }
8440 
8441     if (is_proc_myself(pathname, "exe")) {
8442         /* Honor openat2 resolve flags */
8443         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8444             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8445             errno = ELOOP;
8446             return -1;
8447         }
8448         if (safe) {
8449             return safe_openat(dirfd, exec_path, flags, mode);
8450         } else {
8451             return openat(dirfd, exec_path, flags, mode);
8452         }
8453     }
8454 
8455     for (fake_open = fakes; fake_open->filename; fake_open++) {
8456         if (fake_open->cmp(pathname, fake_open->filename)) {
8457             break;
8458         }
8459     }
8460 
8461     if (fake_open->filename) {
8462         const char *tmpdir;
8463         char filename[PATH_MAX];
8464         int fd, r;
8465 
8466         fd = memfd_create("qemu-open", 0);
8467         if (fd < 0) {
8468             if (errno != ENOSYS) {
8469                 return fd;
8470             }
8471             /* fall back to a temporary file to hold the synthesised contents */
8472             tmpdir = getenv("TMPDIR");
8473             if (!tmpdir)
8474                 tmpdir = "/tmp";
8475             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8476             fd = mkstemp(filename);
8477             if (fd < 0) {
8478                 return fd;
8479             }
8480             unlink(filename);
8481         }
8482 
8483         if ((r = fake_open->fill(cpu_env, fd))) {
8484             int e = errno;
8485             close(fd);
8486             errno = e;
8487             return r;
8488         }
8489         lseek(fd, 0, SEEK_SET);
8490 
8491         return fd;
8492     }
8493 
8494     return -2;
8495 }
8496 
8497 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8498                     int flags, mode_t mode, bool safe)
8499 {
8500     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8501     if (fd > -2) {
8502         return fd;
8503     }
8504 
8505     if (safe) {
8506         return safe_openat(dirfd, path(pathname), flags, mode);
8507     } else {
8508         return openat(dirfd, path(pathname), flags, mode);
8509     }
8510 }
8511 
8512 
8513 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8514                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8515                       abi_ulong guest_size)
8516 {
8517     struct open_how_ver0 how = {0};
8518     char *pathname;
8519     int ret;
8520 
8521     if (guest_size < sizeof(struct target_open_how_ver0)) {
8522         return -TARGET_EINVAL;
8523     }
8524     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8525     if (ret) {
8526         if (ret == -TARGET_E2BIG) {
8527             qemu_log_mask(LOG_UNIMP,
8528                           "Unimplemented openat2 open_how size: "
8529                           TARGET_ABI_FMT_lu "\n", guest_size);
8530         }
8531         return ret;
8532     }
8533     pathname = lock_user_string(guest_pathname);
8534     if (!pathname) {
8535         return -TARGET_EFAULT;
8536     }
8537 
8538     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8539     how.mode = tswap64(how.mode);
8540     how.resolve = tswap64(how.resolve);
8541     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8542                                 how.resolve, true);
8543     if (fd > -2) {
8544         ret = get_errno(fd);
8545     } else {
8546         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8547                                      sizeof(struct open_how_ver0)));
8548     }
8549 
8550     fd_trans_unregister(ret);
8551     unlock_user(pathname, guest_pathname, 0);
8552     return ret;
8553 }
8554 
8555 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8556 {
8557     ssize_t ret;
8558 
8559     if (!pathname || !buf) {
8560         errno = EFAULT;
8561         return -1;
8562     }
8563 
8564     if (!bufsiz) {
8565         /* Short circuit this for the magic exe check. */
8566         errno = EINVAL;
8567         return -1;
8568     }
8569 
8570     if (is_proc_myself((const char *)pathname, "exe")) {
8571         /*
8572          * Don't worry about sign mismatch as earlier mapping
8573          * logic would have thrown a bad address error.
8574          */
8575         ret = MIN(strlen(exec_path), bufsiz);
8576         /* We cannot NUL terminate the string. */
8577         memcpy(buf, exec_path, ret);
8578     } else {
8579         ret = readlink(path(pathname), buf, bufsiz);
8580     }
8581 
8582     return ret;
8583 }
8584 
8585 static int do_execv(CPUArchState *cpu_env, int dirfd,
8586                     abi_long pathname, abi_long guest_argp,
8587                     abi_long guest_envp, int flags, bool is_execveat)
8588 {
8589     int ret;
8590     char **argp, **envp;
8591     int argc, envc;
8592     abi_ulong gp;
8593     abi_ulong addr;
8594     char **q;
8595     void *p;
8596 
8597     argc = 0;
8598 
8599     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8600         if (get_user_ual(addr, gp)) {
8601             return -TARGET_EFAULT;
8602         }
8603         if (!addr) {
8604             break;
8605         }
8606         argc++;
8607     }
8608     envc = 0;
8609     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8610         if (get_user_ual(addr, gp)) {
8611             return -TARGET_EFAULT;
8612         }
8613         if (!addr) {
8614             break;
8615         }
8616         envc++;
8617     }
8618 
8619     argp = g_new0(char *, argc + 1);
8620     envp = g_new0(char *, envc + 1);
8621 
8622     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8623         if (get_user_ual(addr, gp)) {
8624             goto execve_efault;
8625         }
8626         if (!addr) {
8627             break;
8628         }
8629         *q = lock_user_string(addr);
8630         if (!*q) {
8631             goto execve_efault;
8632         }
8633     }
8634     *q = NULL;
8635 
8636     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8637         if (get_user_ual(addr, gp)) {
8638             goto execve_efault;
8639         }
8640         if (!addr) {
8641             break;
8642         }
8643         *q = lock_user_string(addr);
8644         if (!*q) {
8645             goto execve_efault;
8646         }
8647     }
8648     *q = NULL;
8649 
8650     /*
8651      * Although execve() is not an interruptible syscall it is
8652      * a special case where we must use the safe_syscall wrapper:
8653      * if we allow a signal to happen before we make the host
8654      * syscall then we will 'lose' it, because at the point of
8655      * execve the process leaves QEMU's control. So we use the
8656      * safe syscall wrapper to ensure that we either take the
8657      * signal as a guest signal, or else it does not happen
8658      * before the execve completes and makes it the other
8659      * program's problem.
8660      */
8661     p = lock_user_string(pathname);
8662     if (!p) {
8663         goto execve_efault;
8664     }
8665 
8666     const char *exe = p;
8667     if (is_proc_myself(p, "exe")) {
8668         exe = exec_path;
8669     }
8670     ret = is_execveat
8671         ? safe_execveat(dirfd, exe, argp, envp, flags)
8672         : safe_execve(exe, argp, envp);
8673     ret = get_errno(ret);
8674 
8675     unlock_user(p, pathname, 0);
8676 
8677     goto execve_end;
8678 
8679 execve_efault:
8680     ret = -TARGET_EFAULT;
8681 
8682 execve_end:
8683     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8684         if (get_user_ual(addr, gp) || !addr) {
8685             break;
8686         }
8687         unlock_user(*q, addr, 0);
8688     }
8689     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8690         if (get_user_ual(addr, gp) || !addr) {
8691             break;
8692         }
8693         unlock_user(*q, addr, 0);
8694     }
8695 
8696     g_free(argp);
8697     g_free(envp);
8698     return ret;
8699 }
8700 
8701 #define TIMER_MAGIC 0x0caf0000
8702 #define TIMER_MAGIC_MASK 0xffff0000
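/*
 * Timer IDs handed out to the guest are the g_posix_timers[] index OR'ed
 * with TIMER_MAGIC, e.g. slot 3 becomes 0x0caf0003; get_timer_id() below
 * undoes that encoding and rejects IDs without the magic bits.
 */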
8703 
8704 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8705 static target_timer_t get_timer_id(abi_long arg)
8706 {
8707     target_timer_t timerid = arg;
8708 
8709     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8710         return -TARGET_EINVAL;
8711     }
8712 
8713     timerid &= 0xffff;
8714 
8715     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8716         return -TARGET_EINVAL;
8717     }
8718 
8719     return timerid;
8720 }
8721 
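/*
 * This helper and host_to_target_cpu_mask() below repack a CPU affinity
 * mask bit by bit between the guest's abi_ulong-sized words and the host's
 * unsigned long words.  For instance, for a 32-bit guest on a 64-bit host,
 * guest words 0 and 1 end up as the low and high halves of host word 0.
 */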
8722 static int target_to_host_cpu_mask(unsigned long *host_mask,
8723                                    size_t host_size,
8724                                    abi_ulong target_addr,
8725                                    size_t target_size)
8726 {
8727     unsigned target_bits = sizeof(abi_ulong) * 8;
8728     unsigned host_bits = sizeof(*host_mask) * 8;
8729     abi_ulong *target_mask;
8730     unsigned i, j;
8731 
8732     assert(host_size >= target_size);
8733 
8734     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8735     if (!target_mask) {
8736         return -TARGET_EFAULT;
8737     }
8738     memset(host_mask, 0, host_size);
8739 
8740     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8741         unsigned bit = i * target_bits;
8742         abi_ulong val;
8743 
8744         __get_user(val, &target_mask[i]);
8745         for (j = 0; j < target_bits; j++, bit++) {
8746             if (val & (1UL << j)) {
8747                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8748             }
8749         }
8750     }
8751 
8752     unlock_user(target_mask, target_addr, 0);
8753     return 0;
8754 }
8755 
8756 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8757                                    size_t host_size,
8758                                    abi_ulong target_addr,
8759                                    size_t target_size)
8760 {
8761     unsigned target_bits = sizeof(abi_ulong) * 8;
8762     unsigned host_bits = sizeof(*host_mask) * 8;
8763     abi_ulong *target_mask;
8764     unsigned i, j;
8765 
8766     assert(host_size >= target_size);
8767 
8768     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8769     if (!target_mask) {
8770         return -TARGET_EFAULT;
8771     }
8772 
8773     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8774         unsigned bit = i * target_bits;
8775         abi_ulong val = 0;
8776 
8777         for (j = 0; j < target_bits; j++, bit++) {
8778             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8779                 val |= 1UL << j;
8780             }
8781         }
8782         __put_user(val, &target_mask[i]);
8783     }
8784 
8785     unlock_user(target_mask, target_addr, target_size);
8786     return 0;
8787 }
8788 
8789 #ifdef TARGET_NR_getdents
8790 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8791 {
8792     g_autofree void *hdirp = NULL;
8793     void *tdirp;
8794     int hlen, hoff, toff;
8795     int hreclen, treclen;
8796     off_t prev_diroff = 0;
8797 
8798     hdirp = g_try_malloc(count);
8799     if (!hdirp) {
8800         return -TARGET_ENOMEM;
8801     }
8802 
8803 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8804     hlen = sys_getdents(dirfd, hdirp, count);
8805 #else
8806     hlen = sys_getdents64(dirfd, hdirp, count);
8807 #endif
8808 
8809     hlen = get_errno(hlen);
8810     if (is_error(hlen)) {
8811         return hlen;
8812     }
8813 
8814     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8815     if (!tdirp) {
8816         return -TARGET_EFAULT;
8817     }
8818 
8819     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8820 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8821         struct linux_dirent *hde = hdirp + hoff;
8822 #else
8823         struct linux_dirent64 *hde = hdirp + hoff;
8824 #endif
8825         struct target_dirent *tde = tdirp + toff;
8826         int namelen;
8827         uint8_t type;
8828 
8829         namelen = strlen(hde->d_name);
8830         hreclen = hde->d_reclen;
8831         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8832         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8833 
8834         if (toff + treclen > count) {
8835             /*
8836              * If the host struct is smaller than the target struct, or
8837              * requires less alignment and thus packs into less space,
8838              * then the host can return more entries than we can pass
8839              * on to the guest.
8840              */
8841             if (toff == 0) {
8842                 toff = -TARGET_EINVAL; /* result buffer is too small */
8843                 break;
8844             }
8845             /*
8846              * Return what we have, resetting the file pointer to the
8847              * location of the first record not returned.
8848              */
8849             lseek(dirfd, prev_diroff, SEEK_SET);
8850             break;
8851         }
8852 
8853         prev_diroff = hde->d_off;
8854         tde->d_ino = tswapal(hde->d_ino);
8855         tde->d_off = tswapal(hde->d_off);
8856         tde->d_reclen = tswap16(treclen);
8857         memcpy(tde->d_name, hde->d_name, namelen + 1);
8858 
8859         /*
8860          * The getdents type is in what was formerly a padding byte at the
8861          * end of the structure.
8862          */
8863 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8864         type = *((uint8_t *)hde + hreclen - 1);
8865 #else
8866         type = hde->d_type;
8867 #endif
8868         *((uint8_t *)tde + treclen - 1) = type;
8869     }
8870 
8871     unlock_user(tdirp, arg2, toff);
8872     return toff;
8873 }
8874 #endif /* TARGET_NR_getdents */
8875 
8876 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8877 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8878 {
8879     g_autofree void *hdirp = NULL;
8880     void *tdirp;
8881     int hlen, hoff, toff;
8882     int hreclen, treclen;
8883     off_t prev_diroff = 0;
8884 
8885     hdirp = g_try_malloc(count);
8886     if (!hdirp) {
8887         return -TARGET_ENOMEM;
8888     }
8889 
8890     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8891     if (is_error(hlen)) {
8892         return hlen;
8893     }
8894 
8895     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8896     if (!tdirp) {
8897         return -TARGET_EFAULT;
8898     }
8899 
8900     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8901         struct linux_dirent64 *hde = hdirp + hoff;
8902         struct target_dirent64 *tde = tdirp + toff;
8903         int namelen;
8904 
8905         namelen = strlen(hde->d_name) + 1;
8906         hreclen = hde->d_reclen;
8907         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8908         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8909 
8910         if (toff + treclen > count) {
8911             /*
8912              * If the host struct is smaller than the target struct, or
8913              * requires less alignment and thus packs into less space,
8914              * then the host can return more entries than we can pass
8915              * on to the guest.
8916              */
8917             if (toff == 0) {
8918                 toff = -TARGET_EINVAL; /* result buffer is too small */
8919                 break;
8920             }
8921             /*
8922              * Return what we have, resetting the file pointer to the
8923              * location of the first record not returned.
8924              */
8925             lseek(dirfd, prev_diroff, SEEK_SET);
8926             break;
8927         }
8928 
8929         prev_diroff = hde->d_off;
8930         tde->d_ino = tswap64(hde->d_ino);
8931         tde->d_off = tswap64(hde->d_off);
8932         tde->d_reclen = tswap16(treclen);
8933         tde->d_type = hde->d_type;
8934         memcpy(tde->d_name, hde->d_name, namelen);
8935     }
8936 
8937     unlock_user(tdirp, arg2, toff);
8938     return toff;
8939 }
8940 #endif /* TARGET_NR_getdents64 */
8941 
8942 #if defined(TARGET_NR_riscv_hwprobe)
8943 
8944 #define RISCV_HWPROBE_KEY_MVENDORID     0
8945 #define RISCV_HWPROBE_KEY_MARCHID       1
8946 #define RISCV_HWPROBE_KEY_MIMPID        2
8947 
8948 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8949 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8950 
8951 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8952 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8953 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8954 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8955 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8956 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8957 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8958 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8959 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8960 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8961 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8962 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8963 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8964 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8965 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8966 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8967 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8968 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8969 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8970 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8971 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8972 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8973 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8974 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8975 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8976 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8977 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8978 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8979 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8980 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8981 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8982 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8983 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8984 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8985 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8986 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8987 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8988 
8989 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8990 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8991 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8992 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8993 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8994 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8995 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8996 
8997 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8998 
8999 struct riscv_hwprobe {
9000     abi_llong  key;
9001     abi_ullong value;
9002 };
9003 
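/*
 * Fill each requested (key, value) pair in place in guest memory: known keys
 * get a value derived from the emulated CPU's configuration, while unknown
 * keys have their key overwritten with -1 so the guest can tell they are
 * unsupported.
 */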
9004 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9005                                     struct riscv_hwprobe *pair,
9006                                     size_t pair_count)
9007 {
9008     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9009 
9010     for (; pair_count > 0; pair_count--, pair++) {
9011         abi_llong key;
9012         abi_ullong value;
9013         __put_user(0, &pair->value);
9014         __get_user(key, &pair->key);
9015         switch (key) {
9016         case RISCV_HWPROBE_KEY_MVENDORID:
9017             __put_user(cfg->mvendorid, &pair->value);
9018             break;
9019         case RISCV_HWPROBE_KEY_MARCHID:
9020             __put_user(cfg->marchid, &pair->value);
9021             break;
9022         case RISCV_HWPROBE_KEY_MIMPID:
9023             __put_user(cfg->mimpid, &pair->value);
9024             break;
9025         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9026             value = riscv_has_ext(env, RVI) &&
9027                     riscv_has_ext(env, RVM) &&
9028                     riscv_has_ext(env, RVA) ?
9029                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9030             __put_user(value, &pair->value);
9031             break;
9032         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9033             value = riscv_has_ext(env, RVF) &&
9034                     riscv_has_ext(env, RVD) ?
9035                     RISCV_HWPROBE_IMA_FD : 0;
9036             value |= riscv_has_ext(env, RVC) ?
9037                      RISCV_HWPROBE_IMA_C : 0;
9038             value |= riscv_has_ext(env, RVV) ?
9039                      RISCV_HWPROBE_IMA_V : 0;
9040             value |= cfg->ext_zba ?
9041                      RISCV_HWPROBE_EXT_ZBA : 0;
9042             value |= cfg->ext_zbb ?
9043                      RISCV_HWPROBE_EXT_ZBB : 0;
9044             value |= cfg->ext_zbs ?
9045                      RISCV_HWPROBE_EXT_ZBS : 0;
9046             value |= cfg->ext_zicboz ?
9047                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9048             value |= cfg->ext_zbc ?
9049                      RISCV_HWPROBE_EXT_ZBC : 0;
9050             value |= cfg->ext_zbkb ?
9051                      RISCV_HWPROBE_EXT_ZBKB : 0;
9052             value |= cfg->ext_zbkc ?
9053                      RISCV_HWPROBE_EXT_ZBKC : 0;
9054             value |= cfg->ext_zbkx ?
9055                      RISCV_HWPROBE_EXT_ZBKX : 0;
9056             value |= cfg->ext_zknd ?
9057                      RISCV_HWPROBE_EXT_ZKND : 0;
9058             value |= cfg->ext_zkne ?
9059                      RISCV_HWPROBE_EXT_ZKNE : 0;
9060             value |= cfg->ext_zknh ?
9061                      RISCV_HWPROBE_EXT_ZKNH : 0;
9062             value |= cfg->ext_zksed ?
9063                      RISCV_HWPROBE_EXT_ZKSED : 0;
9064             value |= cfg->ext_zksh ?
9065                      RISCV_HWPROBE_EXT_ZKSH : 0;
9066             value |= cfg->ext_zkt ?
9067                      RISCV_HWPROBE_EXT_ZKT : 0;
9068             value |= cfg->ext_zvbb ?
9069                      RISCV_HWPROBE_EXT_ZVBB : 0;
9070             value |= cfg->ext_zvbc ?
9071                      RISCV_HWPROBE_EXT_ZVBC : 0;
9072             value |= cfg->ext_zvkb ?
9073                      RISCV_HWPROBE_EXT_ZVKB : 0;
9074             value |= cfg->ext_zvkg ?
9075                      RISCV_HWPROBE_EXT_ZVKG : 0;
9076             value |= cfg->ext_zvkned ?
9077                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9078             value |= cfg->ext_zvknha ?
9079                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9080             value |= cfg->ext_zvknhb ?
9081                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9082             value |= cfg->ext_zvksed ?
9083                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9084             value |= cfg->ext_zvksh ?
9085                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9086             value |= cfg->ext_zvkt ?
9087                      RISCV_HWPROBE_EXT_ZVKT : 0;
9088             value |= cfg->ext_zfh ?
9089                      RISCV_HWPROBE_EXT_ZFH : 0;
9090             value |= cfg->ext_zfhmin ?
9091                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9092             value |= cfg->ext_zihintntl ?
9093                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9094             value |= cfg->ext_zvfh ?
9095                      RISCV_HWPROBE_EXT_ZVFH : 0;
9096             value |= cfg->ext_zvfhmin ?
9097                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9098             value |= cfg->ext_zfa ?
9099                      RISCV_HWPROBE_EXT_ZFA : 0;
9100             value |= cfg->ext_ztso ?
9101                      RISCV_HWPROBE_EXT_ZTSO : 0;
9102             value |= cfg->ext_zacas ?
9103                      RISCV_HWPROBE_EXT_ZACAS : 0;
9104             value |= cfg->ext_zicond ?
9105                      RISCV_HWPROBE_EXT_ZICOND : 0;
9106             __put_user(value, &pair->value);
9107             break;
9108         case RISCV_HWPROBE_KEY_CPUPERF_0:
9109             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9110             break;
9111         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9112             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9113             __put_user(value, &pair->value);
9114             break;
9115         default:
9116             __put_user(-1, &pair->key);
9117             break;
9118         }
9119     }
9120 }
9121 
9122 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9123 {
9124     int ret, i, tmp;
9125     size_t host_mask_size, target_mask_size;
9126     unsigned long *host_mask;
9127 
9128     /*
9129      * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
9130      * arg3 contains the cpu count.
9131      */
9132     tmp = (8 * sizeof(abi_ulong));
9133     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9134     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9135                      ~(sizeof(*host_mask) - 1);
9136 
9137     host_mask = alloca(host_mask_size);
9138 
9139     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9140                                   arg4, target_mask_size);
9141     if (ret != 0) {
9142         return ret;
9143     }
9144 
9145     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9146         if (host_mask[i] != 0) {
9147             return 0;
9148         }
9149     }
9150     return -TARGET_EINVAL;
9151 }
9152 
9153 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9154                                  abi_long arg2, abi_long arg3,
9155                                  abi_long arg4, abi_long arg5)
9156 {
9157     int ret;
9158     struct riscv_hwprobe *host_pairs;
9159 
9160     /* flags must be 0 */
9161     if (arg5 != 0) {
9162         return -TARGET_EINVAL;
9163     }
9164 
9165     /* check cpu_set */
9166     if (arg3 != 0) {
9167         ret = cpu_set_valid(arg3, arg4);
9168         if (ret != 0) {
9169             return ret;
9170         }
9171     } else if (arg4 != 0) {
9172         return -TARGET_EINVAL;
9173     }
9174 
9175     /* no pairs */
9176     if (arg2 == 0) {
9177         return 0;
9178     }
9179 
9180     host_pairs = lock_user(VERIFY_WRITE, arg1,
9181                            sizeof(*host_pairs) * (size_t)arg2, 0);
9182     if (host_pairs == NULL) {
9183         return -TARGET_EFAULT;
9184     }
9185     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9186     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9187     return 0;
9188 }
9189 #endif /* TARGET_NR_riscv_hwprobe */
9190 
9191 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9192 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9193 #endif
9194 
9195 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9196 #define __NR_sys_open_tree __NR_open_tree
9197 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9198           unsigned int, __flags)
9199 #endif
9200 
9201 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9202 #define __NR_sys_move_mount __NR_move_mount
9203 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9204            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9205 #endif
9206 
9207 /* This is an internal helper for do_syscall so that it is easier
9208  * to have a single return point, so that actions, such as logging
9209  * of syscall results, can be performed.
9210  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9211  */
9212 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9213                             abi_long arg2, abi_long arg3, abi_long arg4,
9214                             abi_long arg5, abi_long arg6, abi_long arg7,
9215                             abi_long arg8)
9216 {
9217     CPUState *cpu = env_cpu(cpu_env);
9218     abi_long ret;
9219 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9220     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9221     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9222     || defined(TARGET_NR_statx)
9223     struct stat st;
9224 #endif
9225 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9226     || defined(TARGET_NR_fstatfs)
9227     struct statfs stfs;
9228 #endif
9229     void *p;
9230 
9231     switch(num) {
9232     case TARGET_NR_exit:
9233         /* In old applications this may be used to implement _exit(2).
9234            However, in threaded applications it is used for thread termination,
9235            and _exit_group is used for application termination.
9236            Do thread termination if we have more than one thread.  */
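        /*
         * Guest-side sketch (illustrative only) of the two paths mentioned
         * above:
         *     syscall(__NR_exit, code);        // ends just this thread
         *     syscall(__NR_exit_group, code);  // ends the whole process
         */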
9237 
9238         if (block_signals()) {
9239             return -QEMU_ERESTARTSYS;
9240         }
9241 
9242         pthread_mutex_lock(&clone_lock);
9243 
9244         if (CPU_NEXT(first_cpu)) {
9245             TaskState *ts = get_task_state(cpu);
9246 
9247             if (ts->child_tidptr) {
9248                 put_user_u32(0, ts->child_tidptr);
9249                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9250                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9251             }
9252 
9253             object_unparent(OBJECT(cpu));
9254             object_unref(OBJECT(cpu));
9255             /*
9256              * At this point the CPU should be unrealized and removed
9257              * from cpu lists. We can clean-up the rest of the thread
9258              * data without the lock held.
9259              */
9260 
9261             pthread_mutex_unlock(&clone_lock);
9262 
9263             thread_cpu = NULL;
9264             g_free(ts);
9265             rcu_unregister_thread();
9266             pthread_exit(NULL);
9267         }
9268 
9269         pthread_mutex_unlock(&clone_lock);
9270         preexit_cleanup(cpu_env, arg1);
9271         _exit(arg1);
9272         return 0; /* avoid warning */
9273     case TARGET_NR_read:
9274         if (arg2 == 0 && arg3 == 0) {
9275             return get_errno(safe_read(arg1, 0, 0));
9276         } else {
9277             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9278                 return -TARGET_EFAULT;
9279             ret = get_errno(safe_read(arg1, p, arg3));
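            /*
             * If a host-to-target data translator is registered for this fd,
             * let it rewrite the bytes just read before they are returned to
             * the guest.
             */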
9280             if (ret >= 0 &&
9281                 fd_trans_host_to_target_data(arg1)) {
9282                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9283             }
9284             unlock_user(p, arg2, ret);
9285         }
9286         return ret;
9287     case TARGET_NR_write:
9288         if (arg2 == 0 && arg3 == 0) {
9289             return get_errno(safe_write(arg1, 0, 0));
9290         }
9291         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9292             return -TARGET_EFAULT;
9293         if (fd_trans_target_to_host_data(arg1)) {
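            /*
             * The translator may modify the data, so work on a private copy
             * rather than the guest buffer, which was only locked for reading.
             */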
9294             void *copy = g_malloc(arg3);
9295             memcpy(copy, p, arg3);
9296             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9297             if (ret >= 0) {
9298                 ret = get_errno(safe_write(arg1, copy, ret));
9299             }
9300             g_free(copy);
9301         } else {
9302             ret = get_errno(safe_write(arg1, p, arg3));
9303         }
9304         unlock_user(p, arg2, 0);
9305         return ret;
9306 
9307 #ifdef TARGET_NR_open
9308     case TARGET_NR_open:
9309         if (!(p = lock_user_string(arg1)))
9310             return -TARGET_EFAULT;
9311         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9312                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9313                                   arg3, true));
9314         fd_trans_unregister(ret);
9315         unlock_user(p, arg1, 0);
9316         return ret;
9317 #endif
9318     case TARGET_NR_openat:
9319         if (!(p = lock_user_string(arg2)))
9320             return -TARGET_EFAULT;
9321         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9322                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9323                                   arg4, true));
9324         fd_trans_unregister(ret);
9325         unlock_user(p, arg2, 0);
9326         return ret;
9327     case TARGET_NR_openat2:
9328         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9329         return ret;
9330 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9331     case TARGET_NR_name_to_handle_at:
9332         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9333         return ret;
9334 #endif
9335 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9336     case TARGET_NR_open_by_handle_at:
9337         ret = do_open_by_handle_at(arg1, arg2, arg3);
9338         fd_trans_unregister(ret);
9339         return ret;
9340 #endif
9341 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9342     case TARGET_NR_pidfd_open:
9343         return get_errno(pidfd_open(arg1, arg2));
9344 #endif
9345 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9346     case TARGET_NR_pidfd_send_signal:
9347         {
9348             siginfo_t uinfo, *puinfo;
9349 
9350             if (arg3) {
9351                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9352                 if (!p) {
9353                     return -TARGET_EFAULT;
9354                 }
9355                 target_to_host_siginfo(&uinfo, p);
9356                 unlock_user(p, arg3, 0);
9357                 puinfo = &uinfo;
9358             } else {
9359                 puinfo = NULL;
9360             }
9361             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9362                                               puinfo, arg4));
9363         }
9364         return ret;
9365 #endif
9366 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9367     case TARGET_NR_pidfd_getfd:
9368         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9369 #endif
9370     case TARGET_NR_close:
9371         fd_trans_unregister(arg1);
9372         return get_errno(close(arg1));
9373 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9374     case TARGET_NR_close_range:
9375         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9376         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9377             abi_long fd, maxfd;
9378             maxfd = MIN(arg2, target_fd_max);
9379             for (fd = arg1; fd < maxfd; fd++) {
9380                 fd_trans_unregister(fd);
9381             }
9382         }
9383         return ret;
9384 #endif
9385 
9386     case TARGET_NR_brk:
9387         return do_brk(arg1);
9388 #ifdef TARGET_NR_fork
9389     case TARGET_NR_fork:
9390         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9391 #endif
9392 #ifdef TARGET_NR_waitpid
9393     case TARGET_NR_waitpid:
9394         {
9395             int status;
9396             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9397             if (!is_error(ret) && arg2 && ret
9398                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9399                 return -TARGET_EFAULT;
9400         }
9401         return ret;
9402 #endif
9403 #ifdef TARGET_NR_waitid
9404     case TARGET_NR_waitid:
9405         {
9406             struct rusage ru;
9407             siginfo_t info;
9408 
9409             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9410                                         arg4, (arg5 ? &ru : NULL)));
9411             if (!is_error(ret)) {
9412                 if (arg3) {
9413                     p = lock_user(VERIFY_WRITE, arg3,
9414                                   sizeof(target_siginfo_t), 0);
9415                     if (!p) {
9416                         return -TARGET_EFAULT;
9417                     }
9418                     host_to_target_siginfo(p, &info);
9419                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9420                 }
9421                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9422                     return -TARGET_EFAULT;
9423                 }
9424             }
9425         }
9426         return ret;
9427 #endif
9428 #ifdef TARGET_NR_creat /* not on alpha */
9429     case TARGET_NR_creat:
9430         if (!(p = lock_user_string(arg1)))
9431             return -TARGET_EFAULT;
9432         ret = get_errno(creat(p, arg2));
9433         fd_trans_unregister(ret);
9434         unlock_user(p, arg1, 0);
9435         return ret;
9436 #endif
9437 #ifdef TARGET_NR_link
9438     case TARGET_NR_link:
9439         {
9440             void *p2;
9441             p = lock_user_string(arg1);
9442             p2 = lock_user_string(arg2);
9443             if (!p || !p2)
9444                 ret = -TARGET_EFAULT;
9445             else
9446                 ret = get_errno(link(p, p2));
9447             unlock_user(p2, arg2, 0);
9448             unlock_user(p, arg1, 0);
9449         }
9450         return ret;
9451 #endif
9452 #if defined(TARGET_NR_linkat)
9453     case TARGET_NR_linkat:
9454         {
9455             void *p2 = NULL;
9456             if (!arg2 || !arg4)
9457                 return -TARGET_EFAULT;
9458             p  = lock_user_string(arg2);
9459             p2 = lock_user_string(arg4);
9460             if (!p || !p2)
9461                 ret = -TARGET_EFAULT;
9462             else
9463                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9464             unlock_user(p, arg2, 0);
9465             unlock_user(p2, arg4, 0);
9466         }
9467         return ret;
9468 #endif
9469 #ifdef TARGET_NR_unlink
9470     case TARGET_NR_unlink:
9471         if (!(p = lock_user_string(arg1)))
9472             return -TARGET_EFAULT;
9473         ret = get_errno(unlink(p));
9474         unlock_user(p, arg1, 0);
9475         return ret;
9476 #endif
9477 #if defined(TARGET_NR_unlinkat)
9478     case TARGET_NR_unlinkat:
9479         if (!(p = lock_user_string(arg2)))
9480             return -TARGET_EFAULT;
9481         ret = get_errno(unlinkat(arg1, p, arg3));
9482         unlock_user(p, arg2, 0);
9483         return ret;
9484 #endif
9485     case TARGET_NR_execveat:
9486         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9487     case TARGET_NR_execve:
9488         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9489     case TARGET_NR_chdir:
9490         if (!(p = lock_user_string(arg1)))
9491             return -TARGET_EFAULT;
9492         ret = get_errno(chdir(p));
9493         unlock_user(p, arg1, 0);
9494         return ret;
9495 #ifdef TARGET_NR_time
9496     case TARGET_NR_time:
9497         {
9498             time_t host_time;
9499             ret = get_errno(time(&host_time));
9500             if (!is_error(ret)
9501                 && arg1
9502                 && put_user_sal(host_time, arg1))
9503                 return -TARGET_EFAULT;
9504         }
9505         return ret;
9506 #endif
9507 #ifdef TARGET_NR_mknod
9508     case TARGET_NR_mknod:
9509         if (!(p = lock_user_string(arg1)))
9510             return -TARGET_EFAULT;
9511         ret = get_errno(mknod(p, arg2, arg3));
9512         unlock_user(p, arg1, 0);
9513         return ret;
9514 #endif
9515 #if defined(TARGET_NR_mknodat)
9516     case TARGET_NR_mknodat:
9517         if (!(p = lock_user_string(arg2)))
9518             return -TARGET_EFAULT;
9519         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9520         unlock_user(p, arg2, 0);
9521         return ret;
9522 #endif
9523 #ifdef TARGET_NR_chmod
9524     case TARGET_NR_chmod:
9525         if (!(p = lock_user_string(arg1)))
9526             return -TARGET_EFAULT;
9527         ret = get_errno(chmod(p, arg2));
9528         unlock_user(p, arg1, 0);
9529         return ret;
9530 #endif
9531 #ifdef TARGET_NR_lseek
9532     case TARGET_NR_lseek:
9533         return get_errno(lseek(arg1, arg2, arg3));
9534 #endif
9535 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9536     /* Alpha specific */
9537     case TARGET_NR_getxpid:
9538         cpu_env->ir[IR_A4] = getppid();
9539         return get_errno(getpid());
9540 #endif
9541 #ifdef TARGET_NR_getpid
9542     case TARGET_NR_getpid:
9543         return get_errno(getpid());
9544 #endif
9545     case TARGET_NR_mount:
9546         {
9547             /* need to look at the data field */
9548             void *p2, *p3;
9549 
9550             if (arg1) {
9551                 p = lock_user_string(arg1);
9552                 if (!p) {
9553                     return -TARGET_EFAULT;
9554                 }
9555             } else {
9556                 p = NULL;
9557             }
9558 
9559             p2 = lock_user_string(arg2);
9560             if (!p2) {
9561                 if (arg1) {
9562                     unlock_user(p, arg1, 0);
9563                 }
9564                 return -TARGET_EFAULT;
9565             }
9566 
9567             if (arg3) {
9568                 p3 = lock_user_string(arg3);
9569                 if (!p3) {
9570                     if (arg1) {
9571                         unlock_user(p, arg1, 0);
9572                     }
9573                     unlock_user(p2, arg2, 0);
9574                     return -TARGET_EFAULT;
9575                 }
9576             } else {
9577                 p3 = NULL;
9578             }
9579 
9580             /* FIXME - arg5 should be locked, but it isn't clear how to
9581              * do that since it's not guaranteed to be a NULL-terminated
9582              * string.
9583              */
9584             if (!arg5) {
9585                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9586             } else {
9587                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9588             }
9589             ret = get_errno(ret);
9590 
9591             if (arg1) {
9592                 unlock_user(p, arg1, 0);
9593             }
9594             unlock_user(p2, arg2, 0);
9595             if (arg3) {
9596                 unlock_user(p3, arg3, 0);
9597             }
9598         }
9599         return ret;
9600 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9601 #if defined(TARGET_NR_umount)
9602     case TARGET_NR_umount:
9603 #endif
9604 #if defined(TARGET_NR_oldumount)
9605     case TARGET_NR_oldumount:
9606 #endif
9607         if (!(p = lock_user_string(arg1)))
9608             return -TARGET_EFAULT;
9609         ret = get_errno(umount(p));
9610         unlock_user(p, arg1, 0);
9611         return ret;
9612 #endif
9613 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9614     case TARGET_NR_move_mount:
9615         {
9616             void *p2, *p4;
9617 
9618             if (!arg2 || !arg4) {
9619                 return -TARGET_EFAULT;
9620             }
9621 
9622             p2 = lock_user_string(arg2);
9623             if (!p2) {
9624                 return -TARGET_EFAULT;
9625             }
9626 
9627             p4 = lock_user_string(arg4);
9628             if (!p4) {
9629                 unlock_user(p2, arg2, 0);
9630                 return -TARGET_EFAULT;
9631             }
9632             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9633 
9634             unlock_user(p2, arg2, 0);
9635             unlock_user(p4, arg4, 0);
9636 
9637             return ret;
9638         }
9639 #endif
9640 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9641     case TARGET_NR_open_tree:
9642         {
9643             void *p2;
9644             int host_flags;
9645 
9646             if (!arg2) {
9647                 return -TARGET_EFAULT;
9648             }
9649 
9650             p2 = lock_user_string(arg2);
9651             if (!p2) {
9652                 return -TARGET_EFAULT;
9653             }
9654 
9655             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9656             if (arg3 & TARGET_O_CLOEXEC) {
9657                 host_flags |= O_CLOEXEC;
9658             }
9659 
9660             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9661 
9662             unlock_user(p2, arg2, 0);
9663 
9664             return ret;
9665         }
9666 #endif
9667 #ifdef TARGET_NR_stime /* not on alpha */
9668     case TARGET_NR_stime:
9669         {
9670             struct timespec ts;
9671             ts.tv_nsec = 0;
9672             if (get_user_sal(ts.tv_sec, arg1)) {
9673                 return -TARGET_EFAULT;
9674             }
9675             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9676         }
9677 #endif
9678 #ifdef TARGET_NR_alarm /* not on alpha */
9679     case TARGET_NR_alarm:
9680         return alarm(arg1);
9681 #endif
9682 #ifdef TARGET_NR_pause /* not on alpha */
9683     case TARGET_NR_pause:
9684         if (!block_signals()) {
9685             sigsuspend(&get_task_state(cpu)->signal_mask);
9686         }
9687         return -TARGET_EINTR;
9688 #endif
9689 #ifdef TARGET_NR_utime
9690     case TARGET_NR_utime:
9691         {
9692             struct utimbuf tbuf, *host_tbuf;
9693             struct target_utimbuf *target_tbuf;
9694             if (arg2) {
9695                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9696                     return -TARGET_EFAULT;
9697                 tbuf.actime = tswapal(target_tbuf->actime);
9698                 tbuf.modtime = tswapal(target_tbuf->modtime);
9699                 unlock_user_struct(target_tbuf, arg2, 0);
9700                 host_tbuf = &tbuf;
9701             } else {
9702                 host_tbuf = NULL;
9703             }
9704             if (!(p = lock_user_string(arg1)))
9705                 return -TARGET_EFAULT;
9706             ret = get_errno(utime(p, host_tbuf));
9707             unlock_user(p, arg1, 0);
9708         }
9709         return ret;
9710 #endif
9711 #ifdef TARGET_NR_utimes
9712     case TARGET_NR_utimes:
9713         {
9714             struct timeval *tvp, tv[2];
9715             if (arg2) {
9716                 if (copy_from_user_timeval(&tv[0], arg2)
9717                     || copy_from_user_timeval(&tv[1],
9718                                               arg2 + sizeof(struct target_timeval)))
9719                     return -TARGET_EFAULT;
9720                 tvp = tv;
9721             } else {
9722                 tvp = NULL;
9723             }
9724             if (!(p = lock_user_string(arg1)))
9725                 return -TARGET_EFAULT;
9726             ret = get_errno(utimes(p, tvp));
9727             unlock_user(p, arg1, 0);
9728         }
9729         return ret;
9730 #endif
9731 #if defined(TARGET_NR_futimesat)
9732     case TARGET_NR_futimesat:
9733         {
9734             struct timeval *tvp, tv[2];
9735             if (arg3) {
9736                 if (copy_from_user_timeval(&tv[0], arg3)
9737                     || copy_from_user_timeval(&tv[1],
9738                                               arg3 + sizeof(struct target_timeval)))
9739                     return -TARGET_EFAULT;
9740                 tvp = tv;
9741             } else {
9742                 tvp = NULL;
9743             }
9744             if (!(p = lock_user_string(arg2))) {
9745                 return -TARGET_EFAULT;
9746             }
9747             ret = get_errno(futimesat(arg1, path(p), tvp));
9748             unlock_user(p, arg2, 0);
9749         }
9750         return ret;
9751 #endif
9752 #ifdef TARGET_NR_access
9753     case TARGET_NR_access:
9754         if (!(p = lock_user_string(arg1))) {
9755             return -TARGET_EFAULT;
9756         }
9757         ret = get_errno(access(path(p), arg2));
9758         unlock_user(p, arg1, 0);
9759         return ret;
9760 #endif
9761 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9762     case TARGET_NR_faccessat:
9763         if (!(p = lock_user_string(arg2))) {
9764             return -TARGET_EFAULT;
9765         }
9766         ret = get_errno(faccessat(arg1, p, arg3, 0));
9767         unlock_user(p, arg2, 0);
9768         return ret;
9769 #endif
9770 #if defined(TARGET_NR_faccessat2)
9771     case TARGET_NR_faccessat2:
9772         if (!(p = lock_user_string(arg2))) {
9773             return -TARGET_EFAULT;
9774         }
9775         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9776         unlock_user(p, arg2, 0);
9777         return ret;
9778 #endif
9779 #ifdef TARGET_NR_nice /* not on alpha */
9780     case TARGET_NR_nice:
9781         return get_errno(nice(arg1));
9782 #endif
9783     case TARGET_NR_sync:
9784         sync();
9785         return 0;
9786 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9787     case TARGET_NR_syncfs:
9788         return get_errno(syncfs(arg1));
9789 #endif
9790     case TARGET_NR_kill:
9791         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9792 #ifdef TARGET_NR_rename
9793     case TARGET_NR_rename:
9794         {
9795             void *p2;
9796             p = lock_user_string(arg1);
9797             p2 = lock_user_string(arg2);
9798             if (!p || !p2)
9799                 ret = -TARGET_EFAULT;
9800             else
9801                 ret = get_errno(rename(p, p2));
9802             unlock_user(p2, arg2, 0);
9803             unlock_user(p, arg1, 0);
9804         }
9805         return ret;
9806 #endif
9807 #if defined(TARGET_NR_renameat)
9808     case TARGET_NR_renameat:
9809         {
9810             void *p2;
9811             p  = lock_user_string(arg2);
9812             p2 = lock_user_string(arg4);
9813             if (!p || !p2)
9814                 ret = -TARGET_EFAULT;
9815             else
9816                 ret = get_errno(renameat(arg1, p, arg3, p2));
9817             unlock_user(p2, arg4, 0);
9818             unlock_user(p, arg2, 0);
9819         }
9820         return ret;
9821 #endif
9822 #if defined(TARGET_NR_renameat2)
9823     case TARGET_NR_renameat2:
9824         {
9825             void *p2;
9826             p  = lock_user_string(arg2);
9827             p2 = lock_user_string(arg4);
9828             if (!p || !p2) {
9829                 ret = -TARGET_EFAULT;
9830             } else {
9831                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9832             }
9833             unlock_user(p2, arg4, 0);
9834             unlock_user(p, arg2, 0);
9835         }
9836         return ret;
9837 #endif
9838 #ifdef TARGET_NR_mkdir
9839     case TARGET_NR_mkdir:
9840         if (!(p = lock_user_string(arg1)))
9841             return -TARGET_EFAULT;
9842         ret = get_errno(mkdir(p, arg2));
9843         unlock_user(p, arg1, 0);
9844         return ret;
9845 #endif
9846 #if defined(TARGET_NR_mkdirat)
9847     case TARGET_NR_mkdirat:
9848         if (!(p = lock_user_string(arg2)))
9849             return -TARGET_EFAULT;
9850         ret = get_errno(mkdirat(arg1, p, arg3));
9851         unlock_user(p, arg2, 0);
9852         return ret;
9853 #endif
9854 #ifdef TARGET_NR_rmdir
9855     case TARGET_NR_rmdir:
9856         if (!(p = lock_user_string(arg1)))
9857             return -TARGET_EFAULT;
9858         ret = get_errno(rmdir(p));
9859         unlock_user(p, arg1, 0);
9860         return ret;
9861 #endif
9862     case TARGET_NR_dup:
9863         ret = get_errno(dup(arg1));
9864         if (ret >= 0) {
9865             fd_trans_dup(arg1, ret);
9866         }
9867         return ret;
9868 #ifdef TARGET_NR_pipe
9869     case TARGET_NR_pipe:
9870         return do_pipe(cpu_env, arg1, 0, 0);
9871 #endif
9872 #ifdef TARGET_NR_pipe2
9873     case TARGET_NR_pipe2:
9874         return do_pipe(cpu_env, arg1,
9875                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9876 #endif
9877     case TARGET_NR_times:
9878         {
9879             struct target_tms *tmsp;
9880             struct tms tms;
9881             ret = get_errno(times(&tms));
9882             if (arg1) {
9883                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9884                 if (!tmsp)
9885                     return -TARGET_EFAULT;
9886                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9887                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9888                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9889                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9890             }
9891             if (!is_error(ret))
9892                 ret = host_to_target_clock_t(ret);
9893         }
9894         return ret;
9895     case TARGET_NR_acct:
9896         if (arg1 == 0) {
9897             ret = get_errno(acct(NULL));
9898         } else {
9899             if (!(p = lock_user_string(arg1))) {
9900                 return -TARGET_EFAULT;
9901             }
9902             ret = get_errno(acct(path(p)));
9903             unlock_user(p, arg1, 0);
9904         }
9905         return ret;
9906 #ifdef TARGET_NR_umount2
9907     case TARGET_NR_umount2:
9908         if (!(p = lock_user_string(arg1)))
9909             return -TARGET_EFAULT;
9910         ret = get_errno(umount2(p, arg2));
9911         unlock_user(p, arg1, 0);
9912         return ret;
9913 #endif
9914     case TARGET_NR_ioctl:
9915         return do_ioctl(arg1, arg2, arg3);
9916 #ifdef TARGET_NR_fcntl
9917     case TARGET_NR_fcntl:
9918         return do_fcntl(arg1, arg2, arg3);
9919 #endif
9920     case TARGET_NR_setpgid:
9921         return get_errno(setpgid(arg1, arg2));
9922     case TARGET_NR_umask:
9923         return get_errno(umask(arg1));
9924     case TARGET_NR_chroot:
9925         if (!(p = lock_user_string(arg1)))
9926             return -TARGET_EFAULT;
9927         ret = get_errno(chroot(p));
9928         unlock_user(p, arg1, 0);
9929         return ret;
9930 #ifdef TARGET_NR_dup2
9931     case TARGET_NR_dup2:
9932         ret = get_errno(dup2(arg1, arg2));
9933         if (ret >= 0) {
9934             fd_trans_dup(arg1, arg2);
9935         }
9936         return ret;
9937 #endif
9938 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9939     case TARGET_NR_dup3:
9940     {
9941         int host_flags;
9942 
9943         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9944             return -EINVAL;
9945         }
9946         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9947         ret = get_errno(dup3(arg1, arg2, host_flags));
9948         if (ret >= 0) {
9949             fd_trans_dup(arg1, arg2);
9950         }
9951         return ret;
9952     }
9953 #endif
9954 #ifdef TARGET_NR_getppid /* not on alpha */
9955     case TARGET_NR_getppid:
9956         return get_errno(getppid());
9957 #endif
9958 #ifdef TARGET_NR_getpgrp
9959     case TARGET_NR_getpgrp:
9960         return get_errno(getpgrp());
9961 #endif
9962     case TARGET_NR_setsid:
9963         return get_errno(setsid());
9964 #ifdef TARGET_NR_sigaction
9965     case TARGET_NR_sigaction:
9966         {
9967 #if defined(TARGET_MIPS)
9968             struct target_sigaction act, oact, *pact, *old_act;
9969 
9970             if (arg2) {
9971                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9972                     return -TARGET_EFAULT;
9973                 act._sa_handler = old_act->_sa_handler;
9974                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9975                 act.sa_flags = old_act->sa_flags;
9976                 unlock_user_struct(old_act, arg2, 0);
9977                 pact = &act;
9978             } else {
9979                 pact = NULL;
9980             }
9981 
9982             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9983 
9984             if (!is_error(ret) && arg3) {
9985                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9986                     return -TARGET_EFAULT;
9987                 old_act->_sa_handler = oact._sa_handler;
9988                 old_act->sa_flags = oact.sa_flags;
9989                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9990                 old_act->sa_mask.sig[1] = 0;
9991                 old_act->sa_mask.sig[2] = 0;
9992                 old_act->sa_mask.sig[3] = 0;
9993                 unlock_user_struct(old_act, arg3, 1);
9994             }
9995 #else
9996             struct target_old_sigaction *old_act;
9997             struct target_sigaction act, oact, *pact;
9998             if (arg2) {
9999                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10000                     return -TARGET_EFAULT;
10001                 act._sa_handler = old_act->_sa_handler;
10002                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10003                 act.sa_flags = old_act->sa_flags;
10004 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10005                 act.sa_restorer = old_act->sa_restorer;
10006 #endif
10007                 unlock_user_struct(old_act, arg2, 0);
10008                 pact = &act;
10009             } else {
10010                 pact = NULL;
10011             }
10012             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10013             if (!is_error(ret) && arg3) {
10014                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10015                     return -TARGET_EFAULT;
10016                 old_act->_sa_handler = oact._sa_handler;
10017                 old_act->sa_mask = oact.sa_mask.sig[0];
10018                 old_act->sa_flags = oact.sa_flags;
10019 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10020                 old_act->sa_restorer = oact.sa_restorer;
10021 #endif
10022                 unlock_user_struct(old_act, arg3, 1);
10023             }
10024 #endif
10025         }
10026         return ret;
10027 #endif
10028     case TARGET_NR_rt_sigaction:
10029         {
10030             /*
10031              * For Alpha and SPARC this is a 5 argument syscall, with
10032              * a 'restorer' parameter which must be copied into the
10033              * sa_restorer field of the sigaction struct.
10034              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10035              * and arg5 is the sigsetsize.
10036              */
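            /*
             * Guest-side argument order, as handled by the cases below
             * (sketch):
             *   Alpha:  rt_sigaction(sig, act, oact, sigsetsize, restorer)
             *   SPARC:  rt_sigaction(sig, act, oact, restorer, sigsetsize)
             *   others: rt_sigaction(sig, act, oact, sigsetsize)
             */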
10037 #if defined(TARGET_ALPHA)
10038             target_ulong sigsetsize = arg4;
10039             target_ulong restorer = arg5;
10040 #elif defined(TARGET_SPARC)
10041             target_ulong restorer = arg4;
10042             target_ulong sigsetsize = arg5;
10043 #else
10044             target_ulong sigsetsize = arg4;
10045             target_ulong restorer = 0;
10046 #endif
10047             struct target_sigaction *act = NULL;
10048             struct target_sigaction *oact = NULL;
10049 
10050             if (sigsetsize != sizeof(target_sigset_t)) {
10051                 return -TARGET_EINVAL;
10052             }
10053             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10054                 return -TARGET_EFAULT;
10055             }
10056             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10057                 ret = -TARGET_EFAULT;
10058             } else {
10059                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10060                 if (oact) {
10061                     unlock_user_struct(oact, arg3, 1);
10062                 }
10063             }
10064             if (act) {
10065                 unlock_user_struct(act, arg2, 0);
10066             }
10067         }
10068         return ret;
10069 #ifdef TARGET_NR_sgetmask /* not on alpha */
10070     case TARGET_NR_sgetmask:
10071         {
10072             sigset_t cur_set;
10073             abi_ulong target_set;
10074             ret = do_sigprocmask(0, NULL, &cur_set);
10075             if (!ret) {
10076                 host_to_target_old_sigset(&target_set, &cur_set);
10077                 ret = target_set;
10078             }
10079         }
10080         return ret;
10081 #endif
10082 #ifdef TARGET_NR_ssetmask /* not on alpha */
10083     case TARGET_NR_ssetmask:
10084         {
10085             sigset_t set, oset;
10086             abi_ulong target_set = arg1;
10087             target_to_host_old_sigset(&set, &target_set);
10088             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10089             if (!ret) {
10090                 host_to_target_old_sigset(&target_set, &oset);
10091                 ret = target_set;
10092             }
10093         }
10094         return ret;
10095 #endif
10096 #ifdef TARGET_NR_sigprocmask
10097     case TARGET_NR_sigprocmask:
10098         {
10099 #if defined(TARGET_ALPHA)
10100             sigset_t set, oldset;
10101             abi_ulong mask;
10102             int how;
10103 
10104             switch (arg1) {
10105             case TARGET_SIG_BLOCK:
10106                 how = SIG_BLOCK;
10107                 break;
10108             case TARGET_SIG_UNBLOCK:
10109                 how = SIG_UNBLOCK;
10110                 break;
10111             case TARGET_SIG_SETMASK:
10112                 how = SIG_SETMASK;
10113                 break;
10114             default:
10115                 return -TARGET_EINVAL;
10116             }
10117             mask = arg2;
10118             target_to_host_old_sigset(&set, &mask);
10119 
10120             ret = do_sigprocmask(how, &set, &oldset);
10121             if (!is_error(ret)) {
10122                 host_to_target_old_sigset(&mask, &oldset);
10123                 ret = mask;
10124                 cpu_env->ir[IR_V0] = 0; /* force no error */
10125             }
10126 #else
10127             sigset_t set, oldset, *set_ptr;
10128             int how;
10129 
10130             if (arg2) {
10131                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10132                 if (!p) {
10133                     return -TARGET_EFAULT;
10134                 }
10135                 target_to_host_old_sigset(&set, p);
10136                 unlock_user(p, arg2, 0);
10137                 set_ptr = &set;
10138                 switch (arg1) {
10139                 case TARGET_SIG_BLOCK:
10140                     how = SIG_BLOCK;
10141                     break;
10142                 case TARGET_SIG_UNBLOCK:
10143                     how = SIG_UNBLOCK;
10144                     break;
10145                 case TARGET_SIG_SETMASK:
10146                     how = SIG_SETMASK;
10147                     break;
10148                 default:
10149                     return -TARGET_EINVAL;
10150                 }
10151             } else {
10152                 how = 0;
10153                 set_ptr = NULL;
10154             }
10155             ret = do_sigprocmask(how, set_ptr, &oldset);
10156             if (!is_error(ret) && arg3) {
10157                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10158                     return -TARGET_EFAULT;
10159                 host_to_target_old_sigset(p, &oldset);
10160                 unlock_user(p, arg3, sizeof(target_sigset_t));
10161             }
10162 #endif
10163         }
10164         return ret;
10165 #endif
10166     case TARGET_NR_rt_sigprocmask:
10167         {
10168             int how = arg1;
10169             sigset_t set, oldset, *set_ptr;
10170 
10171             if (arg4 != sizeof(target_sigset_t)) {
10172                 return -TARGET_EINVAL;
10173             }
10174 
10175             if (arg2) {
10176                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10177                 if (!p) {
10178                     return -TARGET_EFAULT;
10179                 }
10180                 target_to_host_sigset(&set, p);
10181                 unlock_user(p, arg2, 0);
10182                 set_ptr = &set;
10183                 switch (how) {
10184                 case TARGET_SIG_BLOCK:
10185                     how = SIG_BLOCK;
10186                     break;
10187                 case TARGET_SIG_UNBLOCK:
10188                     how = SIG_UNBLOCK;
10189                     break;
10190                 case TARGET_SIG_SETMASK:
10191                     how = SIG_SETMASK;
10192                     break;
10193                 default:
10194                     return -TARGET_EINVAL;
10195                 }
10196             } else {
10197                 how = 0;
10198                 set_ptr = NULL;
10199             }
10200             ret = do_sigprocmask(how, set_ptr, &oldset);
10201             if (!is_error(ret) && arg3) {
10202                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10203                     return -TARGET_EFAULT;
10204                 host_to_target_sigset(p, &oldset);
10205                 unlock_user(p, arg3, sizeof(target_sigset_t));
10206             }
10207         }
10208         return ret;
10209 #ifdef TARGET_NR_sigpending
10210     case TARGET_NR_sigpending:
10211         {
10212             sigset_t set;
10213             ret = get_errno(sigpending(&set));
10214             if (!is_error(ret)) {
10215                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10216                     return -TARGET_EFAULT;
10217                 host_to_target_old_sigset(p, &set);
10218                 unlock_user(p, arg1, sizeof(target_sigset_t));
10219             }
10220         }
10221         return ret;
10222 #endif
10223     case TARGET_NR_rt_sigpending:
10224         {
10225             sigset_t set;
10226 
10227             /* Yes, this check is >, not != like most. We follow the kernel's
10228              * logic, which does it this way because NR_sigpending is
10229              * implemented through the same code path, and in that case
10230              * the old_sigset_t is smaller in size.
10231              */
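            /*
             * Sketch of what the check below accepts:
             *   arg2 == sizeof(target_sigset_t)  -> accepted
             *   arg2 <  sizeof(target_sigset_t)  -> accepted (old-ABI size)
             *   arg2 >  sizeof(target_sigset_t)  -> -TARGET_EINVAL
             */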
10232             if (arg2 > sizeof(target_sigset_t)) {
10233                 return -TARGET_EINVAL;
10234             }
10235 
10236             ret = get_errno(sigpending(&set));
10237             if (!is_error(ret)) {
10238                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10239                     return -TARGET_EFAULT;
10240                 host_to_target_sigset(p, &set);
10241                 unlock_user(p, arg1, sizeof(target_sigset_t));
10242             }
10243         }
10244         return ret;
10245 #ifdef TARGET_NR_sigsuspend
10246     case TARGET_NR_sigsuspend:
10247         {
10248             sigset_t *set;
10249 
10250 #if defined(TARGET_ALPHA)
10251             TaskState *ts = get_task_state(cpu);
10252             /* target_to_host_old_sigset will bswap back */
10253             abi_ulong mask = tswapal(arg1);
10254             set = &ts->sigsuspend_mask;
10255             target_to_host_old_sigset(set, &mask);
10256 #else
10257             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10258             if (ret != 0) {
10259                 return ret;
10260             }
10261 #endif
10262             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10263             finish_sigsuspend_mask(ret);
10264         }
10265         return ret;
10266 #endif
10267     case TARGET_NR_rt_sigsuspend:
10268         {
10269             sigset_t *set;
10270 
10271             ret = process_sigsuspend_mask(&set, arg1, arg2);
10272             if (ret != 0) {
10273                 return ret;
10274             }
10275             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10276             finish_sigsuspend_mask(ret);
10277         }
10278         return ret;
10279 #ifdef TARGET_NR_rt_sigtimedwait
10280     case TARGET_NR_rt_sigtimedwait:
10281         {
10282             sigset_t set;
10283             struct timespec uts, *puts;
10284             siginfo_t uinfo;
10285 
10286             if (arg4 != sizeof(target_sigset_t)) {
10287                 return -TARGET_EINVAL;
10288             }
10289 
10290             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10291                 return -TARGET_EFAULT;
10292             target_to_host_sigset(&set, p);
10293             unlock_user(p, arg1, 0);
10294             if (arg3) {
10295                 puts = &uts;
10296                 if (target_to_host_timespec(puts, arg3)) {
10297                     return -TARGET_EFAULT;
10298                 }
10299             } else {
10300                 puts = NULL;
10301             }
10302             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10303                                                  SIGSET_T_SIZE));
10304             if (!is_error(ret)) {
10305                 if (arg2) {
10306                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10307                                   0);
10308                     if (!p) {
10309                         return -TARGET_EFAULT;
10310                     }
10311                     host_to_target_siginfo(p, &uinfo);
10312                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10313                 }
10314                 ret = host_to_target_signal(ret);
10315             }
10316         }
10317         return ret;
10318 #endif
10319 #ifdef TARGET_NR_rt_sigtimedwait_time64
10320     case TARGET_NR_rt_sigtimedwait_time64:
10321         {
10322             sigset_t set;
10323             struct timespec uts, *puts;
10324             siginfo_t uinfo;
10325 
10326             if (arg4 != sizeof(target_sigset_t)) {
10327                 return -TARGET_EINVAL;
10328             }
10329 
10330             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10331             if (!p) {
10332                 return -TARGET_EFAULT;
10333             }
10334             target_to_host_sigset(&set, p);
10335             unlock_user(p, arg1, 0);
10336             if (arg3) {
10337                 puts = &uts;
10338                 if (target_to_host_timespec64(puts, arg3)) {
10339                     return -TARGET_EFAULT;
10340                 }
10341             } else {
10342                 puts = NULL;
10343             }
10344             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10345                                                  SIGSET_T_SIZE));
10346             if (!is_error(ret)) {
10347                 if (arg2) {
10348                     p = lock_user(VERIFY_WRITE, arg2,
10349                                   sizeof(target_siginfo_t), 0);
10350                     if (!p) {
10351                         return -TARGET_EFAULT;
10352                     }
10353                     host_to_target_siginfo(p, &uinfo);
10354                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10355                 }
10356                 ret = host_to_target_signal(ret);
10357             }
10358         }
10359         return ret;
10360 #endif
10361     case TARGET_NR_rt_sigqueueinfo:
10362         {
10363             siginfo_t uinfo;
10364 
10365             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10366             if (!p) {
10367                 return -TARGET_EFAULT;
10368             }
10369             target_to_host_siginfo(&uinfo, p);
10370             unlock_user(p, arg3, 0);
10371             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10372         }
10373         return ret;
10374     case TARGET_NR_rt_tgsigqueueinfo:
10375         {
10376             siginfo_t uinfo;
10377 
10378             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10379             if (!p) {
10380                 return -TARGET_EFAULT;
10381             }
10382             target_to_host_siginfo(&uinfo, p);
10383             unlock_user(p, arg4, 0);
10384             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10385         }
10386         return ret;
10387 #ifdef TARGET_NR_sigreturn
10388     case TARGET_NR_sigreturn:
10389         if (block_signals()) {
10390             return -QEMU_ERESTARTSYS;
10391         }
10392         return do_sigreturn(cpu_env);
10393 #endif
10394     case TARGET_NR_rt_sigreturn:
10395         if (block_signals()) {
10396             return -QEMU_ERESTARTSYS;
10397         }
10398         return do_rt_sigreturn(cpu_env);
10399     case TARGET_NR_sethostname:
10400         if (!(p = lock_user_string(arg1)))
10401             return -TARGET_EFAULT;
10402         ret = get_errno(sethostname(p, arg2));
10403         unlock_user(p, arg1, 0);
10404         return ret;
10405 #ifdef TARGET_NR_setrlimit
10406     case TARGET_NR_setrlimit:
10407         {
10408             int resource = target_to_host_resource(arg1);
10409             struct target_rlimit *target_rlim;
10410             struct rlimit rlim;
10411             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10412                 return -TARGET_EFAULT;
10413             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10414             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10415             unlock_user_struct(target_rlim, arg2, 0);
10416             /*
10417              * If we just passed through resource limit settings for memory then
10418              * they would also apply to QEMU's own allocations, and QEMU may
10419              * crash or hang if its allocations fail. Ideally we would
10420              * track the guest allocations in QEMU and apply the limits ourselves.
10421              * For now, just tell the guest the call succeeded but don't actually
10422              * limit anything.
10423              */
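            /*
             * Resulting behaviour, as a sketch:
             *   setrlimit(RLIMIT_NOFILE, &rl) -> forwarded to the host
             *   setrlimit(RLIMIT_AS, &rl)     -> reported as success only,
             *                                    not actually applied
             */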
10424             if (resource != RLIMIT_AS &&
10425                 resource != RLIMIT_DATA &&
10426                 resource != RLIMIT_STACK) {
10427                 return get_errno(setrlimit(resource, &rlim));
10428             } else {
10429                 return 0;
10430             }
10431         }
10432 #endif
10433 #ifdef TARGET_NR_getrlimit
10434     case TARGET_NR_getrlimit:
10435         {
10436             int resource = target_to_host_resource(arg1);
10437             struct target_rlimit *target_rlim;
10438             struct rlimit rlim;
10439 
10440             ret = get_errno(getrlimit(resource, &rlim));
10441             if (!is_error(ret)) {
10442                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10443                     return -TARGET_EFAULT;
10444                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10445                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10446                 unlock_user_struct(target_rlim, arg2, 1);
10447             }
10448         }
10449         return ret;
10450 #endif
10451     case TARGET_NR_getrusage:
10452         {
10453             struct rusage rusage;
10454             ret = get_errno(getrusage(arg1, &rusage));
10455             if (!is_error(ret)) {
10456                 ret = host_to_target_rusage(arg2, &rusage);
10457             }
10458         }
10459         return ret;
10460 #if defined(TARGET_NR_gettimeofday)
10461     case TARGET_NR_gettimeofday:
10462         {
10463             struct timeval tv;
10464             struct timezone tz;
10465 
10466             ret = get_errno(gettimeofday(&tv, &tz));
10467             if (!is_error(ret)) {
10468                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10469                     return -TARGET_EFAULT;
10470                 }
10471                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10472                     return -TARGET_EFAULT;
10473                 }
10474             }
10475         }
10476         return ret;
10477 #endif
10478 #if defined(TARGET_NR_settimeofday)
10479     case TARGET_NR_settimeofday:
10480         {
10481             struct timeval tv, *ptv = NULL;
10482             struct timezone tz, *ptz = NULL;
10483 
10484             if (arg1) {
10485                 if (copy_from_user_timeval(&tv, arg1)) {
10486                     return -TARGET_EFAULT;
10487                 }
10488                 ptv = &tv;
10489             }
10490 
10491             if (arg2) {
10492                 if (copy_from_user_timezone(&tz, arg2)) {
10493                     return -TARGET_EFAULT;
10494                 }
10495                 ptz = &tz;
10496             }
10497 
10498             return get_errno(settimeofday(ptv, ptz));
10499         }
10500 #endif
10501 #if defined(TARGET_NR_select)
10502     case TARGET_NR_select:
10503 #if defined(TARGET_WANT_NI_OLD_SELECT)
10504         /* some architectures used to have old_select here
10505          * but now return ENOSYS for it.
10506          */
10507         ret = -TARGET_ENOSYS;
10508 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10509         ret = do_old_select(arg1);
10510 #else
10511         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10512 #endif
10513         return ret;
10514 #endif
10515 #ifdef TARGET_NR_pselect6
10516     case TARGET_NR_pselect6:
10517         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10518 #endif
10519 #ifdef TARGET_NR_pselect6_time64
10520     case TARGET_NR_pselect6_time64:
10521         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10522 #endif
10523 #ifdef TARGET_NR_symlink
10524     case TARGET_NR_symlink:
10525         {
10526             void *p2;
10527             p = lock_user_string(arg1);
10528             p2 = lock_user_string(arg2);
10529             if (!p || !p2)
10530                 ret = -TARGET_EFAULT;
10531             else
10532                 ret = get_errno(symlink(p, p2));
10533             unlock_user(p2, arg2, 0);
10534             unlock_user(p, arg1, 0);
10535         }
10536         return ret;
10537 #endif
10538 #if defined(TARGET_NR_symlinkat)
10539     case TARGET_NR_symlinkat:
10540         {
10541             void *p2;
10542             p  = lock_user_string(arg1);
10543             p2 = lock_user_string(arg3);
10544             if (!p || !p2)
10545                 ret = -TARGET_EFAULT;
10546             else
10547                 ret = get_errno(symlinkat(p, arg2, p2));
10548             unlock_user(p2, arg3, 0);
10549             unlock_user(p, arg1, 0);
10550         }
10551         return ret;
10552 #endif
10553 #ifdef TARGET_NR_readlink
10554     case TARGET_NR_readlink:
10555         {
10556             void *p2;
10557             p = lock_user_string(arg1);
10558             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10559             ret = get_errno(do_guest_readlink(p, p2, arg3));
10560             unlock_user(p2, arg2, ret);
10561             unlock_user(p, arg1, 0);
10562         }
10563         return ret;
10564 #endif
10565 #if defined(TARGET_NR_readlinkat)
10566     case TARGET_NR_readlinkat:
10567         {
10568             void *p2;
10569             p  = lock_user_string(arg2);
10570             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10571             if (!p || !p2) {
10572                 ret = -TARGET_EFAULT;
10573             } else if (!arg4) {
10574                 /* Short circuit this for the magic exe check. */
10575                 ret = -TARGET_EINVAL;
10576             } else if (is_proc_myself((const char *)p, "exe")) {
10577                 /*
10578                  * Don't worry about sign mismatch as earlier mapping
10579                  * logic would have thrown a bad address error.
10580                  */
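                /*
                 * Sketch: a guest readlink of "/proc/self/exe" gets the
                 * guest binary's path (exec_path) rather than QEMU's own.
                 */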
10581                 ret = MIN(strlen(exec_path), arg4);
10582                 /* We cannot NUL terminate the string. */
10583                 memcpy(p2, exec_path, ret);
10584             } else {
10585                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10586             }
10587             unlock_user(p2, arg3, ret);
10588             unlock_user(p, arg2, 0);
10589         }
10590         return ret;
10591 #endif
10592 #ifdef TARGET_NR_swapon
10593     case TARGET_NR_swapon:
10594         if (!(p = lock_user_string(arg1)))
10595             return -TARGET_EFAULT;
10596         ret = get_errno(swapon(p, arg2));
10597         unlock_user(p, arg1, 0);
10598         return ret;
10599 #endif
10600     case TARGET_NR_reboot:
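        /*
         * Sketch of the reboot(2) call being handled; only RESTART2 carries
         * a command string in arg4:
         *     reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
         *            LINUX_REBOOT_CMD_RESTART2, "optional command");
         */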
10601         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10602             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 */
10603             p = lock_user_string(arg4);
10604             if (!p) {
10605                 return -TARGET_EFAULT;
10606             }
10607             ret = get_errno(reboot(arg1, arg2, arg3, p));
10608             unlock_user(p, arg4, 0);
10609         } else {
10610             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10611         }
10612         return ret;
10613 #ifdef TARGET_NR_mmap
10614     case TARGET_NR_mmap:
10615 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10616         {
10617             abi_ulong *v;
10618             abi_ulong v1, v2, v3, v4, v5, v6;
10619             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10620                 return -TARGET_EFAULT;
10621             v1 = tswapal(v[0]);
10622             v2 = tswapal(v[1]);
10623             v3 = tswapal(v[2]);
10624             v4 = tswapal(v[3]);
10625             v5 = tswapal(v[4]);
10626             v6 = tswapal(v[5]);
10627             unlock_user(v, arg1, 0);
10628             return do_mmap(v1, v2, v3, v4, v5, v6);
10629         }
10630 #else
10631         /* mmap pointers are always untagged */
10632         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10633 #endif
10634 #endif
10635 #ifdef TARGET_NR_mmap2
10636     case TARGET_NR_mmap2:
10637 #ifndef MMAP_SHIFT
10638 #define MMAP_SHIFT 12
10639 #endif
10640         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10641                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10642 #endif
10643     case TARGET_NR_munmap:
10644         arg1 = cpu_untagged_addr(cpu, arg1);
10645         return get_errno(target_munmap(arg1, arg2));
10646     case TARGET_NR_mprotect:
10647         arg1 = cpu_untagged_addr(cpu, arg1);
10648         {
10649             TaskState *ts = get_task_state(cpu);
10650             /* Special hack to detect libc making the stack executable.  */
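            /*
             * Typical trigger (guest-side sketch): loading an object that
             * needs an executable stack can make libc call
             *     mprotect(stack_addr, len,
             *              PROT_READ | PROT_WRITE | PROT_EXEC | PROT_GROWSDOWN);
             * and the fixup below widens the range down to the stack limit
             * recorded at load time.
             */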
10651             if ((arg3 & PROT_GROWSDOWN)
10652                 && arg1 >= ts->info->stack_limit
10653                 && arg1 <= ts->info->start_stack) {
10654                 arg3 &= ~PROT_GROWSDOWN;
10655                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10656                 arg1 = ts->info->stack_limit;
10657             }
10658         }
10659         return get_errno(target_mprotect(arg1, arg2, arg3));
10660 #ifdef TARGET_NR_mremap
10661     case TARGET_NR_mremap:
10662         arg1 = cpu_untagged_addr(cpu, arg1);
10663         /* mremap new_addr (arg5) is always untagged */
10664         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10665 #endif
10666         /* ??? msync/mlock/munlock are broken for softmmu.  */
10667 #ifdef TARGET_NR_msync
10668     case TARGET_NR_msync:
10669         return get_errno(msync(g2h(cpu, arg1), arg2,
10670                                target_to_host_msync_arg(arg3)));
10671 #endif
10672 #ifdef TARGET_NR_mlock
10673     case TARGET_NR_mlock:
10674         return get_errno(mlock(g2h(cpu, arg1), arg2));
10675 #endif
10676 #ifdef TARGET_NR_munlock
10677     case TARGET_NR_munlock:
10678         return get_errno(munlock(g2h(cpu, arg1), arg2));
10679 #endif
10680 #ifdef TARGET_NR_mlockall
10681     case TARGET_NR_mlockall:
10682         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10683 #endif
10684 #ifdef TARGET_NR_munlockall
10685     case TARGET_NR_munlockall:
10686         return get_errno(munlockall());
10687 #endif
10688 #ifdef TARGET_NR_truncate
10689     case TARGET_NR_truncate:
10690         if (!(p = lock_user_string(arg1)))
10691             return -TARGET_EFAULT;
10692         ret = get_errno(truncate(p, arg2));
10693         unlock_user(p, arg1, 0);
10694         return ret;
10695 #endif
10696 #ifdef TARGET_NR_ftruncate
10697     case TARGET_NR_ftruncate:
10698         return get_errno(ftruncate(arg1, arg2));
10699 #endif
10700     case TARGET_NR_fchmod:
10701         return get_errno(fchmod(arg1, arg2));
10702 #if defined(TARGET_NR_fchmodat)
10703     case TARGET_NR_fchmodat:
10704         if (!(p = lock_user_string(arg2)))
10705             return -TARGET_EFAULT;
10706         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10707         unlock_user(p, arg2, 0);
10708         return ret;
10709 #endif
10710     case TARGET_NR_getpriority:
10711         /* Note that negative values are valid for getpriority, so we must
10712            differentiate based on errno settings.  */
10713         errno = 0;
10714         ret = getpriority(arg1, arg2);
10715         if (ret == -1 && errno != 0) {
10716             return -host_to_target_errno(errno);
10717         }
10718 #ifdef TARGET_ALPHA
10719         /* Return value is the unbiased priority.  Signal no error.  */
10720         cpu_env->ir[IR_V0] = 0;
10721 #else
10722         /* Return value is a biased priority to avoid negative numbers.  */
10723         ret = 20 - ret;
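        /* E.g. a host nice value of -20 becomes 40 here, and 19 becomes 1. */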
10724 #endif
10725         return ret;
10726     case TARGET_NR_setpriority:
10727         return get_errno(setpriority(arg1, arg2, arg3));
10728 #ifdef TARGET_NR_statfs
10729     case TARGET_NR_statfs:
10730         if (!(p = lock_user_string(arg1))) {
10731             return -TARGET_EFAULT;
10732         }
10733         ret = get_errno(statfs(path(p), &stfs));
10734         unlock_user(p, arg1, 0);
10735     convert_statfs:
10736         if (!is_error(ret)) {
10737             struct target_statfs *target_stfs;
10738 
10739             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10740                 return -TARGET_EFAULT;
10741             __put_user(stfs.f_type, &target_stfs->f_type);
10742             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10743             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10744             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10745             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10746             __put_user(stfs.f_files, &target_stfs->f_files);
10747             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10748             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10749             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10750             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10751             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10752 #ifdef _STATFS_F_FLAGS
10753             __put_user(stfs.f_flags, &target_stfs->f_flags);
10754 #else
10755             __put_user(0, &target_stfs->f_flags);
10756 #endif
10757             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10758             unlock_user_struct(target_stfs, arg2, 1);
10759         }
10760         return ret;
10761 #endif
10762 #ifdef TARGET_NR_fstatfs
10763     case TARGET_NR_fstatfs:
10764         ret = get_errno(fstatfs(arg1, &stfs));
10765         goto convert_statfs;
10766 #endif
10767 #ifdef TARGET_NR_statfs64
10768     case TARGET_NR_statfs64:
10769         if (!(p = lock_user_string(arg1))) {
10770             return -TARGET_EFAULT;
10771         }
10772         ret = get_errno(statfs(path(p), &stfs));
10773         unlock_user(p, arg1, 0);
10774     convert_statfs64:
10775         if (!is_error(ret)) {
10776             struct target_statfs64 *target_stfs;
10777 
10778             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10779                 return -TARGET_EFAULT;
10780             __put_user(stfs.f_type, &target_stfs->f_type);
10781             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10782             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10783             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10784             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10785             __put_user(stfs.f_files, &target_stfs->f_files);
10786             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10787             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10788             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10789             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10790             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10791 #ifdef _STATFS_F_FLAGS
10792             __put_user(stfs.f_flags, &target_stfs->f_flags);
10793 #else
10794             __put_user(0, &target_stfs->f_flags);
10795 #endif
10796             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10797             unlock_user_struct(target_stfs, arg3, 1);
10798         }
10799         return ret;
10800     case TARGET_NR_fstatfs64:
10801         ret = get_errno(fstatfs(arg1, &stfs));
10802         goto convert_statfs64;
10803 #endif
10804 #ifdef TARGET_NR_socketcall
10805     case TARGET_NR_socketcall:
10806         return do_socketcall(arg1, arg2);
10807 #endif
10808 #ifdef TARGET_NR_accept
10809     case TARGET_NR_accept:
10810         return do_accept4(arg1, arg2, arg3, 0);
10811 #endif
10812 #ifdef TARGET_NR_accept4
10813     case TARGET_NR_accept4:
10814         return do_accept4(arg1, arg2, arg3, arg4);
10815 #endif
10816 #ifdef TARGET_NR_bind
10817     case TARGET_NR_bind:
10818         return do_bind(arg1, arg2, arg3);
10819 #endif
10820 #ifdef TARGET_NR_connect
10821     case TARGET_NR_connect:
10822         return do_connect(arg1, arg2, arg3);
10823 #endif
10824 #ifdef TARGET_NR_getpeername
10825     case TARGET_NR_getpeername:
10826         return do_getpeername(arg1, arg2, arg3);
10827 #endif
10828 #ifdef TARGET_NR_getsockname
10829     case TARGET_NR_getsockname:
10830         return do_getsockname(arg1, arg2, arg3);
10831 #endif
10832 #ifdef TARGET_NR_getsockopt
10833     case TARGET_NR_getsockopt:
10834         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10835 #endif
10836 #ifdef TARGET_NR_listen
10837     case TARGET_NR_listen:
10838         return get_errno(listen(arg1, arg2));
10839 #endif
10840 #ifdef TARGET_NR_recv
10841     case TARGET_NR_recv:
10842         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10843 #endif
10844 #ifdef TARGET_NR_recvfrom
10845     case TARGET_NR_recvfrom:
10846         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10847 #endif
10848 #ifdef TARGET_NR_recvmsg
10849     case TARGET_NR_recvmsg:
10850         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10851 #endif
10852 #ifdef TARGET_NR_send
10853     case TARGET_NR_send:
10854         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10855 #endif
10856 #ifdef TARGET_NR_sendmsg
10857     case TARGET_NR_sendmsg:
10858         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10859 #endif
10860 #ifdef TARGET_NR_sendmmsg
10861     case TARGET_NR_sendmmsg:
10862         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10863 #endif
10864 #ifdef TARGET_NR_recvmmsg
10865     case TARGET_NR_recvmmsg:
10866         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10867 #endif
10868 #ifdef TARGET_NR_sendto
10869     case TARGET_NR_sendto:
10870         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10871 #endif
10872 #ifdef TARGET_NR_shutdown
10873     case TARGET_NR_shutdown:
10874         return get_errno(shutdown(arg1, arg2));
10875 #endif
10876 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
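          /* Fill a write-locked guest buffer via the host getrandom();
             unlock_user copies back the number of bytes produced. */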
10877     case TARGET_NR_getrandom:
10878         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10879         if (!p) {
10880             return -TARGET_EFAULT;
10881         }
10882         ret = get_errno(getrandom(p, arg2, arg3));
10883         unlock_user(p, arg1, ret);
10884         return ret;
10885 #endif
10886 #ifdef TARGET_NR_socket
10887     case TARGET_NR_socket:
10888         return do_socket(arg1, arg2, arg3);
10889 #endif
10890 #ifdef TARGET_NR_socketpair
10891     case TARGET_NR_socketpair:
10892         return do_socketpair(arg1, arg2, arg3, arg4);
10893 #endif
10894 #ifdef TARGET_NR_setsockopt
10895     case TARGET_NR_setsockopt:
10896         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10897 #endif
10898 #if defined(TARGET_NR_syslog)
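          /* syslog: actions that take no buffer are passed straight through
             with a NULL pointer; the READ* actions bounce the data through a
             write-locked guest buffer. */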
10899     case TARGET_NR_syslog:
10900         {
10901             int len = arg2;
10902 
10903             switch (arg1) {
10904             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10905             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10906             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10907             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10908             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10909             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10910             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10911             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10912                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10913             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10914             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10915             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10916                 {
10917                     if (len < 0) {
10918                         return -TARGET_EINVAL;
10919                     }
10920                     if (len == 0) {
10921                         return 0;
10922                     }
10923                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10924                     if (!p) {
10925                         return -TARGET_EFAULT;
10926                     }
10927                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10928                     unlock_user(p, arg2, arg3);
10929                 }
10930                 return ret;
10931             default:
10932                 return -TARGET_EINVAL;
10933             }
10934         }
10935         break;
10936 #endif
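          /* setitimer/getitimer: the target's struct itimerval is two
             target_timevals back to back, so each half is converted
             separately. */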
10937     case TARGET_NR_setitimer:
10938         {
10939             struct itimerval value, ovalue, *pvalue;
10940 
10941             if (arg2) {
10942                 pvalue = &value;
10943                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10944                     || copy_from_user_timeval(&pvalue->it_value,
10945                                               arg2 + sizeof(struct target_timeval)))
10946                     return -TARGET_EFAULT;
10947             } else {
10948                 pvalue = NULL;
10949             }
10950             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10951             if (!is_error(ret) && arg3) {
10952                 if (copy_to_user_timeval(arg3,
10953                                          &ovalue.it_interval)
10954                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10955                                             &ovalue.it_value))
10956                     return -TARGET_EFAULT;
10957             }
10958         }
10959         return ret;
10960     case TARGET_NR_getitimer:
10961         {
10962             struct itimerval value;
10963 
10964             ret = get_errno(getitimer(arg1, &value));
10965             if (!is_error(ret) && arg2) {
10966                 if (copy_to_user_timeval(arg2,
10967                                          &value.it_interval)
10968                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10969                                             &value.it_value))
10970                     return -TARGET_EFAULT;
10971             }
10972         }
10973         return ret;
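          /* stat, lstat and fstat all funnel through the do_stat label below,
             which copies the host struct stat into the target layout. */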
10974 #ifdef TARGET_NR_stat
10975     case TARGET_NR_stat:
10976         if (!(p = lock_user_string(arg1))) {
10977             return -TARGET_EFAULT;
10978         }
10979         ret = get_errno(stat(path(p), &st));
10980         unlock_user(p, arg1, 0);
10981         goto do_stat;
10982 #endif
10983 #ifdef TARGET_NR_lstat
10984     case TARGET_NR_lstat:
10985         if (!(p = lock_user_string(arg1))) {
10986             return -TARGET_EFAULT;
10987         }
10988         ret = get_errno(lstat(path(p), &st));
10989         unlock_user(p, arg1, 0);
10990         goto do_stat;
10991 #endif
10992 #ifdef TARGET_NR_fstat
10993     case TARGET_NR_fstat:
10994         {
10995             ret = get_errno(fstat(arg1, &st));
10996 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10997         do_stat:
10998 #endif
10999             if (!is_error(ret)) {
11000                 struct target_stat *target_st;
11001 
11002                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11003                     return -TARGET_EFAULT;
11004                 memset(target_st, 0, sizeof(*target_st));
11005                 __put_user(st.st_dev, &target_st->st_dev);
11006                 __put_user(st.st_ino, &target_st->st_ino);
11007                 __put_user(st.st_mode, &target_st->st_mode);
11008                 __put_user(st.st_uid, &target_st->st_uid);
11009                 __put_user(st.st_gid, &target_st->st_gid);
11010                 __put_user(st.st_nlink, &target_st->st_nlink);
11011                 __put_user(st.st_rdev, &target_st->st_rdev);
11012                 __put_user(st.st_size, &target_st->st_size);
11013                 __put_user(st.st_blksize, &target_st->st_blksize);
11014                 __put_user(st.st_blocks, &target_st->st_blocks);
11015                 __put_user(st.st_atime, &target_st->target_st_atime);
11016                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11017                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11018 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11019                 __put_user(st.st_atim.tv_nsec,
11020                            &target_st->target_st_atime_nsec);
11021                 __put_user(st.st_mtim.tv_nsec,
11022                            &target_st->target_st_mtime_nsec);
11023                 __put_user(st.st_ctim.tv_nsec,
11024                            &target_st->target_st_ctime_nsec);
11025 #endif
11026                 unlock_user_struct(target_st, arg2, 1);
11027             }
11028         }
11029         return ret;
11030 #endif
11031     case TARGET_NR_vhangup:
11032         return get_errno(vhangup());
11033 #ifdef TARGET_NR_syscall
11034     case TARGET_NR_syscall:
11035         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11036                           arg6, arg7, arg8, 0);
11037 #endif
11038 #if defined(TARGET_NR_wait4)
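          /* wait4: on success, convert the wait status (if requested) and the
             optional struct rusage back to the target. */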
11039     case TARGET_NR_wait4:
11040         {
11041             int status;
11042             abi_long status_ptr = arg2;
11043             struct rusage rusage, *rusage_ptr;
11044             abi_ulong target_rusage = arg4;
11045             abi_long rusage_err;
11046             if (target_rusage)
11047                 rusage_ptr = &rusage;
11048             else
11049                 rusage_ptr = NULL;
11050             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11051             if (!is_error(ret)) {
11052                 if (status_ptr && ret) {
11053                     status = host_to_target_waitstatus(status);
11054                     if (put_user_s32(status, status_ptr))
11055                         return -TARGET_EFAULT;
11056                 }
11057                 if (target_rusage) {
11058                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11059                     if (rusage_err) {
11060                         ret = rusage_err;
11061                     }
11062                 }
11063             }
11064         }
11065         return ret;
11066 #endif
11067 #ifdef TARGET_NR_swapoff
11068     case TARGET_NR_swapoff:
11069         if (!(p = lock_user_string(arg1)))
11070             return -TARGET_EFAULT;
11071         ret = get_errno(swapoff(p));
11072         unlock_user(p, arg1, 0);
11073         return ret;
11074 #endif
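          /* sysinfo: copy the host struct sysinfo into the target structure
             field by field. */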
11075     case TARGET_NR_sysinfo:
11076         {
11077             struct target_sysinfo *target_value;
11078             struct sysinfo value;
11079             ret = get_errno(sysinfo(&value));
11080             if (!is_error(ret) && arg1)
11081             {
11082                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11083                     return -TARGET_EFAULT;
11084                 __put_user(value.uptime, &target_value->uptime);
11085                 __put_user(value.loads[0], &target_value->loads[0]);
11086                 __put_user(value.loads[1], &target_value->loads[1]);
11087                 __put_user(value.loads[2], &target_value->loads[2]);
11088                 __put_user(value.totalram, &target_value->totalram);
11089                 __put_user(value.freeram, &target_value->freeram);
11090                 __put_user(value.sharedram, &target_value->sharedram);
11091                 __put_user(value.bufferram, &target_value->bufferram);
11092                 __put_user(value.totalswap, &target_value->totalswap);
11093                 __put_user(value.freeswap, &target_value->freeswap);
11094                 __put_user(value.procs, &target_value->procs);
11095                 __put_user(value.totalhigh, &target_value->totalhigh);
11096                 __put_user(value.freehigh, &target_value->freehigh);
11097                 __put_user(value.mem_unit, &target_value->mem_unit);
11098                 unlock_user_struct(target_value, arg1, 1);
11099             }
11100         }
11101         return ret;
11102 #ifdef TARGET_NR_ipc
11103     case TARGET_NR_ipc:
11104         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11105 #endif
11106 #ifdef TARGET_NR_semget
11107     case TARGET_NR_semget:
11108         return get_errno(semget(arg1, arg2, arg3));
11109 #endif
11110 #ifdef TARGET_NR_semop
11111     case TARGET_NR_semop:
11112         return do_semtimedop(arg1, arg2, arg3, 0, false);
11113 #endif
11114 #ifdef TARGET_NR_semtimedop
11115     case TARGET_NR_semtimedop:
11116         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11117 #endif
11118 #ifdef TARGET_NR_semtimedop_time64
11119     case TARGET_NR_semtimedop_time64:
11120         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11121 #endif
11122 #ifdef TARGET_NR_semctl
11123     case TARGET_NR_semctl:
11124         return do_semctl(arg1, arg2, arg3, arg4);
11125 #endif
11126 #ifdef TARGET_NR_msgctl
11127     case TARGET_NR_msgctl:
11128         return do_msgctl(arg1, arg2, arg3);
11129 #endif
11130 #ifdef TARGET_NR_msgget
11131     case TARGET_NR_msgget:
11132         return get_errno(msgget(arg1, arg2));
11133 #endif
11134 #ifdef TARGET_NR_msgrcv
11135     case TARGET_NR_msgrcv:
11136         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11137 #endif
11138 #ifdef TARGET_NR_msgsnd
11139     case TARGET_NR_msgsnd:
11140         return do_msgsnd(arg1, arg2, arg3, arg4);
11141 #endif
11142 #ifdef TARGET_NR_shmget
11143     case TARGET_NR_shmget:
11144         return get_errno(shmget(arg1, arg2, arg3));
11145 #endif
11146 #ifdef TARGET_NR_shmctl
11147     case TARGET_NR_shmctl:
11148         return do_shmctl(arg1, arg2, arg3);
11149 #endif
11150 #ifdef TARGET_NR_shmat
11151     case TARGET_NR_shmat:
11152         return target_shmat(cpu_env, arg1, arg2, arg3);
11153 #endif
11154 #ifdef TARGET_NR_shmdt
11155     case TARGET_NR_shmdt:
11156         return target_shmdt(arg1);
11157 #endif
11158     case TARGET_NR_fsync:
11159         return get_errno(fsync(arg1));
11160     case TARGET_NR_clone:
11161         /* Linux manages to have three different orderings for its
11162          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11163          * match the kernel's CONFIG_CLONE_* settings.
11164          * Microblaze is further special in that it uses a sixth
11165          * implicit argument to clone for the TLS pointer.
11166          */
11167 #if defined(TARGET_MICROBLAZE)
11168         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11169 #elif defined(TARGET_CLONE_BACKWARDS)
11170         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11171 #elif defined(TARGET_CLONE_BACKWARDS2)
11172         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11173 #else
11174         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11175 #endif
11176         return ret;
11177 #ifdef __NR_exit_group
11178         /* new thread calls */
11179     case TARGET_NR_exit_group:
11180         preexit_cleanup(cpu_env, arg1);
11181         return get_errno(exit_group(arg1));
11182 #endif
11183     case TARGET_NR_setdomainname:
11184         if (!(p = lock_user_string(arg1)))
11185             return -TARGET_EFAULT;
11186         ret = get_errno(setdomainname(p, arg2));
11187         unlock_user(p, arg1, 0);
11188         return ret;
11189     case TARGET_NR_uname:
11190         /* no need to transcode because we use the linux syscall */
11191         {
11192             struct new_utsname * buf;
11193 
11194             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11195                 return -TARGET_EFAULT;
11196             ret = get_errno(sys_uname(buf));
11197             if (!is_error(ret)) {
11198                 /* Overwrite the native machine name with whatever is being
11199                    emulated. */
11200                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11201                           sizeof(buf->machine));
11202                 /* Allow the user to override the reported release.  */
11203                 if (qemu_uname_release && *qemu_uname_release) {
11204                     g_strlcpy(buf->release, qemu_uname_release,
11205                               sizeof(buf->release));
11206                 }
11207             }
11208             unlock_user_struct(buf, arg1, 1);
11209         }
11210         return ret;
11211 #ifdef TARGET_I386
11212     case TARGET_NR_modify_ldt:
11213         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11214 #if !defined(TARGET_X86_64)
11215     case TARGET_NR_vm86:
11216         return do_vm86(cpu_env, arg1, arg2);
11217 #endif
11218 #endif
11219 #if defined(TARGET_NR_adjtimex)
11220     case TARGET_NR_adjtimex:
11221         {
11222             struct timex host_buf;
11223 
11224             if (target_to_host_timex(&host_buf, arg1) != 0) {
11225                 return -TARGET_EFAULT;
11226             }
11227             ret = get_errno(adjtimex(&host_buf));
11228             if (!is_error(ret)) {
11229                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11230                     return -TARGET_EFAULT;
11231                 }
11232             }
11233         }
11234         return ret;
11235 #endif
11236 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11237     case TARGET_NR_clock_adjtime:
11238         {
11239             struct timex htx;
11240 
11241             if (target_to_host_timex(&htx, arg2) != 0) {
11242                 return -TARGET_EFAULT;
11243             }
11244             ret = get_errno(clock_adjtime(arg1, &htx));
11245             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11246                 return -TARGET_EFAULT;
11247             }
11248         }
11249         return ret;
11250 #endif
11251 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11252     case TARGET_NR_clock_adjtime64:
11253         {
11254             struct timex htx;
11255 
11256             if (target_to_host_timex64(&htx, arg2) != 0) {
11257                 return -TARGET_EFAULT;
11258             }
11259             ret = get_errno(clock_adjtime(arg1, &htx));
11260             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11261                 return -TARGET_EFAULT;
11262             }
11263         }
11264         return ret;
11265 #endif
11266     case TARGET_NR_getpgid:
11267         return get_errno(getpgid(arg1));
11268     case TARGET_NR_fchdir:
11269         return get_errno(fchdir(arg1));
11270     case TARGET_NR_personality:
11271         return get_errno(personality(arg1));
11272 #ifdef TARGET_NR__llseek /* Not on alpha */
11273     case TARGET_NR__llseek:
11274         {
11275             int64_t res;
11276 #if !defined(__NR_llseek)
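                  /* No host _llseek: build the 64-bit offset from the two
                     32-bit halves and use lseek directly. */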
11277             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11278             if (res == -1) {
11279                 ret = get_errno(res);
11280             } else {
11281                 ret = 0;
11282             }
11283 #else
11284             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11285 #endif
11286             if ((ret == 0) && put_user_s64(res, arg4)) {
11287                 return -TARGET_EFAULT;
11288             }
11289         }
11290         return ret;
11291 #endif
11292 #ifdef TARGET_NR_getdents
11293     case TARGET_NR_getdents:
11294         return do_getdents(arg1, arg2, arg3);
11295 #endif /* TARGET_NR_getdents */
11296 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11297     case TARGET_NR_getdents64:
11298         return do_getdents64(arg1, arg2, arg3);
11299 #endif /* TARGET_NR_getdents64 */
11300 #if defined(TARGET_NR__newselect)
11301     case TARGET_NR__newselect:
11302         return do_select(arg1, arg2, arg3, arg4, arg5);
11303 #endif
11304 #ifdef TARGET_NR_poll
11305     case TARGET_NR_poll:
11306         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11307 #endif
11308 #ifdef TARGET_NR_ppoll
11309     case TARGET_NR_ppoll:
11310         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11311 #endif
11312 #ifdef TARGET_NR_ppoll_time64
11313     case TARGET_NR_ppoll_time64:
11314         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11315 #endif
11316     case TARGET_NR_flock:
11317         /* NOTE: the flock constant seems to be the same for every
11318            Linux platform */
11319         return get_errno(safe_flock(arg1, arg2));
11320     case TARGET_NR_readv:
11321         {
11322             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11323             if (vec != NULL) {
11324                 ret = get_errno(safe_readv(arg1, vec, arg3));
11325                 unlock_iovec(vec, arg2, arg3, 1);
11326             } else {
11327                 ret = -host_to_target_errno(errno);
11328             }
11329         }
11330         return ret;
11331     case TARGET_NR_writev:
11332         {
11333             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11334             if (vec != NULL) {
11335                 ret = get_errno(safe_writev(arg1, vec, arg3));
11336                 unlock_iovec(vec, arg2, arg3, 0);
11337             } else {
11338                 ret = -host_to_target_errno(errno);
11339             }
11340         }
11341         return ret;
11342 #if defined(TARGET_NR_preadv)
11343     case TARGET_NR_preadv:
11344         {
11345             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11346             if (vec != NULL) {
11347                 unsigned long low, high;
11348 
11349                 target_to_host_low_high(arg4, arg5, &low, &high);
11350                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11351                 unlock_iovec(vec, arg2, arg3, 1);
11352             } else {
11353                 ret = -host_to_target_errno(errno);
11354             }
11355         }
11356         return ret;
11357 #endif
11358 #if defined(TARGET_NR_pwritev)
11359     case TARGET_NR_pwritev:
11360         {
11361             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11362             if (vec != NULL) {
11363                 unsigned long low, high;
11364 
11365                 target_to_host_low_high(arg4, arg5, &low, &high);
11366                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11367                 unlock_iovec(vec, arg2, arg3, 0);
11368             } else {
11369                 ret = -host_to_target_errno(errno);
11370             }
11371         }
11372         return ret;
11373 #endif
11374     case TARGET_NR_getsid:
11375         return get_errno(getsid(arg1));
11376 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11377     case TARGET_NR_fdatasync:
11378         return get_errno(fdatasync(arg1));
11379 #endif
11380     case TARGET_NR_sched_getaffinity:
11381         {
11382             unsigned int mask_size;
11383             unsigned long *mask;
11384 
11385             /*
11386              * sched_getaffinity needs multiples of ulong, so need to take
11387              * care of mismatches between target ulong and host ulong sizes.
11388              */
11389             if (arg2 & (sizeof(abi_ulong) - 1)) {
11390                 return -TARGET_EINVAL;
11391             }
11392             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11393 
11394             mask = alloca(mask_size);
11395             memset(mask, 0, mask_size);
11396             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11397 
11398             if (!is_error(ret)) {
11399                 if (ret > arg2) {
11400                     /* More data returned than the caller's buffer will fit.
11401                      * This only happens if sizeof(abi_long) < sizeof(long)
11402                      * and the caller passed us a buffer holding an odd number
11403                      * of abi_longs. If the host kernel is actually using the
11404                      * extra 4 bytes then fail EINVAL; otherwise we can just
11405                      * ignore them and only copy the interesting part.
11406                      */
11407                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11408                     if (numcpus > arg2 * 8) {
11409                         return -TARGET_EINVAL;
11410                     }
11411                     ret = arg2;
11412                 }
11413 
11414                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11415                     return -TARGET_EFAULT;
11416                 }
11417             }
11418         }
11419         return ret;
11420     case TARGET_NR_sched_setaffinity:
11421         {
11422             unsigned int mask_size;
11423             unsigned long *mask;
11424 
11425             /*
11426              * sched_setaffinity needs multiples of ulong, so need to take
11427              * care of mismatches between target ulong and host ulong sizes.
11428              */
11429             if (arg2 & (sizeof(abi_ulong) - 1)) {
11430                 return -TARGET_EINVAL;
11431             }
11432             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11433             mask = alloca(mask_size);
11434 
11435             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11436             if (ret) {
11437                 return ret;
11438             }
11439 
11440             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11441         }
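          /* getcpu: only the cpu/node slots the guest actually passed are
             written back. */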
11442     case TARGET_NR_getcpu:
11443         {
11444             unsigned cpuid, node;
11445             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11446                                        arg2 ? &node : NULL,
11447                                        NULL));
11448             if (is_error(ret)) {
11449                 return ret;
11450             }
11451             if (arg1 && put_user_u32(cpuid, arg1)) {
11452                 return -TARGET_EFAULT;
11453             }
11454             if (arg2 && put_user_u32(node, arg2)) {
11455                 return -TARGET_EFAULT;
11456             }
11457         }
11458         return ret;
11459     case TARGET_NR_sched_setparam:
11460         {
11461             struct target_sched_param *target_schp;
11462             struct sched_param schp;
11463 
11464             if (arg2 == 0) {
11465                 return -TARGET_EINVAL;
11466             }
11467             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11468                 return -TARGET_EFAULT;
11469             }
11470             schp.sched_priority = tswap32(target_schp->sched_priority);
11471             unlock_user_struct(target_schp, arg2, 0);
11472             return get_errno(sys_sched_setparam(arg1, &schp));
11473         }
11474     case TARGET_NR_sched_getparam:
11475         {
11476             struct target_sched_param *target_schp;
11477             struct sched_param schp;
11478 
11479             if (arg2 == 0) {
11480                 return -TARGET_EINVAL;
11481             }
11482             ret = get_errno(sys_sched_getparam(arg1, &schp));
11483             if (!is_error(ret)) {
11484                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11485                     return -TARGET_EFAULT;
11486                 }
11487                 target_schp->sched_priority = tswap32(schp.sched_priority);
11488                 unlock_user_struct(target_schp, arg2, 1);
11489             }
11490         }
11491         return ret;
11492     case TARGET_NR_sched_setscheduler:
11493         {
11494             struct target_sched_param *target_schp;
11495             struct sched_param schp;
11496             if (arg3 == 0) {
11497                 return -TARGET_EINVAL;
11498             }
11499             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11500                 return -TARGET_EFAULT;
11501             }
11502             schp.sched_priority = tswap32(target_schp->sched_priority);
11503             unlock_user_struct(target_schp, arg3, 0);
11504             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11505         }
11506     case TARGET_NR_sched_getscheduler:
11507         return get_errno(sys_sched_getscheduler(arg1));
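          /* sched_getattr: clamp the requested size to the host struct, then
             byte-swap each field into the guest's buffer. */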
11508     case TARGET_NR_sched_getattr:
11509         {
11510             struct target_sched_attr *target_scha;
11511             struct sched_attr scha;
11512             if (arg2 == 0) {
11513                 return -TARGET_EINVAL;
11514             }
11515             if (arg3 > sizeof(scha)) {
11516                 arg3 = sizeof(scha);
11517             }
11518             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11519             if (!is_error(ret)) {
11520                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11521                 if (!target_scha) {
11522                     return -TARGET_EFAULT;
11523                 }
11524                 target_scha->size = tswap32(scha.size);
11525                 target_scha->sched_policy = tswap32(scha.sched_policy);
11526                 target_scha->sched_flags = tswap64(scha.sched_flags);
11527                 target_scha->sched_nice = tswap32(scha.sched_nice);
11528                 target_scha->sched_priority = tswap32(scha.sched_priority);
11529                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11530                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11531                 target_scha->sched_period = tswap64(scha.sched_period);
11532                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11533                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11534                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11535                 }
11536                 unlock_user(target_scha, arg2, arg3);
11537             }
11538             return ret;
11539         }
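          /* sched_setattr: read the guest-supplied size, require any bytes
             beyond the structure we know about to be zero, then convert into
             a host struct sched_attr. */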
11540     case TARGET_NR_sched_setattr:
11541         {
11542             struct target_sched_attr *target_scha;
11543             struct sched_attr scha;
11544             uint32_t size;
11545             int zeroed;
11546             if (arg2 == 0) {
11547                 return -TARGET_EINVAL;
11548             }
11549             if (get_user_u32(size, arg2)) {
11550                 return -TARGET_EFAULT;
11551             }
11552             if (!size) {
11553                 size = offsetof(struct target_sched_attr, sched_util_min);
11554             }
11555             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11556                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11557                     return -TARGET_EFAULT;
11558                 }
11559                 return -TARGET_E2BIG;
11560             }
11561 
11562             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11563             if (zeroed < 0) {
11564                 return zeroed;
11565             } else if (zeroed == 0) {
11566                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11567                     return -TARGET_EFAULT;
11568                 }
11569                 return -TARGET_E2BIG;
11570             }
11571             if (size > sizeof(struct target_sched_attr)) {
11572                 size = sizeof(struct target_sched_attr);
11573             }
11574 
11575             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11576             if (!target_scha) {
11577                 return -TARGET_EFAULT;
11578             }
11579             scha.size = size;
11580             scha.sched_policy = tswap32(target_scha->sched_policy);
11581             scha.sched_flags = tswap64(target_scha->sched_flags);
11582             scha.sched_nice = tswap32(target_scha->sched_nice);
11583             scha.sched_priority = tswap32(target_scha->sched_priority);
11584             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11585             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11586             scha.sched_period = tswap64(target_scha->sched_period);
11587             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11588                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11589                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11590             }
11591             unlock_user(target_scha, arg2, 0);
11592             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11593         }
11594     case TARGET_NR_sched_yield:
11595         return get_errno(sched_yield());
11596     case TARGET_NR_sched_get_priority_max:
11597         return get_errno(sched_get_priority_max(arg1));
11598     case TARGET_NR_sched_get_priority_min:
11599         return get_errno(sched_get_priority_min(arg1));
11600 #ifdef TARGET_NR_sched_rr_get_interval
11601     case TARGET_NR_sched_rr_get_interval:
11602         {
11603             struct timespec ts;
11604             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11605             if (!is_error(ret)) {
11606                 ret = host_to_target_timespec(arg2, &ts);
11607             }
11608         }
11609         return ret;
11610 #endif
11611 #ifdef TARGET_NR_sched_rr_get_interval_time64
11612     case TARGET_NR_sched_rr_get_interval_time64:
11613         {
11614             struct timespec ts;
11615             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11616             if (!is_error(ret)) {
11617                 ret = host_to_target_timespec64(arg2, &ts);
11618             }
11619         }
11620         return ret;
11621 #endif
11622 #if defined(TARGET_NR_nanosleep)
11623     case TARGET_NR_nanosleep:
11624         {
11625             struct timespec req, rem;
11626             target_to_host_timespec(&req, arg1);
11627             ret = get_errno(safe_nanosleep(&req, &rem));
11628             if (is_error(ret) && arg2) {
11629                 host_to_target_timespec(arg2, &rem);
11630             }
11631         }
11632         return ret;
11633 #endif
11634     case TARGET_NR_prctl:
11635         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11636         break;
11637 #ifdef TARGET_NR_arch_prctl
11638     case TARGET_NR_arch_prctl:
11639         return do_arch_prctl(cpu_env, arg1, arg2);
11640 #endif
11641 #ifdef TARGET_NR_pread64
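          /* pread64/pwrite64: ABIs that pass 64-bit values in aligned register
             pairs supply the offset one argument slot later. */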
11642     case TARGET_NR_pread64:
11643         if (regpairs_aligned(cpu_env, num)) {
11644             arg4 = arg5;
11645             arg5 = arg6;
11646         }
11647         if (arg2 == 0 && arg3 == 0) {
11648             /* Special-case NULL buffer and zero length, which should succeed */
11649             p = 0;
11650         } else {
11651             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11652             if (!p) {
11653                 return -TARGET_EFAULT;
11654             }
11655         }
11656         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11657         unlock_user(p, arg2, ret);
11658         return ret;
11659     case TARGET_NR_pwrite64:
11660         if (regpairs_aligned(cpu_env, num)) {
11661             arg4 = arg5;
11662             arg5 = arg6;
11663         }
11664         if (arg2 == 0 && arg3 == 0) {
11665             /* Special-case NULL buffer and zero length, which should succeed */
11666             p = 0;
11667         } else {
11668             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11669             if (!p) {
11670                 return -TARGET_EFAULT;
11671             }
11672         }
11673         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11674         unlock_user(p, arg2, 0);
11675         return ret;
11676 #endif
11677     case TARGET_NR_getcwd:
11678         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11679             return -TARGET_EFAULT;
11680         ret = get_errno(sys_getcwd1(p, arg2));
11681         unlock_user(p, arg1, ret);
11682         return ret;
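          /* capget and capset share one implementation: the header is always
             converted in both directions, and the data array holds one entry
             for _LINUX_CAPABILITY_VERSION and two for later versions. */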
11683     case TARGET_NR_capget:
11684     case TARGET_NR_capset:
11685     {
11686         struct target_user_cap_header *target_header;
11687         struct target_user_cap_data *target_data = NULL;
11688         struct __user_cap_header_struct header;
11689         struct __user_cap_data_struct data[2];
11690         struct __user_cap_data_struct *dataptr = NULL;
11691         int i, target_datalen;
11692         int data_items = 1;
11693 
11694         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11695             return -TARGET_EFAULT;
11696         }
11697         header.version = tswap32(target_header->version);
11698         header.pid = tswap32(target_header->pid);
11699 
11700         if (header.version != _LINUX_CAPABILITY_VERSION) {
11701             /* Version 2 and up takes pointer to two user_data structs */
11702             data_items = 2;
11703         }
11704 
11705         target_datalen = sizeof(*target_data) * data_items;
11706 
11707         if (arg2) {
11708             if (num == TARGET_NR_capget) {
11709                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11710             } else {
11711                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11712             }
11713             if (!target_data) {
11714                 unlock_user_struct(target_header, arg1, 0);
11715                 return -TARGET_EFAULT;
11716             }
11717 
11718             if (num == TARGET_NR_capset) {
11719                 for (i = 0; i < data_items; i++) {
11720                     data[i].effective = tswap32(target_data[i].effective);
11721                     data[i].permitted = tswap32(target_data[i].permitted);
11722                     data[i].inheritable = tswap32(target_data[i].inheritable);
11723                 }
11724             }
11725 
11726             dataptr = data;
11727         }
11728 
11729         if (num == TARGET_NR_capget) {
11730             ret = get_errno(capget(&header, dataptr));
11731         } else {
11732             ret = get_errno(capset(&header, dataptr));
11733         }
11734 
11735         /* The kernel always updates version for both capget and capset */
11736         target_header->version = tswap32(header.version);
11737         unlock_user_struct(target_header, arg1, 1);
11738 
11739         if (arg2) {
11740             if (num == TARGET_NR_capget) {
11741                 for (i = 0; i < data_items; i++) {
11742                     target_data[i].effective = tswap32(data[i].effective);
11743                     target_data[i].permitted = tswap32(data[i].permitted);
11744                     target_data[i].inheritable = tswap32(data[i].inheritable);
11745                 }
11746                 unlock_user(target_data, arg2, target_datalen);
11747             } else {
11748                 unlock_user(target_data, arg2, 0);
11749             }
11750         }
11751         return ret;
11752     }
11753     case TARGET_NR_sigaltstack:
11754         return do_sigaltstack(arg1, arg2, cpu_env);
11755 
11756 #ifdef CONFIG_SENDFILE
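          /* sendfile copies the offset in and, on success, back out around the
             host call; sendfile uses the target's long-sized offset while
             sendfile64 uses a full 64-bit one. */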
11757 #ifdef TARGET_NR_sendfile
11758     case TARGET_NR_sendfile:
11759     {
11760         off_t *offp = NULL;
11761         off_t off;
11762         if (arg3) {
11763             ret = get_user_sal(off, arg3);
11764             if (is_error(ret)) {
11765                 return ret;
11766             }
11767             offp = &off;
11768         }
11769         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11770         if (!is_error(ret) && arg3) {
11771             abi_long ret2 = put_user_sal(off, arg3);
11772             if (is_error(ret2)) {
11773                 ret = ret2;
11774             }
11775         }
11776         return ret;
11777     }
11778 #endif
11779 #ifdef TARGET_NR_sendfile64
11780     case TARGET_NR_sendfile64:
11781     {
11782         off_t *offp = NULL;
11783         off_t off;
11784         if (arg3) {
11785             ret = get_user_s64(off, arg3);
11786             if (is_error(ret)) {
11787                 return ret;
11788             }
11789             offp = &off;
11790         }
11791         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11792         if (!is_error(ret) && arg3) {
11793             abi_long ret2 = put_user_s64(off, arg3);
11794             if (is_error(ret2)) {
11795                 ret = ret2;
11796             }
11797         }
11798         return ret;
11799     }
11800 #endif
11801 #endif
11802 #ifdef TARGET_NR_vfork
11803     case TARGET_NR_vfork:
11804         return get_errno(do_fork(cpu_env,
11805                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11806                          0, 0, 0, 0));
11807 #endif
11808 #ifdef TARGET_NR_ugetrlimit
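          /* ugetrlimit: fetch the host rlimit and convert both limits into the
             target's representation. */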
11809     case TARGET_NR_ugetrlimit:
11810     {
11811         struct rlimit rlim;
11812         int resource = target_to_host_resource(arg1);
11813         ret = get_errno(getrlimit(resource, &rlim));
11814         if (!is_error(ret)) {
11815             struct target_rlimit *target_rlim;
11816             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11817                 return -TARGET_EFAULT;
11818             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11819             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11820             unlock_user_struct(target_rlim, arg2, 1);
11821         }
11822         return ret;
11823     }
11824 #endif
11825 #ifdef TARGET_NR_truncate64
11826     case TARGET_NR_truncate64:
11827         if (!(p = lock_user_string(arg1)))
11828             return -TARGET_EFAULT;
11829         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11830         unlock_user(p, arg1, 0);
11831         return ret;
11832 #endif
11833 #ifdef TARGET_NR_ftruncate64
11834     case TARGET_NR_ftruncate64:
11835         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11836 #endif
11837 #ifdef TARGET_NR_stat64
11838     case TARGET_NR_stat64:
11839         if (!(p = lock_user_string(arg1))) {
11840             return -TARGET_EFAULT;
11841         }
11842         ret = get_errno(stat(path(p), &st));
11843         unlock_user(p, arg1, 0);
11844         if (!is_error(ret))
11845             ret = host_to_target_stat64(cpu_env, arg2, &st);
11846         return ret;
11847 #endif
11848 #ifdef TARGET_NR_lstat64
11849     case TARGET_NR_lstat64:
11850         if (!(p = lock_user_string(arg1))) {
11851             return -TARGET_EFAULT;
11852         }
11853         ret = get_errno(lstat(path(p), &st));
11854         unlock_user(p, arg1, 0);
11855         if (!is_error(ret))
11856             ret = host_to_target_stat64(cpu_env, arg2, &st);
11857         return ret;
11858 #endif
11859 #ifdef TARGET_NR_fstat64
11860     case TARGET_NR_fstat64:
11861         ret = get_errno(fstat(arg1, &st));
11862         if (!is_error(ret))
11863             ret = host_to_target_stat64(cpu_env, arg2, &st);
11864         return ret;
11865 #endif
11866 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11867 #ifdef TARGET_NR_fstatat64
11868     case TARGET_NR_fstatat64:
11869 #endif
11870 #ifdef TARGET_NR_newfstatat
11871     case TARGET_NR_newfstatat:
11872 #endif
11873         if (!(p = lock_user_string(arg2))) {
11874             return -TARGET_EFAULT;
11875         }
11876         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11877         unlock_user(p, arg2, 0);
11878         if (!is_error(ret))
11879             ret = host_to_target_stat64(cpu_env, arg3, &st);
11880         return ret;
11881 #endif
11882 #if defined(TARGET_NR_statx)
11883     case TARGET_NR_statx:
11884         {
11885             struct target_statx *target_stx;
11886             int dirfd = arg1;
11887             int flags = arg3;
11888 
11889             p = lock_user_string(arg2);
11890             if (p == NULL) {
11891                 return -TARGET_EFAULT;
11892             }
11893 #if defined(__NR_statx)
11894             {
11895                 /*
11896                  * It is assumed that struct statx is architecture independent.
11897                  */
11898                 struct target_statx host_stx;
11899                 int mask = arg4;
11900 
11901                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11902                 if (!is_error(ret)) {
11903                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11904                         unlock_user(p, arg2, 0);
11905                         return -TARGET_EFAULT;
11906                     }
11907                 }
11908 
11909                 if (ret != -TARGET_ENOSYS) {
11910                     unlock_user(p, arg2, 0);
11911                     return ret;
11912                 }
11913             }
11914 #endif
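                  /* Host statx() unavailable or returned ENOSYS: emulate with
                     fstatat() and fill in the statx fields derivable from
                     struct stat. */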
11915             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11916             unlock_user(p, arg2, 0);
11917 
11918             if (!is_error(ret)) {
11919                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11920                     return -TARGET_EFAULT;
11921                 }
11922                 memset(target_stx, 0, sizeof(*target_stx));
11923                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11924                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11925                 __put_user(st.st_ino, &target_stx->stx_ino);
11926                 __put_user(st.st_mode, &target_stx->stx_mode);
11927                 __put_user(st.st_uid, &target_stx->stx_uid);
11928                 __put_user(st.st_gid, &target_stx->stx_gid);
11929                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11930                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11931                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11932                 __put_user(st.st_size, &target_stx->stx_size);
11933                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11934                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11935                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11936                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11937                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11938                 unlock_user_struct(target_stx, arg5, 1);
11939             }
11940         }
11941         return ret;
11942 #endif
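          /* The calls below are the legacy uid/gid syscalls; on targets whose
             ABI uses 16-bit IDs the low2high/high2low helpers convert to and
             from the host's 32-bit values. */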
11943 #ifdef TARGET_NR_lchown
11944     case TARGET_NR_lchown:
11945         if (!(p = lock_user_string(arg1)))
11946             return -TARGET_EFAULT;
11947         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11948         unlock_user(p, arg1, 0);
11949         return ret;
11950 #endif
11951 #ifdef TARGET_NR_getuid
11952     case TARGET_NR_getuid:
11953         return get_errno(high2lowuid(getuid()));
11954 #endif
11955 #ifdef TARGET_NR_getgid
11956     case TARGET_NR_getgid:
11957         return get_errno(high2lowgid(getgid()));
11958 #endif
11959 #ifdef TARGET_NR_geteuid
11960     case TARGET_NR_geteuid:
11961         return get_errno(high2lowuid(geteuid()));
11962 #endif
11963 #ifdef TARGET_NR_getegid
11964     case TARGET_NR_getegid:
11965         return get_errno(high2lowgid(getegid()));
11966 #endif
11967     case TARGET_NR_setreuid:
11968         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11969     case TARGET_NR_setregid:
11970         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11971     case TARGET_NR_getgroups:
11972         { /* the same code as for TARGET_NR_getgroups32 */
11973             int gidsetsize = arg1;
11974             target_id *target_grouplist;
11975             g_autofree gid_t *grouplist = NULL;
11976             int i;
11977 
11978             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11979                 return -TARGET_EINVAL;
11980             }
11981             if (gidsetsize > 0) {
11982                 grouplist = g_try_new(gid_t, gidsetsize);
11983                 if (!grouplist) {
11984                     return -TARGET_ENOMEM;
11985                 }
11986             }
11987             ret = get_errno(getgroups(gidsetsize, grouplist));
11988             if (!is_error(ret) && gidsetsize > 0) {
11989                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11990                                              gidsetsize * sizeof(target_id), 0);
11991                 if (!target_grouplist) {
11992                     return -TARGET_EFAULT;
11993                 }
11994                 for (i = 0; i < ret; i++) {
11995                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11996                 }
11997                 unlock_user(target_grouplist, arg2,
11998                             gidsetsize * sizeof(target_id));
11999             }
12000             return ret;
12001         }
12002     case TARGET_NR_setgroups:
12003         { /* the same code as for TARGET_NR_setgroups32 */
12004             int gidsetsize = arg1;
12005             target_id *target_grouplist;
12006             g_autofree gid_t *grouplist = NULL;
12007             int i;
12008 
12009             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12010                 return -TARGET_EINVAL;
12011             }
12012             if (gidsetsize > 0) {
12013                 grouplist = g_try_new(gid_t, gidsetsize);
12014                 if (!grouplist) {
12015                     return -TARGET_ENOMEM;
12016                 }
12017                 target_grouplist = lock_user(VERIFY_READ, arg2,
12018                                              gidsetsize * sizeof(target_id), 1);
12019                 if (!target_grouplist) {
12020                     return -TARGET_EFAULT;
12021                 }
12022                 for (i = 0; i < gidsetsize; i++) {
12023                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12024                 }
12025                 unlock_user(target_grouplist, arg2,
12026                             gidsetsize * sizeof(target_id));
12027             }
12028             return get_errno(sys_setgroups(gidsetsize, grouplist));
12029         }
12030     case TARGET_NR_fchown:
12031         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12032 #if defined(TARGET_NR_fchownat)
12033     case TARGET_NR_fchownat:
12034         if (!(p = lock_user_string(arg2)))
12035             return -TARGET_EFAULT;
12036         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12037                                  low2highgid(arg4), arg5));
12038         unlock_user(p, arg2, 0);
12039         return ret;
12040 #endif
12041 #ifdef TARGET_NR_setresuid
12042     case TARGET_NR_setresuid:
12043         return get_errno(sys_setresuid(low2highuid(arg1),
12044                                        low2highuid(arg2),
12045                                        low2highuid(arg3)));
12046 #endif
12047 #ifdef TARGET_NR_getresuid
12048     case TARGET_NR_getresuid:
12049         {
12050             uid_t ruid, euid, suid;
12051             ret = get_errno(getresuid(&ruid, &euid, &suid));
12052             if (!is_error(ret)) {
12053                 if (put_user_id(high2lowuid(ruid), arg1)
12054                     || put_user_id(high2lowuid(euid), arg2)
12055                     || put_user_id(high2lowuid(suid), arg3))
12056                     return -TARGET_EFAULT;
12057             }
12058         }
12059         return ret;
12060 #endif
12061 #ifdef TARGET_NR_setresgid
12062     case TARGET_NR_setresgid:
12063         return get_errno(sys_setresgid(low2highgid(arg1),
12064                                        low2highgid(arg2),
12065                                        low2highgid(arg3)));
12066 #endif
12067 #ifdef TARGET_NR_getresgid
12068     case TARGET_NR_getresgid:
12069         {
12070             gid_t rgid, egid, sgid;
12071             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12072             if (!is_error(ret)) {
12073                 if (put_user_id(high2lowgid(rgid), arg1)
12074                     || put_user_id(high2lowgid(egid), arg2)
12075                     || put_user_id(high2lowgid(sgid), arg3))
12076                     return -TARGET_EFAULT;
12077             }
12078         }
12079         return ret;
12080 #endif
12081 #ifdef TARGET_NR_chown
12082     case TARGET_NR_chown:
12083         if (!(p = lock_user_string(arg1)))
12084             return -TARGET_EFAULT;
12085         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12086         unlock_user(p, arg1, 0);
12087         return ret;
12088 #endif
12089     case TARGET_NR_setuid:
12090         return get_errno(sys_setuid(low2highuid(arg1)));
12091     case TARGET_NR_setgid:
12092         return get_errno(sys_setgid(low2highgid(arg1)));
12093     case TARGET_NR_setfsuid:
12094         return get_errno(setfsuid(arg1));
12095     case TARGET_NR_setfsgid:
12096         return get_errno(setfsgid(arg1));
12097 
12098 #ifdef TARGET_NR_lchown32
12099     case TARGET_NR_lchown32:
12100         if (!(p = lock_user_string(arg1)))
12101             return -TARGET_EFAULT;
12102         ret = get_errno(lchown(p, arg2, arg3));
12103         unlock_user(p, arg1, 0);
12104         return ret;
12105 #endif
12106 #ifdef TARGET_NR_getuid32
12107     case TARGET_NR_getuid32:
12108         return get_errno(getuid());
12109 #endif
12110 
12111 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12112     /* Alpha specific */
12113     case TARGET_NR_getxuid:
12114         {
12115             uid_t euid;
12116             euid = geteuid();
12117             cpu_env->ir[IR_A4] = euid;
12118         }
12119         return get_errno(getuid());
12120 #endif
12121 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12122     /* Alpha specific */
12123     case TARGET_NR_getxgid:
12124         {
12125             gid_t egid;
12126             egid = getegid();
12127             cpu_env->ir[IR_A4] = egid;
12128         }
12129         return get_errno(getgid());
12130 #endif
12131 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12132     /* Alpha specific */
12133     case TARGET_NR_osf_getsysinfo:
12134         ret = -TARGET_EOPNOTSUPP;
12135         switch (arg1) {
12136           case TARGET_GSI_IEEE_FP_CONTROL:
12137             {
12138                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12139                 uint64_t swcr = cpu_env->swcr;
12140 
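                      /*
                       * Trap-enable and mapping bits are tracked in cpu_env->swcr,
                       * while the accrued exception status lives in the FPCR, so
                       * shift the FPCR status bits down and fold them into the
                       * SWCR value reported to the guest.
                       */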
12141                 swcr &= ~SWCR_STATUS_MASK;
12142                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12143 
12144                 if (put_user_u64(swcr, arg2))
12145                     return -TARGET_EFAULT;
12146                 ret = 0;
12147             }
12148             break;
12149 
12150           /* case GSI_IEEE_STATE_AT_SIGNAL:
12151              -- Not implemented in linux kernel.
12152              case GSI_UACPROC:
12153              -- Retrieves current unaligned access state; not much used.
12154              case GSI_PROC_TYPE:
12155              -- Retrieves implver information; surely not used.
12156              case GSI_GET_HWRPB:
12157              -- Grabs a copy of the HWRPB; surely not used.
12158           */
12159         }
12160         return ret;
12161 #endif
12162 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12163     /* Alpha specific */
12164     case TARGET_NR_osf_setsysinfo:
12165         ret = -TARGET_EOPNOTSUPP;
12166         switch (arg1) {
12167           case TARGET_SSI_IEEE_FP_CONTROL:
12168             {
12169                 uint64_t swcr, fpcr;
12170 
12171                 if (get_user_u64(swcr, arg2)) {
12172                     return -TARGET_EFAULT;
12173                 }
12174 
12175                 /*
12176                  * The kernel calls swcr_update_status to update the
12177                  * status bits from the fpcr at every point that it
12178                  * could be queried.  Therefore, we store the status
12179                  * bits only in FPCR.
12180                  */
12181                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12182 
12183                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12184                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12185                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12186                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12187                 ret = 0;
12188             }
12189             break;
12190 
12191           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12192             {
12193                 uint64_t exc, fpcr, fex;
12194 
12195                 if (get_user_u64(exc, arg2)) {
12196                     return -TARGET_EFAULT;
12197                 }
12198                 exc &= SWCR_STATUS_MASK;
12199                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12200 
12201                 /* Old exceptions are not signaled.  */
12202                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12203                 fex = exc & ~fex;
12204                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12205                 fex &= (cpu_env)->swcr;
12206 
12207                 /* Update the hardware fpcr.  */
12208                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12209                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12210 
12211                 if (fex) {
12212                     int si_code = TARGET_FPE_FLTUNK;
12213                     target_siginfo_t info;
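                          /*
                           * The checks below run in order, so if several exceptions
                           * are being raised at once the last matching one (invalid
                           * operation) determines the si_code delivered to the guest.
                           */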
12214 
12215                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12216                         si_code = TARGET_FPE_FLTUND;
12217                     }
12218                     if (fex & SWCR_TRAP_ENABLE_INE) {
12219                         si_code = TARGET_FPE_FLTRES;
12220                     }
12221                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12222                         si_code = TARGET_FPE_FLTUND;
12223                     }
12224                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12225                         si_code = TARGET_FPE_FLTOVF;
12226                     }
12227                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12228                         si_code = TARGET_FPE_FLTDIV;
12229                     }
12230                     if (fex & SWCR_TRAP_ENABLE_INV) {
12231                         si_code = TARGET_FPE_FLTINV;
12232                     }
12233 
12234                     info.si_signo = SIGFPE;
12235                     info.si_errno = 0;
12236                     info.si_code = si_code;
12237                     info._sifields._sigfault._addr = (cpu_env)->pc;
12238                     queue_signal(cpu_env, info.si_signo,
12239                                  QEMU_SI_FAULT, &info);
12240                 }
12241                 ret = 0;
12242             }
12243             break;
12244 
12245           /* case SSI_NVPAIRS:
12246              -- Used with SSIN_UACPROC to enable unaligned accesses.
12247              case SSI_IEEE_STATE_AT_SIGNAL:
12248              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12249              -- Not implemented in linux kernel
12250           */
12251         }
12252         return ret;
12253 #endif
12254 #ifdef TARGET_NR_osf_sigprocmask
12255     /* Alpha specific.  */
12256     case TARGET_NR_osf_sigprocmask:
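              /*
               * Unlike sigprocmask(2), this OSF/Alpha variant returns the
               * previous signal mask as the syscall result rather than
               * writing it through a user pointer.
               */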
12257         {
12258             abi_ulong mask;
12259             int how;
12260             sigset_t set, oldset;
12261 
12262             switch (arg1) {
12263             case TARGET_SIG_BLOCK:
12264                 how = SIG_BLOCK;
12265                 break;
12266             case TARGET_SIG_UNBLOCK:
12267                 how = SIG_UNBLOCK;
12268                 break;
12269             case TARGET_SIG_SETMASK:
12270                 how = SIG_SETMASK;
12271                 break;
12272             default:
12273                 return -TARGET_EINVAL;
12274             }
12275             mask = arg2;
12276             target_to_host_old_sigset(&set, &mask);
12277             ret = do_sigprocmask(how, &set, &oldset);
12278             if (!ret) {
12279                 host_to_target_old_sigset(&mask, &oldset);
12280                 ret = mask;
12281             }
12282         }
12283         return ret;
12284 #endif
12285 
12286 #ifdef TARGET_NR_getgid32
12287     case TARGET_NR_getgid32:
12288         return get_errno(getgid());
12289 #endif
12290 #ifdef TARGET_NR_geteuid32
12291     case TARGET_NR_geteuid32:
12292         return get_errno(geteuid());
12293 #endif
12294 #ifdef TARGET_NR_getegid32
12295     case TARGET_NR_getegid32:
12296         return get_errno(getegid());
12297 #endif
12298 #ifdef TARGET_NR_setreuid32
12299     case TARGET_NR_setreuid32:
12300         return get_errno(sys_setreuid(arg1, arg2));
12301 #endif
12302 #ifdef TARGET_NR_setregid32
12303     case TARGET_NR_setregid32:
12304         return get_errno(sys_setregid(arg1, arg2));
12305 #endif
12306 #ifdef TARGET_NR_getgroups32
12307     case TARGET_NR_getgroups32:
12308         { /* the same code as for TARGET_NR_getgroups */
12309             int gidsetsize = arg1;
12310             uint32_t *target_grouplist;
12311             g_autofree gid_t *grouplist = NULL;
12312             int i;
12313 
12314             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12315                 return -TARGET_EINVAL;
12316             }
12317             if (gidsetsize > 0) {
12318                 grouplist = g_try_new(gid_t, gidsetsize);
12319                 if (!grouplist) {
12320                     return -TARGET_ENOMEM;
12321                 }
12322             }
12323             ret = get_errno(getgroups(gidsetsize, grouplist));
12324             if (!is_error(ret) && gidsetsize > 0) {
12325                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12326                                              gidsetsize * 4, 0);
12327                 if (!target_grouplist) {
12328                     return -TARGET_EFAULT;
12329                 }
12330                 for (i = 0; i < ret; i++) {
12331                     target_grouplist[i] = tswap32(grouplist[i]);
12332                 }
12333                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12334             }
12335             return ret;
12336         }
12337 #endif
12338 #ifdef TARGET_NR_setgroups32
12339     case TARGET_NR_setgroups32:
12340         { /* the same code as for TARGET_NR_setgroups */
12341             int gidsetsize = arg1;
12342             uint32_t *target_grouplist;
12343             g_autofree gid_t *grouplist = NULL;
12344             int i;
12345 
12346             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12347                 return -TARGET_EINVAL;
12348             }
12349             if (gidsetsize > 0) {
12350                 grouplist = g_try_new(gid_t, gidsetsize);
12351                 if (!grouplist) {
12352                     return -TARGET_ENOMEM;
12353                 }
12354                 target_grouplist = lock_user(VERIFY_READ, arg2,
12355                                              gidsetsize * 4, 1);
12356                 if (!target_grouplist) {
12357                     return -TARGET_EFAULT;
12358                 }
12359                 for (i = 0; i < gidsetsize; i++) {
12360                     grouplist[i] = tswap32(target_grouplist[i]);
12361                 }
12362                 unlock_user(target_grouplist, arg2, 0);
12363             }
12364             return get_errno(sys_setgroups(gidsetsize, grouplist));
12365         }
12366 #endif
12367 #ifdef TARGET_NR_fchown32
12368     case TARGET_NR_fchown32:
12369         return get_errno(fchown(arg1, arg2, arg3));
12370 #endif
12371 #ifdef TARGET_NR_setresuid32
12372     case TARGET_NR_setresuid32:
12373         return get_errno(sys_setresuid(arg1, arg2, arg3));
12374 #endif
12375 #ifdef TARGET_NR_getresuid32
12376     case TARGET_NR_getresuid32:
12377         {
12378             uid_t ruid, euid, suid;
12379             ret = get_errno(getresuid(&ruid, &euid, &suid));
12380             if (!is_error(ret)) {
12381                 if (put_user_u32(ruid, arg1)
12382                     || put_user_u32(euid, arg2)
12383                     || put_user_u32(suid, arg3))
12384                     return -TARGET_EFAULT;
12385             }
12386         }
12387         return ret;
12388 #endif
12389 #ifdef TARGET_NR_setresgid32
12390     case TARGET_NR_setresgid32:
12391         return get_errno(sys_setresgid(arg1, arg2, arg3));
12392 #endif
12393 #ifdef TARGET_NR_getresgid32
12394     case TARGET_NR_getresgid32:
12395         {
12396             gid_t rgid, egid, sgid;
12397             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12398             if (!is_error(ret)) {
12399                 if (put_user_u32(rgid, arg1)
12400                     || put_user_u32(egid, arg2)
12401                     || put_user_u32(sgid, arg3))
12402                     return -TARGET_EFAULT;
12403             }
12404         }
12405         return ret;
12406 #endif
12407 #ifdef TARGET_NR_chown32
12408     case TARGET_NR_chown32:
12409         if (!(p = lock_user_string(arg1)))
12410             return -TARGET_EFAULT;
12411         ret = get_errno(chown(p, arg2, arg3));
12412         unlock_user(p, arg1, 0);
12413         return ret;
12414 #endif
12415 #ifdef TARGET_NR_setuid32
12416     case TARGET_NR_setuid32:
12417         return get_errno(sys_setuid(arg1));
12418 #endif
12419 #ifdef TARGET_NR_setgid32
12420     case TARGET_NR_setgid32:
12421         return get_errno(sys_setgid(arg1));
12422 #endif
12423 #ifdef TARGET_NR_setfsuid32
12424     case TARGET_NR_setfsuid32:
12425         return get_errno(setfsuid(arg1));
12426 #endif
12427 #ifdef TARGET_NR_setfsgid32
12428     case TARGET_NR_setfsgid32:
12429         return get_errno(setfsgid(arg1));
12430 #endif
12431 #ifdef TARGET_NR_mincore
12432     case TARGET_NR_mincore:
12433         {
12434             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12435             if (!a) {
12436                 return -TARGET_ENOMEM;
12437             }
12438             p = lock_user_string(arg3);
12439             if (!p) {
12440                 ret = -TARGET_EFAULT;
12441             } else {
12442                 ret = get_errno(mincore(a, arg2, p));
12443                 unlock_user(p, arg3, ret);
12444             }
12445             unlock_user(a, arg1, 0);
12446         }
12447         return ret;
12448 #endif
12449 #ifdef TARGET_NR_arm_fadvise64_64
12450     case TARGET_NR_arm_fadvise64_64:
12451         /* arm_fadvise64_64 looks like fadvise64_64 but
12452          * with different argument order: fd, advice, offset, len
12453          * rather than the usual fd, offset, len, advice.
12454          * Note that offset and len are both 64-bit so appear as
12455          * pairs of 32-bit registers.
12456          */
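              /*
               * For example, a 4GiB offset arrives as the 32-bit register pair
               * (arg3, arg4); target_offset64() reassembles the two halves into
               * a single 64-bit value in the guest's register-pair order.
               */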
12457         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12458                             target_offset64(arg5, arg6), arg2);
12459         return -host_to_target_errno(ret);
12460 #endif
12461 
12462 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12463 
12464 #ifdef TARGET_NR_fadvise64_64
12465     case TARGET_NR_fadvise64_64:
12466 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12467         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12468         ret = arg2;
12469         arg2 = arg3;
12470         arg3 = arg4;
12471         arg4 = arg5;
12472         arg5 = arg6;
12473         arg6 = ret;
12474 #else
12475         /* 6 args: fd, offset (high, low), len (high, low), advice */
12476         if (regpairs_aligned(cpu_env, num)) {
12477             /* offset is in (3,4), len in (5,6) and advice in 7 */
12478             arg2 = arg3;
12479             arg3 = arg4;
12480             arg4 = arg5;
12481             arg5 = arg6;
12482             arg6 = arg7;
12483         }
12484 #endif
12485         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12486                             target_offset64(arg4, arg5), arg6);
12487         return -host_to_target_errno(ret);
12488 #endif
12489 
12490 #ifdef TARGET_NR_fadvise64
12491     case TARGET_NR_fadvise64:
12492         /* 5 args: fd, offset (high, low), len, advice */
12493         if (regpairs_aligned(cpu_env, num)) {
12494             /* offset is in (3,4), len in 5 and advice in 6 */
12495             arg2 = arg3;
12496             arg3 = arg4;
12497             arg4 = arg5;
12498             arg5 = arg6;
12499         }
12500         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12501         return -host_to_target_errno(ret);
12502 #endif
12503 
12504 #else /* not a 32-bit ABI */
12505 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12506 #ifdef TARGET_NR_fadvise64_64
12507     case TARGET_NR_fadvise64_64:
12508 #endif
12509 #ifdef TARGET_NR_fadvise64
12510     case TARGET_NR_fadvise64:
12511 #endif
12512 #ifdef TARGET_S390X
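              /*
               * s390x defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 instead of the
               * generic 4/5, so remap the guest's values and turn 4/5 into
               * advice values the host will reject as invalid.
               */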
12513         switch (arg4) {
12514         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12515         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12516         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12517         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12518         default: break;
12519         }
12520 #endif
12521         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12522 #endif
12523 #endif /* end of fadvise handling for 32-bit and 64-bit ABIs */
12524 
12525 #ifdef TARGET_NR_madvise
12526     case TARGET_NR_madvise:
12527         return target_madvise(arg1, arg2, arg3);
12528 #endif
12529 #ifdef TARGET_NR_fcntl64
12530     case TARGET_NR_fcntl64:
12531     {
12532         int cmd;
12533         struct flock fl;
12534         from_flock64_fn *copyfrom = copy_from_user_flock64;
12535         to_flock64_fn *copyto = copy_to_user_flock64;
12536 
12537 #ifdef TARGET_ARM
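              /*
               * The old ARM OABI lays out struct flock64 without the 8-byte
               * alignment padding that EABI inserts before its 64-bit fields,
               * so old-ABI guests need the dedicated copy helpers.
               */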
12538         if (!cpu_env->eabi) {
12539             copyfrom = copy_from_user_oabi_flock64;
12540             copyto = copy_to_user_oabi_flock64;
12541         }
12542 #endif
12543 
12544         cmd = target_to_host_fcntl_cmd(arg2);
12545         if (cmd == -TARGET_EINVAL) {
12546             return cmd;
12547         }
12548 
12549         switch (arg2) {
12550         case TARGET_F_GETLK64:
12551             ret = copyfrom(&fl, arg3);
12552             if (ret) {
12553                 break;
12554             }
12555             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12556             if (ret == 0) {
12557                 ret = copyto(arg3, &fl);
12558             }
12559             break;
12560 
12561         case TARGET_F_SETLK64:
12562         case TARGET_F_SETLKW64:
12563             ret = copyfrom(&fl, arg3);
12564             if (ret) {
12565                 break;
12566             }
12567             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12568             break;
12569         default:
12570             ret = do_fcntl(arg1, arg2, arg3);
12571             break;
12572         }
12573         return ret;
12574     }
12575 #endif
12576 #ifdef TARGET_NR_cacheflush
12577     case TARGET_NR_cacheflush:
12578         /* self-modifying code is handled automatically, so nothing needed */
12579         return 0;
12580 #endif
12581 #ifdef TARGET_NR_getpagesize
12582     case TARGET_NR_getpagesize:
12583         return TARGET_PAGE_SIZE;
12584 #endif
12585     case TARGET_NR_gettid:
12586         return get_errno(sys_gettid());
12587 #ifdef TARGET_NR_readahead
12588     case TARGET_NR_readahead:
12589 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12590         if (regpairs_aligned(cpu_env, num)) {
12591             arg2 = arg3;
12592             arg3 = arg4;
12593             arg4 = arg5;
12594         }
12595         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12596 #else
12597         ret = get_errno(readahead(arg1, arg2, arg3));
12598 #endif
12599         return ret;
12600 #endif
12601 #ifdef CONFIG_ATTR
12602 #ifdef TARGET_NR_setxattr
12603     case TARGET_NR_listxattr:
12604     case TARGET_NR_llistxattr:
12605     {
12606         void *b = 0;
12607         if (arg2) {
12608             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12609             if (!b) {
12610                 return -TARGET_EFAULT;
12611             }
12612         }
12613         p = lock_user_string(arg1);
12614         if (p) {
12615             if (num == TARGET_NR_listxattr) {
12616                 ret = get_errno(listxattr(p, b, arg3));
12617             } else {
12618                 ret = get_errno(llistxattr(p, b, arg3));
12619             }
12620         } else {
12621             ret = -TARGET_EFAULT;
12622         }
12623         unlock_user(p, arg1, 0);
12624         unlock_user(b, arg2, arg3);
12625         return ret;
12626     }
12627     case TARGET_NR_flistxattr:
12628     {
12629         void *b = 0;
12630         if (arg2) {
12631             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12632             if (!b) {
12633                 return -TARGET_EFAULT;
12634             }
12635         }
12636         ret = get_errno(flistxattr(arg1, b, arg3));
12637         unlock_user(b, arg2, arg3);
12638         return ret;
12639     }
12640     case TARGET_NR_setxattr:
12641     case TARGET_NR_lsetxattr:
12642         {
12643             void *n, *v = 0;
12644             if (arg3) {
12645                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12646                 if (!v) {
12647                     return -TARGET_EFAULT;
12648                 }
12649             }
12650             p = lock_user_string(arg1);
12651             n = lock_user_string(arg2);
12652             if (p && n) {
12653                 if (num == TARGET_NR_setxattr) {
12654                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12655                 } else {
12656                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12657                 }
12658             } else {
12659                 ret = -TARGET_EFAULT;
12660             }
12661             unlock_user(p, arg1, 0);
12662             unlock_user(n, arg2, 0);
12663             unlock_user(v, arg3, 0);
12664         }
12665         return ret;
12666     case TARGET_NR_fsetxattr:
12667         {
12668             void *n, *v = 0;
12669             if (arg3) {
12670                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12671                 if (!v) {
12672                     return -TARGET_EFAULT;
12673                 }
12674             }
12675             n = lock_user_string(arg2);
12676             if (n) {
12677                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12678             } else {
12679                 ret = -TARGET_EFAULT;
12680             }
12681             unlock_user(n, arg2, 0);
12682             unlock_user(v, arg3, 0);
12683         }
12684         return ret;
12685     case TARGET_NR_getxattr:
12686     case TARGET_NR_lgetxattr:
12687         {
12688             void *n, *v = 0;
12689             if (arg3) {
12690                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12691                 if (!v) {
12692                     return -TARGET_EFAULT;
12693                 }
12694             }
12695             p = lock_user_string(arg1);
12696             n = lock_user_string(arg2);
12697             if (p && n) {
12698                 if (num == TARGET_NR_getxattr) {
12699                     ret = get_errno(getxattr(p, n, v, arg4));
12700                 } else {
12701                     ret = get_errno(lgetxattr(p, n, v, arg4));
12702                 }
12703             } else {
12704                 ret = -TARGET_EFAULT;
12705             }
12706             unlock_user(p, arg1, 0);
12707             unlock_user(n, arg2, 0);
12708             unlock_user(v, arg3, arg4);
12709         }
12710         return ret;
12711     case TARGET_NR_fgetxattr:
12712         {
12713             void *n, *v = 0;
12714             if (arg3) {
12715                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12716                 if (!v) {
12717                     return -TARGET_EFAULT;
12718                 }
12719             }
12720             n = lock_user_string(arg2);
12721             if (n) {
12722                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12723             } else {
12724                 ret = -TARGET_EFAULT;
12725             }
12726             unlock_user(n, arg2, 0);
12727             unlock_user(v, arg3, arg4);
12728         }
12729         return ret;
12730     case TARGET_NR_removexattr:
12731     case TARGET_NR_lremovexattr:
12732         {
12733             void *n;
12734             p = lock_user_string(arg1);
12735             n = lock_user_string(arg2);
12736             if (p && n) {
12737                 if (num == TARGET_NR_removexattr) {
12738                     ret = get_errno(removexattr(p, n));
12739                 } else {
12740                     ret = get_errno(lremovexattr(p, n));
12741                 }
12742             } else {
12743                 ret = -TARGET_EFAULT;
12744             }
12745             unlock_user(p, arg1, 0);
12746             unlock_user(n, arg2, 0);
12747         }
12748         return ret;
12749     case TARGET_NR_fremovexattr:
12750         {
12751             void *n;
12752             n = lock_user_string(arg2);
12753             if (n) {
12754                 ret = get_errno(fremovexattr(arg1, n));
12755             } else {
12756                 ret = -TARGET_EFAULT;
12757             }
12758             unlock_user(n, arg2, 0);
12759         }
12760         return ret;
12761 #endif
12762 #endif /* CONFIG_ATTR */
12763 #ifdef TARGET_NR_set_thread_area
12764     case TARGET_NR_set_thread_area:
12765 #if defined(TARGET_MIPS)
12766       cpu_env->active_tc.CP0_UserLocal = arg1;
12767       return 0;
12768 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12769       return do_set_thread_area(cpu_env, arg1);
12770 #elif defined(TARGET_M68K)
12771       {
12772           TaskState *ts = get_task_state(cpu);
12773           ts->tp_value = arg1;
12774           return 0;
12775       }
12776 #else
12777       return -TARGET_ENOSYS;
12778 #endif
12779 #endif
12780 #ifdef TARGET_NR_get_thread_area
12781     case TARGET_NR_get_thread_area:
12782 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12783         return do_get_thread_area(cpu_env, arg1);
12784 #elif defined(TARGET_M68K)
12785         {
12786             TaskState *ts = get_task_state(cpu);
12787             return ts->tp_value;
12788         }
12789 #else
12790         return -TARGET_ENOSYS;
12791 #endif
12792 #endif
12793 #ifdef TARGET_NR_getdomainname
12794     case TARGET_NR_getdomainname:
12795         return -TARGET_ENOSYS;
12796 #endif
12797 
12798 #ifdef TARGET_NR_clock_settime
12799     case TARGET_NR_clock_settime:
12800     {
12801         struct timespec ts;
12802 
12803         ret = target_to_host_timespec(&ts, arg2);
12804         if (!is_error(ret)) {
12805             ret = get_errno(clock_settime(arg1, &ts));
12806         }
12807         return ret;
12808     }
12809 #endif
12810 #ifdef TARGET_NR_clock_settime64
12811     case TARGET_NR_clock_settime64:
12812     {
12813         struct timespec ts;
12814 
12815         ret = target_to_host_timespec64(&ts, arg2);
12816         if (!is_error(ret)) {
12817             ret = get_errno(clock_settime(arg1, &ts));
12818         }
12819         return ret;
12820     }
12821 #endif
12822 #ifdef TARGET_NR_clock_gettime
12823     case TARGET_NR_clock_gettime:
12824     {
12825         struct timespec ts;
12826         ret = get_errno(clock_gettime(arg1, &ts));
12827         if (!is_error(ret)) {
12828             ret = host_to_target_timespec(arg2, &ts);
12829         }
12830         return ret;
12831     }
12832 #endif
12833 #ifdef TARGET_NR_clock_gettime64
12834     case TARGET_NR_clock_gettime64:
12835     {
12836         struct timespec ts;
12837         ret = get_errno(clock_gettime(arg1, &ts));
12838         if (!is_error(ret)) {
12839             ret = host_to_target_timespec64(arg2, &ts);
12840         }
12841         return ret;
12842     }
12843 #endif
12844 #ifdef TARGET_NR_clock_getres
12845     case TARGET_NR_clock_getres:
12846     {
12847         struct timespec ts;
12848         ret = get_errno(clock_getres(arg1, &ts));
12849         if (!is_error(ret)) {
12850             host_to_target_timespec(arg2, &ts);
12851         }
12852         return ret;
12853     }
12854 #endif
12855 #ifdef TARGET_NR_clock_getres_time64
12856     case TARGET_NR_clock_getres_time64:
12857     {
12858         struct timespec ts;
12859         ret = get_errno(clock_getres(arg1, &ts));
12860         if (!is_error(ret)) {
12861             host_to_target_timespec64(arg2, &ts);
12862         }
12863         return ret;
12864     }
12865 #endif
12866 #ifdef TARGET_NR_clock_nanosleep
12867     case TARGET_NR_clock_nanosleep:
12868     {
12869         struct timespec ts;
12870         if (target_to_host_timespec(&ts, arg3)) {
12871             return -TARGET_EFAULT;
12872         }
12873         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12874                                              &ts, arg4 ? &ts : NULL));
12875         /*
12876          * If the call is interrupted by a signal handler it fails with
12877          * -TARGET_EINTR; in that case, when arg4 is non-NULL and arg2 is
12878          * not TIMER_ABSTIME, the remaining unslept time is stored in arg4.
12879          */
12880         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12881             host_to_target_timespec(arg4, &ts)) {
12882               return -TARGET_EFAULT;
12883         }
12884 
12885         return ret;
12886     }
12887 #endif
12888 #ifdef TARGET_NR_clock_nanosleep_time64
12889     case TARGET_NR_clock_nanosleep_time64:
12890     {
12891         struct timespec ts;
12892 
12893         if (target_to_host_timespec64(&ts, arg3)) {
12894             return -TARGET_EFAULT;
12895         }
12896 
12897         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12898                                              &ts, arg4 ? &ts : NULL));
12899 
12900         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12901             host_to_target_timespec64(arg4, &ts)) {
12902             return -TARGET_EFAULT;
12903         }
12904         return ret;
12905     }
12906 #endif
12907 
12908 #if defined(TARGET_NR_set_tid_address)
12909     case TARGET_NR_set_tid_address:
12910     {
12911         TaskState *ts = get_task_state(cpu);
12912         ts->child_tidptr = arg1;
12913         /* do not call the host set_tid_address() syscall; just return the tid */
12914         return get_errno(sys_gettid());
12915     }
12916 #endif
12917 
12918     case TARGET_NR_tkill:
12919         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12920 
12921     case TARGET_NR_tgkill:
12922         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12923                          target_to_host_signal(arg3)));
12924 
12925 #ifdef TARGET_NR_set_robust_list
12926     case TARGET_NR_set_robust_list:
12927     case TARGET_NR_get_robust_list:
12928         /* The ABI for supporting robust futexes has userspace pass
12929          * the kernel a pointer to a linked list which is updated by
12930          * userspace after the syscall; the list is walked by the kernel
12931          * when the thread exits. Since the linked list in QEMU guest
12932          * memory isn't a valid linked list for the host and we have
12933          * no way to reliably intercept the thread-death event, we can't
12934          * support these. Silently return ENOSYS so that guest userspace
12935          * falls back to a non-robust futex implementation (which should
12936          * be OK except in the corner case of the guest crashing while
12937          * holding a mutex that is shared with another process via
12938          * shared memory).
12939          */
12940         return -TARGET_ENOSYS;
12941 #endif
12942 
12943 #if defined(TARGET_NR_utimensat)
12944     case TARGET_NR_utimensat:
12945         {
12946             struct timespec *tsp, ts[2];
12947             if (!arg3) {
12948                 tsp = NULL;
12949             } else {
12950                 if (target_to_host_timespec(ts, arg3)) {
12951                     return -TARGET_EFAULT;
12952                 }
12953                 if (target_to_host_timespec(ts + 1, arg3 +
12954                                             sizeof(struct target_timespec))) {
12955                     return -TARGET_EFAULT;
12956                 }
12957                 tsp = ts;
12958             }
12959             if (!arg2) {
12960                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12961             } else {
12962                 if (!(p = lock_user_string(arg2))) {
12963                     return -TARGET_EFAULT;
12964                 }
12965                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12966                 unlock_user(p, arg2, 0);
12967             }
12968         }
12969         return ret;
12970 #endif
12971 #ifdef TARGET_NR_utimensat_time64
12972     case TARGET_NR_utimensat_time64:
12973         {
12974             struct timespec *tsp, ts[2];
12975             if (!arg3) {
12976                 tsp = NULL;
12977             } else {
12978                 if (target_to_host_timespec64(ts, arg3)) {
12979                     return -TARGET_EFAULT;
12980                 }
12981                 if (target_to_host_timespec64(ts + 1, arg3 +
12982                                      sizeof(struct target__kernel_timespec))) {
12983                     return -TARGET_EFAULT;
12984                 }
12985                 tsp = ts;
12986             }
12987             if (!arg2) {
12988                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12989             } else {
12990                 p = lock_user_string(arg2);
12991                 if (!p) {
12992                     return -TARGET_EFAULT;
12993                 }
12994                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12995                 unlock_user(p, arg2, 0);
12996             }
12997         }
12998         return ret;
12999 #endif
13000 #ifdef TARGET_NR_futex
13001     case TARGET_NR_futex:
13002         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13003 #endif
13004 #ifdef TARGET_NR_futex_time64
13005     case TARGET_NR_futex_time64:
13006         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13007 #endif
13008 #ifdef CONFIG_INOTIFY
13009 #if defined(TARGET_NR_inotify_init)
13010     case TARGET_NR_inotify_init:
13011         ret = get_errno(inotify_init());
13012         if (ret >= 0) {
13013             fd_trans_register(ret, &target_inotify_trans);
13014         }
13015         return ret;
13016 #endif
13017 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13018     case TARGET_NR_inotify_init1:
13019         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13020                                           fcntl_flags_tbl)));
13021         if (ret >= 0) {
13022             fd_trans_register(ret, &target_inotify_trans);
13023         }
13024         return ret;
13025 #endif
13026 #if defined(TARGET_NR_inotify_add_watch)
13027     case TARGET_NR_inotify_add_watch:
13028         p = lock_user_string(arg2);
13029         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13030         unlock_user(p, arg2, 0);
13031         return ret;
13032 #endif
13033 #if defined(TARGET_NR_inotify_rm_watch)
13034     case TARGET_NR_inotify_rm_watch:
13035         return get_errno(inotify_rm_watch(arg1, arg2));
13036 #endif
13037 #endif
13038 
13039 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13040     case TARGET_NR_mq_open:
13041         {
13042             struct mq_attr posix_mq_attr;
13043             struct mq_attr *pposix_mq_attr;
13044             int host_flags;
13045 
13046             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13047             pposix_mq_attr = NULL;
13048             if (arg4) {
13049                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13050                     return -TARGET_EFAULT;
13051                 }
13052                 pposix_mq_attr = &posix_mq_attr;
13053             }
13054             p = lock_user_string(arg1 - 1);
13055             if (!p) {
13056                 return -TARGET_EFAULT;
13057             }
13058             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13059             unlock_user(p, arg1, 0);
13060         }
13061         return ret;
13062 
13063     case TARGET_NR_mq_unlink:
13064         p = lock_user_string(arg1 - 1);
13065         if (!p) {
13066             return -TARGET_EFAULT;
13067         }
13068         ret = get_errno(mq_unlink(p));
13069         unlock_user(p, arg1, 0);
13070         return ret;
13071 
13072 #ifdef TARGET_NR_mq_timedsend
13073     case TARGET_NR_mq_timedsend:
13074         {
13075             struct timespec ts;
13076 
13077             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13078             if (arg5 != 0) {
13079                 if (target_to_host_timespec(&ts, arg5)) {
13080                     return -TARGET_EFAULT;
13081                 }
13082                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13083                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13084                     return -TARGET_EFAULT;
13085                 }
13086             } else {
13087                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13088             }
13089             unlock_user(p, arg2, arg3);
13090         }
13091         return ret;
13092 #endif
13093 #ifdef TARGET_NR_mq_timedsend_time64
13094     case TARGET_NR_mq_timedsend_time64:
13095         {
13096             struct timespec ts;
13097 
13098             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13099             if (arg5 != 0) {
13100                 if (target_to_host_timespec64(&ts, arg5)) {
13101                     return -TARGET_EFAULT;
13102                 }
13103                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13104                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13105                     return -TARGET_EFAULT;
13106                 }
13107             } else {
13108                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13109             }
13110             unlock_user(p, arg2, arg3);
13111         }
13112         return ret;
13113 #endif
13114 
13115 #ifdef TARGET_NR_mq_timedreceive
13116     case TARGET_NR_mq_timedreceive:
13117         {
13118             struct timespec ts;
13119             unsigned int prio;
13120 
13121             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13122             if (arg5 != 0) {
13123                 if (target_to_host_timespec(&ts, arg5)) {
13124                     return -TARGET_EFAULT;
13125                 }
13126                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13127                                                      &prio, &ts));
13128                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13129                     return -TARGET_EFAULT;
13130                 }
13131             } else {
13132                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13133                                                      &prio, NULL));
13134             }
13135             unlock_user(p, arg2, arg3);
13136             if (arg4 != 0)
13137                 put_user_u32(prio, arg4);
13138         }
13139         return ret;
13140 #endif
13141 #ifdef TARGET_NR_mq_timedreceive_time64
13142     case TARGET_NR_mq_timedreceive_time64:
13143         {
13144             struct timespec ts;
13145             unsigned int prio;
13146 
13147             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13148             if (arg5 != 0) {
13149                 if (target_to_host_timespec64(&ts, arg5)) {
13150                     return -TARGET_EFAULT;
13151                 }
13152                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13153                                                      &prio, &ts));
13154                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13155                     return -TARGET_EFAULT;
13156                 }
13157             } else {
13158                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13159                                                      &prio, NULL));
13160             }
13161             unlock_user(p, arg2, arg3);
13162             if (arg4 != 0) {
13163                 put_user_u32(prio, arg4);
13164             }
13165         }
13166         return ret;
13167 #endif
13168 
13169     /* Not implemented for now... */
13170 /*     case TARGET_NR_mq_notify: */
13171 /*         break; */
13172 
13173     case TARGET_NR_mq_getsetattr:
13174         {
13175             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
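                  /*
                   * If a new attribute block is supplied, mq_setattr() also
                   * reports the previous attributes; otherwise query them with
                   * mq_getattr().  Either way they are copied back to the guest
                   * only when arg3 is non-NULL.
                   */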
13176             ret = 0;
13177             if (arg2 != 0) {
13178                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13179                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13180                                            &posix_mq_attr_out));
13181             } else if (arg3 != 0) {
13182                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13183             }
13184             if (ret == 0 && arg3 != 0) {
13185                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13186             }
13187         }
13188         return ret;
13189 #endif
13190 
13191 #ifdef CONFIG_SPLICE
13192 #ifdef TARGET_NR_tee
13193     case TARGET_NR_tee:
13194         {
13195             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13196         }
13197         return ret;
13198 #endif
13199 #ifdef TARGET_NR_splice
13200     case TARGET_NR_splice:
13201         {
13202             loff_t loff_in, loff_out;
13203             loff_t *ploff_in = NULL, *ploff_out = NULL;
13204             if (arg2) {
13205                 if (get_user_u64(loff_in, arg2)) {
13206                     return -TARGET_EFAULT;
13207                 }
13208                 ploff_in = &loff_in;
13209             }
13210             if (arg4) {
13211                 if (get_user_u64(loff_out, arg4)) {
13212                     return -TARGET_EFAULT;
13213                 }
13214                 ploff_out = &loff_out;
13215             }
13216             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13217             if (arg2) {
13218                 if (put_user_u64(loff_in, arg2)) {
13219                     return -TARGET_EFAULT;
13220                 }
13221             }
13222             if (arg4) {
13223                 if (put_user_u64(loff_out, arg4)) {
13224                     return -TARGET_EFAULT;
13225                 }
13226             }
13227         }
13228         return ret;
13229 #endif
13230 #ifdef TARGET_NR_vmsplice
13231     case TARGET_NR_vmsplice:
13232         {
13233             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13234             if (vec != NULL) {
13235                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13236                 unlock_iovec(vec, arg2, arg3, 0);
13237             } else {
13238                 ret = -host_to_target_errno(errno);
13239             }
13240         }
13241         return ret;
13242 #endif
13243 #endif /* CONFIG_SPLICE */
13244 #ifdef CONFIG_EVENTFD
13245 #if defined(TARGET_NR_eventfd)
13246     case TARGET_NR_eventfd:
13247         ret = get_errno(eventfd(arg1, 0));
13248         if (ret >= 0) {
13249             fd_trans_register(ret, &target_eventfd_trans);
13250         }
13251         return ret;
13252 #endif
13253 #if defined(TARGET_NR_eventfd2)
13254     case TARGET_NR_eventfd2:
13255     {
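              /*
               * Only the NONBLOCK and CLOEXEC flags are meaningful here;
               * translate them individually since the target's bit values
               * need not match the host's.
               */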
13256         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13257         if (arg2 & TARGET_O_NONBLOCK) {
13258             host_flags |= O_NONBLOCK;
13259         }
13260         if (arg2 & TARGET_O_CLOEXEC) {
13261             host_flags |= O_CLOEXEC;
13262         }
13263         ret = get_errno(eventfd(arg1, host_flags));
13264         if (ret >= 0) {
13265             fd_trans_register(ret, &target_eventfd_trans);
13266         }
13267         return ret;
13268     }
13269 #endif
13270 #endif /* CONFIG_EVENTFD  */
13271 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13272     case TARGET_NR_fallocate:
13273 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13274         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13275                                   target_offset64(arg5, arg6)));
13276 #else
13277         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13278 #endif
13279         return ret;
13280 #endif
13281 #if defined(CONFIG_SYNC_FILE_RANGE)
13282 #if defined(TARGET_NR_sync_file_range)
13283     case TARGET_NR_sync_file_range:
13284 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13285 #if defined(TARGET_MIPS)
13286         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13287                                         target_offset64(arg5, arg6), arg7));
13288 #else
13289         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13290                                         target_offset64(arg4, arg5), arg6));
13291 #endif /* !TARGET_MIPS */
13292 #else
13293         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13294 #endif
13295         return ret;
13296 #endif
13297 #if defined(TARGET_NR_sync_file_range2) || \
13298     defined(TARGET_NR_arm_sync_file_range)
13299 #if defined(TARGET_NR_sync_file_range2)
13300     case TARGET_NR_sync_file_range2:
13301 #endif
13302 #if defined(TARGET_NR_arm_sync_file_range)
13303     case TARGET_NR_arm_sync_file_range:
13304 #endif
13305         /* This is like sync_file_range but the arguments are reordered */
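              /* i.e. (fd, flags, offset, nbytes) instead of (fd, offset, nbytes, flags) */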
13306 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13307         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13308                                         target_offset64(arg5, arg6), arg2));
13309 #else
13310         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13311 #endif
13312         return ret;
13313 #endif
13314 #endif
13315 #if defined(TARGET_NR_signalfd4)
13316     case TARGET_NR_signalfd4:
13317         return do_signalfd4(arg1, arg2, arg4);
13318 #endif
13319 #if defined(TARGET_NR_signalfd)
13320     case TARGET_NR_signalfd:
13321         return do_signalfd4(arg1, arg2, 0);
13322 #endif
13323 #if defined(CONFIG_EPOLL)
13324 #if defined(TARGET_NR_epoll_create)
13325     case TARGET_NR_epoll_create:
13326         return get_errno(epoll_create(arg1));
13327 #endif
13328 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13329     case TARGET_NR_epoll_create1:
13330         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13331 #endif
13332 #if defined(TARGET_NR_epoll_ctl)
13333     case TARGET_NR_epoll_ctl:
13334     {
13335         struct epoll_event ep;
13336         struct epoll_event *epp = 0;
13337         if (arg4) {
13338             if (arg2 != EPOLL_CTL_DEL) {
13339                 struct target_epoll_event *target_ep;
13340                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13341                     return -TARGET_EFAULT;
13342                 }
13343                 ep.events = tswap32(target_ep->events);
13344                 /*
13345                  * The epoll_data_t union is just opaque data to the kernel,
13346                  * so we transfer all 64 bits across and need not worry what
13347                  * actual data type it is.
13348                  */
13349                 ep.data.u64 = tswap64(target_ep->data.u64);
13350                 unlock_user_struct(target_ep, arg4, 0);
13351             }
13352             /*
13353              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13354              * non-null pointer, even though this argument is ignored.
13355              *
13356              */
13357             epp = &ep;
13358         }
13359         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13360     }
13361 #endif
13362 
13363 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13364 #if defined(TARGET_NR_epoll_wait)
13365     case TARGET_NR_epoll_wait:
13366 #endif
13367 #if defined(TARGET_NR_epoll_pwait)
13368     case TARGET_NR_epoll_pwait:
13369 #endif
13370     {
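              /*
               * Events are collected into a host-side array and converted to
               * the target's epoll_event layout afterwards; maxevents is
               * bounded by TARGET_EP_MAX_EVENTS to keep the allocation small.
               */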
13371         struct target_epoll_event *target_ep;
13372         struct epoll_event *ep;
13373         int epfd = arg1;
13374         int maxevents = arg3;
13375         int timeout = arg4;
13376 
13377         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13378             return -TARGET_EINVAL;
13379         }
13380 
13381         target_ep = lock_user(VERIFY_WRITE, arg2,
13382                               maxevents * sizeof(struct target_epoll_event), 1);
13383         if (!target_ep) {
13384             return -TARGET_EFAULT;
13385         }
13386 
13387         ep = g_try_new(struct epoll_event, maxevents);
13388         if (!ep) {
13389             unlock_user(target_ep, arg2, 0);
13390             return -TARGET_ENOMEM;
13391         }
13392 
13393         switch (num) {
13394 #if defined(TARGET_NR_epoll_pwait)
13395         case TARGET_NR_epoll_pwait:
13396         {
13397             sigset_t *set = NULL;
13398 
13399             if (arg5) {
13400                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13401                 if (ret != 0) {
13402                     break;
13403                 }
13404             }
13405 
13406             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13407                                              set, SIGSET_T_SIZE));
13408 
13409             if (set) {
13410                 finish_sigsuspend_mask(ret);
13411             }
13412             break;
13413         }
13414 #endif
13415 #if defined(TARGET_NR_epoll_wait)
13416         case TARGET_NR_epoll_wait:
13417             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13418                                              NULL, 0));
13419             break;
13420 #endif
13421         default:
13422             ret = -TARGET_ENOSYS;
13423         }
13424         if (!is_error(ret)) {
13425             int i;
13426             for (i = 0; i < ret; i++) {
13427                 target_ep[i].events = tswap32(ep[i].events);
13428                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13429             }
13430             unlock_user(target_ep, arg2,
13431                         ret * sizeof(struct target_epoll_event));
13432         } else {
13433             unlock_user(target_ep, arg2, 0);
13434         }
13435         g_free(ep);
13436         return ret;
13437     }
13438 #endif
13439 #endif
13440 #ifdef TARGET_NR_prlimit64
13441     case TARGET_NR_prlimit64:
13442     {
13443         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13444         struct target_rlimit64 *target_rnew, *target_rold;
13445         struct host_rlimit64 rnew, rold, *rnewp = 0;
13446         int resource = target_to_host_resource(arg2);
13447 
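              /*
               * New limits for RLIMIT_AS/DATA/STACK are deliberately not
               * forwarded: they would constrain the QEMU process itself
               * rather than just the guest.
               */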
13448         if (arg3 && (resource != RLIMIT_AS &&
13449                      resource != RLIMIT_DATA &&
13450                      resource != RLIMIT_STACK)) {
13451             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13452                 return -TARGET_EFAULT;
13453             }
13454             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13455             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13456             unlock_user_struct(target_rnew, arg3, 0);
13457             rnewp = &rnew;
13458         }
13459 
13460         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13461         if (!is_error(ret) && arg4) {
13462             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13463                 return -TARGET_EFAULT;
13464             }
13465             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13466             __put_user(rold.rlim_max, &target_rold->rlim_max);
13467             unlock_user_struct(target_rold, arg4, 1);
13468         }
13469         return ret;
13470     }
13471 #endif
13472 #ifdef TARGET_NR_gethostname
13473     case TARGET_NR_gethostname:
13474     {
13475         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13476         if (name) {
13477             ret = get_errno(gethostname(name, arg2));
13478             unlock_user(name, arg1, arg2);
13479         } else {
13480             ret = -TARGET_EFAULT;
13481         }
13482         return ret;
13483     }
13484 #endif
13485 #ifdef TARGET_NR_atomic_cmpxchg_32
13486     case TARGET_NR_atomic_cmpxchg_32:
13487     {
13488         /* should use start_exclusive from main.c */
13489         abi_ulong mem_value;
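              /*
               * Note that the read/compare/write sequence below is not atomic
               * with respect to other guest threads; as noted above it should
               * really run under start_exclusive().
               */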
13490         if (get_user_u32(mem_value, arg6)) {
13491             target_siginfo_t info;
13492             info.si_signo = SIGSEGV;
13493             info.si_errno = 0;
13494             info.si_code = TARGET_SEGV_MAPERR;
13495             info._sifields._sigfault._addr = arg6;
13496             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13497             ret = 0xdeadbeef;
13498 
13499         }
13500         if (mem_value == arg2)
13501             put_user_u32(arg1, arg6);
13502         return mem_value;
13503     }
13504 #endif
13505 #ifdef TARGET_NR_atomic_barrier
13506     case TARGET_NR_atomic_barrier:
13507         /* Like the kernel implementation and the
13508            QEMU ARM barrier, treat this as a no-op. */
13509         return 0;
13510 #endif
13511 
13512 #ifdef TARGET_NR_timer_create
13513     case TARGET_NR_timer_create:
13514     {
13515         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13516 
13517         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13518 
13519         int clkid = arg1;
13520         int timer_index = next_free_host_timer();
13521 
13522         if (timer_index < 0) {
13523             ret = -TARGET_EAGAIN;
13524         } else {
13525             timer_t *phtimer = g_posix_timers + timer_index;
13526 
13527             if (arg2) {
13528                 phost_sevp = &host_sevp;
13529                 ret = target_to_host_sigevent(phost_sevp, arg2);
13530                 if (ret != 0) {
13531                     free_host_timer_slot(timer_index);
13532                     return ret;
13533                 }
13534             }
13535 
13536             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13537             if (ret) {
13538                 free_host_timer_slot(timer_index);
13539             } else {
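                      /*
                       * The guest-visible timer id encodes TIMER_MAGIC plus the
                       * slot index into g_posix_timers; get_timer_id() validates
                       * the magic and recovers the index for the other timer_*
                       * syscalls.
                       */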
13540                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13541                     timer_delete(*phtimer);
13542                     free_host_timer_slot(timer_index);
13543                     return -TARGET_EFAULT;
13544                 }
13545             }
13546         }
13547         return ret;
13548     }
13549 #endif
13550 
13551 #ifdef TARGET_NR_timer_settime
13552     case TARGET_NR_timer_settime:
13553     {
13554         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13555          * struct itimerspec * old_value */
13556         target_timer_t timerid = get_timer_id(arg1);
13557 
13558         if (timerid < 0) {
13559             ret = timerid;
13560         } else if (arg3 == 0) {
13561             ret = -TARGET_EINVAL;
13562         } else {
13563             timer_t htimer = g_posix_timers[timerid];
13564             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13565 
13566             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13567                 return -TARGET_EFAULT;
13568             }
13569             ret = get_errno(
13570                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13571             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13572                 return -TARGET_EFAULT;
13573             }
13574         }
13575         return ret;
13576     }
13577 #endif
13578 
13579 #ifdef TARGET_NR_timer_settime64
13580     case TARGET_NR_timer_settime64:
13581     {
13582         target_timer_t timerid = get_timer_id(arg1);
13583 
13584         if (timerid < 0) {
13585             ret = timerid;
13586         } else if (arg3 == 0) {
13587             ret = -TARGET_EINVAL;
13588         } else {
13589             timer_t htimer = g_posix_timers[timerid];
13590             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13591 
13592             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13593                 return -TARGET_EFAULT;
13594             }
13595             ret = get_errno(
13596                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13597             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13598                 return -TARGET_EFAULT;
13599             }
13600         }
13601         return ret;
13602     }
13603 #endif
13604 
13605 #ifdef TARGET_NR_timer_gettime
13606     case TARGET_NR_timer_gettime:
13607     {
13608         /* args: timer_t timerid, struct itimerspec *curr_value */
13609         target_timer_t timerid = get_timer_id(arg1);
13610 
13611         if (timerid < 0) {
13612             ret = timerid;
13613         } else if (!arg2) {
13614             ret = -TARGET_EFAULT;
13615         } else {
13616             timer_t htimer = g_posix_timers[timerid];
13617             struct itimerspec hspec;
13618             ret = get_errno(timer_gettime(htimer, &hspec));
13619 
13620             if (host_to_target_itimerspec(arg2, &hspec)) {
13621                 ret = -TARGET_EFAULT;
13622             }
13623         }
13624         return ret;
13625     }
13626 #endif
13627 
13628 #ifdef TARGET_NR_timer_gettime64
13629     case TARGET_NR_timer_gettime64:
13630     {
13631         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13632         target_timer_t timerid = get_timer_id(arg1);
13633 
13634         if (timerid < 0) {
13635             ret = timerid;
13636         } else if (!arg2) {
13637             ret = -TARGET_EFAULT;
13638         } else {
13639             timer_t htimer = g_posix_timers[timerid];
13640             struct itimerspec hspec;
13641             ret = get_errno(timer_gettime(htimer, &hspec));
13642 
13643             if (host_to_target_itimerspec64(arg2, &hspec)) {
13644                 ret = -TARGET_EFAULT;
13645             }
13646         }
13647         return ret;
13648     }
13649 #endif
13650 
13651 #ifdef TARGET_NR_timer_getoverrun
13652     case TARGET_NR_timer_getoverrun:
13653     {
13654         /* args: timer_t timerid */
13655         target_timer_t timerid = get_timer_id(arg1);
13656 
13657         if (timerid < 0) {
13658             ret = timerid;
13659         } else {
13660             timer_t htimer = g_posix_timers[timerid];
13661             ret = get_errno(timer_getoverrun(htimer));
13662         }
13663         return ret;
13664     }
13665 #endif
13666 
13667 #ifdef TARGET_NR_timer_delete
13668     case TARGET_NR_timer_delete:
13669     {
13670         /* args: timer_t timerid */
13671         target_timer_t timerid = get_timer_id(arg1);
13672 
13673         if (timerid < 0) {
13674             ret = timerid;
13675         } else {
13676             timer_t htimer = g_posix_timers[timerid];
13677             ret = get_errno(timer_delete(htimer));
13678             free_host_timer_slot(timerid);
13679         }
13680         return ret;
13681     }
13682 #endif
13683 
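    /*
     * The timerfd_* family operates on ordinary host file descriptors, so
     * only the creation flags and the itimerspec layouts need translating.
     * The translator registered on the new fd presumably fixes up the
     * 8-byte expiration count the guest reads from it.
     */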
13684 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13685     case TARGET_NR_timerfd_create:
13686         ret = get_errno(timerfd_create(arg1,
13687                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13688         if (ret >= 0) {
13689             fd_trans_register(ret, &target_timerfd_trans);
13690         }
13691         return ret;
13692 #endif
13693 
13694 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13695     case TARGET_NR_timerfd_gettime:
13696         {
13697             struct itimerspec its_curr;
13698 
13699             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13700 
13701             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13702                 return -TARGET_EFAULT;
13703             }
13704         }
13705         return ret;
13706 #endif
13707 
13708 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13709     case TARGET_NR_timerfd_gettime64:
13710         {
13711             struct itimerspec its_curr;
13712 
13713             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13714 
13715             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13716                 return -TARGET_EFAULT;
13717             }
13718         }
13719         return ret;
13720 #endif
13721 
13722 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13723     case TARGET_NR_timerfd_settime:
13724         {
13725             struct itimerspec its_new, its_old, *p_new;
13726 
13727             if (arg3) {
13728                 if (target_to_host_itimerspec(&its_new, arg3)) {
13729                     return -TARGET_EFAULT;
13730                 }
13731                 p_new = &its_new;
13732             } else {
13733                 p_new = NULL;
13734             }
13735 
13736             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13737 
13738             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13739                 return -TARGET_EFAULT;
13740             }
13741         }
13742         return ret;
13743 #endif
13744 
13745 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13746     case TARGET_NR_timerfd_settime64:
13747         {
13748             struct itimerspec its_new, its_old, *p_new;
13749 
13750             if (arg3) {
13751                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13752                     return -TARGET_EFAULT;
13753                 }
13754                 p_new = &its_new;
13755             } else {
13756                 p_new = NULL;
13757             }
13758 
13759             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13760 
13761             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13762                 return -TARGET_EFAULT;
13763             }
13764         }
13765         return ret;
13766 #endif
13767 
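    /*
     * ioprio_get/set, setns, unshare and kcmp take only integer arguments,
     * so they can be forwarded to the host unchanged.
     */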
13768 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13769     case TARGET_NR_ioprio_get:
13770         return get_errno(ioprio_get(arg1, arg2));
13771 #endif
13772 
13773 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13774     case TARGET_NR_ioprio_set:
13775         return get_errno(ioprio_set(arg1, arg2, arg3));
13776 #endif
13777 
13778 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13779     case TARGET_NR_setns:
13780         return get_errno(setns(arg1, arg2));
13781 #endif
13782 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13783     case TARGET_NR_unshare:
13784         return get_errno(unshare(arg1));
13785 #endif
13786 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13787     case TARGET_NR_kcmp:
13788         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13789 #endif
13790 #ifdef TARGET_NR_swapcontext
13791     case TARGET_NR_swapcontext:
13792         /* PowerPC specific.  */
13793         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13794 #endif
13795 #ifdef TARGET_NR_memfd_create
13796     case TARGET_NR_memfd_create:
13797         p = lock_user_string(arg1);
13798         if (!p) {
13799             return -TARGET_EFAULT;
13800         }
13801         ret = get_errno(memfd_create(p, arg2));
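        /*
         * A previously closed fd with the same number may still have a
         * translator registered; make sure the new memfd starts clean.
         */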
13802         fd_trans_unregister(ret);
13803         unlock_user(p, arg1, 0);
13804         return ret;
13805 #endif
13806 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13807     case TARGET_NR_membarrier:
13808         return get_errno(membarrier(arg1, arg2));
13809 #endif
13810 
13811 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13812     case TARGET_NR_copy_file_range:
13813         {
13814             loff_t inoff, outoff;
13815             loff_t *pinoff = NULL, *poutoff = NULL;
13816 
13817             if (arg2) {
13818                 if (get_user_u64(inoff, arg2)) {
13819                     return -TARGET_EFAULT;
13820                 }
13821                 pinoff = &inoff;
13822             }
13823             if (arg4) {
13824                 if (get_user_u64(outoff, arg4)) {
13825                     return -TARGET_EFAULT;
13826                 }
13827                 poutoff = &outoff;
13828             }
13829             /* Do not sign-extend the count parameter. */
13830             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13831                                                  (abi_ulong)arg5, arg6));
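            /* On success, copy the updated file offsets back to the guest. */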
13832             if (!is_error(ret) && ret > 0) {
13833                 if (arg2) {
13834                     if (put_user_u64(inoff, arg2)) {
13835                         return -TARGET_EFAULT;
13836                     }
13837                 }
13838                 if (arg4) {
13839                     if (put_user_u64(outoff, arg4)) {
13840                         return -TARGET_EFAULT;
13841                     }
13842                 }
13843             }
13844         }
13845         return ret;
13846 #endif
13847 
13848 #if defined(TARGET_NR_pivot_root)
13849     case TARGET_NR_pivot_root:
13850         {
13851             void *p2;
13852             p = lock_user_string(arg1); /* new_root */
13853             p2 = lock_user_string(arg2); /* put_old */
13854             if (!p || !p2) {
13855                 ret = -TARGET_EFAULT;
13856             } else {
13857                 ret = get_errno(pivot_root(p, p2));
13858             }
13859             unlock_user(p2, arg2, 0);
13860             unlock_user(p, arg1, 0);
13861         }
13862         return ret;
13863 #endif
13864 
13865 #if defined(TARGET_NR_riscv_hwprobe)
13866     case TARGET_NR_riscv_hwprobe:
13867         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13868 #endif
13869 
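    /* Anything not handled above is logged as unimplemented and fails
     * with -TARGET_ENOSYS. */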
13870     default:
13871         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13872         return -TARGET_ENOSYS;
13873     }
13874     return ret;
13875 }
13876 
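/*
 * Top-level syscall entry point: record and (optionally) strace the call,
 * dispatch to do_syscall1(), then record and strace the result.
 */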
13877 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13878                     abi_long arg2, abi_long arg3, abi_long arg4,
13879                     abi_long arg5, abi_long arg6, abi_long arg7,
13880                     abi_long arg8)
13881 {
13882     CPUState *cpu = env_cpu(cpu_env);
13883     abi_long ret;
13884 
13885 #ifdef DEBUG_ERESTARTSYS
13886     /* Debug-only code for exercising the syscall-restart code paths
13887      * in the per-architecture cpu main loops: restart every syscall
13888      * the guest makes once before letting it through.
13889      */
13890     {
13891         static bool flag;
13892         flag = !flag;
13893         if (flag) {
13894             return -QEMU_ERESTARTSYS;
13895         }
13896     }
13897 #endif
13898 
13899     record_syscall_start(cpu, num, arg1,
13900                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13901 
13902     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13903         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13904     }
13905 
13906     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13907                       arg5, arg6, arg7, arg8);
13908 
13909     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13910         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13911                           arg3, arg4, arg5, arg6);
13912     }
13913 
13914     record_syscall_return(cpu, num, ret);
13915     return ret;
13916 }
13917