xref: /qemu/linux-user/syscall.c (revision 628d64222e6bef249d23ce3147cbfb47259f2ede)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/translation-block.h"
30 #include <elf.h>
31 #include <endian.h>
32 #include <grp.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/mount.h>
37 #include <sys/file.h>
38 #include <sys/fsuid.h>
39 #include <sys/personality.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
42 #include <sys/swap.h>
43 #include <linux/capability.h>
44 #include <sched.h>
45 #include <sys/timex.h>
46 #include <sys/socket.h>
47 #include <linux/sockios.h>
48 #include <sys/un.h>
49 #include <sys/uio.h>
50 #include <poll.h>
51 #include <sys/times.h>
52 #include <sys/shm.h>
53 #include <sys/sem.h>
54 #include <sys/statfs.h>
55 #include <utime.h>
56 #include <sys/sysinfo.h>
57 #include <sys/signalfd.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
61 #include <netinet/udp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include <linux/icmpv6.h>
65 #include <linux/if_tun.h>
66 #include <linux/in6.h>
67 #include <linux/errqueue.h>
68 #include <linux/random.h>
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84 #ifdef HAVE_SYS_KCOV_H
85 #include <sys/kcov.h>
86 #endif
87 
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
94 
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #include <linux/fd.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #if defined(CONFIG_USBFS)
109 #include <linux/usbdevice_fs.h>
110 #include <linux/usb/ch9.h>
111 #endif
112 #include <linux/vt.h>
113 #include <linux/dm-ioctl.h>
114 #include <linux/reboot.h>
115 #include <linux/route.h>
116 #include <linux/filter.h>
117 #include <linux/blkpg.h>
118 #include <netpacket/packet.h>
119 #include <linux/netlink.h>
120 #include <linux/if_alg.h>
121 #include <linux/rtc.h>
122 #include <sound/asound.h>
123 #ifdef HAVE_BTRFS_H
124 #include <linux/btrfs.h>
125 #endif
126 #ifdef HAVE_DRM_H
127 #include <libdrm/drm.h>
128 #include <libdrm/i915_drm.h>
129 #endif
130 #include "linux_loop.h"
131 #include "uname.h"
132 
133 #include "qemu.h"
134 #include "user-internals.h"
135 #include "strace.h"
136 #include "signal-common.h"
137 #include "loader.h"
138 #include "user-mmap.h"
139 #include "user/page-protection.h"
140 #include "user/safe-syscall.h"
141 #include "user/signal.h"
142 #include "qemu/guest-random.h"
143 #include "qemu/selfmap.h"
144 #include "user/syscall-trace.h"
145 #include "special-errno.h"
146 #include "qapi/error.h"
147 #include "fd-trans.h"
148 #include "user/cpu_loop.h"
149 
150 #ifndef CLONE_IO
151 #define CLONE_IO                0x80000000      /* Clone io context */
152 #endif
153 
154 /* We can't directly call the host clone syscall, because this will
155  * badly confuse libc (breaking mutexes, for example). So we must
156  * divide clone flags into:
157  *  * flag combinations that look like pthread_create()
158  *  * flag combinations that look like fork()
159  *  * flags we can implement within QEMU itself
160  *  * flags we can't support and will return an error for
161  */
162 /* For thread creation, all these flags must be present; for
163  * fork, none must be present.
164  */
165 #define CLONE_THREAD_FLAGS                              \
166     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
167      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
168 
169 /* These flags are ignored:
170  * CLONE_DETACHED is now ignored by the kernel;
171  * CLONE_IO is just an optimisation hint to the I/O scheduler
172  */
173 #define CLONE_IGNORED_FLAGS                     \
174     (CLONE_DETACHED | CLONE_IO)
175 
176 #ifndef CLONE_PIDFD
177 # define CLONE_PIDFD 0x00001000
178 #endif
179 
180 /* Flags for fork which we can implement within QEMU itself */
181 #define CLONE_OPTIONAL_FORK_FLAGS               \
182     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
183      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
184 
185 /* Flags for thread creation which we can implement within QEMU itself */
186 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
187     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
188      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
189 
190 #define CLONE_INVALID_FORK_FLAGS                                        \
191     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
192 
193 #define CLONE_INVALID_THREAD_FLAGS                                      \
194     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
195        CLONE_IGNORED_FLAGS))
196 
197 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
198  * have almost all been allocated. We cannot support any of
199  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
200  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
201  * The checks against the invalid thread masks above will catch these.
202  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
203  */
204 
205 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
206  * once. This exercises the codepaths for restart.
207  */
208 //#define DEBUG_ERESTARTSYS
209 
210 //#include <linux/msdos_fs.h>
211 #define VFAT_IOCTL_READDIR_BOTH \
212     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
213 #define VFAT_IOCTL_READDIR_SHORT \
214     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
215 
216 #undef _syscall0
217 #undef _syscall1
218 #undef _syscall2
219 #undef _syscall3
220 #undef _syscall4
221 #undef _syscall5
222 #undef _syscall6
223 
224 #define _syscall0(type,name)		\
225 static type name (void)			\
226 {					\
227 	return syscall(__NR_##name);	\
228 }
229 
230 #define _syscall1(type,name,type1,arg1)		\
231 static type name (type1 arg1)			\
232 {						\
233 	return syscall(__NR_##name, arg1);	\
234 }
235 
236 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
237 static type name (type1 arg1,type2 arg2)		\
238 {							\
239 	return syscall(__NR_##name, arg1, arg2);	\
240 }
241 
242 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
243 static type name (type1 arg1,type2 arg2,type3 arg3)		\
244 {								\
245 	return syscall(__NR_##name, arg1, arg2, arg3);		\
246 }
247 
248 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
249 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
250 {										\
251 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
252 }
253 
254 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
255 		  type5,arg5)							\
256 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
259 }
260 
261 
262 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
263 		  type5,arg5,type6,arg6)					\
264 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
265                   type6 arg6)							\
266 {										\
267 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
268 }
269 
270 
271 #define __NR_sys_uname __NR_uname
272 #define __NR_sys_getcwd1 __NR_getcwd
273 #define __NR_sys_getdents __NR_getdents
274 #define __NR_sys_getdents64 __NR_getdents64
275 #define __NR_sys_getpriority __NR_getpriority
276 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
277 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
278 #define __NR_sys_syslog __NR_syslog
279 #if defined(__NR_futex)
280 # define __NR_sys_futex __NR_futex
281 #endif
282 #if defined(__NR_futex_time64)
283 # define __NR_sys_futex_time64 __NR_futex_time64
284 #endif
285 #define __NR_sys_statx __NR_statx
286 
287 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
288 #define __NR__llseek __NR_lseek
289 #endif
290 
291 /* Newer kernel ports have llseek() instead of _llseek() */
292 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
293 #define TARGET_NR__llseek TARGET_NR_llseek
294 #endif
295 
296 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
297 #ifndef TARGET_O_NONBLOCK_MASK
298 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
299 #endif
300 
301 #define __NR_sys_gettid __NR_gettid
302 _syscall0(int, sys_gettid)
303 
304 /* For the 64-bit guest on 32-bit host case we must emulate
305  * getdents using getdents64, because otherwise the host
306  * might hand us back more dirent records than we can fit
307  * into the guest buffer after structure format conversion.
308  * Otherwise we emulate getdents with getdents if the host has it.
309  */
310 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
311 #define EMULATE_GETDENTS_WITH_GETDENTS
312 #endif
313 
314 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
315 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
316 #endif
317 #if (defined(TARGET_NR_getdents) && \
318       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
319     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
320 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
321 #endif
322 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
323 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
324           loff_t *, res, unsigned int, wh);
325 #endif
326 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
327 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
328           siginfo_t *, uinfo)
329 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
330 #ifdef __NR_exit_group
331 _syscall1(int,exit_group,int,error_code)
332 #endif
333 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
334 #define __NR_sys_close_range __NR_close_range
335 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
336 #ifndef CLOSE_RANGE_CLOEXEC
337 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
338 #endif
339 #endif
340 #if defined(__NR_futex)
341 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
342           const struct timespec *,timeout,int *,uaddr2,int,val3)
343 #endif
344 #if defined(__NR_futex_time64)
345 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
346           const struct timespec *,timeout,int *,uaddr2,int,val3)
347 #endif
348 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
349 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
350 #endif
351 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
352 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
353                              unsigned int, flags);
354 #endif
355 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
356 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
357 #endif
358 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
359 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
362 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
363           unsigned long *, user_mask_ptr);
364 /* sched_attr is not defined in glibc */
365 struct sched_attr {
366     uint32_t size;
367     uint32_t sched_policy;
368     uint64_t sched_flags;
369     int32_t sched_nice;
370     uint32_t sched_priority;
371     uint64_t sched_runtime;
372     uint64_t sched_deadline;
373     uint64_t sched_period;
374     uint32_t sched_util_min;
375     uint32_t sched_util_max;
376 };
377 #define __NR_sys_sched_getattr __NR_sched_getattr
378 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, size, unsigned int, flags);
380 #define __NR_sys_sched_setattr __NR_sched_setattr
381 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
382           unsigned int, flags);
383 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
384 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
385 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
386 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
387           const struct sched_param *, param);
388 #define __NR_sys_sched_getparam __NR_sched_getparam
389 _syscall2(int, sys_sched_getparam, pid_t, pid,
390           struct sched_param *, param);
391 #define __NR_sys_sched_setparam __NR_sched_setparam
392 _syscall2(int, sys_sched_setparam, pid_t, pid,
393           const struct sched_param *, param);
394 #define __NR_sys_getcpu __NR_getcpu
395 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
396 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
397           void *, arg);
398 _syscall2(int, capget, struct __user_cap_header_struct *, header,
399           struct __user_cap_data_struct *, data);
400 _syscall2(int, capset, struct __user_cap_header_struct *, header,
401           struct __user_cap_data_struct *, data);
402 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
403 _syscall2(int, ioprio_get, int, which, int, who)
404 #endif
405 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
406 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
407 #endif
408 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
409 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
410 #endif
411 
412 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
413 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
414           unsigned long, idx1, unsigned long, idx2)
415 #endif
416 
417 /*
418  * It is assumed that struct statx is architecture independent.
419  */
420 #if defined(TARGET_NR_statx) && defined(__NR_statx)
421 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
422           unsigned int, mask, struct target_statx *, statxbuf)
423 #endif
424 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
425 _syscall2(int, membarrier, int, cmd, int, flags)
426 #endif
427 
428 static const bitmask_transtbl fcntl_flags_tbl[] = {
429   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
430   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
431   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
432   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
433   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
434   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
435   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
436   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
437   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
438   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
439   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
440   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
441   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
442 #if defined(O_DIRECT)
443   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
444 #endif
445 #if defined(O_NOATIME)
446   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
447 #endif
448 #if defined(O_CLOEXEC)
449   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
450 #endif
451 #if defined(O_PATH)
452   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
453 #endif
454 #if defined(O_TMPFILE)
455   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
456 #endif
457   /* Don't terminate the list prematurely on 64-bit host+guest.  */
458 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
459   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
460 #endif
461 };
462 
463 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
464 
465 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
466 #if defined(__NR_utimensat)
467 #define __NR_sys_utimensat __NR_utimensat
468 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
469           const struct timespec *,tsp,int,flags)
470 #else
471 static int sys_utimensat(int dirfd, const char *pathname,
472                          const struct timespec times[2], int flags)
473 {
474     errno = ENOSYS;
475     return -1;
476 }
477 #endif
478 #endif /* TARGET_NR_utimensat */
479 
480 #ifdef TARGET_NR_renameat2
481 #if defined(__NR_renameat2)
482 #define __NR_sys_renameat2 __NR_renameat2
483 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
484           const char *, new, unsigned int, flags)
485 #else
486 static int sys_renameat2(int oldfd, const char *old,
487                          int newfd, const char *new, int flags)
488 {
489     if (flags == 0) {
490         return renameat(oldfd, old, newfd, new);
491     }
492     errno = ENOSYS;
493     return -1;
494 }
495 #endif
496 #endif /* TARGET_NR_renameat2 */
497 
498 #ifdef CONFIG_INOTIFY
499 #include <sys/inotify.h>
500 #else
501 /* Userspace can usually survive runtime without inotify */
502 #undef TARGET_NR_inotify_init
503 #undef TARGET_NR_inotify_init1
504 #undef TARGET_NR_inotify_add_watch
505 #undef TARGET_NR_inotify_rm_watch
506 #endif /* CONFIG_INOTIFY  */
507 
508 #if defined(TARGET_NR_prlimit64)
509 #ifndef __NR_prlimit64
510 # define __NR_prlimit64 -1
511 #endif
512 #define __NR_sys_prlimit64 __NR_prlimit64
513 /* The glibc rlimit structure may not be that used by the underlying syscall */
514 struct host_rlimit64 {
515     uint64_t rlim_cur;
516     uint64_t rlim_max;
517 };
518 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
519           const struct host_rlimit64 *, new_limit,
520           struct host_rlimit64 *, old_limit)
521 #endif
522 
523 
524 #if defined(TARGET_NR_timer_create)
525 /* Maximum of 32 active POSIX timers allowed at any one time. */
526 #define GUEST_TIMER_MAX 32
527 static timer_t g_posix_timers[GUEST_TIMER_MAX];
528 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
529 
530 static inline int next_free_host_timer(void)
531 {
532     int k;
533     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
534         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
535             return k;
536         }
537     }
538     return -1;
539 }
540 
541 static inline void free_host_timer_slot(int id)
542 {
543     qatomic_store_release(g_posix_timer_allocated + id, 0);
544 }
545 #endif
546 
547 static inline int host_to_target_errno(int host_errno)
548 {
549     switch (host_errno) {
550 #define E(X)  case X: return TARGET_##X;
551 #include "errnos.c.inc"
552 #undef E
553     default:
554         return host_errno;
555     }
556 }
557 
558 static inline int target_to_host_errno(int target_errno)
559 {
560     switch (target_errno) {
561 #define E(X)  case TARGET_##X: return X;
562 #include "errnos.c.inc"
563 #undef E
564     default:
565         return target_errno;
566     }
567 }
568 
569 abi_long get_errno(abi_long ret)
570 {
571     if (ret == -1)
572         return -host_to_target_errno(errno);
573     else
574         return ret;
575 }
576 
577 const char *target_strerror(int err)
578 {
579     if (err == QEMU_ERESTARTSYS) {
580         return "To be restarted";
581     }
582     if (err == QEMU_ESIGRETURN) {
583         return "Successful exit from sigreturn";
584     }
585 
586     return strerror(target_to_host_errno(err));
587 }
588 
589 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
590 {
591     int i;
592     uint8_t b;
593     if (usize <= ksize) {
594         return 1;
595     }
596     for (i = ksize; i < usize; i++) {
597         if (get_user_u8(b, addr + i)) {
598             return -TARGET_EFAULT;
599         }
600         if (b != 0) {
601             return 0;
602         }
603     }
604     return 1;
605 }
606 
607 /*
608  * Copies a target struct to a host struct, in a way that guarantees
609  * backwards-compatibility for struct syscall arguments.
610  *
611  * Similar to kernels uaccess.h:copy_struct_from_user()
612  */
613 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
614 {
615     size_t size = MIN(ksize, usize);
616     size_t rest = MAX(ksize, usize) - size;
617 
618     /* Deal with trailing bytes. */
619     if (usize < ksize) {
620         memset(dst + size, 0, rest);
621     } else if (usize > ksize) {
622         int ret = check_zeroed_user(src, ksize, usize);
623         if (ret <= 0) {
624             return ret ?: -TARGET_E2BIG;
625         }
626     }
627     /* Copy the interoperable parts of the struct. */
628     if (copy_from_user(dst, src, size)) {
629         return -TARGET_EFAULT;
630     }
631     return 0;
632 }
633 
634 #define safe_syscall0(type, name) \
635 static type safe_##name(void) \
636 { \
637     return safe_syscall(__NR_##name); \
638 }
639 
640 #define safe_syscall1(type, name, type1, arg1) \
641 static type safe_##name(type1 arg1) \
642 { \
643     return safe_syscall(__NR_##name, arg1); \
644 }
645 
646 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
647 static type safe_##name(type1 arg1, type2 arg2) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2); \
650 }
651 
652 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
653 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
654 { \
655     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
656 }
657 
658 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
659     type4, arg4) \
660 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
661 { \
662     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
663 }
664 
665 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
666     type4, arg4, type5, arg5) \
667 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
668     type5 arg5) \
669 { \
670     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
671 }
672 
673 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
674     type4, arg4, type5, arg5, type6, arg6) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676     type5 arg5, type6 arg6) \
677 { \
678     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
679 }
680 
681 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
682 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
683 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
684               int, flags, mode_t, mode)
685 
686 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
687               const struct open_how_ver0 *, how, size_t, size)
688 
689 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
690 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
691               struct rusage *, rusage)
692 #endif
693 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
694               int, options, struct rusage *, rusage)
695 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
696 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
697               char **, argv, char **, envp, int, flags)
698 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
699     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
700 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
701               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
702 #endif
703 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
704 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
705               struct timespec *, tsp, const sigset_t *, sigmask,
706               size_t, sigsetsize)
707 #endif
708 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
709               int, maxevents, int, timeout, const sigset_t *, sigmask,
710               size_t, sigsetsize)
711 #if defined(__NR_futex)
712 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
713               const struct timespec *,timeout,int *,uaddr2,int,val3)
714 #endif
715 #if defined(__NR_futex_time64)
716 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
717               const struct timespec *,timeout,int *,uaddr2,int,val3)
718 #endif
719 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
720 safe_syscall2(int, kill, pid_t, pid, int, sig)
721 safe_syscall2(int, tkill, int, tid, int, sig)
722 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
723 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
724 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
725 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
726               unsigned long, pos_l, unsigned long, pos_h)
727 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
728               unsigned long, pos_l, unsigned long, pos_h)
729 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
730               socklen_t, addrlen)
731 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
732               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
733 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
734               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
735 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
736 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
737 safe_syscall2(int, flock, int, fd, int, operation)
738 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
739 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
740               const struct timespec *, uts, size_t, sigsetsize)
741 #endif
742 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
743               int, flags)
744 #if defined(TARGET_NR_nanosleep)
745 safe_syscall2(int, nanosleep, const struct timespec *, req,
746               struct timespec *, rem)
747 #endif
748 #if defined(TARGET_NR_clock_nanosleep) || \
749     defined(TARGET_NR_clock_nanosleep_time64)
750 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
751               const struct timespec *, req, struct timespec *, rem)
752 #endif
753 #ifdef __NR_ipc
754 #ifdef __s390x__
755 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
756               void *, ptr)
757 #else
758 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
759               void *, ptr, long, fifth)
760 #endif
761 #endif
762 #ifdef __NR_msgsnd
763 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
764               int, flags)
765 #endif
766 #ifdef __NR_msgrcv
767 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
768               long, msgtype, int, flags)
769 #endif
770 #ifdef __NR_semtimedop
771 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
772               unsigned, nsops, const struct timespec *, timeout)
773 #endif
774 #if defined(TARGET_NR_mq_timedsend) || \
775     defined(TARGET_NR_mq_timedsend_time64)
776 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
777               size_t, len, unsigned, prio, const struct timespec *, timeout)
778 #endif
779 #if defined(TARGET_NR_mq_timedreceive) || \
780     defined(TARGET_NR_mq_timedreceive_time64)
781 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
782               size_t, len, unsigned *, prio, const struct timespec *, timeout)
783 #endif
784 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
785 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
786               int, outfd, loff_t *, poutoff, size_t, length,
787               unsigned int, flags)
788 #endif
789 
790 /* We do ioctl like this rather than via safe_syscall3 to preserve the
791  * "third argument might be integer or pointer or not present" behaviour of
792  * the libc function.
793  */
794 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
795 /* Similarly for fcntl. Since we always build with LFS enabled,
796  * we should be using the 64-bit structures automatically.
797  */
798 #ifdef __NR_fcntl64
799 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
800 #else
801 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
802 #endif
803 
804 static inline int host_to_target_sock_type(int host_type)
805 {
806     int target_type;
807 
808     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
809     case SOCK_DGRAM:
810         target_type = TARGET_SOCK_DGRAM;
811         break;
812     case SOCK_STREAM:
813         target_type = TARGET_SOCK_STREAM;
814         break;
815     default:
816         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
817         break;
818     }
819 
820 #if defined(SOCK_CLOEXEC)
821     if (host_type & SOCK_CLOEXEC) {
822         target_type |= TARGET_SOCK_CLOEXEC;
823     }
824 #endif
825 
826 #if defined(SOCK_NONBLOCK)
827     if (host_type & SOCK_NONBLOCK) {
828         target_type |= TARGET_SOCK_NONBLOCK;
829     }
830 #endif
831 
832     return target_type;
833 }
834 
835 static abi_ulong target_brk, initial_target_brk;
836 
837 void target_set_brk(abi_ulong new_brk)
838 {
839     target_brk = TARGET_PAGE_ALIGN(new_brk);
840     initial_target_brk = target_brk;
841 }
842 
843 /* do_brk() must return target values and target errnos. */
844 abi_long do_brk(abi_ulong brk_val)
845 {
846     abi_long mapped_addr;
847     abi_ulong new_brk;
848     abi_ulong old_brk;
849 
850     /* brk pointers are always untagged */
851 
852     /* do not allow to shrink below initial brk value */
853     if (brk_val < initial_target_brk) {
854         return target_brk;
855     }
856 
857     new_brk = TARGET_PAGE_ALIGN(brk_val);
858     old_brk = TARGET_PAGE_ALIGN(target_brk);
859 
860     /* new and old target_brk might be on the same page */
861     if (new_brk == old_brk) {
862         target_brk = brk_val;
863         return target_brk;
864     }
865 
866     /* Release heap if necessary */
867     if (new_brk < old_brk) {
868         target_munmap(new_brk, old_brk - new_brk);
869 
870         target_brk = brk_val;
871         return target_brk;
872     }
873 
874     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
875                               PROT_READ | PROT_WRITE,
876                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
877                               -1, 0);
878 
879     if (mapped_addr == old_brk) {
880         target_brk = brk_val;
881         return target_brk;
882     }
883 
884 #if defined(TARGET_ALPHA)
885     /* We (partially) emulate OSF/1 on Alpha, which requires we
886        return a proper errno, not an unchanged brk value.  */
887     return -TARGET_ENOMEM;
888 #endif
889     /* For everything else, return the previous break. */
890     return target_brk;
891 }
892 
893 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
894     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
895 static inline abi_long copy_from_user_fdset(fd_set *fds,
896                                             abi_ulong target_fds_addr,
897                                             int n)
898 {
899     int i, nw, j, k;
900     abi_ulong b, *target_fds;
901 
902     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
903     if (!(target_fds = lock_user(VERIFY_READ,
904                                  target_fds_addr,
905                                  sizeof(abi_ulong) * nw,
906                                  1)))
907         return -TARGET_EFAULT;
908 
909     FD_ZERO(fds);
910     k = 0;
911     for (i = 0; i < nw; i++) {
912         /* grab the abi_ulong */
913         __get_user(b, &target_fds[i]);
914         for (j = 0; j < TARGET_ABI_BITS; j++) {
915             /* check the bit inside the abi_ulong */
916             if ((b >> j) & 1)
917                 FD_SET(k, fds);
918             k++;
919         }
920     }
921 
922     unlock_user(target_fds, target_fds_addr, 0);
923 
924     return 0;
925 }
926 
927 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
928                                                  abi_ulong target_fds_addr,
929                                                  int n)
930 {
931     if (target_fds_addr) {
932         if (copy_from_user_fdset(fds, target_fds_addr, n))
933             return -TARGET_EFAULT;
934         *fds_ptr = fds;
935     } else {
936         *fds_ptr = NULL;
937     }
938     return 0;
939 }
940 
941 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
942                                           const fd_set *fds,
943                                           int n)
944 {
945     int i, nw, j, k;
946     abi_long v;
947     abi_ulong *target_fds;
948 
949     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
950     if (!(target_fds = lock_user(VERIFY_WRITE,
951                                  target_fds_addr,
952                                  sizeof(abi_ulong) * nw,
953                                  0)))
954         return -TARGET_EFAULT;
955 
956     k = 0;
957     for (i = 0; i < nw; i++) {
958         v = 0;
959         for (j = 0; j < TARGET_ABI_BITS; j++) {
960             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
961             k++;
962         }
963         __put_user(v, &target_fds[i]);
964     }
965 
966     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
967 
968     return 0;
969 }
970 #endif
971 
972 #if defined(__alpha__)
973 #define HOST_HZ 1024
974 #else
975 #define HOST_HZ 100
976 #endif
977 
978 static inline abi_long host_to_target_clock_t(long ticks)
979 {
980 #if HOST_HZ == TARGET_HZ
981     return ticks;
982 #else
983     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
984 #endif
985 }
986 
987 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
988                                              const struct rusage *rusage)
989 {
990     struct target_rusage *target_rusage;
991 
992     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
993         return -TARGET_EFAULT;
994     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
995     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
996     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
997     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
998     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
999     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1000     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1001     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1002     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1003     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1004     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1005     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1006     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1007     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1008     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1009     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1010     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1011     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1012     unlock_user_struct(target_rusage, target_addr, 1);
1013 
1014     return 0;
1015 }
1016 
1017 #ifdef TARGET_NR_setrlimit
1018 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1019 {
1020     abi_ulong target_rlim_swap;
1021     rlim_t result;
1022 
1023     target_rlim_swap = tswapal(target_rlim);
1024     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1025         return RLIM_INFINITY;
1026 
1027     result = target_rlim_swap;
1028     if (target_rlim_swap != (rlim_t)result)
1029         return RLIM_INFINITY;
1030 
1031     return result;
1032 }
1033 #endif
1034 
1035 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1036 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1037 {
1038     abi_ulong target_rlim_swap;
1039     abi_ulong result;
1040 
1041     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1042         target_rlim_swap = TARGET_RLIM_INFINITY;
1043     else
1044         target_rlim_swap = rlim;
1045     result = tswapal(target_rlim_swap);
1046 
1047     return result;
1048 }
1049 #endif
1050 
1051 static inline int target_to_host_resource(int code)
1052 {
1053     switch (code) {
1054     case TARGET_RLIMIT_AS:
1055         return RLIMIT_AS;
1056     case TARGET_RLIMIT_CORE:
1057         return RLIMIT_CORE;
1058     case TARGET_RLIMIT_CPU:
1059         return RLIMIT_CPU;
1060     case TARGET_RLIMIT_DATA:
1061         return RLIMIT_DATA;
1062     case TARGET_RLIMIT_FSIZE:
1063         return RLIMIT_FSIZE;
1064     case TARGET_RLIMIT_LOCKS:
1065         return RLIMIT_LOCKS;
1066     case TARGET_RLIMIT_MEMLOCK:
1067         return RLIMIT_MEMLOCK;
1068     case TARGET_RLIMIT_MSGQUEUE:
1069         return RLIMIT_MSGQUEUE;
1070     case TARGET_RLIMIT_NICE:
1071         return RLIMIT_NICE;
1072     case TARGET_RLIMIT_NOFILE:
1073         return RLIMIT_NOFILE;
1074     case TARGET_RLIMIT_NPROC:
1075         return RLIMIT_NPROC;
1076     case TARGET_RLIMIT_RSS:
1077         return RLIMIT_RSS;
1078     case TARGET_RLIMIT_RTPRIO:
1079         return RLIMIT_RTPRIO;
1080 #ifdef RLIMIT_RTTIME
1081     case TARGET_RLIMIT_RTTIME:
1082         return RLIMIT_RTTIME;
1083 #endif
1084     case TARGET_RLIMIT_SIGPENDING:
1085         return RLIMIT_SIGPENDING;
1086     case TARGET_RLIMIT_STACK:
1087         return RLIMIT_STACK;
1088     default:
1089         return code;
1090     }
1091 }
1092 
1093 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1094                                               abi_ulong target_tv_addr)
1095 {
1096     struct target_timeval *target_tv;
1097 
1098     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1099         return -TARGET_EFAULT;
1100     }
1101 
1102     __get_user(tv->tv_sec, &target_tv->tv_sec);
1103     __get_user(tv->tv_usec, &target_tv->tv_usec);
1104 
1105     unlock_user_struct(target_tv, target_tv_addr, 0);
1106 
1107     return 0;
1108 }
1109 
1110 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1111                                             const struct timeval *tv)
1112 {
1113     struct target_timeval *target_tv;
1114 
1115     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1116         return -TARGET_EFAULT;
1117     }
1118 
1119     __put_user(tv->tv_sec, &target_tv->tv_sec);
1120     __put_user(tv->tv_usec, &target_tv->tv_usec);
1121 
1122     unlock_user_struct(target_tv, target_tv_addr, 1);
1123 
1124     return 0;
1125 }
1126 
1127 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1128 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1129                                                 abi_ulong target_tv_addr)
1130 {
1131     struct target__kernel_sock_timeval *target_tv;
1132 
1133     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1134         return -TARGET_EFAULT;
1135     }
1136 
1137     __get_user(tv->tv_sec, &target_tv->tv_sec);
1138     __get_user(tv->tv_usec, &target_tv->tv_usec);
1139 
1140     unlock_user_struct(target_tv, target_tv_addr, 0);
1141 
1142     return 0;
1143 }
1144 #endif
1145 
1146 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1147                                               const struct timeval *tv)
1148 {
1149     struct target__kernel_sock_timeval *target_tv;
1150 
1151     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1152         return -TARGET_EFAULT;
1153     }
1154 
1155     __put_user(tv->tv_sec, &target_tv->tv_sec);
1156     __put_user(tv->tv_usec, &target_tv->tv_usec);
1157 
1158     unlock_user_struct(target_tv, target_tv_addr, 1);
1159 
1160     return 0;
1161 }
1162 
1163 #if defined(TARGET_NR_futex) || \
1164     defined(TARGET_NR_rt_sigtimedwait) || \
1165     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1166     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1167     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1168     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1169     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1170     defined(TARGET_NR_timer_settime) || \
1171     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1172 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1173                                                abi_ulong target_addr)
1174 {
1175     struct target_timespec *target_ts;
1176 
1177     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1178         return -TARGET_EFAULT;
1179     }
1180     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1181     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1182     unlock_user_struct(target_ts, target_addr, 0);
1183     return 0;
1184 }
1185 #endif
1186 
1187 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1188     defined(TARGET_NR_timer_settime64) || \
1189     defined(TARGET_NR_mq_timedsend_time64) || \
1190     defined(TARGET_NR_mq_timedreceive_time64) || \
1191     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1192     defined(TARGET_NR_clock_nanosleep_time64) || \
1193     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1194     defined(TARGET_NR_utimensat) || \
1195     defined(TARGET_NR_utimensat_time64) || \
1196     defined(TARGET_NR_semtimedop_time64) || \
1197     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1198 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1199                                                  abi_ulong target_addr)
1200 {
1201     struct target__kernel_timespec *target_ts;
1202 
1203     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1204         return -TARGET_EFAULT;
1205     }
1206     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1207     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1208     /* in 32bit mode, this drops the padding */
1209     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1210     unlock_user_struct(target_ts, target_addr, 0);
1211     return 0;
1212 }
1213 #endif
1214 
1215 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1216                                                struct timespec *host_ts)
1217 {
1218     struct target_timespec *target_ts;
1219 
1220     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1221         return -TARGET_EFAULT;
1222     }
1223     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1224     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1225     unlock_user_struct(target_ts, target_addr, 1);
1226     return 0;
1227 }
1228 
1229 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1230                                                  struct timespec *host_ts)
1231 {
1232     struct target__kernel_timespec *target_ts;
1233 
1234     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1235         return -TARGET_EFAULT;
1236     }
1237     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1238     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1239     unlock_user_struct(target_ts, target_addr, 1);
1240     return 0;
1241 }
1242 
1243 #if defined(TARGET_NR_gettimeofday)
1244 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1245                                              struct timezone *tz)
1246 {
1247     struct target_timezone *target_tz;
1248 
1249     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1250         return -TARGET_EFAULT;
1251     }
1252 
1253     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1254     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1255 
1256     unlock_user_struct(target_tz, target_tz_addr, 1);
1257 
1258     return 0;
1259 }
1260 #endif
1261 
1262 #if defined(TARGET_NR_settimeofday)
1263 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1264                                                abi_ulong target_tz_addr)
1265 {
1266     struct target_timezone *target_tz;
1267 
1268     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1269         return -TARGET_EFAULT;
1270     }
1271 
1272     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1273     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1274 
1275     unlock_user_struct(target_tz, target_tz_addr, 0);
1276 
1277     return 0;
1278 }
1279 #endif
1280 
1281 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1282 #include <mqueue.h>
1283 
1284 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1285                                               abi_ulong target_mq_attr_addr)
1286 {
1287     struct target_mq_attr *target_mq_attr;
1288 
1289     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1290                           target_mq_attr_addr, 1))
1291         return -TARGET_EFAULT;
1292 
1293     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1294     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1295     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1296     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1297 
1298     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1299 
1300     return 0;
1301 }
1302 
1303 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1304                                             const struct mq_attr *attr)
1305 {
1306     struct target_mq_attr *target_mq_attr;
1307 
1308     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1309                           target_mq_attr_addr, 0))
1310         return -TARGET_EFAULT;
1311 
1312     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1313     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1314     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1315     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1316 
1317     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1318 
1319     return 0;
1320 }
1321 #endif
1322 
1323 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1324 /* do_select() must return target values and target errnos. */
1325 static abi_long do_select(int n,
1326                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1327                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1328 {
1329     fd_set rfds, wfds, efds;
1330     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1331     struct timeval tv;
1332     struct timespec ts, *ts_ptr;
1333     abi_long ret;
1334 
1335     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1336     if (ret) {
1337         return ret;
1338     }
1339     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1340     if (ret) {
1341         return ret;
1342     }
1343     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1344     if (ret) {
1345         return ret;
1346     }
1347 
1348     if (target_tv_addr) {
1349         if (copy_from_user_timeval(&tv, target_tv_addr))
1350             return -TARGET_EFAULT;
1351         ts.tv_sec = tv.tv_sec;
1352         ts.tv_nsec = tv.tv_usec * 1000;
1353         ts_ptr = &ts;
1354     } else {
1355         ts_ptr = NULL;
1356     }
1357 
1358     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1359                                   ts_ptr, NULL));
1360 
1361     if (!is_error(ret)) {
1362         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1363             return -TARGET_EFAULT;
1364         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1365             return -TARGET_EFAULT;
1366         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1367             return -TARGET_EFAULT;
1368 
1369         if (target_tv_addr) {
1370             tv.tv_sec = ts.tv_sec;
1371             tv.tv_usec = ts.tv_nsec / 1000;
1372             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1373                 return -TARGET_EFAULT;
1374             }
1375         }
1376     }
1377 
1378     return ret;
1379 }
1380 
1381 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1382 static abi_long do_old_select(abi_ulong arg1)
1383 {
1384     struct target_sel_arg_struct *sel;
1385     abi_ulong inp, outp, exp, tvp;
1386     long nsel;
1387 
1388     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1389         return -TARGET_EFAULT;
1390     }
1391 
1392     nsel = tswapal(sel->n);
1393     inp = tswapal(sel->inp);
1394     outp = tswapal(sel->outp);
1395     exp = tswapal(sel->exp);
1396     tvp = tswapal(sel->tvp);
1397 
1398     unlock_user_struct(sel, arg1, 0);
1399 
1400     return do_select(nsel, inp, outp, exp, tvp);
1401 }
1402 #endif
1403 #endif
1404 
1405 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1406 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1407                             abi_long arg4, abi_long arg5, abi_long arg6,
1408                             bool time64)
1409 {
1410     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1411     fd_set rfds, wfds, efds;
1412     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1413     struct timespec ts, *ts_ptr;
1414     abi_long ret;
1415 
1416     /*
1417      * The 6th arg is actually two args smashed together,
1418      * so we cannot use the C library.
1419      */
1420     struct {
1421         sigset_t *set;
1422         size_t size;
1423     } sig, *sig_ptr;
1424 
1425     abi_ulong arg_sigset, arg_sigsize, *arg7;
1426 
1427     n = arg1;
1428     rfd_addr = arg2;
1429     wfd_addr = arg3;
1430     efd_addr = arg4;
1431     ts_addr = arg5;
1432 
1433     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1434     if (ret) {
1435         return ret;
1436     }
1437     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1438     if (ret) {
1439         return ret;
1440     }
1441     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445 
1446     /*
1447      * This takes a timespec, and not a timeval, so we cannot
1448      * use the do_select() helper ...
1449      */
1450     if (ts_addr) {
1451         if (time64) {
1452             if (target_to_host_timespec64(&ts, ts_addr)) {
1453                 return -TARGET_EFAULT;
1454             }
1455         } else {
1456             if (target_to_host_timespec(&ts, ts_addr)) {
1457                 return -TARGET_EFAULT;
1458             }
1459         }
1460             ts_ptr = &ts;
1461     } else {
1462         ts_ptr = NULL;
1463     }
1464 
1465     /* Extract the two packed args for the sigset */
1466     sig_ptr = NULL;
1467     if (arg6) {
1468         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1469         if (!arg7) {
1470             return -TARGET_EFAULT;
1471         }
1472         arg_sigset = tswapal(arg7[0]);
1473         arg_sigsize = tswapal(arg7[1]);
1474         unlock_user(arg7, arg6, 0);
1475 
1476         if (arg_sigset) {
1477             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1478             if (ret != 0) {
1479                 return ret;
1480             }
1481             sig_ptr = &sig;
1482             sig.size = SIGSET_T_SIZE;
1483         }
1484     }
1485 
1486     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1487                                   ts_ptr, sig_ptr));
1488 
1489     if (sig_ptr) {
1490         finish_sigsuspend_mask(ret);
1491     }
1492 
1493     if (!is_error(ret)) {
1494         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1495             return -TARGET_EFAULT;
1496         }
1497         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1501             return -TARGET_EFAULT;
1502         }
1503         if (time64) {
1504             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1505                 return -TARGET_EFAULT;
1506             }
1507         } else {
1508             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1509                 return -TARGET_EFAULT;
1510             }
1511         }
1512     }
1513     return ret;
1514 }
1515 #endif
1516 
1517 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1518     defined(TARGET_NR_ppoll_time64)
1519 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1520                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1521 {
1522     struct target_pollfd *target_pfd;
1523     unsigned int nfds = arg2;
1524     struct pollfd *pfd;
1525     unsigned int i;
1526     abi_long ret;
1527 
1528     pfd = NULL;
1529     target_pfd = NULL;
1530     if (nfds) {
1531         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1532             return -TARGET_EINVAL;
1533         }
1534         target_pfd = lock_user(VERIFY_WRITE, arg1,
1535                                sizeof(struct target_pollfd) * nfds, 1);
1536         if (!target_pfd) {
1537             return -TARGET_EFAULT;
1538         }
1539 
1540         pfd = alloca(sizeof(struct pollfd) * nfds);
1541         for (i = 0; i < nfds; i++) {
1542             pfd[i].fd = tswap32(target_pfd[i].fd);
1543             pfd[i].events = tswap16(target_pfd[i].events);
1544         }
1545     }
1546     if (ppoll) {
1547         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1548         sigset_t *set = NULL;
1549 
1550         if (arg3) {
1551             if (time64) {
1552                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1553                     unlock_user(target_pfd, arg1, 0);
1554                     return -TARGET_EFAULT;
1555                 }
1556             } else {
1557                 if (target_to_host_timespec(timeout_ts, arg3)) {
1558                     unlock_user(target_pfd, arg1, 0);
1559                     return -TARGET_EFAULT;
1560                 }
1561             }
1562         } else {
1563             timeout_ts = NULL;
1564         }
1565 
1566         if (arg4) {
1567             ret = process_sigsuspend_mask(&set, arg4, arg5);
1568             if (ret != 0) {
1569                 unlock_user(target_pfd, arg1, 0);
1570                 return ret;
1571             }
1572         }
1573 
1574         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1575                                    set, SIGSET_T_SIZE));
1576 
1577         if (set) {
1578             finish_sigsuspend_mask(ret);
1579         }
1580         if (!is_error(ret) && arg3) {
1581             if (time64) {
1582                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1583                     return -TARGET_EFAULT;
1584                 }
1585             } else {
1586                 if (host_to_target_timespec(arg3, timeout_ts)) {
1587                     return -TARGET_EFAULT;
1588                 }
1589             }
1590         }
1591     } else {
1592           struct timespec ts, *pts;
1593 
1594           if (arg3 >= 0) {
1595               /* Convert ms to secs, ns */
1596               ts.tv_sec = arg3 / 1000;
1597               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1598               pts = &ts;
1599           } else {
1600               /* -ve poll() timeout means "infinite" */
1601               pts = NULL;
1602           }
1603           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1604     }
1605 
1606     if (!is_error(ret)) {
1607         for (i = 0; i < nfds; i++) {
1608             target_pfd[i].revents = tswap16(pfd[i].revents);
1609         }
1610     }
1611     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1612     return ret;
1613 }
1614 #endif
1615 
1616 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1617                         int flags, int is_pipe2)
1618 {
1619     int host_pipe[2];
1620     abi_long ret;
1621     ret = pipe2(host_pipe, flags);
1622 
1623     if (is_error(ret))
1624         return get_errno(ret);
1625 
1626     /* Several targets have special calling conventions for the original
1627        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1628     if (!is_pipe2) {
1629 #if defined(TARGET_ALPHA)
1630         cpu_env->ir[IR_A4] = host_pipe[1];
1631         return host_pipe[0];
1632 #elif defined(TARGET_MIPS)
1633         cpu_env->active_tc.gpr[3] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_SH4)
1636         cpu_env->gregs[1] = host_pipe[1];
1637         return host_pipe[0];
1638 #elif defined(TARGET_SPARC)
1639         cpu_env->regwptr[1] = host_pipe[1];
1640         return host_pipe[0];
1641 #endif
1642     }
1643 
1644     if (put_user_s32(host_pipe[0], pipedes)
1645         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1646         return -TARGET_EFAULT;
1647     return get_errno(ret);
1648 }
1649 
1650 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1651                                                abi_ulong target_addr,
1652                                                socklen_t len)
1653 {
1654     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1655     sa_family_t sa_family;
1656     struct target_sockaddr *target_saddr;
1657 
1658     if (fd_trans_target_to_host_addr(fd)) {
1659         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1660     }
1661 
1662     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1663     if (!target_saddr)
1664         return -TARGET_EFAULT;
1665 
1666     sa_family = tswap16(target_saddr->sa_family);
1667 
1668     /* Oops. The caller might send a incomplete sun_path; sun_path
1669      * must be terminated by \0 (see the manual page), but
1670      * unfortunately it is quite common to specify sockaddr_un
1671      * length as "strlen(x->sun_path)" while it should be
1672      * "strlen(...) + 1". We'll fix that here if needed.
1673      * Linux kernel has a similar feature.
1674      */
1675 
1676     if (sa_family == AF_UNIX) {
1677         if (len < unix_maxlen && len > 0) {
1678             char *cp = (char*)target_saddr;
1679 
1680             if ( cp[len-1] && !cp[len] )
1681                 len++;
1682         }
1683         if (len > unix_maxlen)
1684             len = unix_maxlen;
1685     }
1686 
1687     memcpy(addr, target_saddr, len);
1688     addr->sa_family = sa_family;
1689     if (sa_family == AF_NETLINK) {
1690         struct sockaddr_nl *nladdr;
1691 
1692         nladdr = (struct sockaddr_nl *)addr;
1693         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1694         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1695     } else if (sa_family == AF_PACKET) {
1696 	struct target_sockaddr_ll *lladdr;
1697 
1698 	lladdr = (struct target_sockaddr_ll *)addr;
1699 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1700 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1701     } else if (sa_family == AF_INET6) {
1702         struct sockaddr_in6 *in6addr;
1703 
1704         in6addr = (struct sockaddr_in6 *)addr;
1705         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1706     }
1707     unlock_user(target_saddr, target_addr, 0);
1708 
1709     return 0;
1710 }
1711 
1712 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1713                                                struct sockaddr *addr,
1714                                                socklen_t len)
1715 {
1716     struct target_sockaddr *target_saddr;
1717 
1718     if (len == 0) {
1719         return 0;
1720     }
1721     assert(addr);
1722 
1723     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1724     if (!target_saddr)
1725         return -TARGET_EFAULT;
1726     memcpy(target_saddr, addr, len);
1727     if (len >= offsetof(struct target_sockaddr, sa_family) +
1728         sizeof(target_saddr->sa_family)) {
1729         target_saddr->sa_family = tswap16(addr->sa_family);
1730     }
1731     if (addr->sa_family == AF_NETLINK &&
1732         len >= sizeof(struct target_sockaddr_nl)) {
1733         struct target_sockaddr_nl *target_nl =
1734                (struct target_sockaddr_nl *)target_saddr;
1735         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1736         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1737     } else if (addr->sa_family == AF_PACKET) {
1738         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1739         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1740         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1741     } else if (addr->sa_family == AF_INET6 &&
1742                len >= sizeof(struct target_sockaddr_in6)) {
1743         struct target_sockaddr_in6 *target_in6 =
1744                (struct target_sockaddr_in6 *)target_saddr;
1745         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1746     }
1747     unlock_user(target_saddr, target_addr, len);
1748 
1749     return 0;
1750 }
1751 
1752 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1753                                            struct target_msghdr *target_msgh)
1754 {
1755     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1756     abi_long msg_controllen;
1757     abi_ulong target_cmsg_addr;
1758     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1759     socklen_t space = 0;
1760 
1761     msg_controllen = tswapal(target_msgh->msg_controllen);
1762     if (msg_controllen < sizeof (struct target_cmsghdr))
1763         goto the_end;
1764     target_cmsg_addr = tswapal(target_msgh->msg_control);
1765     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1766     target_cmsg_start = target_cmsg;
1767     if (!target_cmsg)
1768         return -TARGET_EFAULT;
1769 
1770     while (cmsg && target_cmsg) {
1771         void *data = CMSG_DATA(cmsg);
1772         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1773 
1774         int len = tswapal(target_cmsg->cmsg_len)
1775             - sizeof(struct target_cmsghdr);
1776 
1777         space += CMSG_SPACE(len);
1778         if (space > msgh->msg_controllen) {
1779             space -= CMSG_SPACE(len);
1780             /* This is a QEMU bug, since we allocated the payload
1781              * area ourselves (unlike overflow in host-to-target
1782              * conversion, which is just the guest giving us a buffer
1783              * that's too small). It can't happen for the payload types
1784              * we currently support; if it becomes an issue in future
1785              * we would need to improve our allocation strategy to
1786              * something more intelligent than "twice the size of the
1787              * target buffer we're reading from".
1788              */
1789             qemu_log_mask(LOG_UNIMP,
1790                           ("Unsupported ancillary data %d/%d: "
1791                            "unhandled msg size\n"),
1792                           tswap32(target_cmsg->cmsg_level),
1793                           tswap32(target_cmsg->cmsg_type));
1794             break;
1795         }
1796 
1797         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1798             cmsg->cmsg_level = SOL_SOCKET;
1799         } else {
1800             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1801         }
1802         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1803         cmsg->cmsg_len = CMSG_LEN(len);
1804 
1805         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1806             int *fd = (int *)data;
1807             int *target_fd = (int *)target_data;
1808             int i, numfds = len / sizeof(int);
1809 
1810             for (i = 0; i < numfds; i++) {
1811                 __get_user(fd[i], target_fd + i);
1812             }
1813         } else if (cmsg->cmsg_level == SOL_SOCKET
1814                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1815             struct ucred *cred = (struct ucred *)data;
1816             struct target_ucred *target_cred =
1817                 (struct target_ucred *)target_data;
1818 
1819             __get_user(cred->pid, &target_cred->pid);
1820             __get_user(cred->uid, &target_cred->uid);
1821             __get_user(cred->gid, &target_cred->gid);
1822         } else if (cmsg->cmsg_level == SOL_ALG) {
1823             uint32_t *dst = (uint32_t *)data;
1824 
1825             memcpy(dst, target_data, len);
1826             /* fix endianness of first 32-bit word */
1827             if (len >= sizeof(uint32_t)) {
1828                 *dst = tswap32(*dst);
1829             }
1830         } else {
1831             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1832                           cmsg->cmsg_level, cmsg->cmsg_type);
1833             memcpy(data, target_data, len);
1834         }
1835 
1836         cmsg = CMSG_NXTHDR(msgh, cmsg);
1837         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1838                                          target_cmsg_start);
1839     }
1840     unlock_user(target_cmsg, target_cmsg_addr, 0);
1841  the_end:
1842     msgh->msg_controllen = space;
1843     return 0;
1844 }
1845 
1846 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1847                                            struct msghdr *msgh)
1848 {
1849     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1850     abi_long msg_controllen;
1851     abi_ulong target_cmsg_addr;
1852     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1853     socklen_t space = 0;
1854 
1855     msg_controllen = tswapal(target_msgh->msg_controllen);
1856     if (msg_controllen < sizeof (struct target_cmsghdr))
1857         goto the_end;
1858     target_cmsg_addr = tswapal(target_msgh->msg_control);
1859     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1860     target_cmsg_start = target_cmsg;
1861     if (!target_cmsg)
1862         return -TARGET_EFAULT;
1863 
1864     while (cmsg && target_cmsg) {
1865         void *data = CMSG_DATA(cmsg);
1866         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1867 
1868         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1869         int tgt_len, tgt_space;
1870 
1871         /* We never copy a half-header but may copy half-data;
1872          * this is Linux's behaviour in put_cmsg(). Note that
1873          * truncation here is a guest problem (which we report
1874          * to the guest via the CTRUNC bit), unlike truncation
1875          * in target_to_host_cmsg, which is a QEMU bug.
1876          */
1877         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1878             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1879             break;
1880         }
1881 
1882         if (cmsg->cmsg_level == SOL_SOCKET) {
1883             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1884         } else {
1885             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1886         }
1887         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1888 
1889         /* Payload types which need a different size of payload on
1890          * the target must adjust tgt_len here.
1891          */
1892         tgt_len = len;
1893         switch (cmsg->cmsg_level) {
1894         case SOL_SOCKET:
1895             switch (cmsg->cmsg_type) {
1896             case SO_TIMESTAMP:
1897                 tgt_len = sizeof(struct target_timeval);
1898                 break;
1899             default:
1900                 break;
1901             }
1902             break;
1903         default:
1904             break;
1905         }
1906 
1907         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1908             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1909             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1910         }
1911 
1912         /* We must now copy-and-convert len bytes of payload
1913          * into tgt_len bytes of destination space. Bear in mind
1914          * that in both source and destination we may be dealing
1915          * with a truncated value!
1916          */
1917         switch (cmsg->cmsg_level) {
1918         case SOL_SOCKET:
1919             switch (cmsg->cmsg_type) {
1920             case SCM_RIGHTS:
1921             {
1922                 int *fd = (int *)data;
1923                 int *target_fd = (int *)target_data;
1924                 int i, numfds = tgt_len / sizeof(int);
1925 
1926                 for (i = 0; i < numfds; i++) {
1927                     __put_user(fd[i], target_fd + i);
1928                 }
1929                 break;
1930             }
1931             case SO_TIMESTAMP:
1932             {
1933                 struct timeval *tv = (struct timeval *)data;
1934                 struct target_timeval *target_tv =
1935                     (struct target_timeval *)target_data;
1936 
1937                 if (len != sizeof(struct timeval) ||
1938                     tgt_len != sizeof(struct target_timeval)) {
1939                     goto unimplemented;
1940                 }
1941 
1942                 /* copy struct timeval to target */
1943                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1944                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1945                 break;
1946             }
1947             case SCM_CREDENTIALS:
1948             {
1949                 struct ucred *cred = (struct ucred *)data;
1950                 struct target_ucred *target_cred =
1951                     (struct target_ucred *)target_data;
1952 
1953                 __put_user(cred->pid, &target_cred->pid);
1954                 __put_user(cred->uid, &target_cred->uid);
1955                 __put_user(cred->gid, &target_cred->gid);
1956                 break;
1957             }
1958             default:
1959                 goto unimplemented;
1960             }
1961             break;
1962 
1963         case SOL_IP:
1964             switch (cmsg->cmsg_type) {
1965             case IP_TTL:
1966             {
1967                 uint32_t *v = (uint32_t *)data;
1968                 uint32_t *t_int = (uint32_t *)target_data;
1969 
1970                 if (len != sizeof(uint32_t) ||
1971                     tgt_len != sizeof(uint32_t)) {
1972                     goto unimplemented;
1973                 }
1974                 __put_user(*v, t_int);
1975                 break;
1976             }
1977             case IP_RECVERR:
1978             {
1979                 struct errhdr_t {
1980                    struct sock_extended_err ee;
1981                    struct sockaddr_in offender;
1982                 };
1983                 struct errhdr_t *errh = (struct errhdr_t *)data;
1984                 struct errhdr_t *target_errh =
1985                     (struct errhdr_t *)target_data;
1986 
1987                 if (len != sizeof(struct errhdr_t) ||
1988                     tgt_len != sizeof(struct errhdr_t)) {
1989                     goto unimplemented;
1990                 }
1991                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1992                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1993                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1994                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1995                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1996                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1997                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1998                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1999                     (void *) &errh->offender, sizeof(errh->offender));
2000                 break;
2001             }
2002             case IP_PKTINFO:
2003             {
2004                 struct in_pktinfo *pkti = data;
2005                 struct target_in_pktinfo *target_pi = target_data;
2006 
2007                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2008                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2009                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2010                 break;
2011             }
2012             default:
2013                 goto unimplemented;
2014             }
2015             break;
2016 
2017         case SOL_IPV6:
2018             switch (cmsg->cmsg_type) {
2019             case IPV6_HOPLIMIT:
2020             {
2021                 uint32_t *v = (uint32_t *)data;
2022                 uint32_t *t_int = (uint32_t *)target_data;
2023 
2024                 if (len != sizeof(uint32_t) ||
2025                     tgt_len != sizeof(uint32_t)) {
2026                     goto unimplemented;
2027                 }
2028                 __put_user(*v, t_int);
2029                 break;
2030             }
2031             case IPV6_RECVERR:
2032             {
2033                 struct errhdr6_t {
2034                    struct sock_extended_err ee;
2035                    struct sockaddr_in6 offender;
2036                 };
2037                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2038                 struct errhdr6_t *target_errh =
2039                     (struct errhdr6_t *)target_data;
2040 
2041                 if (len != sizeof(struct errhdr6_t) ||
2042                     tgt_len != sizeof(struct errhdr6_t)) {
2043                     goto unimplemented;
2044                 }
2045                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2046                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2047                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2048                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2049                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2050                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2051                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2052                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2053                     (void *) &errh->offender, sizeof(errh->offender));
2054                 break;
2055             }
2056             default:
2057                 goto unimplemented;
2058             }
2059             break;
2060 
2061         default:
2062         unimplemented:
2063             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2064                           cmsg->cmsg_level, cmsg->cmsg_type);
2065             memcpy(target_data, data, MIN(len, tgt_len));
2066             if (tgt_len > len) {
2067                 memset(target_data + len, 0, tgt_len - len);
2068             }
2069         }
2070 
2071         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2072         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2073         if (msg_controllen < tgt_space) {
2074             tgt_space = msg_controllen;
2075         }
2076         msg_controllen -= tgt_space;
2077         space += tgt_space;
2078         cmsg = CMSG_NXTHDR(msgh, cmsg);
2079         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2080                                          target_cmsg_start);
2081     }
2082     unlock_user(target_cmsg, target_cmsg_addr, space);
2083  the_end:
2084     target_msgh->msg_controllen = tswapal(space);
2085     return 0;
2086 }
2087 
2088 /* do_setsockopt() Must return target values and target errnos. */
2089 static abi_long do_setsockopt(int sockfd, int level, int optname,
2090                               abi_ulong optval_addr, socklen_t optlen)
2091 {
2092     abi_long ret;
2093     int val;
2094 
2095     switch(level) {
2096     case SOL_TCP:
2097     case SOL_UDP:
2098         /* TCP and UDP options all take an 'int' value.  */
2099         if (optlen < sizeof(uint32_t))
2100             return -TARGET_EINVAL;
2101 
2102         if (get_user_u32(val, optval_addr))
2103             return -TARGET_EFAULT;
2104         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2105         break;
2106     case SOL_IP:
2107         switch(optname) {
2108         case IP_TOS:
2109         case IP_TTL:
2110         case IP_HDRINCL:
2111         case IP_ROUTER_ALERT:
2112         case IP_RECVOPTS:
2113         case IP_RETOPTS:
2114         case IP_PKTINFO:
2115         case IP_MTU_DISCOVER:
2116         case IP_RECVERR:
2117         case IP_RECVTTL:
2118         case IP_RECVTOS:
2119 #ifdef IP_FREEBIND
2120         case IP_FREEBIND:
2121 #endif
2122         case IP_MULTICAST_TTL:
2123         case IP_MULTICAST_LOOP:
2124             val = 0;
2125             if (optlen >= sizeof(uint32_t)) {
2126                 if (get_user_u32(val, optval_addr))
2127                     return -TARGET_EFAULT;
2128             } else if (optlen >= 1) {
2129                 if (get_user_u8(val, optval_addr))
2130                     return -TARGET_EFAULT;
2131             }
2132             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2133             break;
2134         case IP_MULTICAST_IF:
2135         case IP_ADD_MEMBERSHIP:
2136         case IP_DROP_MEMBERSHIP:
2137         {
2138             struct ip_mreqn ip_mreq;
2139             struct target_ip_mreqn *target_smreqn;
2140             int min_size;
2141 
2142             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2143                               sizeof(struct target_ip_mreq));
2144 
2145             if (optname == IP_MULTICAST_IF) {
2146                 min_size = sizeof(struct in_addr);
2147             } else {
2148                 min_size = sizeof(struct target_ip_mreq);
2149             }
2150             if (optlen < min_size ||
2151                 optlen > sizeof (struct target_ip_mreqn)) {
2152                 return -TARGET_EINVAL;
2153             }
2154 
2155             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2156             if (!target_smreqn) {
2157                 return -TARGET_EFAULT;
2158             }
2159             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2160             if (optlen >= sizeof(struct target_ip_mreq)) {
2161                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2162                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2163                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2164                     optlen = sizeof(struct ip_mreqn);
2165                 }
2166             }
2167             unlock_user(target_smreqn, optval_addr, 0);
2168             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2169             break;
2170         }
2171         case IP_BLOCK_SOURCE:
2172         case IP_UNBLOCK_SOURCE:
2173         case IP_ADD_SOURCE_MEMBERSHIP:
2174         case IP_DROP_SOURCE_MEMBERSHIP:
2175         {
2176             struct ip_mreq_source *ip_mreq_source;
2177 
2178             if (optlen != sizeof (struct target_ip_mreq_source))
2179                 return -TARGET_EINVAL;
2180 
2181             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2182             if (!ip_mreq_source) {
2183                 return -TARGET_EFAULT;
2184             }
2185             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2186             unlock_user (ip_mreq_source, optval_addr, 0);
2187             break;
2188         }
2189         default:
2190             goto unimplemented;
2191         }
2192         break;
2193     case SOL_IPV6:
2194         switch (optname) {
2195         case IPV6_MTU_DISCOVER:
2196         case IPV6_MTU:
2197         case IPV6_V6ONLY:
2198         case IPV6_RECVPKTINFO:
2199         case IPV6_UNICAST_HOPS:
2200         case IPV6_MULTICAST_HOPS:
2201         case IPV6_MULTICAST_LOOP:
2202         case IPV6_RECVERR:
2203         case IPV6_RECVHOPLIMIT:
2204         case IPV6_2292HOPLIMIT:
2205         case IPV6_CHECKSUM:
2206         case IPV6_ADDRFORM:
2207         case IPV6_2292PKTINFO:
2208         case IPV6_RECVTCLASS:
2209         case IPV6_RECVRTHDR:
2210         case IPV6_2292RTHDR:
2211         case IPV6_RECVHOPOPTS:
2212         case IPV6_2292HOPOPTS:
2213         case IPV6_RECVDSTOPTS:
2214         case IPV6_2292DSTOPTS:
2215         case IPV6_TCLASS:
2216         case IPV6_ADDR_PREFERENCES:
2217 #ifdef IPV6_RECVPATHMTU
2218         case IPV6_RECVPATHMTU:
2219 #endif
2220 #ifdef IPV6_TRANSPARENT
2221         case IPV6_TRANSPARENT:
2222 #endif
2223 #ifdef IPV6_FREEBIND
2224         case IPV6_FREEBIND:
2225 #endif
2226 #ifdef IPV6_RECVORIGDSTADDR
2227         case IPV6_RECVORIGDSTADDR:
2228 #endif
2229             val = 0;
2230             if (optlen < sizeof(uint32_t)) {
2231                 return -TARGET_EINVAL;
2232             }
2233             if (get_user_u32(val, optval_addr)) {
2234                 return -TARGET_EFAULT;
2235             }
2236             ret = get_errno(setsockopt(sockfd, level, optname,
2237                                        &val, sizeof(val)));
2238             break;
2239         case IPV6_PKTINFO:
2240         {
2241             struct in6_pktinfo pki;
2242 
2243             if (optlen < sizeof(pki)) {
2244                 return -TARGET_EINVAL;
2245             }
2246 
2247             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2248                 return -TARGET_EFAULT;
2249             }
2250 
2251             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2252 
2253             ret = get_errno(setsockopt(sockfd, level, optname,
2254                                        &pki, sizeof(pki)));
2255             break;
2256         }
2257         case IPV6_ADD_MEMBERSHIP:
2258         case IPV6_DROP_MEMBERSHIP:
2259         {
2260             struct ipv6_mreq ipv6mreq;
2261 
2262             if (optlen < sizeof(ipv6mreq)) {
2263                 return -TARGET_EINVAL;
2264             }
2265 
2266             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2267                 return -TARGET_EFAULT;
2268             }
2269 
2270             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2271 
2272             ret = get_errno(setsockopt(sockfd, level, optname,
2273                                        &ipv6mreq, sizeof(ipv6mreq)));
2274             break;
2275         }
2276         default:
2277             goto unimplemented;
2278         }
2279         break;
2280     case SOL_ICMPV6:
2281         switch (optname) {
2282         case ICMPV6_FILTER:
2283         {
2284             struct icmp6_filter icmp6f;
2285 
2286             if (optlen > sizeof(icmp6f)) {
2287                 optlen = sizeof(icmp6f);
2288             }
2289 
2290             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2291                 return -TARGET_EFAULT;
2292             }
2293 
2294             for (val = 0; val < 8; val++) {
2295                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2296             }
2297 
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        &icmp6f, optlen));
2300             break;
2301         }
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306     case SOL_RAW:
2307         switch (optname) {
2308         case ICMP_FILTER:
2309         case IPV6_CHECKSUM:
2310             /* those take an u32 value */
2311             if (optlen < sizeof(uint32_t)) {
2312                 return -TARGET_EINVAL;
2313             }
2314 
2315             if (get_user_u32(val, optval_addr)) {
2316                 return -TARGET_EFAULT;
2317             }
2318             ret = get_errno(setsockopt(sockfd, level, optname,
2319                                        &val, sizeof(val)));
2320             break;
2321 
2322         default:
2323             goto unimplemented;
2324         }
2325         break;
2326 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2327     case SOL_ALG:
2328         switch (optname) {
2329         case ALG_SET_KEY:
2330         {
2331             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2332             if (!alg_key) {
2333                 return -TARGET_EFAULT;
2334             }
2335             ret = get_errno(setsockopt(sockfd, level, optname,
2336                                        alg_key, optlen));
2337             unlock_user(alg_key, optval_addr, optlen);
2338             break;
2339         }
2340         case ALG_SET_AEAD_AUTHSIZE:
2341         {
2342             ret = get_errno(setsockopt(sockfd, level, optname,
2343                                        NULL, optlen));
2344             break;
2345         }
2346         default:
2347             goto unimplemented;
2348         }
2349         break;
2350 #endif
2351     case TARGET_SOL_SOCKET:
2352         switch (optname) {
2353         case TARGET_SO_RCVTIMEO:
2354         case TARGET_SO_SNDTIMEO:
2355         {
2356                 struct timeval tv;
2357 
2358                 if (optlen != sizeof(struct target_timeval)) {
2359                     return -TARGET_EINVAL;
2360                 }
2361 
2362                 if (copy_from_user_timeval(&tv, optval_addr)) {
2363                     return -TARGET_EFAULT;
2364                 }
2365 
2366                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2367                                 optname == TARGET_SO_RCVTIMEO ?
2368                                     SO_RCVTIMEO : SO_SNDTIMEO,
2369                                 &tv, sizeof(tv)));
2370                 return ret;
2371         }
2372         case TARGET_SO_ATTACH_FILTER:
2373         {
2374                 struct target_sock_fprog *tfprog;
2375                 struct target_sock_filter *tfilter;
2376                 struct sock_fprog fprog;
2377                 struct sock_filter *filter;
2378                 int i;
2379 
2380                 if (optlen != sizeof(*tfprog)) {
2381                     return -TARGET_EINVAL;
2382                 }
2383                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2384                     return -TARGET_EFAULT;
2385                 }
2386                 if (!lock_user_struct(VERIFY_READ, tfilter,
2387                                       tswapal(tfprog->filter), 0)) {
2388                     unlock_user_struct(tfprog, optval_addr, 1);
2389                     return -TARGET_EFAULT;
2390                 }
2391 
2392                 fprog.len = tswap16(tfprog->len);
2393                 filter = g_try_new(struct sock_filter, fprog.len);
2394                 if (filter == NULL) {
2395                     unlock_user_struct(tfilter, tfprog->filter, 1);
2396                     unlock_user_struct(tfprog, optval_addr, 1);
2397                     return -TARGET_ENOMEM;
2398                 }
2399                 for (i = 0; i < fprog.len; i++) {
2400                     filter[i].code = tswap16(tfilter[i].code);
2401                     filter[i].jt = tfilter[i].jt;
2402                     filter[i].jf = tfilter[i].jf;
2403                     filter[i].k = tswap32(tfilter[i].k);
2404                 }
2405                 fprog.filter = filter;
2406 
2407                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2408                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2409                 g_free(filter);
2410 
2411                 unlock_user_struct(tfilter, tfprog->filter, 1);
2412                 unlock_user_struct(tfprog, optval_addr, 1);
2413                 return ret;
2414         }
2415 	case TARGET_SO_BINDTODEVICE:
2416 	{
2417 		char *dev_ifname, *addr_ifname;
2418 
2419 		if (optlen > IFNAMSIZ - 1) {
2420 		    optlen = IFNAMSIZ - 1;
2421 		}
2422 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2423 		if (!dev_ifname) {
2424 		    return -TARGET_EFAULT;
2425 		}
2426 		optname = SO_BINDTODEVICE;
2427 		addr_ifname = alloca(IFNAMSIZ);
2428 		memcpy(addr_ifname, dev_ifname, optlen);
2429 		addr_ifname[optlen] = 0;
2430 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2431                                            addr_ifname, optlen));
2432 		unlock_user (dev_ifname, optval_addr, 0);
2433 		return ret;
2434 	}
2435         case TARGET_SO_LINGER:
2436         {
2437                 struct linger lg;
2438                 struct target_linger *tlg;
2439 
2440                 if (optlen != sizeof(struct target_linger)) {
2441                     return -TARGET_EINVAL;
2442                 }
2443                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2444                     return -TARGET_EFAULT;
2445                 }
2446                 __get_user(lg.l_onoff, &tlg->l_onoff);
2447                 __get_user(lg.l_linger, &tlg->l_linger);
2448                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2449                                 &lg, sizeof(lg)));
2450                 unlock_user_struct(tlg, optval_addr, 0);
2451                 return ret;
2452         }
2453             /* Options with 'int' argument.  */
2454         case TARGET_SO_DEBUG:
2455 		optname = SO_DEBUG;
2456 		break;
2457         case TARGET_SO_REUSEADDR:
2458 		optname = SO_REUSEADDR;
2459 		break;
2460 #ifdef SO_REUSEPORT
2461         case TARGET_SO_REUSEPORT:
2462                 optname = SO_REUSEPORT;
2463                 break;
2464 #endif
2465         case TARGET_SO_TYPE:
2466 		optname = SO_TYPE;
2467 		break;
2468         case TARGET_SO_ERROR:
2469 		optname = SO_ERROR;
2470 		break;
2471         case TARGET_SO_DONTROUTE:
2472 		optname = SO_DONTROUTE;
2473 		break;
2474         case TARGET_SO_BROADCAST:
2475 		optname = SO_BROADCAST;
2476 		break;
2477         case TARGET_SO_SNDBUF:
2478 		optname = SO_SNDBUF;
2479 		break;
2480         case TARGET_SO_SNDBUFFORCE:
2481                 optname = SO_SNDBUFFORCE;
2482                 break;
2483         case TARGET_SO_RCVBUF:
2484 		optname = SO_RCVBUF;
2485 		break;
2486         case TARGET_SO_RCVBUFFORCE:
2487                 optname = SO_RCVBUFFORCE;
2488                 break;
2489         case TARGET_SO_KEEPALIVE:
2490 		optname = SO_KEEPALIVE;
2491 		break;
2492         case TARGET_SO_OOBINLINE:
2493 		optname = SO_OOBINLINE;
2494 		break;
2495         case TARGET_SO_NO_CHECK:
2496 		optname = SO_NO_CHECK;
2497 		break;
2498         case TARGET_SO_PRIORITY:
2499 		optname = SO_PRIORITY;
2500 		break;
2501 #ifdef SO_BSDCOMPAT
2502         case TARGET_SO_BSDCOMPAT:
2503 		optname = SO_BSDCOMPAT;
2504 		break;
2505 #endif
2506         case TARGET_SO_PASSCRED:
2507 		optname = SO_PASSCRED;
2508 		break;
2509         case TARGET_SO_PASSSEC:
2510                 optname = SO_PASSSEC;
2511                 break;
2512         case TARGET_SO_TIMESTAMP:
2513 		optname = SO_TIMESTAMP;
2514 		break;
2515         case TARGET_SO_RCVLOWAT:
2516 		optname = SO_RCVLOWAT;
2517 		break;
2518         default:
2519             goto unimplemented;
2520         }
2521 	if (optlen < sizeof(uint32_t))
2522             return -TARGET_EINVAL;
2523 
2524 	if (get_user_u32(val, optval_addr))
2525             return -TARGET_EFAULT;
2526 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2527         break;
2528 #ifdef SOL_NETLINK
2529     case SOL_NETLINK:
2530         switch (optname) {
2531         case NETLINK_PKTINFO:
2532         case NETLINK_ADD_MEMBERSHIP:
2533         case NETLINK_DROP_MEMBERSHIP:
2534         case NETLINK_BROADCAST_ERROR:
2535         case NETLINK_NO_ENOBUFS:
2536 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2537         case NETLINK_LISTEN_ALL_NSID:
2538         case NETLINK_CAP_ACK:
2539 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2540 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2541         case NETLINK_EXT_ACK:
2542 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2543 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2544         case NETLINK_GET_STRICT_CHK:
2545 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2546             break;
2547         default:
2548             goto unimplemented;
2549         }
2550         val = 0;
2551         if (optlen < sizeof(uint32_t)) {
2552             return -TARGET_EINVAL;
2553         }
2554         if (get_user_u32(val, optval_addr)) {
2555             return -TARGET_EFAULT;
2556         }
2557         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2558                                    sizeof(val)));
2559         break;
2560 #endif /* SOL_NETLINK */
2561     default:
2562     unimplemented:
2563         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2564                       level, optname);
2565         ret = -TARGET_ENOPROTOOPT;
2566     }
2567     return ret;
2568 }
2569 
2570 /* do_getsockopt() Must return target values and target errnos. */
2571 static abi_long do_getsockopt(int sockfd, int level, int optname,
2572                               abi_ulong optval_addr, abi_ulong optlen)
2573 {
2574     abi_long ret;
2575     int len, val;
2576     socklen_t lv;
2577 
2578     switch(level) {
2579     case TARGET_SOL_SOCKET:
2580         level = SOL_SOCKET;
2581         switch (optname) {
2582         /* These don't just return a single integer */
2583         case TARGET_SO_PEERNAME:
2584             goto unimplemented;
2585         case TARGET_SO_RCVTIMEO: {
2586             struct timeval tv;
2587             socklen_t tvlen;
2588 
2589             optname = SO_RCVTIMEO;
2590 
2591 get_timeout:
2592             if (get_user_u32(len, optlen)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             if (len < 0) {
2596                 return -TARGET_EINVAL;
2597             }
2598 
2599             tvlen = sizeof(tv);
2600             ret = get_errno(getsockopt(sockfd, level, optname,
2601                                        &tv, &tvlen));
2602             if (ret < 0) {
2603                 return ret;
2604             }
2605             if (len > sizeof(struct target_timeval)) {
2606                 len = sizeof(struct target_timeval);
2607             }
2608             if (copy_to_user_timeval(optval_addr, &tv)) {
2609                 return -TARGET_EFAULT;
2610             }
2611             if (put_user_u32(len, optlen)) {
2612                 return -TARGET_EFAULT;
2613             }
2614             break;
2615         }
2616         case TARGET_SO_SNDTIMEO:
2617             optname = SO_SNDTIMEO;
2618             goto get_timeout;
2619         case TARGET_SO_PEERCRED: {
2620             struct ucred cr;
2621             socklen_t crlen;
2622             struct target_ucred *tcr;
2623 
2624             if (get_user_u32(len, optlen)) {
2625                 return -TARGET_EFAULT;
2626             }
2627             if (len < 0) {
2628                 return -TARGET_EINVAL;
2629             }
2630 
2631             crlen = sizeof(cr);
2632             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2633                                        &cr, &crlen));
2634             if (ret < 0) {
2635                 return ret;
2636             }
2637             if (len > crlen) {
2638                 len = crlen;
2639             }
2640             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2641                 return -TARGET_EFAULT;
2642             }
2643             __put_user(cr.pid, &tcr->pid);
2644             __put_user(cr.uid, &tcr->uid);
2645             __put_user(cr.gid, &tcr->gid);
2646             unlock_user_struct(tcr, optval_addr, 1);
2647             if (put_user_u32(len, optlen)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             break;
2651         }
2652         case TARGET_SO_PEERSEC: {
2653             char *name;
2654 
2655             if (get_user_u32(len, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             if (len < 0) {
2659                 return -TARGET_EINVAL;
2660             }
2661             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2662             if (!name) {
2663                 return -TARGET_EFAULT;
2664             }
2665             lv = len;
2666             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2667                                        name, &lv));
2668             if (put_user_u32(lv, optlen)) {
2669                 ret = -TARGET_EFAULT;
2670             }
2671             unlock_user(name, optval_addr, lv);
2672             break;
2673         }
2674         case TARGET_SO_LINGER:
2675         {
2676             struct linger lg;
2677             socklen_t lglen;
2678             struct target_linger *tlg;
2679 
2680             if (get_user_u32(len, optlen)) {
2681                 return -TARGET_EFAULT;
2682             }
2683             if (len < 0) {
2684                 return -TARGET_EINVAL;
2685             }
2686 
2687             lglen = sizeof(lg);
2688             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2689                                        &lg, &lglen));
2690             if (ret < 0) {
2691                 return ret;
2692             }
2693             if (len > lglen) {
2694                 len = lglen;
2695             }
2696             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2697                 return -TARGET_EFAULT;
2698             }
2699             __put_user(lg.l_onoff, &tlg->l_onoff);
2700             __put_user(lg.l_linger, &tlg->l_linger);
2701             unlock_user_struct(tlg, optval_addr, 1);
2702             if (put_user_u32(len, optlen)) {
2703                 return -TARGET_EFAULT;
2704             }
2705             break;
2706         }
2707         /* Options with 'int' argument.  */
2708         case TARGET_SO_DEBUG:
2709             optname = SO_DEBUG;
2710             goto int_case;
2711         case TARGET_SO_REUSEADDR:
2712             optname = SO_REUSEADDR;
2713             goto int_case;
2714 #ifdef SO_REUSEPORT
2715         case TARGET_SO_REUSEPORT:
2716             optname = SO_REUSEPORT;
2717             goto int_case;
2718 #endif
2719         case TARGET_SO_TYPE:
2720             optname = SO_TYPE;
2721             goto int_case;
2722         case TARGET_SO_ERROR:
2723             optname = SO_ERROR;
2724             goto int_case;
2725         case TARGET_SO_DONTROUTE:
2726             optname = SO_DONTROUTE;
2727             goto int_case;
2728         case TARGET_SO_BROADCAST:
2729             optname = SO_BROADCAST;
2730             goto int_case;
2731         case TARGET_SO_SNDBUF:
2732             optname = SO_SNDBUF;
2733             goto int_case;
2734         case TARGET_SO_RCVBUF:
2735             optname = SO_RCVBUF;
2736             goto int_case;
2737         case TARGET_SO_KEEPALIVE:
2738             optname = SO_KEEPALIVE;
2739             goto int_case;
2740         case TARGET_SO_OOBINLINE:
2741             optname = SO_OOBINLINE;
2742             goto int_case;
2743         case TARGET_SO_NO_CHECK:
2744             optname = SO_NO_CHECK;
2745             goto int_case;
2746         case TARGET_SO_PRIORITY:
2747             optname = SO_PRIORITY;
2748             goto int_case;
2749 #ifdef SO_BSDCOMPAT
2750         case TARGET_SO_BSDCOMPAT:
2751             optname = SO_BSDCOMPAT;
2752             goto int_case;
2753 #endif
2754         case TARGET_SO_PASSCRED:
2755             optname = SO_PASSCRED;
2756             goto int_case;
2757         case TARGET_SO_TIMESTAMP:
2758             optname = SO_TIMESTAMP;
2759             goto int_case;
2760         case TARGET_SO_RCVLOWAT:
2761             optname = SO_RCVLOWAT;
2762             goto int_case;
2763         case TARGET_SO_ACCEPTCONN:
2764             optname = SO_ACCEPTCONN;
2765             goto int_case;
2766         case TARGET_SO_PROTOCOL:
2767             optname = SO_PROTOCOL;
2768             goto int_case;
2769         case TARGET_SO_DOMAIN:
2770             optname = SO_DOMAIN;
2771             goto int_case;
2772         default:
2773             goto int_case;
2774         }
2775         break;
2776     case SOL_TCP:
2777     case SOL_UDP:
2778         /* TCP and UDP options all take an 'int' value.  */
2779     int_case:
2780         if (get_user_u32(len, optlen))
2781             return -TARGET_EFAULT;
2782         if (len < 0)
2783             return -TARGET_EINVAL;
2784         lv = sizeof(lv);
2785         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2786         if (ret < 0)
2787             return ret;
2788         switch (optname) {
2789         case SO_TYPE:
2790             val = host_to_target_sock_type(val);
2791             break;
2792         case SO_ERROR:
2793             val = host_to_target_errno(val);
2794             break;
2795         }
2796         if (len > lv)
2797             len = lv;
2798         if (len == 4) {
2799             if (put_user_u32(val, optval_addr))
2800                 return -TARGET_EFAULT;
2801         } else {
2802             if (put_user_u8(val, optval_addr))
2803                 return -TARGET_EFAULT;
2804         }
2805         if (put_user_u32(len, optlen))
2806             return -TARGET_EFAULT;
2807         break;
2808     case SOL_IP:
2809         switch(optname) {
2810         case IP_TOS:
2811         case IP_TTL:
2812         case IP_HDRINCL:
2813         case IP_ROUTER_ALERT:
2814         case IP_RECVOPTS:
2815         case IP_RETOPTS:
2816         case IP_PKTINFO:
2817         case IP_MTU_DISCOVER:
2818         case IP_RECVERR:
2819         case IP_RECVTOS:
2820 #ifdef IP_FREEBIND
2821         case IP_FREEBIND:
2822 #endif
2823         case IP_MULTICAST_TTL:
2824         case IP_MULTICAST_LOOP:
2825             if (get_user_u32(len, optlen))
2826                 return -TARGET_EFAULT;
2827             if (len < 0)
2828                 return -TARGET_EINVAL;
2829             lv = sizeof(lv);
2830             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2831             if (ret < 0)
2832                 return ret;
2833             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2834                 len = 1;
2835                 if (put_user_u32(len, optlen)
2836                     || put_user_u8(val, optval_addr))
2837                     return -TARGET_EFAULT;
2838             } else {
2839                 if (len > sizeof(int))
2840                     len = sizeof(int);
2841                 if (put_user_u32(len, optlen)
2842                     || put_user_u32(val, optval_addr))
2843                     return -TARGET_EFAULT;
2844             }
2845             break;
2846         default:
2847             ret = -TARGET_ENOPROTOOPT;
2848             break;
2849         }
2850         break;
2851     case SOL_IPV6:
2852         switch (optname) {
2853         case IPV6_MTU_DISCOVER:
2854         case IPV6_MTU:
2855         case IPV6_V6ONLY:
2856         case IPV6_RECVPKTINFO:
2857         case IPV6_UNICAST_HOPS:
2858         case IPV6_MULTICAST_HOPS:
2859         case IPV6_MULTICAST_LOOP:
2860         case IPV6_RECVERR:
2861         case IPV6_RECVHOPLIMIT:
2862         case IPV6_2292HOPLIMIT:
2863         case IPV6_CHECKSUM:
2864         case IPV6_ADDRFORM:
2865         case IPV6_2292PKTINFO:
2866         case IPV6_RECVTCLASS:
2867         case IPV6_RECVRTHDR:
2868         case IPV6_2292RTHDR:
2869         case IPV6_RECVHOPOPTS:
2870         case IPV6_2292HOPOPTS:
2871         case IPV6_RECVDSTOPTS:
2872         case IPV6_2292DSTOPTS:
2873         case IPV6_TCLASS:
2874         case IPV6_ADDR_PREFERENCES:
2875 #ifdef IPV6_RECVPATHMTU
2876         case IPV6_RECVPATHMTU:
2877 #endif
2878 #ifdef IPV6_TRANSPARENT
2879         case IPV6_TRANSPARENT:
2880 #endif
2881 #ifdef IPV6_FREEBIND
2882         case IPV6_FREEBIND:
2883 #endif
2884 #ifdef IPV6_RECVORIGDSTADDR
2885         case IPV6_RECVORIGDSTADDR:
2886 #endif
2887             if (get_user_u32(len, optlen))
2888                 return -TARGET_EFAULT;
2889             if (len < 0)
2890                 return -TARGET_EINVAL;
2891             lv = sizeof(lv);
2892             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2893             if (ret < 0)
2894                 return ret;
2895             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2896                 len = 1;
2897                 if (put_user_u32(len, optlen)
2898                     || put_user_u8(val, optval_addr))
2899                     return -TARGET_EFAULT;
2900             } else {
2901                 if (len > sizeof(int))
2902                     len = sizeof(int);
2903                 if (put_user_u32(len, optlen)
2904                     || put_user_u32(val, optval_addr))
2905                     return -TARGET_EFAULT;
2906             }
2907             break;
2908         default:
2909             ret = -TARGET_ENOPROTOOPT;
2910             break;
2911         }
2912         break;
2913 #ifdef SOL_NETLINK
2914     case SOL_NETLINK:
2915         switch (optname) {
2916         case NETLINK_PKTINFO:
2917         case NETLINK_BROADCAST_ERROR:
2918         case NETLINK_NO_ENOBUFS:
2919 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2920         case NETLINK_LISTEN_ALL_NSID:
2921         case NETLINK_CAP_ACK:
2922 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2923 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2924         case NETLINK_EXT_ACK:
2925 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2927         case NETLINK_GET_STRICT_CHK:
2928 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2929             if (get_user_u32(len, optlen)) {
2930                 return -TARGET_EFAULT;
2931             }
2932             if (len != sizeof(val)) {
2933                 return -TARGET_EINVAL;
2934             }
2935             lv = len;
2936             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2937             if (ret < 0) {
2938                 return ret;
2939             }
2940             if (put_user_u32(lv, optlen)
2941                 || put_user_u32(val, optval_addr)) {
2942                 return -TARGET_EFAULT;
2943             }
2944             break;
2945 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2946         case NETLINK_LIST_MEMBERSHIPS:
2947         {
2948             uint32_t *results;
2949             int i;
2950             if (get_user_u32(len, optlen)) {
2951                 return -TARGET_EFAULT;
2952             }
2953             if (len < 0) {
2954                 return -TARGET_EINVAL;
2955             }
2956             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2957             if (!results && len > 0) {
2958                 return -TARGET_EFAULT;
2959             }
2960             lv = len;
2961             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2962             if (ret < 0) {
2963                 unlock_user(results, optval_addr, 0);
2964                 return ret;
2965             }
2966             /* swap host endianness to target endianness. */
2967             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2968                 results[i] = tswap32(results[i]);
2969             }
2970             if (put_user_u32(lv, optlen)) {
2971                 return -TARGET_EFAULT;
2972             }
2973             unlock_user(results, optval_addr, 0);
2974             break;
2975         }
2976 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2977         default:
2978             goto unimplemented;
2979         }
2980         break;
2981 #endif /* SOL_NETLINK */
2982     default:
2983     unimplemented:
2984         qemu_log_mask(LOG_UNIMP,
2985                       "getsockopt level=%d optname=%d not yet supported\n",
2986                       level, optname);
2987         ret = -TARGET_EOPNOTSUPP;
2988         break;
2989     }
2990     return ret;
2991 }
2992 
2993 /* Convert target low/high pair representing file offset into the host
2994  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2995  * as the kernel doesn't handle them either.
2996  */
2997 static void target_to_host_low_high(abi_ulong tlow,
2998                                     abi_ulong thigh,
2999                                     unsigned long *hlow,
3000                                     unsigned long *hhigh)
3001 {
3002     uint64_t off = tlow |
3003         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3004         TARGET_LONG_BITS / 2;
3005 
3006     *hlow = off;
3007     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3008 }
3009 
3010 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3011                                 abi_ulong count, int copy)
3012 {
3013     struct target_iovec *target_vec;
3014     struct iovec *vec;
3015     abi_ulong total_len, max_len;
3016     int i;
3017     int err = 0;
3018     bool bad_address = false;
3019 
3020     if (count == 0) {
3021         errno = 0;
3022         return NULL;
3023     }
3024     if (count > IOV_MAX) {
3025         errno = EINVAL;
3026         return NULL;
3027     }
3028 
3029     vec = g_try_new0(struct iovec, count);
3030     if (vec == NULL) {
3031         errno = ENOMEM;
3032         return NULL;
3033     }
3034 
3035     target_vec = lock_user(VERIFY_READ, target_addr,
3036                            count * sizeof(struct target_iovec), 1);
3037     if (target_vec == NULL) {
3038         err = EFAULT;
3039         goto fail2;
3040     }
3041 
3042     /* ??? If host page size > target page size, this will result in a
3043        value larger than what we can actually support.  */
3044     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3045     total_len = 0;
3046 
3047     for (i = 0; i < count; i++) {
3048         abi_ulong base = tswapal(target_vec[i].iov_base);
3049         abi_long len = tswapal(target_vec[i].iov_len);
3050 
3051         if (len < 0) {
3052             err = EINVAL;
3053             goto fail;
3054         } else if (len == 0) {
3055             /* Zero length pointer is ignored.  */
3056             vec[i].iov_base = 0;
3057         } else {
3058             vec[i].iov_base = lock_user(type, base, len, copy);
3059             /* If the first buffer pointer is bad, this is a fault.  But
3060              * subsequent bad buffers will result in a partial write; this
3061              * is realized by filling the vector with null pointers and
3062              * zero lengths. */
3063             if (!vec[i].iov_base) {
3064                 if (i == 0) {
3065                     err = EFAULT;
3066                     goto fail;
3067                 } else {
3068                     bad_address = true;
3069                 }
3070             }
3071             if (bad_address) {
3072                 len = 0;
3073             }
3074             if (len > max_len - total_len) {
3075                 len = max_len - total_len;
3076             }
3077         }
3078         vec[i].iov_len = len;
3079         total_len += len;
3080     }
3081 
3082     unlock_user(target_vec, target_addr, 0);
3083     return vec;
3084 
3085  fail:
3086     while (--i >= 0) {
3087         if (tswapal(target_vec[i].iov_len) > 0) {
3088             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3089         }
3090     }
3091     unlock_user(target_vec, target_addr, 0);
3092  fail2:
3093     g_free(vec);
3094     errno = err;
3095     return NULL;
3096 }
3097 
3098 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3099                          abi_ulong count, int copy)
3100 {
3101     struct target_iovec *target_vec;
3102     int i;
3103 
3104     target_vec = lock_user(VERIFY_READ, target_addr,
3105                            count * sizeof(struct target_iovec), 1);
3106     if (target_vec) {
3107         for (i = 0; i < count; i++) {
3108             abi_ulong base = tswapal(target_vec[i].iov_base);
3109             abi_long len = tswapal(target_vec[i].iov_len);
3110             if (len < 0) {
3111                 break;
3112             }
3113             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3114         }
3115         unlock_user(target_vec, target_addr, 0);
3116     }
3117 
3118     g_free(vec);
3119 }
3120 
3121 static inline int target_to_host_sock_type(int *type)
3122 {
3123     int host_type = 0;
3124     int target_type = *type;
3125 
3126     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3127     case TARGET_SOCK_DGRAM:
3128         host_type = SOCK_DGRAM;
3129         break;
3130     case TARGET_SOCK_STREAM:
3131         host_type = SOCK_STREAM;
3132         break;
3133     default:
3134         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3135         break;
3136     }
3137     if (target_type & TARGET_SOCK_CLOEXEC) {
3138 #if defined(SOCK_CLOEXEC)
3139         host_type |= SOCK_CLOEXEC;
3140 #else
3141         return -TARGET_EINVAL;
3142 #endif
3143     }
3144     if (target_type & TARGET_SOCK_NONBLOCK) {
3145 #if defined(SOCK_NONBLOCK)
3146         host_type |= SOCK_NONBLOCK;
3147 #elif !defined(O_NONBLOCK)
3148         return -TARGET_EINVAL;
3149 #endif
3150     }
3151     *type = host_type;
3152     return 0;
3153 }
3154 
3155 /* Try to emulate socket type flags after socket creation.  */
3156 static int sock_flags_fixup(int fd, int target_type)
3157 {
3158 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3159     if (target_type & TARGET_SOCK_NONBLOCK) {
3160         int flags = fcntl(fd, F_GETFL);
3161         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3162             close(fd);
3163             return -TARGET_EINVAL;
3164         }
3165     }
3166 #endif
3167     return fd;
3168 }
3169 
3170 /* do_socket() Must return target values and target errnos. */
3171 static abi_long do_socket(int domain, int type, int protocol)
3172 {
3173     int target_type = type;
3174     int ret;
3175 
3176     ret = target_to_host_sock_type(&type);
3177     if (ret) {
3178         return ret;
3179     }
3180 
3181     if (domain == PF_NETLINK && !(
3182 #ifdef CONFIG_RTNETLINK
3183          protocol == NETLINK_ROUTE ||
3184 #endif
3185          protocol == NETLINK_KOBJECT_UEVENT ||
3186          protocol == NETLINK_AUDIT)) {
3187         return -TARGET_EPROTONOSUPPORT;
3188     }
3189 
3190     if (domain == AF_PACKET ||
3191         (domain == AF_INET && type == SOCK_PACKET)) {
3192         protocol = tswap16(protocol);
3193     }
3194 
3195     ret = get_errno(socket(domain, type, protocol));
3196     if (ret >= 0) {
3197         ret = sock_flags_fixup(ret, target_type);
3198         if (type == SOCK_PACKET) {
3199             /* Manage an obsolete case :
3200              * if socket type is SOCK_PACKET, bind by name
3201              */
3202             fd_trans_register(ret, &target_packet_trans);
3203         } else if (domain == PF_NETLINK) {
3204             switch (protocol) {
3205 #ifdef CONFIG_RTNETLINK
3206             case NETLINK_ROUTE:
3207                 fd_trans_register(ret, &target_netlink_route_trans);
3208                 break;
3209 #endif
3210             case NETLINK_KOBJECT_UEVENT:
3211                 /* nothing to do: messages are strings */
3212                 break;
3213             case NETLINK_AUDIT:
3214                 fd_trans_register(ret, &target_netlink_audit_trans);
3215                 break;
3216             default:
3217                 g_assert_not_reached();
3218             }
3219         }
3220     }
3221     return ret;
3222 }
3223 
3224 /* do_bind() Must return target values and target errnos. */
3225 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3226                         socklen_t addrlen)
3227 {
3228     void *addr;
3229     abi_long ret;
3230 
3231     if ((int)addrlen < 0) {
3232         return -TARGET_EINVAL;
3233     }
3234 
3235     addr = alloca(addrlen+1);
3236 
3237     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3238     if (ret)
3239         return ret;
3240 
3241     return get_errno(bind(sockfd, addr, addrlen));
3242 }
3243 
3244 /* do_connect() Must return target values and target errnos. */
3245 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3246                            socklen_t addrlen)
3247 {
3248     void *addr;
3249     abi_long ret;
3250 
3251     if ((int)addrlen < 0) {
3252         return -TARGET_EINVAL;
3253     }
3254 
3255     addr = alloca(addrlen+1);
3256 
3257     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3258     if (ret)
3259         return ret;
3260 
3261     return get_errno(safe_connect(sockfd, addr, addrlen));
3262 }
3263 
3264 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3265 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3266                                       int flags, int send)
3267 {
3268     abi_long ret, len;
3269     struct msghdr msg;
3270     abi_ulong count;
3271     struct iovec *vec;
3272     abi_ulong target_vec;
3273 
3274     if (msgp->msg_name) {
3275         msg.msg_namelen = tswap32(msgp->msg_namelen);
3276         msg.msg_name = alloca(msg.msg_namelen+1);
3277         ret = target_to_host_sockaddr(fd, msg.msg_name,
3278                                       tswapal(msgp->msg_name),
3279                                       msg.msg_namelen);
3280         if (ret == -TARGET_EFAULT) {
3281             /* For connected sockets msg_name and msg_namelen must
3282              * be ignored, so returning EFAULT immediately is wrong.
3283              * Instead, pass a bad msg_name to the host kernel, and
3284              * let it decide whether to return EFAULT or not.
3285              */
3286             msg.msg_name = (void *)-1;
3287         } else if (ret) {
3288             goto out2;
3289         }
3290     } else {
3291         msg.msg_name = NULL;
3292         msg.msg_namelen = 0;
3293     }
3294     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3295     msg.msg_control = alloca(msg.msg_controllen);
3296     memset(msg.msg_control, 0, msg.msg_controllen);
3297 
3298     msg.msg_flags = tswap32(msgp->msg_flags);
3299 
3300     count = tswapal(msgp->msg_iovlen);
3301     target_vec = tswapal(msgp->msg_iov);
3302 
3303     if (count > IOV_MAX) {
3304         /* sendrcvmsg returns a different errno for this condition than
3305          * readv/writev, so we must catch it here before lock_iovec() does.
3306          */
3307         ret = -TARGET_EMSGSIZE;
3308         goto out2;
3309     }
3310 
3311     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3312                      target_vec, count, send);
3313     if (vec == NULL) {
3314         ret = -host_to_target_errno(errno);
3315         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3316         if (!send || ret) {
3317             goto out2;
3318         }
3319     }
3320     msg.msg_iovlen = count;
3321     msg.msg_iov = vec;
3322 
3323     if (send) {
3324         if (fd_trans_target_to_host_data(fd)) {
3325             void *host_msg;
3326 
3327             host_msg = g_malloc(msg.msg_iov->iov_len);
3328             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3329             ret = fd_trans_target_to_host_data(fd)(host_msg,
3330                                                    msg.msg_iov->iov_len);
3331             if (ret >= 0) {
3332                 msg.msg_iov->iov_base = host_msg;
3333                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3334             }
3335             g_free(host_msg);
3336         } else {
3337             ret = target_to_host_cmsg(&msg, msgp);
3338             if (ret == 0) {
3339                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3340             }
3341         }
3342     } else {
3343         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3344         if (!is_error(ret)) {
3345             len = ret;
3346             if (fd_trans_host_to_target_data(fd)) {
3347                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3348                                                MIN(msg.msg_iov->iov_len, len));
3349             }
3350             if (!is_error(ret)) {
3351                 ret = host_to_target_cmsg(msgp, &msg);
3352             }
3353             if (!is_error(ret)) {
3354                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3355                 msgp->msg_flags = tswap32(msg.msg_flags);
3356                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3357                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3358                                     msg.msg_name, msg.msg_namelen);
3359                     if (ret) {
3360                         goto out;
3361                     }
3362                 }
3363 
3364                 ret = len;
3365             }
3366         }
3367     }
3368 
3369 out:
3370     if (vec) {
3371         unlock_iovec(vec, target_vec, count, !send);
3372     }
3373 out2:
3374     return ret;
3375 }
3376 
3377 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3378                                int flags, int send)
3379 {
3380     abi_long ret;
3381     struct target_msghdr *msgp;
3382 
3383     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3384                           msgp,
3385                           target_msg,
3386                           send ? 1 : 0)) {
3387         return -TARGET_EFAULT;
3388     }
3389     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3390     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3391     return ret;
3392 }
3393 
3394 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3395  * so it might not have this *mmsg-specific flag either.
3396  */
3397 #ifndef MSG_WAITFORONE
3398 #define MSG_WAITFORONE 0x10000
3399 #endif
3400 
3401 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3402                                 unsigned int vlen, unsigned int flags,
3403                                 int send)
3404 {
3405     struct target_mmsghdr *mmsgp;
3406     abi_long ret = 0;
3407     int i;
3408 
3409     if (vlen > UIO_MAXIOV) {
3410         vlen = UIO_MAXIOV;
3411     }
3412 
3413     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3414     if (!mmsgp) {
3415         return -TARGET_EFAULT;
3416     }
3417 
3418     for (i = 0; i < vlen; i++) {
3419         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3420         if (is_error(ret)) {
3421             break;
3422         }
3423         mmsgp[i].msg_len = tswap32(ret);
3424         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3425         if (flags & MSG_WAITFORONE) {
3426             flags |= MSG_DONTWAIT;
3427         }
3428     }
3429 
3430     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3431 
3432     /* Return number of datagrams sent if we sent any at all;
3433      * otherwise return the error.
3434      */
3435     if (i) {
3436         return i;
3437     }
3438     return ret;
3439 }
3440 
3441 /* do_accept4() Must return target values and target errnos. */
3442 static abi_long do_accept4(int fd, abi_ulong target_addr,
3443                            abi_ulong target_addrlen_addr, int flags)
3444 {
3445     socklen_t addrlen, ret_addrlen;
3446     void *addr;
3447     abi_long ret;
3448     int host_flags;
3449 
3450     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3451         return -TARGET_EINVAL;
3452     }
3453 
3454     host_flags = 0;
3455     if (flags & TARGET_SOCK_NONBLOCK) {
3456         host_flags |= SOCK_NONBLOCK;
3457     }
3458     if (flags & TARGET_SOCK_CLOEXEC) {
3459         host_flags |= SOCK_CLOEXEC;
3460     }
3461 
3462     if (target_addr == 0) {
3463         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3464     }
3465 
3466     /* linux returns EFAULT if addrlen pointer is invalid */
3467     if (get_user_u32(addrlen, target_addrlen_addr))
3468         return -TARGET_EFAULT;
3469 
3470     if ((int)addrlen < 0) {
3471         return -TARGET_EINVAL;
3472     }
3473 
3474     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3475         return -TARGET_EFAULT;
3476     }
3477 
3478     addr = alloca(addrlen);
3479 
3480     ret_addrlen = addrlen;
3481     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3482     if (!is_error(ret)) {
3483         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3484         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3485             ret = -TARGET_EFAULT;
3486         }
3487     }
3488     return ret;
3489 }
3490 
3491 /* do_getpeername() Must return target values and target errnos. */
3492 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3493                                abi_ulong target_addrlen_addr)
3494 {
3495     socklen_t addrlen, ret_addrlen;
3496     void *addr;
3497     abi_long ret;
3498 
3499     if (get_user_u32(addrlen, target_addrlen_addr))
3500         return -TARGET_EFAULT;
3501 
3502     if ((int)addrlen < 0) {
3503         return -TARGET_EINVAL;
3504     }
3505 
3506     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3507         return -TARGET_EFAULT;
3508     }
3509 
3510     addr = alloca(addrlen);
3511 
3512     ret_addrlen = addrlen;
3513     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3514     if (!is_error(ret)) {
3515         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3516         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3517             ret = -TARGET_EFAULT;
3518         }
3519     }
3520     return ret;
3521 }
3522 
3523 /* do_getsockname() Must return target values and target errnos. */
3524 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3525                                abi_ulong target_addrlen_addr)
3526 {
3527     socklen_t addrlen, ret_addrlen;
3528     void *addr;
3529     abi_long ret;
3530 
3531     if (get_user_u32(addrlen, target_addrlen_addr))
3532         return -TARGET_EFAULT;
3533 
3534     if ((int)addrlen < 0) {
3535         return -TARGET_EINVAL;
3536     }
3537 
3538     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3539         return -TARGET_EFAULT;
3540     }
3541 
3542     addr = alloca(addrlen);
3543 
3544     ret_addrlen = addrlen;
3545     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3546     if (!is_error(ret)) {
3547         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3548         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3549             ret = -TARGET_EFAULT;
3550         }
3551     }
3552     return ret;
3553 }
3554 
3555 /* do_socketpair() Must return target values and target errnos. */
3556 static abi_long do_socketpair(int domain, int type, int protocol,
3557                               abi_ulong target_tab_addr)
3558 {
3559     int tab[2];
3560     abi_long ret;
3561 
3562     target_to_host_sock_type(&type);
3563 
3564     ret = get_errno(socketpair(domain, type, protocol, tab));
3565     if (!is_error(ret)) {
3566         if (put_user_s32(tab[0], target_tab_addr)
3567             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3568             ret = -TARGET_EFAULT;
3569     }
3570     return ret;
3571 }
3572 
3573 /* do_sendto() Must return target values and target errnos. */
3574 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3575                           abi_ulong target_addr, socklen_t addrlen)
3576 {
3577     void *addr;
3578     void *host_msg;
3579     void *copy_msg = NULL;
3580     abi_long ret;
3581 
3582     if ((int)addrlen < 0) {
3583         return -TARGET_EINVAL;
3584     }
3585 
3586     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3587     if (!host_msg)
3588         return -TARGET_EFAULT;
3589     if (fd_trans_target_to_host_data(fd)) {
3590         copy_msg = host_msg;
3591         host_msg = g_malloc(len);
3592         memcpy(host_msg, copy_msg, len);
3593         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3594         if (ret < 0) {
3595             goto fail;
3596         }
3597     }
3598     if (target_addr) {
3599         addr = alloca(addrlen+1);
3600         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3601         if (ret) {
3602             goto fail;
3603         }
3604         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3605     } else {
3606         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3607     }
3608 fail:
3609     if (copy_msg) {
3610         g_free(host_msg);
3611         host_msg = copy_msg;
3612     }
3613     unlock_user(host_msg, msg, 0);
3614     return ret;
3615 }
3616 
3617 /* do_recvfrom() Must return target values and target errnos. */
3618 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3619                             abi_ulong target_addr,
3620                             abi_ulong target_addrlen)
3621 {
3622     socklen_t addrlen, ret_addrlen;
3623     void *addr;
3624     void *host_msg;
3625     abi_long ret;
3626 
3627     if (!msg) {
3628         host_msg = NULL;
3629     } else {
3630         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3631         if (!host_msg) {
3632             return -TARGET_EFAULT;
3633         }
3634     }
3635     if (target_addr) {
3636         if (get_user_u32(addrlen, target_addrlen)) {
3637             ret = -TARGET_EFAULT;
3638             goto fail;
3639         }
3640         if ((int)addrlen < 0) {
3641             ret = -TARGET_EINVAL;
3642             goto fail;
3643         }
3644         addr = alloca(addrlen);
3645         ret_addrlen = addrlen;
3646         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3647                                       addr, &ret_addrlen));
3648     } else {
3649         addr = NULL; /* To keep compiler quiet.  */
3650         addrlen = 0; /* To keep compiler quiet.  */
3651         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3652     }
3653     if (!is_error(ret)) {
3654         if (fd_trans_host_to_target_data(fd)) {
3655             abi_long trans;
3656             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3657             if (is_error(trans)) {
3658                 ret = trans;
3659                 goto fail;
3660             }
3661         }
3662         if (target_addr) {
3663             host_to_target_sockaddr(target_addr, addr,
3664                                     MIN(addrlen, ret_addrlen));
3665             if (put_user_u32(ret_addrlen, target_addrlen)) {
3666                 ret = -TARGET_EFAULT;
3667                 goto fail;
3668             }
3669         }
3670         unlock_user(host_msg, msg, len);
3671     } else {
3672 fail:
3673         unlock_user(host_msg, msg, 0);
3674     }
3675     return ret;
3676 }
3677 
3678 #ifdef TARGET_NR_socketcall
3679 /* do_socketcall() must return target values and target errnos. */
3680 static abi_long do_socketcall(int num, abi_ulong vptr)
3681 {
3682     static const unsigned nargs[] = { /* number of arguments per operation */
3683         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3684         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3685         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3686         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3687         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3688         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3689         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3690         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3691         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3692         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3693         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3694         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3695         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3696         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3697         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3698         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3699         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3700         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3701         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3702         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3703     };
3704     abi_long a[6]; /* max 6 args */
3705     unsigned i;
3706 
3707     /* check the range of the first argument num */
3708     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3709     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3710         return -TARGET_EINVAL;
3711     }
3712     /* ensure we have space for args */
3713     if (nargs[num] > ARRAY_SIZE(a)) {
3714         return -TARGET_EINVAL;
3715     }
3716     /* collect the arguments in a[] according to nargs[] */
3717     for (i = 0; i < nargs[num]; ++i) {
3718         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3719             return -TARGET_EFAULT;
3720         }
3721     }
3722     /* now when we have the args, invoke the appropriate underlying function */
3723     switch (num) {
3724     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3725         return do_socket(a[0], a[1], a[2]);
3726     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3727         return do_bind(a[0], a[1], a[2]);
3728     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3729         return do_connect(a[0], a[1], a[2]);
3730     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3731         return get_errno(listen(a[0], a[1]));
3732     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3733         return do_accept4(a[0], a[1], a[2], 0);
3734     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3735         return do_getsockname(a[0], a[1], a[2]);
3736     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3737         return do_getpeername(a[0], a[1], a[2]);
3738     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3739         return do_socketpair(a[0], a[1], a[2], a[3]);
3740     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3741         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3742     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3743         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3744     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3745         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3746     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3747         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3748     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3749         return get_errno(shutdown(a[0], a[1]));
3750     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3751         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3752     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3753         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3754     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3755         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3756     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3757         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3758     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3759         return do_accept4(a[0], a[1], a[2], a[3]);
3760     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3761         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3762     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3763         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3764     default:
3765         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3766         return -TARGET_EINVAL;
3767     }
3768 }
3769 #endif
3770 
3771 #ifndef TARGET_SEMID64_DS
3772 /* asm-generic version of this struct */
3773 struct target_semid64_ds
3774 {
3775   struct target_ipc_perm sem_perm;
3776   abi_ulong sem_otime;
3777 #if TARGET_ABI_BITS == 32
3778   abi_ulong __unused1;
3779 #endif
3780   abi_ulong sem_ctime;
3781 #if TARGET_ABI_BITS == 32
3782   abi_ulong __unused2;
3783 #endif
3784   abi_ulong sem_nsems;
3785   abi_ulong __unused3;
3786   abi_ulong __unused4;
3787 };
3788 #endif
3789 
3790 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3791                                                abi_ulong target_addr)
3792 {
3793     struct target_ipc_perm *target_ip;
3794     struct target_semid64_ds *target_sd;
3795 
3796     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3797         return -TARGET_EFAULT;
3798     target_ip = &(target_sd->sem_perm);
3799     host_ip->__key = tswap32(target_ip->__key);
3800     host_ip->uid = tswap32(target_ip->uid);
3801     host_ip->gid = tswap32(target_ip->gid);
3802     host_ip->cuid = tswap32(target_ip->cuid);
3803     host_ip->cgid = tswap32(target_ip->cgid);
3804 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3805     host_ip->mode = tswap32(target_ip->mode);
3806 #else
3807     host_ip->mode = tswap16(target_ip->mode);
3808 #endif
3809 #if defined(TARGET_PPC)
3810     host_ip->__seq = tswap32(target_ip->__seq);
3811 #else
3812     host_ip->__seq = tswap16(target_ip->__seq);
3813 #endif
3814     unlock_user_struct(target_sd, target_addr, 0);
3815     return 0;
3816 }
3817 
3818 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3819                                                struct ipc_perm *host_ip)
3820 {
3821     struct target_ipc_perm *target_ip;
3822     struct target_semid64_ds *target_sd;
3823 
3824     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3825         return -TARGET_EFAULT;
3826     target_ip = &(target_sd->sem_perm);
3827     target_ip->__key = tswap32(host_ip->__key);
3828     target_ip->uid = tswap32(host_ip->uid);
3829     target_ip->gid = tswap32(host_ip->gid);
3830     target_ip->cuid = tswap32(host_ip->cuid);
3831     target_ip->cgid = tswap32(host_ip->cgid);
3832 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3833     target_ip->mode = tswap32(host_ip->mode);
3834 #else
3835     target_ip->mode = tswap16(host_ip->mode);
3836 #endif
3837 #if defined(TARGET_PPC)
3838     target_ip->__seq = tswap32(host_ip->__seq);
3839 #else
3840     target_ip->__seq = tswap16(host_ip->__seq);
3841 #endif
3842     unlock_user_struct(target_sd, target_addr, 1);
3843     return 0;
3844 }
3845 
3846 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3847                                                abi_ulong target_addr)
3848 {
3849     struct target_semid64_ds *target_sd;
3850 
3851     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3852         return -TARGET_EFAULT;
3853     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3854         return -TARGET_EFAULT;
3855     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3856     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3857     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3858     unlock_user_struct(target_sd, target_addr, 0);
3859     return 0;
3860 }
3861 
3862 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3863                                                struct semid_ds *host_sd)
3864 {
3865     struct target_semid64_ds *target_sd;
3866 
3867     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3868         return -TARGET_EFAULT;
3869     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3870         return -TARGET_EFAULT;
3871     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3872     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3873     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3874     unlock_user_struct(target_sd, target_addr, 1);
3875     return 0;
3876 }
3877 
3878 struct target_seminfo {
3879     int semmap;
3880     int semmni;
3881     int semmns;
3882     int semmnu;
3883     int semmsl;
3884     int semopm;
3885     int semume;
3886     int semusz;
3887     int semvmx;
3888     int semaem;
3889 };
3890 
3891 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3892                                               struct seminfo *host_seminfo)
3893 {
3894     struct target_seminfo *target_seminfo;
3895     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3896         return -TARGET_EFAULT;
3897     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3898     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3899     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3900     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3901     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3902     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3903     __put_user(host_seminfo->semume, &target_seminfo->semume);
3904     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3905     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3906     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3907     unlock_user_struct(target_seminfo, target_addr, 1);
3908     return 0;
3909 }
3910 
3911 union semun {
3912 	int val;
3913 	struct semid_ds *buf;
3914 	unsigned short *array;
3915 	struct seminfo *__buf;
3916 };
3917 
3918 union target_semun {
3919 	int val;
3920 	abi_ulong buf;
3921 	abi_ulong array;
3922 	abi_ulong __buf;
3923 };
3924 
3925 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3926                                                abi_ulong target_addr)
3927 {
3928     int nsems;
3929     unsigned short *array;
3930     union semun semun;
3931     struct semid_ds semid_ds;
3932     int i, ret;
3933 
3934     semun.buf = &semid_ds;
3935 
3936     ret = semctl(semid, 0, IPC_STAT, semun);
3937     if (ret == -1)
3938         return get_errno(ret);
3939 
3940     nsems = semid_ds.sem_nsems;
3941 
3942     *host_array = g_try_new(unsigned short, nsems);
3943     if (!*host_array) {
3944         return -TARGET_ENOMEM;
3945     }
3946     array = lock_user(VERIFY_READ, target_addr,
3947                       nsems*sizeof(unsigned short), 1);
3948     if (!array) {
3949         g_free(*host_array);
3950         return -TARGET_EFAULT;
3951     }
3952 
3953     for(i=0; i<nsems; i++) {
3954         __get_user((*host_array)[i], &array[i]);
3955     }
3956     unlock_user(array, target_addr, 0);
3957 
3958     return 0;
3959 }
3960 
3961 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3962                                                unsigned short **host_array)
3963 {
3964     int nsems;
3965     unsigned short *array;
3966     union semun semun;
3967     struct semid_ds semid_ds;
3968     int i, ret;
3969 
3970     semun.buf = &semid_ds;
3971 
3972     ret = semctl(semid, 0, IPC_STAT, semun);
3973     if (ret == -1)
3974         return get_errno(ret);
3975 
3976     nsems = semid_ds.sem_nsems;
3977 
3978     array = lock_user(VERIFY_WRITE, target_addr,
3979                       nsems*sizeof(unsigned short), 0);
3980     if (!array)
3981         return -TARGET_EFAULT;
3982 
3983     for(i=0; i<nsems; i++) {
3984         __put_user((*host_array)[i], &array[i]);
3985     }
3986     g_free(*host_array);
3987     unlock_user(array, target_addr, 1);
3988 
3989     return 0;
3990 }
3991 
3992 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3993                                  abi_ulong target_arg)
3994 {
3995     union target_semun target_su = { .buf = target_arg };
3996     union semun arg;
3997     struct semid_ds dsarg;
3998     unsigned short *array = NULL;
3999     struct seminfo seminfo;
4000     abi_long ret = -TARGET_EINVAL;
4001     abi_long err;
4002     cmd &= 0xff;
4003 
4004     switch( cmd ) {
4005 	case GETVAL:
4006 	case SETVAL:
4007             /* In 64 bit cross-endian situations, we will erroneously pick up
4008              * the wrong half of the union for the "val" element.  To rectify
4009              * this, the entire 8-byte structure is byteswapped, followed by
4010 	     * a swap of the 4 byte val field. In other cases, the data is
4011 	     * already in proper host byte order. */
4012 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4013 		target_su.buf = tswapal(target_su.buf);
4014 		arg.val = tswap32(target_su.val);
4015 	    } else {
4016 		arg.val = target_su.val;
4017 	    }
4018             ret = get_errno(semctl(semid, semnum, cmd, arg));
4019             break;
4020 	case GETALL:
4021 	case SETALL:
4022             err = target_to_host_semarray(semid, &array, target_su.array);
4023             if (err)
4024                 return err;
4025             arg.array = array;
4026             ret = get_errno(semctl(semid, semnum, cmd, arg));
4027             err = host_to_target_semarray(semid, target_su.array, &array);
4028             if (err)
4029                 return err;
4030             break;
4031 	case IPC_STAT:
4032 	case IPC_SET:
4033 	case SEM_STAT:
4034             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4035             if (err)
4036                 return err;
4037             arg.buf = &dsarg;
4038             ret = get_errno(semctl(semid, semnum, cmd, arg));
4039             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4040             if (err)
4041                 return err;
4042             break;
4043 	case IPC_INFO:
4044 	case SEM_INFO:
4045             arg.__buf = &seminfo;
4046             ret = get_errno(semctl(semid, semnum, cmd, arg));
4047             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4048             if (err)
4049                 return err;
4050             break;
4051 	case IPC_RMID:
4052 	case GETPID:
4053 	case GETNCNT:
4054 	case GETZCNT:
4055             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4056             break;
4057     }
4058 
4059     return ret;
4060 }
4061 
4062 struct target_sembuf {
4063     unsigned short sem_num;
4064     short sem_op;
4065     short sem_flg;
4066 };
4067 
4068 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4069                                              abi_ulong target_addr,
4070                                              unsigned nsops)
4071 {
4072     struct target_sembuf *target_sembuf;
4073     int i;
4074 
4075     target_sembuf = lock_user(VERIFY_READ, target_addr,
4076                               nsops*sizeof(struct target_sembuf), 1);
4077     if (!target_sembuf)
4078         return -TARGET_EFAULT;
4079 
4080     for(i=0; i<nsops; i++) {
4081         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4082         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4083         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4084     }
4085 
4086     unlock_user(target_sembuf, target_addr, 0);
4087 
4088     return 0;
4089 }
4090 
4091 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4092     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4093 
4094 /*
4095  * This macro is required to handle the s390 variants, which passes the
4096  * arguments in a different order than default.
4097  */
4098 #ifdef __s390x__
4099 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4100   (__nsops), (__timeout), (__sops)
4101 #else
4102 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4103   (__nsops), 0, (__sops), (__timeout)
4104 #endif
4105 
4106 static inline abi_long do_semtimedop(int semid,
4107                                      abi_long ptr,
4108                                      unsigned nsops,
4109                                      abi_long timeout, bool time64)
4110 {
4111     struct sembuf *sops;
4112     struct timespec ts, *pts = NULL;
4113     abi_long ret;
4114 
4115     if (timeout) {
4116         pts = &ts;
4117         if (time64) {
4118             if (target_to_host_timespec64(pts, timeout)) {
4119                 return -TARGET_EFAULT;
4120             }
4121         } else {
4122             if (target_to_host_timespec(pts, timeout)) {
4123                 return -TARGET_EFAULT;
4124             }
4125         }
4126     }
4127 
4128     if (nsops > TARGET_SEMOPM) {
4129         return -TARGET_E2BIG;
4130     }
4131 
4132     sops = g_new(struct sembuf, nsops);
4133 
4134     if (target_to_host_sembuf(sops, ptr, nsops)) {
4135         g_free(sops);
4136         return -TARGET_EFAULT;
4137     }
4138 
4139     ret = -TARGET_ENOSYS;
4140 #ifdef __NR_semtimedop
4141     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4142 #endif
4143 #ifdef __NR_ipc
4144     if (ret == -TARGET_ENOSYS) {
4145         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4146                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4147     }
4148 #endif
4149     g_free(sops);
4150     return ret;
4151 }
4152 #endif
4153 
4154 struct target_msqid_ds
4155 {
4156     struct target_ipc_perm msg_perm;
4157     abi_ulong msg_stime;
4158 #if TARGET_ABI_BITS == 32
4159     abi_ulong __unused1;
4160 #endif
4161     abi_ulong msg_rtime;
4162 #if TARGET_ABI_BITS == 32
4163     abi_ulong __unused2;
4164 #endif
4165     abi_ulong msg_ctime;
4166 #if TARGET_ABI_BITS == 32
4167     abi_ulong __unused3;
4168 #endif
4169     abi_ulong __msg_cbytes;
4170     abi_ulong msg_qnum;
4171     abi_ulong msg_qbytes;
4172     abi_ulong msg_lspid;
4173     abi_ulong msg_lrpid;
4174     abi_ulong __unused4;
4175     abi_ulong __unused5;
4176 };
4177 
4178 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4179                                                abi_ulong target_addr)
4180 {
4181     struct target_msqid_ds *target_md;
4182 
4183     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4184         return -TARGET_EFAULT;
4185     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4186         return -TARGET_EFAULT;
4187     host_md->msg_stime = tswapal(target_md->msg_stime);
4188     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4189     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4190     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4191     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4192     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4193     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4194     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4195     unlock_user_struct(target_md, target_addr, 0);
4196     return 0;
4197 }
4198 
4199 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4200                                                struct msqid_ds *host_md)
4201 {
4202     struct target_msqid_ds *target_md;
4203 
4204     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4205         return -TARGET_EFAULT;
4206     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4207         return -TARGET_EFAULT;
4208     target_md->msg_stime = tswapal(host_md->msg_stime);
4209     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4210     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4211     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4212     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4213     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4214     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4215     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4216     unlock_user_struct(target_md, target_addr, 1);
4217     return 0;
4218 }
4219 
4220 struct target_msginfo {
4221     int msgpool;
4222     int msgmap;
4223     int msgmax;
4224     int msgmnb;
4225     int msgmni;
4226     int msgssz;
4227     int msgtql;
4228     unsigned short int msgseg;
4229 };
4230 
4231 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4232                                               struct msginfo *host_msginfo)
4233 {
4234     struct target_msginfo *target_msginfo;
4235     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4236         return -TARGET_EFAULT;
4237     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4238     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4239     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4240     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4241     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4242     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4243     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4244     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4245     unlock_user_struct(target_msginfo, target_addr, 1);
4246     return 0;
4247 }
4248 
4249 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4250 {
4251     struct msqid_ds dsarg;
4252     struct msginfo msginfo;
4253     abi_long ret = -TARGET_EINVAL;
4254 
4255     cmd &= 0xff;
4256 
4257     switch (cmd) {
4258     case IPC_STAT:
4259     case IPC_SET:
4260     case MSG_STAT:
4261         if (target_to_host_msqid_ds(&dsarg,ptr))
4262             return -TARGET_EFAULT;
4263         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4264         if (host_to_target_msqid_ds(ptr,&dsarg))
4265             return -TARGET_EFAULT;
4266         break;
4267     case IPC_RMID:
4268         ret = get_errno(msgctl(msgid, cmd, NULL));
4269         break;
4270     case IPC_INFO:
4271     case MSG_INFO:
4272         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4273         if (host_to_target_msginfo(ptr, &msginfo))
4274             return -TARGET_EFAULT;
4275         break;
4276     }
4277 
4278     return ret;
4279 }
4280 
4281 struct target_msgbuf {
4282     abi_long mtype;
4283     char	mtext[1];
4284 };
4285 
4286 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4287                                  ssize_t msgsz, int msgflg)
4288 {
4289     struct target_msgbuf *target_mb;
4290     struct msgbuf *host_mb;
4291     abi_long ret = 0;
4292 
4293     if (msgsz < 0) {
4294         return -TARGET_EINVAL;
4295     }
4296 
4297     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4298         return -TARGET_EFAULT;
4299     host_mb = g_try_malloc(msgsz + sizeof(long));
4300     if (!host_mb) {
4301         unlock_user_struct(target_mb, msgp, 0);
4302         return -TARGET_ENOMEM;
4303     }
4304     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4305     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4306     ret = -TARGET_ENOSYS;
4307 #ifdef __NR_msgsnd
4308     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4309 #endif
4310 #ifdef __NR_ipc
4311     if (ret == -TARGET_ENOSYS) {
4312 #ifdef __s390x__
4313         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4314                                  host_mb));
4315 #else
4316         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4317                                  host_mb, 0));
4318 #endif
4319     }
4320 #endif
4321     g_free(host_mb);
4322     unlock_user_struct(target_mb, msgp, 0);
4323 
4324     return ret;
4325 }
4326 
4327 #ifdef __NR_ipc
4328 #if defined(__sparc__)
4329 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4330 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4331 #elif defined(__s390x__)
4332 /* The s390 sys_ipc variant has only five parameters.  */
4333 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4334     ((long int[]){(long int)__msgp, __msgtyp})
4335 #else
4336 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4337     ((long int[]){(long int)__msgp, __msgtyp}), 0
4338 #endif
4339 #endif
4340 
4341 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4342                                  ssize_t msgsz, abi_long msgtyp,
4343                                  int msgflg)
4344 {
4345     struct target_msgbuf *target_mb;
4346     char *target_mtext;
4347     struct msgbuf *host_mb;
4348     abi_long ret = 0;
4349 
4350     if (msgsz < 0) {
4351         return -TARGET_EINVAL;
4352     }
4353 
4354     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4355         return -TARGET_EFAULT;
4356 
4357     host_mb = g_try_malloc(msgsz + sizeof(long));
4358     if (!host_mb) {
4359         ret = -TARGET_ENOMEM;
4360         goto end;
4361     }
4362     ret = -TARGET_ENOSYS;
4363 #ifdef __NR_msgrcv
4364     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4365 #endif
4366 #ifdef __NR_ipc
4367     if (ret == -TARGET_ENOSYS) {
4368         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4369                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4370     }
4371 #endif
4372 
4373     if (ret > 0) {
4374         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4375         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4376         if (!target_mtext) {
4377             ret = -TARGET_EFAULT;
4378             goto end;
4379         }
4380         memcpy(target_mb->mtext, host_mb->mtext, ret);
4381         unlock_user(target_mtext, target_mtext_addr, ret);
4382     }
4383 
4384     target_mb->mtype = tswapal(host_mb->mtype);
4385 
4386 end:
4387     if (target_mb)
4388         unlock_user_struct(target_mb, msgp, 1);
4389     g_free(host_mb);
4390     return ret;
4391 }
4392 
4393 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4394                                                abi_ulong target_addr)
4395 {
4396     struct target_shmid_ds *target_sd;
4397 
4398     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4399         return -TARGET_EFAULT;
4400     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4401         return -TARGET_EFAULT;
4402     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4403     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4404     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4405     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4406     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4407     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4408     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4409     unlock_user_struct(target_sd, target_addr, 0);
4410     return 0;
4411 }
4412 
4413 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4414                                                struct shmid_ds *host_sd)
4415 {
4416     struct target_shmid_ds *target_sd;
4417 
4418     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4419         return -TARGET_EFAULT;
4420     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4421         return -TARGET_EFAULT;
4422     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4423     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4424     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4425     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4426     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4427     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4428     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4429     unlock_user_struct(target_sd, target_addr, 1);
4430     return 0;
4431 }
4432 
4433 struct  target_shminfo {
4434     abi_ulong shmmax;
4435     abi_ulong shmmin;
4436     abi_ulong shmmni;
4437     abi_ulong shmseg;
4438     abi_ulong shmall;
4439 };
4440 
4441 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4442                                               struct shminfo *host_shminfo)
4443 {
4444     struct target_shminfo *target_shminfo;
4445     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4446         return -TARGET_EFAULT;
4447     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4448     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4449     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4450     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4451     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4452     unlock_user_struct(target_shminfo, target_addr, 1);
4453     return 0;
4454 }
4455 
4456 struct target_shm_info {
4457     int used_ids;
4458     abi_ulong shm_tot;
4459     abi_ulong shm_rss;
4460     abi_ulong shm_swp;
4461     abi_ulong swap_attempts;
4462     abi_ulong swap_successes;
4463 };
4464 
4465 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4466                                                struct shm_info *host_shm_info)
4467 {
4468     struct target_shm_info *target_shm_info;
4469     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4470         return -TARGET_EFAULT;
4471     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4472     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4473     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4474     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4475     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4476     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4477     unlock_user_struct(target_shm_info, target_addr, 1);
4478     return 0;
4479 }
4480 
4481 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4482 {
4483     struct shmid_ds dsarg;
4484     struct shminfo shminfo;
4485     struct shm_info shm_info;
4486     abi_long ret = -TARGET_EINVAL;
4487 
4488     cmd &= 0xff;
4489 
4490     switch(cmd) {
4491     case IPC_STAT:
4492     case IPC_SET:
4493     case SHM_STAT:
4494         if (target_to_host_shmid_ds(&dsarg, buf))
4495             return -TARGET_EFAULT;
4496         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4497         if (host_to_target_shmid_ds(buf, &dsarg))
4498             return -TARGET_EFAULT;
4499         break;
4500     case IPC_INFO:
4501         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4502         if (host_to_target_shminfo(buf, &shminfo))
4503             return -TARGET_EFAULT;
4504         break;
4505     case SHM_INFO:
4506         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4507         if (host_to_target_shm_info(buf, &shm_info))
4508             return -TARGET_EFAULT;
4509         break;
4510     case IPC_RMID:
4511     case SHM_LOCK:
4512     case SHM_UNLOCK:
4513         ret = get_errno(shmctl(shmid, cmd, NULL));
4514         break;
4515     }
4516 
4517     return ret;
4518 }
4519 
4520 #ifdef TARGET_NR_ipc
4521 /* ??? This only works with linear mappings.  */
4522 /* do_ipc() must return target values and target errnos. */
4523 static abi_long do_ipc(CPUArchState *cpu_env,
4524                        unsigned int call, abi_long first,
4525                        abi_long second, abi_long third,
4526                        abi_long ptr, abi_long fifth)
4527 {
4528     int version;
4529     abi_long ret = 0;
4530 
4531     version = call >> 16;
4532     call &= 0xffff;
4533 
4534     switch (call) {
4535     case IPCOP_semop:
4536         ret = do_semtimedop(first, ptr, second, 0, false);
4537         break;
4538     case IPCOP_semtimedop:
4539     /*
4540      * The s390 sys_ipc variant has only five parameters instead of six
4541      * (as for default variant) and the only difference is the handling of
4542      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4543      * to a struct timespec where the generic variant uses fifth parameter.
4544      */
4545 #if defined(TARGET_S390X)
4546         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4547 #else
4548         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4549 #endif
4550         break;
4551 
4552     case IPCOP_semget:
4553         ret = get_errno(semget(first, second, third));
4554         break;
4555 
4556     case IPCOP_semctl: {
4557         /* The semun argument to semctl is passed by value, so dereference the
4558          * ptr argument. */
4559         abi_ulong atptr;
4560         get_user_ual(atptr, ptr);
4561         ret = do_semctl(first, second, third, atptr);
4562         break;
4563     }
4564 
4565     case IPCOP_msgget:
4566         ret = get_errno(msgget(first, second));
4567         break;
4568 
4569     case IPCOP_msgsnd:
4570         ret = do_msgsnd(first, ptr, second, third);
4571         break;
4572 
4573     case IPCOP_msgctl:
4574         ret = do_msgctl(first, second, ptr);
4575         break;
4576 
4577     case IPCOP_msgrcv:
4578         switch (version) {
4579         case 0:
4580             {
4581                 struct target_ipc_kludge {
4582                     abi_long msgp;
4583                     abi_long msgtyp;
4584                 } *tmp;
4585 
4586                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4587                     ret = -TARGET_EFAULT;
4588                     break;
4589                 }
4590 
4591                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4592 
4593                 unlock_user_struct(tmp, ptr, 0);
4594                 break;
4595             }
4596         default:
4597             ret = do_msgrcv(first, ptr, second, fifth, third);
4598         }
4599         break;
4600 
4601     case IPCOP_shmat:
4602         switch (version) {
4603         default:
4604         {
4605             abi_ulong raddr;
4606             raddr = target_shmat(cpu_env, first, ptr, second);
4607             if (is_error(raddr))
4608                 return get_errno(raddr);
4609             if (put_user_ual(raddr, third))
4610                 return -TARGET_EFAULT;
4611             break;
4612         }
4613         case 1:
4614             ret = -TARGET_EINVAL;
4615             break;
4616         }
4617 	break;
4618     case IPCOP_shmdt:
4619         ret = target_shmdt(ptr);
4620 	break;
4621 
4622     case IPCOP_shmget:
4623 	/* IPC_* flag values are the same on all linux platforms */
4624 	ret = get_errno(shmget(first, second, third));
4625 	break;
4626 
4627 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4628     case IPCOP_shmctl:
4629         ret = do_shmctl(first, second, ptr);
4630         break;
4631     default:
4632         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4633                       call, version);
4634 	ret = -TARGET_ENOSYS;
4635 	break;
4636     }
4637     return ret;
4638 }
4639 #endif
4640 
4641 /* kernel structure types definitions */
4642 
4643 #define STRUCT(name, ...) STRUCT_ ## name,
4644 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4645 enum {
4646 #include "syscall_types.h"
4647 STRUCT_MAX
4648 };
4649 #undef STRUCT
4650 #undef STRUCT_SPECIAL
4651 
4652 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4653 #define STRUCT_SPECIAL(name)
4654 #include "syscall_types.h"
4655 #undef STRUCT
4656 #undef STRUCT_SPECIAL
4657 
4658 #define MAX_STRUCT_SIZE 4096
4659 
4660 #ifdef CONFIG_FIEMAP
4661 /* So fiemap access checks don't overflow on 32 bit systems.
4662  * This is very slightly smaller than the limit imposed by
4663  * the underlying kernel.
4664  */
4665 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4666                             / sizeof(struct fiemap_extent))
4667 
4668 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4669                                        int fd, int cmd, abi_long arg)
4670 {
4671     /* The parameter for this ioctl is a struct fiemap followed
4672      * by an array of struct fiemap_extent whose size is set
4673      * in fiemap->fm_extent_count. The array is filled in by the
4674      * ioctl.
4675      */
4676     int target_size_in, target_size_out;
4677     struct fiemap *fm;
4678     const argtype *arg_type = ie->arg_type;
4679     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4680     void *argptr, *p;
4681     abi_long ret;
4682     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4683     uint32_t outbufsz;
4684     int free_fm = 0;
4685 
4686     assert(arg_type[0] == TYPE_PTR);
4687     assert(ie->access == IOC_RW);
4688     arg_type++;
4689     target_size_in = thunk_type_size(arg_type, 0);
4690     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4691     if (!argptr) {
4692         return -TARGET_EFAULT;
4693     }
4694     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4695     unlock_user(argptr, arg, 0);
4696     fm = (struct fiemap *)buf_temp;
4697     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4698         return -TARGET_EINVAL;
4699     }
4700 
4701     outbufsz = sizeof (*fm) +
4702         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4703 
4704     if (outbufsz > MAX_STRUCT_SIZE) {
4705         /* We can't fit all the extents into the fixed size buffer.
4706          * Allocate one that is large enough and use it instead.
4707          */
4708         fm = g_try_malloc(outbufsz);
4709         if (!fm) {
4710             return -TARGET_ENOMEM;
4711         }
4712         memcpy(fm, buf_temp, sizeof(struct fiemap));
4713         free_fm = 1;
4714     }
4715     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4716     if (!is_error(ret)) {
4717         target_size_out = target_size_in;
4718         /* An extent_count of 0 means we were only counting the extents
4719          * so there are no structs to copy
4720          */
4721         if (fm->fm_extent_count != 0) {
4722             target_size_out += fm->fm_mapped_extents * extent_size;
4723         }
4724         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4725         if (!argptr) {
4726             ret = -TARGET_EFAULT;
4727         } else {
4728             /* Convert the struct fiemap */
4729             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4730             if (fm->fm_extent_count != 0) {
4731                 p = argptr + target_size_in;
4732                 /* ...and then all the struct fiemap_extents */
4733                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4734                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4735                                   THUNK_TARGET);
4736                     p += extent_size;
4737                 }
4738             }
4739             unlock_user(argptr, arg, target_size_out);
4740         }
4741     }
4742     if (free_fm) {
4743         g_free(fm);
4744     }
4745     return ret;
4746 }
4747 #endif
4748 
4749 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4750                                 int fd, int cmd, abi_long arg)
4751 {
4752     const argtype *arg_type = ie->arg_type;
4753     int target_size;
4754     void *argptr;
4755     int ret;
4756     struct ifconf *host_ifconf;
4757     uint32_t outbufsz;
4758     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4759     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4760     int target_ifreq_size;
4761     int nb_ifreq;
4762     int free_buf = 0;
4763     int i;
4764     int target_ifc_len;
4765     abi_long target_ifc_buf;
4766     int host_ifc_len;
4767     char *host_ifc_buf;
4768 
4769     assert(arg_type[0] == TYPE_PTR);
4770     assert(ie->access == IOC_RW);
4771 
4772     arg_type++;
4773     target_size = thunk_type_size(arg_type, 0);
4774 
4775     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4776     if (!argptr)
4777         return -TARGET_EFAULT;
4778     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4779     unlock_user(argptr, arg, 0);
4780 
4781     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4782     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4783     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4784 
4785     if (target_ifc_buf != 0) {
4786         target_ifc_len = host_ifconf->ifc_len;
4787         nb_ifreq = target_ifc_len / target_ifreq_size;
4788         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4789 
4790         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4791         if (outbufsz > MAX_STRUCT_SIZE) {
4792             /*
4793              * We can't fit all the extents into the fixed size buffer.
4794              * Allocate one that is large enough and use it instead.
4795              */
4796             host_ifconf = g_try_malloc(outbufsz);
4797             if (!host_ifconf) {
4798                 return -TARGET_ENOMEM;
4799             }
4800             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4801             free_buf = 1;
4802         }
4803         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4804 
4805         host_ifconf->ifc_len = host_ifc_len;
4806     } else {
4807       host_ifc_buf = NULL;
4808     }
4809     host_ifconf->ifc_buf = host_ifc_buf;
4810 
4811     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4812     if (!is_error(ret)) {
4813 	/* convert host ifc_len to target ifc_len */
4814 
4815         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4816         target_ifc_len = nb_ifreq * target_ifreq_size;
4817         host_ifconf->ifc_len = target_ifc_len;
4818 
4819 	/* restore target ifc_buf */
4820 
4821         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4822 
4823 	/* copy struct ifconf to target user */
4824 
4825         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4826         if (!argptr)
4827             return -TARGET_EFAULT;
4828         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4829         unlock_user(argptr, arg, target_size);
4830 
4831         if (target_ifc_buf != 0) {
4832             /* copy ifreq[] to target user */
4833             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4834             for (i = 0; i < nb_ifreq ; i++) {
4835                 thunk_convert(argptr + i * target_ifreq_size,
4836                               host_ifc_buf + i * sizeof(struct ifreq),
4837                               ifreq_arg_type, THUNK_TARGET);
4838             }
4839             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4840         }
4841     }
4842 
4843     if (free_buf) {
4844         g_free(host_ifconf);
4845     }
4846 
4847     return ret;
4848 }
4849 
4850 #if defined(CONFIG_USBFS)
4851 #if HOST_LONG_BITS > 64
4852 #error USBDEVFS thunks do not support >64 bit hosts yet.
4853 #endif
4854 struct live_urb {
4855     uint64_t target_urb_adr;
4856     uint64_t target_buf_adr;
4857     char *target_buf_ptr;
4858     struct usbdevfs_urb host_urb;
4859 };
4860 
4861 static GHashTable *usbdevfs_urb_hashtable(void)
4862 {
4863     static GHashTable *urb_hashtable;
4864 
4865     if (!urb_hashtable) {
4866         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4867     }
4868     return urb_hashtable;
4869 }
4870 
4871 static void urb_hashtable_insert(struct live_urb *urb)
4872 {
4873     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4874     g_hash_table_insert(urb_hashtable, urb, urb);
4875 }
4876 
4877 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4878 {
4879     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4880     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4881 }
4882 
4883 static void urb_hashtable_remove(struct live_urb *urb)
4884 {
4885     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4886     g_hash_table_remove(urb_hashtable, urb);
4887 }
4888 
4889 static abi_long
4890 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4891                           int fd, int cmd, abi_long arg)
4892 {
4893     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4894     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4895     struct live_urb *lurb;
4896     void *argptr;
4897     uint64_t hurb;
4898     int target_size;
4899     uintptr_t target_urb_adr;
4900     abi_long ret;
4901 
4902     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4903 
4904     memset(buf_temp, 0, sizeof(uint64_t));
4905     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4906     if (is_error(ret)) {
4907         return ret;
4908     }
4909 
4910     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4911     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4912     if (!lurb->target_urb_adr) {
4913         return -TARGET_EFAULT;
4914     }
4915     urb_hashtable_remove(lurb);
4916     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4917         lurb->host_urb.buffer_length);
4918     lurb->target_buf_ptr = NULL;
4919 
4920     /* restore the guest buffer pointer */
4921     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4922 
4923     /* update the guest urb struct */
4924     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4925     if (!argptr) {
4926         g_free(lurb);
4927         return -TARGET_EFAULT;
4928     }
4929     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4930     unlock_user(argptr, lurb->target_urb_adr, target_size);
4931 
4932     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4933     /* write back the urb handle */
4934     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4935     if (!argptr) {
4936         g_free(lurb);
4937         return -TARGET_EFAULT;
4938     }
4939 
4940     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4941     target_urb_adr = lurb->target_urb_adr;
4942     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4943     unlock_user(argptr, arg, target_size);
4944 
4945     g_free(lurb);
4946     return ret;
4947 }
4948 
4949 static abi_long
4950 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4951                              uint8_t *buf_temp __attribute__((unused)),
4952                              int fd, int cmd, abi_long arg)
4953 {
4954     struct live_urb *lurb;
4955 
4956     /* map target address back to host URB with metadata. */
4957     lurb = urb_hashtable_lookup(arg);
4958     if (!lurb) {
4959         return -TARGET_EFAULT;
4960     }
4961     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4962 }
4963 
4964 static abi_long
4965 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4966                             int fd, int cmd, abi_long arg)
4967 {
4968     const argtype *arg_type = ie->arg_type;
4969     int target_size;
4970     abi_long ret;
4971     void *argptr;
4972     int rw_dir;
4973     struct live_urb *lurb;
4974 
4975     /*
4976      * each submitted URB needs to map to a unique ID for the
4977      * kernel, and that unique ID needs to be a pointer to
4978      * host memory.  hence, we need to malloc for each URB.
4979      * isochronous transfers have a variable length struct.
4980      */
4981     arg_type++;
4982     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4983 
4984     /* construct host copy of urb and metadata */
4985     lurb = g_try_new0(struct live_urb, 1);
4986     if (!lurb) {
4987         return -TARGET_ENOMEM;
4988     }
4989 
4990     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4991     if (!argptr) {
4992         g_free(lurb);
4993         return -TARGET_EFAULT;
4994     }
4995     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4996     unlock_user(argptr, arg, 0);
4997 
4998     lurb->target_urb_adr = arg;
4999     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5000 
5001     /* buffer space used depends on endpoint type so lock the entire buffer */
5002     /* control type urbs should check the buffer contents for true direction */
5003     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5004     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5005         lurb->host_urb.buffer_length, 1);
5006     if (lurb->target_buf_ptr == NULL) {
5007         g_free(lurb);
5008         return -TARGET_EFAULT;
5009     }
5010 
5011     /* update buffer pointer in host copy */
5012     lurb->host_urb.buffer = lurb->target_buf_ptr;
5013 
5014     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5015     if (is_error(ret)) {
5016         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5017         g_free(lurb);
5018     } else {
5019         urb_hashtable_insert(lurb);
5020     }
5021 
5022     return ret;
5023 }
5024 #endif /* CONFIG_USBFS */
5025 
5026 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5027                             int cmd, abi_long arg)
5028 {
5029     void *argptr;
5030     struct dm_ioctl *host_dm;
5031     abi_long guest_data;
5032     uint32_t guest_data_size;
5033     int target_size;
5034     const argtype *arg_type = ie->arg_type;
5035     abi_long ret;
5036     void *big_buf = NULL;
5037     char *host_data;
5038 
5039     arg_type++;
5040     target_size = thunk_type_size(arg_type, 0);
5041     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5042     if (!argptr) {
5043         ret = -TARGET_EFAULT;
5044         goto out;
5045     }
5046     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5047     unlock_user(argptr, arg, 0);
5048 
5049     /* buf_temp is too small, so fetch things into a bigger buffer */
5050     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5051     memcpy(big_buf, buf_temp, target_size);
5052     buf_temp = big_buf;
5053     host_dm = big_buf;
5054 
5055     guest_data = arg + host_dm->data_start;
5056     if ((guest_data - arg) < 0) {
5057         ret = -TARGET_EINVAL;
5058         goto out;
5059     }
5060     guest_data_size = host_dm->data_size - host_dm->data_start;
5061     host_data = (char*)host_dm + host_dm->data_start;
5062 
5063     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5064     if (!argptr) {
5065         ret = -TARGET_EFAULT;
5066         goto out;
5067     }
5068 
5069     switch (ie->host_cmd) {
5070     case DM_REMOVE_ALL:
5071     case DM_LIST_DEVICES:
5072     case DM_DEV_CREATE:
5073     case DM_DEV_REMOVE:
5074     case DM_DEV_SUSPEND:
5075     case DM_DEV_STATUS:
5076     case DM_DEV_WAIT:
5077     case DM_TABLE_STATUS:
5078     case DM_TABLE_CLEAR:
5079     case DM_TABLE_DEPS:
5080     case DM_LIST_VERSIONS:
5081         /* no input data */
5082         break;
5083     case DM_DEV_RENAME:
5084     case DM_DEV_SET_GEOMETRY:
5085         /* data contains only strings */
5086         memcpy(host_data, argptr, guest_data_size);
5087         break;
5088     case DM_TARGET_MSG:
5089         memcpy(host_data, argptr, guest_data_size);
5090         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5091         break;
5092     case DM_TABLE_LOAD:
5093     {
5094         void *gspec = argptr;
5095         void *cur_data = host_data;
5096         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5097         int spec_size = thunk_type_size(dm_arg_type, 0);
5098         int i;
5099 
5100         for (i = 0; i < host_dm->target_count; i++) {
5101             struct dm_target_spec *spec = cur_data;
5102             uint32_t next;
5103             int slen;
5104 
5105             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5106             slen = strlen((char*)gspec + spec_size) + 1;
5107             next = spec->next;
5108             spec->next = sizeof(*spec) + slen;
5109             strcpy((char*)&spec[1], gspec + spec_size);
5110             gspec += next;
5111             cur_data += spec->next;
5112         }
5113         break;
5114     }
5115     default:
5116         ret = -TARGET_EINVAL;
5117         unlock_user(argptr, guest_data, 0);
5118         goto out;
5119     }
5120     unlock_user(argptr, guest_data, 0);
5121 
5122     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5123     if (!is_error(ret)) {
5124         guest_data = arg + host_dm->data_start;
5125         guest_data_size = host_dm->data_size - host_dm->data_start;
5126         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5127         switch (ie->host_cmd) {
5128         case DM_REMOVE_ALL:
5129         case DM_DEV_CREATE:
5130         case DM_DEV_REMOVE:
5131         case DM_DEV_RENAME:
5132         case DM_DEV_SUSPEND:
5133         case DM_DEV_STATUS:
5134         case DM_TABLE_LOAD:
5135         case DM_TABLE_CLEAR:
5136         case DM_TARGET_MSG:
5137         case DM_DEV_SET_GEOMETRY:
5138             /* no return data */
5139             break;
5140         case DM_LIST_DEVICES:
5141         {
5142             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5143             uint32_t remaining_data = guest_data_size;
5144             void *cur_data = argptr;
5145             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5146             int nl_size = 12; /* can't use thunk_size due to alignment */
5147 
5148             while (1) {
5149                 uint32_t next = nl->next;
5150                 if (next) {
5151                     nl->next = nl_size + (strlen(nl->name) + 1);
5152                 }
5153                 if (remaining_data < nl->next) {
5154                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5155                     break;
5156                 }
5157                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5158                 strcpy(cur_data + nl_size, nl->name);
5159                 cur_data += nl->next;
5160                 remaining_data -= nl->next;
5161                 if (!next) {
5162                     break;
5163                 }
5164                 nl = (void*)nl + next;
5165             }
5166             break;
5167         }
5168         case DM_DEV_WAIT:
5169         case DM_TABLE_STATUS:
5170         {
5171             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5172             void *cur_data = argptr;
5173             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5174             int spec_size = thunk_type_size(dm_arg_type, 0);
5175             int i;
5176 
5177             for (i = 0; i < host_dm->target_count; i++) {
5178                 uint32_t next = spec->next;
5179                 int slen = strlen((char*)&spec[1]) + 1;
5180                 spec->next = (cur_data - argptr) + spec_size + slen;
5181                 if (guest_data_size < spec->next) {
5182                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5183                     break;
5184                 }
5185                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5186                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5187                 cur_data = argptr + spec->next;
5188                 spec = (void*)host_dm + host_dm->data_start + next;
5189             }
5190             break;
5191         }
5192         case DM_TABLE_DEPS:
5193         {
5194             void *hdata = (void*)host_dm + host_dm->data_start;
5195             int count = *(uint32_t*)hdata;
5196             uint64_t *hdev = hdata + 8;
5197             uint64_t *gdev = argptr + 8;
5198             int i;
5199 
5200             *(uint32_t*)argptr = tswap32(count);
5201             for (i = 0; i < count; i++) {
5202                 *gdev = tswap64(*hdev);
5203                 gdev++;
5204                 hdev++;
5205             }
5206             break;
5207         }
5208         case DM_LIST_VERSIONS:
5209         {
5210             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5211             uint32_t remaining_data = guest_data_size;
5212             void *cur_data = argptr;
5213             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5214             int vers_size = thunk_type_size(dm_arg_type, 0);
5215 
5216             while (1) {
5217                 uint32_t next = vers->next;
5218                 if (next) {
5219                     vers->next = vers_size + (strlen(vers->name) + 1);
5220                 }
5221                 if (remaining_data < vers->next) {
5222                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5223                     break;
5224                 }
5225                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5226                 strcpy(cur_data + vers_size, vers->name);
5227                 cur_data += vers->next;
5228                 remaining_data -= vers->next;
5229                 if (!next) {
5230                     break;
5231                 }
5232                 vers = (void*)vers + next;
5233             }
5234             break;
5235         }
5236         default:
5237             unlock_user(argptr, guest_data, 0);
5238             ret = -TARGET_EINVAL;
5239             goto out;
5240         }
5241         unlock_user(argptr, guest_data, guest_data_size);
5242 
5243         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5244         if (!argptr) {
5245             ret = -TARGET_EFAULT;
5246             goto out;
5247         }
5248         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5249         unlock_user(argptr, arg, target_size);
5250     }
5251 out:
5252     g_free(big_buf);
5253     return ret;
5254 }
5255 
5256 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5257                                int cmd, abi_long arg)
5258 {
5259     void *argptr;
5260     int target_size;
5261     const argtype *arg_type = ie->arg_type;
5262     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5263     abi_long ret;
5264 
5265     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5266     struct blkpg_partition host_part;
5267 
5268     /* Read and convert blkpg */
5269     arg_type++;
5270     target_size = thunk_type_size(arg_type, 0);
5271     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5272     if (!argptr) {
5273         ret = -TARGET_EFAULT;
5274         goto out;
5275     }
5276     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5277     unlock_user(argptr, arg, 0);
5278 
5279     switch (host_blkpg->op) {
5280     case BLKPG_ADD_PARTITION:
5281     case BLKPG_DEL_PARTITION:
5282         /* payload is struct blkpg_partition */
5283         break;
5284     default:
5285         /* Unknown opcode */
5286         ret = -TARGET_EINVAL;
5287         goto out;
5288     }
5289 
5290     /* Read and convert blkpg->data */
5291     arg = (abi_long)(uintptr_t)host_blkpg->data;
5292     target_size = thunk_type_size(part_arg_type, 0);
5293     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5294     if (!argptr) {
5295         ret = -TARGET_EFAULT;
5296         goto out;
5297     }
5298     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5299     unlock_user(argptr, arg, 0);
5300 
5301     /* Swizzle the data pointer to our local copy and call! */
5302     host_blkpg->data = &host_part;
5303     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5304 
5305 out:
5306     return ret;
5307 }
5308 
5309 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5310                                 int fd, int cmd, abi_long arg)
5311 {
5312     const argtype *arg_type = ie->arg_type;
5313     const StructEntry *se;
5314     const argtype *field_types;
5315     const int *dst_offsets, *src_offsets;
5316     int target_size;
5317     void *argptr;
5318     abi_ulong *target_rt_dev_ptr = NULL;
5319     unsigned long *host_rt_dev_ptr = NULL;
5320     abi_long ret;
5321     int i;
5322 
5323     assert(ie->access == IOC_W);
5324     assert(*arg_type == TYPE_PTR);
5325     arg_type++;
5326     assert(*arg_type == TYPE_STRUCT);
5327     target_size = thunk_type_size(arg_type, 0);
5328     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5329     if (!argptr) {
5330         return -TARGET_EFAULT;
5331     }
5332     arg_type++;
5333     assert(*arg_type == (int)STRUCT_rtentry);
5334     se = struct_entries + *arg_type++;
5335     assert(se->convert[0] == NULL);
5336     /* convert struct here to be able to catch rt_dev string */
5337     field_types = se->field_types;
5338     dst_offsets = se->field_offsets[THUNK_HOST];
5339     src_offsets = se->field_offsets[THUNK_TARGET];
5340     for (i = 0; i < se->nb_fields; i++) {
5341         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5342             assert(*field_types == TYPE_PTRVOID);
5343             target_rt_dev_ptr = argptr + src_offsets[i];
5344             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5345             if (*target_rt_dev_ptr != 0) {
5346                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5347                                                   tswapal(*target_rt_dev_ptr));
5348                 if (!*host_rt_dev_ptr) {
5349                     unlock_user(argptr, arg, 0);
5350                     return -TARGET_EFAULT;
5351                 }
5352             } else {
5353                 *host_rt_dev_ptr = 0;
5354             }
5355             field_types++;
5356             continue;
5357         }
5358         field_types = thunk_convert(buf_temp + dst_offsets[i],
5359                                     argptr + src_offsets[i],
5360                                     field_types, THUNK_HOST);
5361     }
5362     unlock_user(argptr, arg, 0);
5363 
5364     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5365 
5366     assert(host_rt_dev_ptr != NULL);
5367     assert(target_rt_dev_ptr != NULL);
5368     if (*host_rt_dev_ptr != 0) {
5369         unlock_user((void *)*host_rt_dev_ptr,
5370                     *target_rt_dev_ptr, 0);
5371     }
5372     return ret;
5373 }
5374 
5375 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5376                                      int fd, int cmd, abi_long arg)
5377 {
5378     int sig = target_to_host_signal(arg);
5379     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5380 }
5381 
5382 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5383                                     int fd, int cmd, abi_long arg)
5384 {
5385     struct timeval tv;
5386     abi_long ret;
5387 
5388     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5389     if (is_error(ret)) {
5390         return ret;
5391     }
5392 
5393     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5394         if (copy_to_user_timeval(arg, &tv)) {
5395             return -TARGET_EFAULT;
5396         }
5397     } else {
5398         if (copy_to_user_timeval64(arg, &tv)) {
5399             return -TARGET_EFAULT;
5400         }
5401     }
5402 
5403     return ret;
5404 }
5405 
5406 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5407                                       int fd, int cmd, abi_long arg)
5408 {
5409     struct timespec ts;
5410     abi_long ret;
5411 
5412     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5413     if (is_error(ret)) {
5414         return ret;
5415     }
5416 
5417     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5418         if (host_to_target_timespec(arg, &ts)) {
5419             return -TARGET_EFAULT;
5420         }
5421     } else{
5422         if (host_to_target_timespec64(arg, &ts)) {
5423             return -TARGET_EFAULT;
5424         }
5425     }
5426 
5427     return ret;
5428 }
5429 
5430 #ifdef TIOCGPTPEER
5431 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5432                                      int fd, int cmd, abi_long arg)
5433 {
5434     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5435     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5436 }
5437 #endif
5438 
5439 #ifdef HAVE_DRM_H
5440 
5441 static void unlock_drm_version(struct drm_version *host_ver,
5442                                struct target_drm_version *target_ver,
5443                                bool copy)
5444 {
5445     unlock_user(host_ver->name, target_ver->name,
5446                                 copy ? host_ver->name_len : 0);
5447     unlock_user(host_ver->date, target_ver->date,
5448                                 copy ? host_ver->date_len : 0);
5449     unlock_user(host_ver->desc, target_ver->desc,
5450                                 copy ? host_ver->desc_len : 0);
5451 }
5452 
5453 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5454                                           struct target_drm_version *target_ver)
5455 {
5456     memset(host_ver, 0, sizeof(*host_ver));
5457 
5458     __get_user(host_ver->name_len, &target_ver->name_len);
5459     if (host_ver->name_len) {
5460         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5461                                    target_ver->name_len, 0);
5462         if (!host_ver->name) {
5463             return -EFAULT;
5464         }
5465     }
5466 
5467     __get_user(host_ver->date_len, &target_ver->date_len);
5468     if (host_ver->date_len) {
5469         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5470                                    target_ver->date_len, 0);
5471         if (!host_ver->date) {
5472             goto err;
5473         }
5474     }
5475 
5476     __get_user(host_ver->desc_len, &target_ver->desc_len);
5477     if (host_ver->desc_len) {
5478         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5479                                    target_ver->desc_len, 0);
5480         if (!host_ver->desc) {
5481             goto err;
5482         }
5483     }
5484 
5485     return 0;
5486 err:
5487     unlock_drm_version(host_ver, target_ver, false);
5488     return -EFAULT;
5489 }
5490 
5491 static inline void host_to_target_drmversion(
5492                                           struct target_drm_version *target_ver,
5493                                           struct drm_version *host_ver)
5494 {
5495     __put_user(host_ver->version_major, &target_ver->version_major);
5496     __put_user(host_ver->version_minor, &target_ver->version_minor);
5497     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5498     __put_user(host_ver->name_len, &target_ver->name_len);
5499     __put_user(host_ver->date_len, &target_ver->date_len);
5500     __put_user(host_ver->desc_len, &target_ver->desc_len);
5501     unlock_drm_version(host_ver, target_ver, true);
5502 }
5503 
5504 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5505                              int fd, int cmd, abi_long arg)
5506 {
5507     struct drm_version *ver;
5508     struct target_drm_version *target_ver;
5509     abi_long ret;
5510 
5511     switch (ie->host_cmd) {
5512     case DRM_IOCTL_VERSION:
5513         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5514             return -TARGET_EFAULT;
5515         }
5516         ver = (struct drm_version *)buf_temp;
5517         ret = target_to_host_drmversion(ver, target_ver);
5518         if (!is_error(ret)) {
5519             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5520             if (is_error(ret)) {
5521                 unlock_drm_version(ver, target_ver, false);
5522             } else {
5523                 host_to_target_drmversion(target_ver, ver);
5524             }
5525         }
5526         unlock_user_struct(target_ver, arg, 0);
5527         return ret;
5528     }
5529     return -TARGET_ENOSYS;
5530 }
5531 
5532 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5533                                            struct drm_i915_getparam *gparam,
5534                                            int fd, abi_long arg)
5535 {
5536     abi_long ret;
5537     int value;
5538     struct target_drm_i915_getparam *target_gparam;
5539 
5540     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5541         return -TARGET_EFAULT;
5542     }
5543 
5544     __get_user(gparam->param, &target_gparam->param);
5545     gparam->value = &value;
5546     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5547     put_user_s32(value, target_gparam->value);
5548 
5549     unlock_user_struct(target_gparam, arg, 0);
5550     return ret;
5551 }
5552 
5553 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5554                                   int fd, int cmd, abi_long arg)
5555 {
5556     switch (ie->host_cmd) {
5557     case DRM_IOCTL_I915_GETPARAM:
5558         return do_ioctl_drm_i915_getparam(ie,
5559                                           (struct drm_i915_getparam *)buf_temp,
5560                                           fd, arg);
5561     default:
5562         return -TARGET_ENOSYS;
5563     }
5564 }
5565 
5566 #endif
5567 
5568 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5569                                         int fd, int cmd, abi_long arg)
5570 {
5571     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5572     struct tun_filter *target_filter;
5573     char *target_addr;
5574 
5575     assert(ie->access == IOC_W);
5576 
5577     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5578     if (!target_filter) {
5579         return -TARGET_EFAULT;
5580     }
5581     filter->flags = tswap16(target_filter->flags);
5582     filter->count = tswap16(target_filter->count);
5583     unlock_user(target_filter, arg, 0);
5584 
5585     if (filter->count) {
5586         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5587             MAX_STRUCT_SIZE) {
5588             return -TARGET_EFAULT;
5589         }
5590 
5591         target_addr = lock_user(VERIFY_READ,
5592                                 arg + offsetof(struct tun_filter, addr),
5593                                 filter->count * ETH_ALEN, 1);
5594         if (!target_addr) {
5595             return -TARGET_EFAULT;
5596         }
5597         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5598         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5599     }
5600 
5601     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5602 }
5603 
5604 IOCTLEntry ioctl_entries[] = {
5605 #define IOCTL(cmd, access, ...) \
5606     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5607 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5608     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5609 #define IOCTL_IGNORE(cmd) \
5610     { TARGET_ ## cmd, 0, #cmd },
5611 #include "ioctls.h"
5612     { 0, 0, },
5613 };
5614 
5615 /* ??? Implement proper locking for ioctls.  */
5616 /* do_ioctl() Must return target values and target errnos. */
5617 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5618 {
5619     const IOCTLEntry *ie;
5620     const argtype *arg_type;
5621     abi_long ret;
5622     uint8_t buf_temp[MAX_STRUCT_SIZE];
5623     int target_size;
5624     void *argptr;
5625 
5626     ie = ioctl_entries;
5627     for(;;) {
5628         if (ie->target_cmd == 0) {
5629             qemu_log_mask(
5630                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5631             return -TARGET_ENOTTY;
5632         }
5633         if (ie->target_cmd == cmd)
5634             break;
5635         ie++;
5636     }
5637     arg_type = ie->arg_type;
5638     if (ie->do_ioctl) {
5639         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5640     } else if (!ie->host_cmd) {
5641         /* Some architectures define BSD ioctls in their headers
5642            that are not implemented in Linux.  */
5643         return -TARGET_ENOTTY;
5644     }
5645 
5646     switch(arg_type[0]) {
5647     case TYPE_NULL:
5648         /* no argument */
5649         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5650         break;
5651     case TYPE_PTRVOID:
5652     case TYPE_INT:
5653     case TYPE_LONG:
5654     case TYPE_ULONG:
5655         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5656         break;
5657     case TYPE_PTR:
5658         arg_type++;
5659         target_size = thunk_type_size(arg_type, 0);
5660         switch(ie->access) {
5661         case IOC_R:
5662             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5663             if (!is_error(ret)) {
5664                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5665                 if (!argptr)
5666                     return -TARGET_EFAULT;
5667                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5668                 unlock_user(argptr, arg, target_size);
5669             }
5670             break;
5671         case IOC_W:
5672             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5673             if (!argptr)
5674                 return -TARGET_EFAULT;
5675             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5676             unlock_user(argptr, arg, 0);
5677             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5678             break;
5679         default:
5680         case IOC_RW:
5681             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5682             if (!argptr)
5683                 return -TARGET_EFAULT;
5684             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5685             unlock_user(argptr, arg, 0);
5686             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5687             if (!is_error(ret)) {
5688                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5689                 if (!argptr)
5690                     return -TARGET_EFAULT;
5691                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5692                 unlock_user(argptr, arg, target_size);
5693             }
5694             break;
5695         }
5696         break;
5697     default:
5698         qemu_log_mask(LOG_UNIMP,
5699                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5700                       (long)cmd, arg_type[0]);
5701         ret = -TARGET_ENOTTY;
5702         break;
5703     }
5704     return ret;
5705 }
5706 
5707 static const bitmask_transtbl iflag_tbl[] = {
5708         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5709         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5710         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5711         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5712         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5713         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5714         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5715         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5716         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5717         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5718         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5719         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5720         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5721         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5722         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5723 };
5724 
5725 static const bitmask_transtbl oflag_tbl[] = {
5726 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5727 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5728 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5729 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5730 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5731 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5732 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5733 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5734 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5735 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5736 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5737 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5738 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5739 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5740 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5741 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5742 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5743 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5744 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5745 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5746 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5747 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5748 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5749 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5750 };
5751 
5752 static const bitmask_transtbl cflag_tbl[] = {
5753 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5754 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5755 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5756 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5757 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5758 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5759 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5760 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5761 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5762 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5763 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5764 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5765 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5766 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5767 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5768 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5769 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5770 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5771 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5772 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5773 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5774 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5775 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5776 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5777 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5778 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5779 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5780 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5781 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5782 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5783 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5784 };
5785 
5786 static const bitmask_transtbl lflag_tbl[] = {
5787   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5788   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5789   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5790   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5791   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5792   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5793   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5794   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5795   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5796   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5797   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5798   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5799   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5800   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5801   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5802   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5803 };
5804 
5805 static void target_to_host_termios (void *dst, const void *src)
5806 {
5807     struct host_termios *host = dst;
5808     const struct target_termios *target = src;
5809 
5810     host->c_iflag =
5811         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5812     host->c_oflag =
5813         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5814     host->c_cflag =
5815         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5816     host->c_lflag =
5817         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5818     host->c_line = target->c_line;
5819 
5820     memset(host->c_cc, 0, sizeof(host->c_cc));
5821     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5822     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5823     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5824     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5825     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5826     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5827     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5828     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5829     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5830     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5831     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5832     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5833     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5834     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5835     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5836     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5837     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5838 }
5839 
5840 static void host_to_target_termios (void *dst, const void *src)
5841 {
5842     struct target_termios *target = dst;
5843     const struct host_termios *host = src;
5844 
5845     target->c_iflag =
5846         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5847     target->c_oflag =
5848         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5849     target->c_cflag =
5850         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5851     target->c_lflag =
5852         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5853     target->c_line = host->c_line;
5854 
5855     memset(target->c_cc, 0, sizeof(target->c_cc));
5856     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5857     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5858     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5859     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5860     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5861     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5862     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5863     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5864     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5865     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5866     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5867     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5868     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5869     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5870     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5871     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5872     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5873 }
5874 
5875 static const StructEntry struct_termios_def = {
5876     .convert = { host_to_target_termios, target_to_host_termios },
5877     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5878     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5879     .print = print_termios,
5880 };
5881 
5882 /* If the host does not provide these bits, they may be safely discarded. */
5883 #ifndef MAP_SYNC
5884 #define MAP_SYNC 0
5885 #endif
5886 #ifndef MAP_UNINITIALIZED
5887 #define MAP_UNINITIALIZED 0
5888 #endif
5889 
5890 static const bitmask_transtbl mmap_flags_tbl[] = {
5891     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5892     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5893       MAP_ANONYMOUS, MAP_ANONYMOUS },
5894     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5895       MAP_GROWSDOWN, MAP_GROWSDOWN },
5896     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5897       MAP_DENYWRITE, MAP_DENYWRITE },
5898     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5899       MAP_EXECUTABLE, MAP_EXECUTABLE },
5900     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5901     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5902       MAP_NORESERVE, MAP_NORESERVE },
5903     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5904     /* MAP_STACK had been ignored by the kernel for quite some time.
5905        Recognize it for the target insofar as we do not want to pass
5906        it through to the host.  */
5907     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5908     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5909     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5910     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5911       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5912     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5913       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5914 };
5915 
5916 /*
5917  * Arrange for legacy / undefined architecture specific flags to be
5918  * ignored by mmap handling code.
5919  */
5920 #ifndef TARGET_MAP_32BIT
5921 #define TARGET_MAP_32BIT 0
5922 #endif
5923 #ifndef TARGET_MAP_HUGE_2MB
5924 #define TARGET_MAP_HUGE_2MB 0
5925 #endif
5926 #ifndef TARGET_MAP_HUGE_1GB
5927 #define TARGET_MAP_HUGE_1GB 0
5928 #endif
5929 
5930 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5931                         int target_flags, int fd, off_t offset)
5932 {
5933     /*
5934      * The historical set of flags that all mmap types implicitly support.
5935      */
5936     enum {
5937         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5938                                | TARGET_MAP_PRIVATE
5939                                | TARGET_MAP_FIXED
5940                                | TARGET_MAP_ANONYMOUS
5941                                | TARGET_MAP_DENYWRITE
5942                                | TARGET_MAP_EXECUTABLE
5943                                | TARGET_MAP_UNINITIALIZED
5944                                | TARGET_MAP_GROWSDOWN
5945                                | TARGET_MAP_LOCKED
5946                                | TARGET_MAP_NORESERVE
5947                                | TARGET_MAP_POPULATE
5948                                | TARGET_MAP_NONBLOCK
5949                                | TARGET_MAP_STACK
5950                                | TARGET_MAP_HUGETLB
5951                                | TARGET_MAP_32BIT
5952                                | TARGET_MAP_HUGE_2MB
5953                                | TARGET_MAP_HUGE_1GB
5954     };
5955     int host_flags;
5956 
5957     switch (target_flags & TARGET_MAP_TYPE) {
5958     case TARGET_MAP_PRIVATE:
5959         host_flags = MAP_PRIVATE;
5960         break;
5961     case TARGET_MAP_SHARED:
5962         host_flags = MAP_SHARED;
5963         break;
5964     case TARGET_MAP_SHARED_VALIDATE:
5965         /*
5966          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5967          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5968          */
5969         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5970             return -TARGET_EOPNOTSUPP;
5971         }
5972         host_flags = MAP_SHARED_VALIDATE;
5973         if (target_flags & TARGET_MAP_SYNC) {
5974             host_flags |= MAP_SYNC;
5975         }
5976         break;
5977     default:
5978         return -TARGET_EINVAL;
5979     }
5980     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5981 
5982     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5983 }
5984 
5985 /*
5986  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5987  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5988  */
5989 #if defined(TARGET_I386)
5990 
5991 /* NOTE: there is really one LDT for all the threads */
5992 static uint8_t *ldt_table;
5993 
5994 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5995 {
5996     int size;
5997     void *p;
5998 
5999     if (!ldt_table)
6000         return 0;
6001     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6002     if (size > bytecount)
6003         size = bytecount;
6004     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6005     if (!p)
6006         return -TARGET_EFAULT;
6007     /* ??? Should this by byteswapped?  */
6008     memcpy(p, ldt_table, size);
6009     unlock_user(p, ptr, size);
6010     return size;
6011 }
6012 
6013 /* XXX: add locking support */
6014 static abi_long write_ldt(CPUX86State *env,
6015                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6016 {
6017     struct target_modify_ldt_ldt_s ldt_info;
6018     struct target_modify_ldt_ldt_s *target_ldt_info;
6019     int seg_32bit, contents, read_exec_only, limit_in_pages;
6020     int seg_not_present, useable, lm;
6021     uint32_t *lp, entry_1, entry_2;
6022 
6023     if (bytecount != sizeof(ldt_info))
6024         return -TARGET_EINVAL;
6025     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6026         return -TARGET_EFAULT;
6027     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6028     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6029     ldt_info.limit = tswap32(target_ldt_info->limit);
6030     ldt_info.flags = tswap32(target_ldt_info->flags);
6031     unlock_user_struct(target_ldt_info, ptr, 0);
6032 
6033     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6034         return -TARGET_EINVAL;
6035     seg_32bit = ldt_info.flags & 1;
6036     contents = (ldt_info.flags >> 1) & 3;
6037     read_exec_only = (ldt_info.flags >> 3) & 1;
6038     limit_in_pages = (ldt_info.flags >> 4) & 1;
6039     seg_not_present = (ldt_info.flags >> 5) & 1;
6040     useable = (ldt_info.flags >> 6) & 1;
6041 #ifdef TARGET_ABI32
6042     lm = 0;
6043 #else
6044     lm = (ldt_info.flags >> 7) & 1;
6045 #endif
6046     if (contents == 3) {
6047         if (oldmode)
6048             return -TARGET_EINVAL;
6049         if (seg_not_present == 0)
6050             return -TARGET_EINVAL;
6051     }
6052     /* allocate the LDT */
6053     if (!ldt_table) {
6054         env->ldt.base = target_mmap(0,
6055                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6056                                     PROT_READ|PROT_WRITE,
6057                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6058         if (env->ldt.base == -1)
6059             return -TARGET_ENOMEM;
6060         memset(g2h_untagged(env->ldt.base), 0,
6061                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6062         env->ldt.limit = 0xffff;
6063         ldt_table = g2h_untagged(env->ldt.base);
6064     }
6065 
6066     /* NOTE: same code as Linux kernel */
6067     /* Allow LDTs to be cleared by the user. */
6068     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6069         if (oldmode ||
6070             (contents == 0		&&
6071              read_exec_only == 1	&&
6072              seg_32bit == 0		&&
6073              limit_in_pages == 0	&&
6074              seg_not_present == 1	&&
6075              useable == 0 )) {
6076             entry_1 = 0;
6077             entry_2 = 0;
6078             goto install;
6079         }
6080     }
6081 
6082     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6083         (ldt_info.limit & 0x0ffff);
6084     entry_2 = (ldt_info.base_addr & 0xff000000) |
6085         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6086         (ldt_info.limit & 0xf0000) |
6087         ((read_exec_only ^ 1) << 9) |
6088         (contents << 10) |
6089         ((seg_not_present ^ 1) << 15) |
6090         (seg_32bit << 22) |
6091         (limit_in_pages << 23) |
6092         (lm << 21) |
6093         0x7000;
6094     if (!oldmode)
6095         entry_2 |= (useable << 20);
6096 
6097     /* Install the new entry ...  */
6098 install:
6099     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6100     lp[0] = tswap32(entry_1);
6101     lp[1] = tswap32(entry_2);
6102     return 0;
6103 }
6104 
6105 /* specific and weird i386 syscalls */
6106 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6107                               unsigned long bytecount)
6108 {
6109     abi_long ret;
6110 
6111     switch (func) {
6112     case 0:
6113         ret = read_ldt(ptr, bytecount);
6114         break;
6115     case 1:
6116         ret = write_ldt(env, ptr, bytecount, 1);
6117         break;
6118     case 0x11:
6119         ret = write_ldt(env, ptr, bytecount, 0);
6120         break;
6121     default:
6122         ret = -TARGET_ENOSYS;
6123         break;
6124     }
6125     return ret;
6126 }
6127 
6128 #if defined(TARGET_ABI32)
6129 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6130 {
6131     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6132     struct target_modify_ldt_ldt_s ldt_info;
6133     struct target_modify_ldt_ldt_s *target_ldt_info;
6134     int seg_32bit, contents, read_exec_only, limit_in_pages;
6135     int seg_not_present, useable, lm;
6136     uint32_t *lp, entry_1, entry_2;
6137     int i;
6138 
6139     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6140     if (!target_ldt_info)
6141         return -TARGET_EFAULT;
6142     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6143     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6144     ldt_info.limit = tswap32(target_ldt_info->limit);
6145     ldt_info.flags = tswap32(target_ldt_info->flags);
6146     if (ldt_info.entry_number == -1) {
6147         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6148             if (gdt_table[i] == 0) {
6149                 ldt_info.entry_number = i;
6150                 target_ldt_info->entry_number = tswap32(i);
6151                 break;
6152             }
6153         }
6154     }
6155     unlock_user_struct(target_ldt_info, ptr, 1);
6156 
6157     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6158         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6159            return -TARGET_EINVAL;
6160     seg_32bit = ldt_info.flags & 1;
6161     contents = (ldt_info.flags >> 1) & 3;
6162     read_exec_only = (ldt_info.flags >> 3) & 1;
6163     limit_in_pages = (ldt_info.flags >> 4) & 1;
6164     seg_not_present = (ldt_info.flags >> 5) & 1;
6165     useable = (ldt_info.flags >> 6) & 1;
6166 #ifdef TARGET_ABI32
6167     lm = 0;
6168 #else
6169     lm = (ldt_info.flags >> 7) & 1;
6170 #endif
6171 
6172     if (contents == 3) {
6173         if (seg_not_present == 0)
6174             return -TARGET_EINVAL;
6175     }
6176 
6177     /* NOTE: same code as Linux kernel */
6178     /* Allow LDTs to be cleared by the user. */
6179     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6180         if ((contents == 0             &&
6181              read_exec_only == 1       &&
6182              seg_32bit == 0            &&
6183              limit_in_pages == 0       &&
6184              seg_not_present == 1      &&
6185              useable == 0 )) {
6186             entry_1 = 0;
6187             entry_2 = 0;
6188             goto install;
6189         }
6190     }
6191 
6192     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6193         (ldt_info.limit & 0x0ffff);
6194     entry_2 = (ldt_info.base_addr & 0xff000000) |
6195         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6196         (ldt_info.limit & 0xf0000) |
6197         ((read_exec_only ^ 1) << 9) |
6198         (contents << 10) |
6199         ((seg_not_present ^ 1) << 15) |
6200         (seg_32bit << 22) |
6201         (limit_in_pages << 23) |
6202         (useable << 20) |
6203         (lm << 21) |
6204         0x7000;
6205 
6206     /* Install the new entry ...  */
6207 install:
6208     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6209     lp[0] = tswap32(entry_1);
6210     lp[1] = tswap32(entry_2);
6211     return 0;
6212 }
6213 
6214 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6215 {
6216     struct target_modify_ldt_ldt_s *target_ldt_info;
6217     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6218     uint32_t base_addr, limit, flags;
6219     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6220     int seg_not_present, useable, lm;
6221     uint32_t *lp, entry_1, entry_2;
6222 
6223     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6224     if (!target_ldt_info)
6225         return -TARGET_EFAULT;
6226     idx = tswap32(target_ldt_info->entry_number);
6227     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6228         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6229         unlock_user_struct(target_ldt_info, ptr, 1);
6230         return -TARGET_EINVAL;
6231     }
6232     lp = (uint32_t *)(gdt_table + idx);
6233     entry_1 = tswap32(lp[0]);
6234     entry_2 = tswap32(lp[1]);
6235 
6236     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6237     contents = (entry_2 >> 10) & 3;
6238     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6239     seg_32bit = (entry_2 >> 22) & 1;
6240     limit_in_pages = (entry_2 >> 23) & 1;
6241     useable = (entry_2 >> 20) & 1;
6242 #ifdef TARGET_ABI32
6243     lm = 0;
6244 #else
6245     lm = (entry_2 >> 21) & 1;
6246 #endif
6247     flags = (seg_32bit << 0) | (contents << 1) |
6248         (read_exec_only << 3) | (limit_in_pages << 4) |
6249         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6250     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6251     base_addr = (entry_1 >> 16) |
6252         (entry_2 & 0xff000000) |
6253         ((entry_2 & 0xff) << 16);
6254     target_ldt_info->base_addr = tswapal(base_addr);
6255     target_ldt_info->limit = tswap32(limit);
6256     target_ldt_info->flags = tswap32(flags);
6257     unlock_user_struct(target_ldt_info, ptr, 1);
6258     return 0;
6259 }
6260 
6261 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6262 {
6263     return -TARGET_ENOSYS;
6264 }
6265 #else
6266 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6267 {
6268     abi_long ret = 0;
6269     abi_ulong val;
6270     int idx;
6271 
6272     switch(code) {
6273     case TARGET_ARCH_SET_GS:
6274     case TARGET_ARCH_SET_FS:
6275         if (code == TARGET_ARCH_SET_GS)
6276             idx = R_GS;
6277         else
6278             idx = R_FS;
6279         cpu_x86_load_seg(env, idx, 0);
6280         env->segs[idx].base = addr;
6281         break;
6282     case TARGET_ARCH_GET_GS:
6283     case TARGET_ARCH_GET_FS:
6284         if (code == TARGET_ARCH_GET_GS)
6285             idx = R_GS;
6286         else
6287             idx = R_FS;
6288         val = env->segs[idx].base;
6289         if (put_user(val, addr, abi_ulong))
6290             ret = -TARGET_EFAULT;
6291         break;
6292     default:
6293         ret = -TARGET_EINVAL;
6294         break;
6295     }
6296     return ret;
6297 }
6298 #endif /* defined(TARGET_ABI32 */
6299 #endif /* defined(TARGET_I386) */
6300 
6301 /*
6302  * These constants are generic.  Supply any that are missing from the host.
6303  */
6304 #ifndef PR_SET_NAME
6305 # define PR_SET_NAME    15
6306 # define PR_GET_NAME    16
6307 #endif
6308 #ifndef PR_SET_FP_MODE
6309 # define PR_SET_FP_MODE 45
6310 # define PR_GET_FP_MODE 46
6311 # define PR_FP_MODE_FR   (1 << 0)
6312 # define PR_FP_MODE_FRE  (1 << 1)
6313 #endif
6314 #ifndef PR_SVE_SET_VL
6315 # define PR_SVE_SET_VL  50
6316 # define PR_SVE_GET_VL  51
6317 # define PR_SVE_VL_LEN_MASK  0xffff
6318 # define PR_SVE_VL_INHERIT   (1 << 17)
6319 #endif
6320 #ifndef PR_PAC_RESET_KEYS
6321 # define PR_PAC_RESET_KEYS  54
6322 # define PR_PAC_APIAKEY   (1 << 0)
6323 # define PR_PAC_APIBKEY   (1 << 1)
6324 # define PR_PAC_APDAKEY   (1 << 2)
6325 # define PR_PAC_APDBKEY   (1 << 3)
6326 # define PR_PAC_APGAKEY   (1 << 4)
6327 #endif
6328 #ifndef PR_SET_TAGGED_ADDR_CTRL
6329 # define PR_SET_TAGGED_ADDR_CTRL 55
6330 # define PR_GET_TAGGED_ADDR_CTRL 56
6331 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6332 #endif
6333 #ifndef PR_SET_IO_FLUSHER
6334 # define PR_SET_IO_FLUSHER 57
6335 # define PR_GET_IO_FLUSHER 58
6336 #endif
6337 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6338 # define PR_SET_SYSCALL_USER_DISPATCH 59
6339 #endif
6340 #ifndef PR_SME_SET_VL
6341 # define PR_SME_SET_VL  63
6342 # define PR_SME_GET_VL  64
6343 # define PR_SME_VL_LEN_MASK  0xffff
6344 # define PR_SME_VL_INHERIT   (1 << 17)
6345 #endif
6346 
6347 #include "target_prctl.h"
6348 
6349 static abi_long do_prctl_inval0(CPUArchState *env)
6350 {
6351     return -TARGET_EINVAL;
6352 }
6353 
6354 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6355 {
6356     return -TARGET_EINVAL;
6357 }
6358 
6359 #ifndef do_prctl_get_fp_mode
6360 #define do_prctl_get_fp_mode do_prctl_inval0
6361 #endif
6362 #ifndef do_prctl_set_fp_mode
6363 #define do_prctl_set_fp_mode do_prctl_inval1
6364 #endif
6365 #ifndef do_prctl_sve_get_vl
6366 #define do_prctl_sve_get_vl do_prctl_inval0
6367 #endif
6368 #ifndef do_prctl_sve_set_vl
6369 #define do_prctl_sve_set_vl do_prctl_inval1
6370 #endif
6371 #ifndef do_prctl_reset_keys
6372 #define do_prctl_reset_keys do_prctl_inval1
6373 #endif
6374 #ifndef do_prctl_set_tagged_addr_ctrl
6375 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6376 #endif
6377 #ifndef do_prctl_get_tagged_addr_ctrl
6378 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6379 #endif
6380 #ifndef do_prctl_get_unalign
6381 #define do_prctl_get_unalign do_prctl_inval1
6382 #endif
6383 #ifndef do_prctl_set_unalign
6384 #define do_prctl_set_unalign do_prctl_inval1
6385 #endif
6386 #ifndef do_prctl_sme_get_vl
6387 #define do_prctl_sme_get_vl do_prctl_inval0
6388 #endif
6389 #ifndef do_prctl_sme_set_vl
6390 #define do_prctl_sme_set_vl do_prctl_inval1
6391 #endif
6392 
6393 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6394                          abi_long arg3, abi_long arg4, abi_long arg5)
6395 {
6396     abi_long ret;
6397 
6398     switch (option) {
6399     case PR_GET_PDEATHSIG:
6400         {
6401             int deathsig;
6402             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6403                                   arg3, arg4, arg5));
6404             if (!is_error(ret) &&
6405                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6406                 return -TARGET_EFAULT;
6407             }
6408             return ret;
6409         }
6410     case PR_SET_PDEATHSIG:
6411         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6412                                arg3, arg4, arg5));
6413     case PR_GET_NAME:
6414         {
6415             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6416             if (!name) {
6417                 return -TARGET_EFAULT;
6418             }
6419             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6420                                   arg3, arg4, arg5));
6421             unlock_user(name, arg2, 16);
6422             return ret;
6423         }
6424     case PR_SET_NAME:
6425         {
6426             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6427             if (!name) {
6428                 return -TARGET_EFAULT;
6429             }
6430             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6431                                   arg3, arg4, arg5));
6432             unlock_user(name, arg2, 0);
6433             return ret;
6434         }
6435     case PR_GET_FP_MODE:
6436         return do_prctl_get_fp_mode(env);
6437     case PR_SET_FP_MODE:
6438         return do_prctl_set_fp_mode(env, arg2);
6439     case PR_SVE_GET_VL:
6440         return do_prctl_sve_get_vl(env);
6441     case PR_SVE_SET_VL:
6442         return do_prctl_sve_set_vl(env, arg2);
6443     case PR_SME_GET_VL:
6444         return do_prctl_sme_get_vl(env);
6445     case PR_SME_SET_VL:
6446         return do_prctl_sme_set_vl(env, arg2);
6447     case PR_PAC_RESET_KEYS:
6448         if (arg3 || arg4 || arg5) {
6449             return -TARGET_EINVAL;
6450         }
6451         return do_prctl_reset_keys(env, arg2);
6452     case PR_SET_TAGGED_ADDR_CTRL:
6453         if (arg3 || arg4 || arg5) {
6454             return -TARGET_EINVAL;
6455         }
6456         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6457     case PR_GET_TAGGED_ADDR_CTRL:
6458         if (arg2 || arg3 || arg4 || arg5) {
6459             return -TARGET_EINVAL;
6460         }
6461         return do_prctl_get_tagged_addr_ctrl(env);
6462 
6463     case PR_GET_UNALIGN:
6464         return do_prctl_get_unalign(env, arg2);
6465     case PR_SET_UNALIGN:
6466         return do_prctl_set_unalign(env, arg2);
6467 
6468     case PR_CAP_AMBIENT:
6469     case PR_CAPBSET_READ:
6470     case PR_CAPBSET_DROP:
6471     case PR_GET_DUMPABLE:
6472     case PR_SET_DUMPABLE:
6473     case PR_GET_KEEPCAPS:
6474     case PR_SET_KEEPCAPS:
6475     case PR_GET_SECUREBITS:
6476     case PR_SET_SECUREBITS:
6477     case PR_GET_TIMING:
6478     case PR_SET_TIMING:
6479     case PR_GET_TIMERSLACK:
6480     case PR_SET_TIMERSLACK:
6481     case PR_MCE_KILL:
6482     case PR_MCE_KILL_GET:
6483     case PR_GET_NO_NEW_PRIVS:
6484     case PR_SET_NO_NEW_PRIVS:
6485     case PR_GET_IO_FLUSHER:
6486     case PR_SET_IO_FLUSHER:
6487     case PR_SET_CHILD_SUBREAPER:
6488     case PR_GET_SPECULATION_CTRL:
6489     case PR_SET_SPECULATION_CTRL:
6490         /* Some prctl options have no pointer arguments and we can pass on. */
6491         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6492 
6493     case PR_GET_CHILD_SUBREAPER:
6494         {
6495             int val;
6496             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6497                                   arg3, arg4, arg5));
6498             if (!is_error(ret) && put_user_s32(val, arg2)) {
6499                 return -TARGET_EFAULT;
6500             }
6501             return ret;
6502         }
6503 
6504     case PR_GET_TID_ADDRESS:
6505         {
6506             TaskState *ts = get_task_state(env_cpu(env));
6507             return put_user_ual(ts->child_tidptr, arg2);
6508         }
6509 
6510     case PR_GET_FPEXC:
6511     case PR_SET_FPEXC:
6512         /* Was used for SPE on PowerPC. */
6513         return -TARGET_EINVAL;
6514 
6515     case PR_GET_ENDIAN:
6516     case PR_SET_ENDIAN:
6517     case PR_GET_FPEMU:
6518     case PR_SET_FPEMU:
6519     case PR_SET_MM:
6520     case PR_GET_SECCOMP:
6521     case PR_SET_SECCOMP:
6522     case PR_SET_SYSCALL_USER_DISPATCH:
6523     case PR_GET_THP_DISABLE:
6524     case PR_SET_THP_DISABLE:
6525     case PR_GET_TSC:
6526     case PR_SET_TSC:
6527         /* Disable to prevent the target disabling stuff we need. */
6528         return -TARGET_EINVAL;
6529 
6530     default:
6531         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6532                       option);
6533         return -TARGET_EINVAL;
6534     }
6535 }
6536 
6537 #define NEW_STACK_SIZE 0x40000
6538 
6539 
6540 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6541 typedef struct {
6542     CPUArchState *env;
6543     pthread_mutex_t mutex;
6544     pthread_cond_t cond;
6545     pthread_t thread;
6546     uint32_t tid;
6547     abi_ulong child_tidptr;
6548     abi_ulong parent_tidptr;
6549     sigset_t sigmask;
6550 } new_thread_info;
6551 
6552 static void *clone_func(void *arg)
6553 {
6554     new_thread_info *info = arg;
6555     CPUArchState *env;
6556     CPUState *cpu;
6557     TaskState *ts;
6558 
6559     rcu_register_thread();
6560     tcg_register_thread();
6561     env = info->env;
6562     cpu = env_cpu(env);
6563     thread_cpu = cpu;
6564     ts = get_task_state(cpu);
6565     info->tid = sys_gettid();
6566     task_settid(ts);
6567     if (info->child_tidptr)
6568         put_user_u32(info->tid, info->child_tidptr);
6569     if (info->parent_tidptr)
6570         put_user_u32(info->tid, info->parent_tidptr);
6571     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6572     /* Enable signals.  */
6573     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6574     /* Signal to the parent that we're ready.  */
6575     pthread_mutex_lock(&info->mutex);
6576     pthread_cond_broadcast(&info->cond);
6577     pthread_mutex_unlock(&info->mutex);
6578     /* Wait until the parent has finished initializing the tls state.  */
6579     pthread_mutex_lock(&clone_lock);
6580     pthread_mutex_unlock(&clone_lock);
6581     cpu_loop(env);
6582     /* never exits */
6583     return NULL;
6584 }
6585 
6586 /* do_fork() Must return host values and target errnos (unlike most
6587    do_*() functions). */
6588 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6589                    abi_ulong parent_tidptr, target_ulong newtls,
6590                    abi_ulong child_tidptr)
6591 {
6592     CPUState *cpu = env_cpu(env);
6593     int ret;
6594     TaskState *ts;
6595     CPUState *new_cpu;
6596     CPUArchState *new_env;
6597     sigset_t sigmask;
6598 
6599     flags &= ~CLONE_IGNORED_FLAGS;
6600 
6601     /* Emulate vfork() with fork() */
6602     if (flags & CLONE_VFORK)
6603         flags &= ~(CLONE_VFORK | CLONE_VM);
6604 
6605     if (flags & CLONE_VM) {
6606         TaskState *parent_ts = get_task_state(cpu);
6607         new_thread_info info;
6608         pthread_attr_t attr;
6609 
6610         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6611             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6612             return -TARGET_EINVAL;
6613         }
6614 
6615         ts = g_new0(TaskState, 1);
6616         init_task_state(ts);
6617 
6618         /* Grab a mutex so that thread setup appears atomic.  */
6619         pthread_mutex_lock(&clone_lock);
6620 
6621         /*
6622          * If this is our first additional thread, we need to ensure we
6623          * generate code for parallel execution and flush old translations.
6624          * Do this now so that the copy gets CF_PARALLEL too.
6625          */
6626         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6627             tcg_cflags_set(cpu, CF_PARALLEL);
6628             tb_flush(cpu);
6629         }
6630 
6631         /* we create a new CPU instance. */
6632         new_env = cpu_copy(env);
6633         /* Init regs that differ from the parent.  */
6634         cpu_clone_regs_child(new_env, newsp, flags);
6635         cpu_clone_regs_parent(env, flags);
6636         new_cpu = env_cpu(new_env);
6637         new_cpu->opaque = ts;
6638         ts->bprm = parent_ts->bprm;
6639         ts->info = parent_ts->info;
6640         ts->signal_mask = parent_ts->signal_mask;
6641 
6642         if (flags & CLONE_CHILD_CLEARTID) {
6643             ts->child_tidptr = child_tidptr;
6644         }
6645 
6646         if (flags & CLONE_SETTLS) {
6647             cpu_set_tls (new_env, newtls);
6648         }
6649 
6650         memset(&info, 0, sizeof(info));
6651         pthread_mutex_init(&info.mutex, NULL);
6652         pthread_mutex_lock(&info.mutex);
6653         pthread_cond_init(&info.cond, NULL);
6654         info.env = new_env;
6655         if (flags & CLONE_CHILD_SETTID) {
6656             info.child_tidptr = child_tidptr;
6657         }
6658         if (flags & CLONE_PARENT_SETTID) {
6659             info.parent_tidptr = parent_tidptr;
6660         }
6661 
6662         ret = pthread_attr_init(&attr);
6663         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6664         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6665         /* It is not safe to deliver signals until the child has finished
6666            initializing, so temporarily block all signals.  */
6667         sigfillset(&sigmask);
6668         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6669         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6670 
6671         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6672         /* TODO: Free new CPU state if thread creation failed.  */
6673 
6674         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6675         pthread_attr_destroy(&attr);
6676         if (ret == 0) {
6677             /* Wait for the child to initialize.  */
6678             pthread_cond_wait(&info.cond, &info.mutex);
6679             ret = info.tid;
6680         } else {
6681             ret = -1;
6682         }
6683         pthread_mutex_unlock(&info.mutex);
6684         pthread_cond_destroy(&info.cond);
6685         pthread_mutex_destroy(&info.mutex);
6686         pthread_mutex_unlock(&clone_lock);
6687     } else {
6688         /* if no CLONE_VM, we consider it is a fork */
6689         if (flags & CLONE_INVALID_FORK_FLAGS) {
6690             return -TARGET_EINVAL;
6691         }
6692 
6693         /* We can't support custom termination signals */
6694         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6695             return -TARGET_EINVAL;
6696         }
6697 
6698 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6699         if (flags & CLONE_PIDFD) {
6700             return -TARGET_EINVAL;
6701         }
6702 #endif
6703 
6704         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6705         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6706             return -TARGET_EINVAL;
6707         }
6708 
6709         if (block_signals()) {
6710             return -QEMU_ERESTARTSYS;
6711         }
6712 
6713         fork_start();
6714         ret = fork();
6715         if (ret == 0) {
6716             /* Child Process.  */
6717             cpu_clone_regs_child(env, newsp, flags);
6718             fork_end(ret);
6719             /* There is a race condition here.  The parent process could
6720                theoretically read the TID in the child process before the child
6721                tid is set.  This would require using either ptrace
6722                (not implemented) or having *_tidptr to point at a shared memory
6723                mapping.  We can't repeat the spinlock hack used above because
6724                the child process gets its own copy of the lock.  */
6725             if (flags & CLONE_CHILD_SETTID)
6726                 put_user_u32(sys_gettid(), child_tidptr);
6727             if (flags & CLONE_PARENT_SETTID)
6728                 put_user_u32(sys_gettid(), parent_tidptr);
6729             ts = get_task_state(cpu);
6730             if (flags & CLONE_SETTLS)
6731                 cpu_set_tls (env, newtls);
6732             if (flags & CLONE_CHILD_CLEARTID)
6733                 ts->child_tidptr = child_tidptr;
6734         } else {
6735             cpu_clone_regs_parent(env, flags);
6736             if (flags & CLONE_PIDFD) {
6737                 int pid_fd = 0;
6738 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6739                 int pid_child = ret;
6740                 pid_fd = pidfd_open(pid_child, 0);
6741                 if (pid_fd >= 0) {
6742                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6743                                                | FD_CLOEXEC);
6744                 } else {
6745                         pid_fd = 0;
6746                 }
6747 #endif
6748                 put_user_u32(pid_fd, parent_tidptr);
6749             }
6750             fork_end(ret);
6751         }
6752         g_assert(!cpu_in_exclusive_context(cpu));
6753     }
6754     return ret;
6755 }
6756 
6757 /* warning : doesn't handle linux specific flags... */
6758 static int target_to_host_fcntl_cmd(int cmd)
6759 {
6760     int ret;
6761 
6762     switch(cmd) {
6763     case TARGET_F_DUPFD:
6764     case TARGET_F_GETFD:
6765     case TARGET_F_SETFD:
6766     case TARGET_F_GETFL:
6767     case TARGET_F_SETFL:
6768     case TARGET_F_OFD_GETLK:
6769     case TARGET_F_OFD_SETLK:
6770     case TARGET_F_OFD_SETLKW:
6771         ret = cmd;
6772         break;
6773     case TARGET_F_GETLK:
6774         ret = F_GETLK;
6775         break;
6776     case TARGET_F_SETLK:
6777         ret = F_SETLK;
6778         break;
6779     case TARGET_F_SETLKW:
6780         ret = F_SETLKW;
6781         break;
6782     case TARGET_F_GETOWN:
6783         ret = F_GETOWN;
6784         break;
6785     case TARGET_F_SETOWN:
6786         ret = F_SETOWN;
6787         break;
6788     case TARGET_F_GETSIG:
6789         ret = F_GETSIG;
6790         break;
6791     case TARGET_F_SETSIG:
6792         ret = F_SETSIG;
6793         break;
6794 #if TARGET_ABI_BITS == 32
6795     case TARGET_F_GETLK64:
6796         ret = F_GETLK;
6797         break;
6798     case TARGET_F_SETLK64:
6799         ret = F_SETLK;
6800         break;
6801     case TARGET_F_SETLKW64:
6802         ret = F_SETLKW;
6803         break;
6804 #endif
6805     case TARGET_F_SETLEASE:
6806         ret = F_SETLEASE;
6807         break;
6808     case TARGET_F_GETLEASE:
6809         ret = F_GETLEASE;
6810         break;
6811 #ifdef F_DUPFD_CLOEXEC
6812     case TARGET_F_DUPFD_CLOEXEC:
6813         ret = F_DUPFD_CLOEXEC;
6814         break;
6815 #endif
6816     case TARGET_F_NOTIFY:
6817         ret = F_NOTIFY;
6818         break;
6819 #ifdef F_GETOWN_EX
6820     case TARGET_F_GETOWN_EX:
6821         ret = F_GETOWN_EX;
6822         break;
6823 #endif
6824 #ifdef F_SETOWN_EX
6825     case TARGET_F_SETOWN_EX:
6826         ret = F_SETOWN_EX;
6827         break;
6828 #endif
6829 #ifdef F_SETPIPE_SZ
6830     case TARGET_F_SETPIPE_SZ:
6831         ret = F_SETPIPE_SZ;
6832         break;
6833     case TARGET_F_GETPIPE_SZ:
6834         ret = F_GETPIPE_SZ;
6835         break;
6836 #endif
6837 #ifdef F_ADD_SEALS
6838     case TARGET_F_ADD_SEALS:
6839         ret = F_ADD_SEALS;
6840         break;
6841     case TARGET_F_GET_SEALS:
6842         ret = F_GET_SEALS;
6843         break;
6844 #endif
6845     default:
6846         ret = -TARGET_EINVAL;
6847         break;
6848     }
6849 
6850 #if defined(__powerpc64__)
6851     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6852      * is not supported by kernel. The glibc fcntl call actually adjusts
6853      * them to 5, 6 and 7 before making the syscall(). Since we make the
6854      * syscall directly, adjust to what is supported by the kernel.
6855      */
6856     if (ret >= F_GETLK && ret <= F_SETLKW) {
6857         ret -= F_GETLK - 5;
6858     }
6859 #endif
6860 
6861     return ret;
6862 }
6863 
6864 #define FLOCK_TRANSTBL \
6865     switch (type) { \
6866     TRANSTBL_CONVERT(F_RDLCK); \
6867     TRANSTBL_CONVERT(F_WRLCK); \
6868     TRANSTBL_CONVERT(F_UNLCK); \
6869     }
6870 
6871 static int target_to_host_flock(int type)
6872 {
6873 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6874     FLOCK_TRANSTBL
6875 #undef  TRANSTBL_CONVERT
6876     return -TARGET_EINVAL;
6877 }
6878 
6879 static int host_to_target_flock(int type)
6880 {
6881 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6882     FLOCK_TRANSTBL
6883 #undef  TRANSTBL_CONVERT
6884     /* if we don't know how to convert the value coming
6885      * from the host we copy to the target field as-is
6886      */
6887     return type;
6888 }
6889 
6890 static inline abi_long copy_from_user_flock(struct flock *fl,
6891                                             abi_ulong target_flock_addr)
6892 {
6893     struct target_flock *target_fl;
6894     int l_type;
6895 
6896     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6897         return -TARGET_EFAULT;
6898     }
6899 
6900     __get_user(l_type, &target_fl->l_type);
6901     l_type = target_to_host_flock(l_type);
6902     if (l_type < 0) {
6903         return l_type;
6904     }
6905     fl->l_type = l_type;
6906     __get_user(fl->l_whence, &target_fl->l_whence);
6907     __get_user(fl->l_start, &target_fl->l_start);
6908     __get_user(fl->l_len, &target_fl->l_len);
6909     __get_user(fl->l_pid, &target_fl->l_pid);
6910     unlock_user_struct(target_fl, target_flock_addr, 0);
6911     return 0;
6912 }
6913 
6914 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6915                                           const struct flock *fl)
6916 {
6917     struct target_flock *target_fl;
6918     short l_type;
6919 
6920     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6921         return -TARGET_EFAULT;
6922     }
6923 
6924     l_type = host_to_target_flock(fl->l_type);
6925     __put_user(l_type, &target_fl->l_type);
6926     __put_user(fl->l_whence, &target_fl->l_whence);
6927     __put_user(fl->l_start, &target_fl->l_start);
6928     __put_user(fl->l_len, &target_fl->l_len);
6929     __put_user(fl->l_pid, &target_fl->l_pid);
6930     unlock_user_struct(target_fl, target_flock_addr, 1);
6931     return 0;
6932 }
6933 
6934 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6935 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6936 
6937 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6938 struct target_oabi_flock64 {
6939     abi_short l_type;
6940     abi_short l_whence;
6941     abi_llong l_start;
6942     abi_llong l_len;
6943     abi_int   l_pid;
6944 } QEMU_PACKED;
6945 
6946 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6947                                                    abi_ulong target_flock_addr)
6948 {
6949     struct target_oabi_flock64 *target_fl;
6950     int l_type;
6951 
6952     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6953         return -TARGET_EFAULT;
6954     }
6955 
6956     __get_user(l_type, &target_fl->l_type);
6957     l_type = target_to_host_flock(l_type);
6958     if (l_type < 0) {
6959         return l_type;
6960     }
6961     fl->l_type = l_type;
6962     __get_user(fl->l_whence, &target_fl->l_whence);
6963     __get_user(fl->l_start, &target_fl->l_start);
6964     __get_user(fl->l_len, &target_fl->l_len);
6965     __get_user(fl->l_pid, &target_fl->l_pid);
6966     unlock_user_struct(target_fl, target_flock_addr, 0);
6967     return 0;
6968 }
6969 
6970 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6971                                                  const struct flock *fl)
6972 {
6973     struct target_oabi_flock64 *target_fl;
6974     short l_type;
6975 
6976     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6977         return -TARGET_EFAULT;
6978     }
6979 
6980     l_type = host_to_target_flock(fl->l_type);
6981     __put_user(l_type, &target_fl->l_type);
6982     __put_user(fl->l_whence, &target_fl->l_whence);
6983     __put_user(fl->l_start, &target_fl->l_start);
6984     __put_user(fl->l_len, &target_fl->l_len);
6985     __put_user(fl->l_pid, &target_fl->l_pid);
6986     unlock_user_struct(target_fl, target_flock_addr, 1);
6987     return 0;
6988 }
6989 #endif
6990 
6991 static inline abi_long copy_from_user_flock64(struct flock *fl,
6992                                               abi_ulong target_flock_addr)
6993 {
6994     struct target_flock64 *target_fl;
6995     int l_type;
6996 
6997     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6998         return -TARGET_EFAULT;
6999     }
7000 
7001     __get_user(l_type, &target_fl->l_type);
7002     l_type = target_to_host_flock(l_type);
7003     if (l_type < 0) {
7004         return l_type;
7005     }
7006     fl->l_type = l_type;
7007     __get_user(fl->l_whence, &target_fl->l_whence);
7008     __get_user(fl->l_start, &target_fl->l_start);
7009     __get_user(fl->l_len, &target_fl->l_len);
7010     __get_user(fl->l_pid, &target_fl->l_pid);
7011     unlock_user_struct(target_fl, target_flock_addr, 0);
7012     return 0;
7013 }
7014 
7015 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7016                                             const struct flock *fl)
7017 {
7018     struct target_flock64 *target_fl;
7019     short l_type;
7020 
7021     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7022         return -TARGET_EFAULT;
7023     }
7024 
7025     l_type = host_to_target_flock(fl->l_type);
7026     __put_user(l_type, &target_fl->l_type);
7027     __put_user(fl->l_whence, &target_fl->l_whence);
7028     __put_user(fl->l_start, &target_fl->l_start);
7029     __put_user(fl->l_len, &target_fl->l_len);
7030     __put_user(fl->l_pid, &target_fl->l_pid);
7031     unlock_user_struct(target_fl, target_flock_addr, 1);
7032     return 0;
7033 }
7034 
7035 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7036 {
7037     struct flock fl;
7038 #ifdef F_GETOWN_EX
7039     struct f_owner_ex fox;
7040     struct target_f_owner_ex *target_fox;
7041 #endif
7042     abi_long ret;
7043     int host_cmd = target_to_host_fcntl_cmd(cmd);
7044 
7045     if (host_cmd == -TARGET_EINVAL)
7046 	    return host_cmd;
7047 
7048     switch(cmd) {
7049     case TARGET_F_GETLK:
7050         ret = copy_from_user_flock(&fl, arg);
7051         if (ret) {
7052             return ret;
7053         }
7054         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7055         if (ret == 0) {
7056             ret = copy_to_user_flock(arg, &fl);
7057         }
7058         break;
7059 
7060     case TARGET_F_SETLK:
7061     case TARGET_F_SETLKW:
7062         ret = copy_from_user_flock(&fl, arg);
7063         if (ret) {
7064             return ret;
7065         }
7066         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7067         break;
7068 
7069     case TARGET_F_GETLK64:
7070     case TARGET_F_OFD_GETLK:
7071         ret = copy_from_user_flock64(&fl, arg);
7072         if (ret) {
7073             return ret;
7074         }
7075         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7076         if (ret == 0) {
7077             ret = copy_to_user_flock64(arg, &fl);
7078         }
7079         break;
7080     case TARGET_F_SETLK64:
7081     case TARGET_F_SETLKW64:
7082     case TARGET_F_OFD_SETLK:
7083     case TARGET_F_OFD_SETLKW:
7084         ret = copy_from_user_flock64(&fl, arg);
7085         if (ret) {
7086             return ret;
7087         }
7088         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7089         break;
7090 
7091     case TARGET_F_GETFL:
7092         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7093         if (ret >= 0) {
7094             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7095             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7096             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7097                 ret |= TARGET_O_LARGEFILE;
7098             }
7099         }
7100         break;
7101 
7102     case TARGET_F_SETFL:
7103         ret = get_errno(safe_fcntl(fd, host_cmd,
7104                                    target_to_host_bitmask(arg,
7105                                                           fcntl_flags_tbl)));
7106         break;
7107 
7108 #ifdef F_GETOWN_EX
7109     case TARGET_F_GETOWN_EX:
7110         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7111         if (ret >= 0) {
7112             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7113                 return -TARGET_EFAULT;
7114             target_fox->type = tswap32(fox.type);
7115             target_fox->pid = tswap32(fox.pid);
7116             unlock_user_struct(target_fox, arg, 1);
7117         }
7118         break;
7119 #endif
7120 
7121 #ifdef F_SETOWN_EX
7122     case TARGET_F_SETOWN_EX:
7123         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7124             return -TARGET_EFAULT;
7125         fox.type = tswap32(target_fox->type);
7126         fox.pid = tswap32(target_fox->pid);
7127         unlock_user_struct(target_fox, arg, 0);
7128         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7129         break;
7130 #endif
7131 
7132     case TARGET_F_SETSIG:
7133         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7134         break;
7135 
7136     case TARGET_F_GETSIG:
7137         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7138         break;
7139 
7140     case TARGET_F_SETOWN:
7141     case TARGET_F_GETOWN:
7142     case TARGET_F_SETLEASE:
7143     case TARGET_F_GETLEASE:
7144     case TARGET_F_SETPIPE_SZ:
7145     case TARGET_F_GETPIPE_SZ:
7146     case TARGET_F_ADD_SEALS:
7147     case TARGET_F_GET_SEALS:
7148         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7149         break;
7150 
7151     default:
7152         ret = get_errno(safe_fcntl(fd, cmd, arg));
7153         break;
7154     }
7155     return ret;
7156 }
7157 
7158 #ifdef USE_UID16
7159 
7160 static inline int high2lowuid(int uid)
7161 {
7162     if (uid > 65535)
7163         return 65534;
7164     else
7165         return uid;
7166 }
7167 
7168 static inline int high2lowgid(int gid)
7169 {
7170     if (gid > 65535)
7171         return 65534;
7172     else
7173         return gid;
7174 }
7175 
7176 static inline int low2highuid(int uid)
7177 {
7178     if ((int16_t)uid == -1)
7179         return -1;
7180     else
7181         return uid;
7182 }
7183 
7184 static inline int low2highgid(int gid)
7185 {
7186     if ((int16_t)gid == -1)
7187         return -1;
7188     else
7189         return gid;
7190 }
7191 static inline int tswapid(int id)
7192 {
7193     return tswap16(id);
7194 }
7195 
7196 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7197 
7198 #else /* !USE_UID16 */
7199 static inline int high2lowuid(int uid)
7200 {
7201     return uid;
7202 }
7203 static inline int high2lowgid(int gid)
7204 {
7205     return gid;
7206 }
7207 static inline int low2highuid(int uid)
7208 {
7209     return uid;
7210 }
7211 static inline int low2highgid(int gid)
7212 {
7213     return gid;
7214 }
7215 static inline int tswapid(int id)
7216 {
7217     return tswap32(id);
7218 }
7219 
7220 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7221 
7222 #endif /* USE_UID16 */
7223 
7224 /* We must do direct syscalls for setting UID/GID, because we want to
7225  * implement the Linux system call semantics of "change only for this thread",
7226  * not the libc/POSIX semantics of "change for all threads in process".
7227  * (See http://ewontfix.com/17/ for more details.)
7228  * We use the 32-bit version of the syscalls if present; if it is not
7229  * then either the host architecture supports 32-bit UIDs natively with
7230  * the standard syscall, or the 16-bit UID is the best we can do.
7231  */
7232 #ifdef __NR_setuid32
7233 #define __NR_sys_setuid __NR_setuid32
7234 #else
7235 #define __NR_sys_setuid __NR_setuid
7236 #endif
7237 #ifdef __NR_setgid32
7238 #define __NR_sys_setgid __NR_setgid32
7239 #else
7240 #define __NR_sys_setgid __NR_setgid
7241 #endif
7242 #ifdef __NR_setresuid32
7243 #define __NR_sys_setresuid __NR_setresuid32
7244 #else
7245 #define __NR_sys_setresuid __NR_setresuid
7246 #endif
7247 #ifdef __NR_setresgid32
7248 #define __NR_sys_setresgid __NR_setresgid32
7249 #else
7250 #define __NR_sys_setresgid __NR_setresgid
7251 #endif
7252 #ifdef __NR_setgroups32
7253 #define __NR_sys_setgroups __NR_setgroups32
7254 #else
7255 #define __NR_sys_setgroups __NR_setgroups
7256 #endif
7257 #ifdef __NR_sys_setreuid32
7258 #define __NR_sys_setreuid __NR_setreuid32
7259 #else
7260 #define __NR_sys_setreuid __NR_setreuid
7261 #endif
7262 #ifdef __NR_sys_setregid32
7263 #define __NR_sys_setregid __NR_setregid32
7264 #else
7265 #define __NR_sys_setregid __NR_setregid
7266 #endif
7267 
7268 _syscall1(int, sys_setuid, uid_t, uid)
7269 _syscall1(int, sys_setgid, gid_t, gid)
7270 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7271 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7272 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7273 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7274 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7275 
7276 void syscall_init(void)
7277 {
7278     IOCTLEntry *ie;
7279     const argtype *arg_type;
7280     int size;
7281 
7282     thunk_init(STRUCT_MAX);
7283 
7284 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7285 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7286 #include "syscall_types.h"
7287 #undef STRUCT
7288 #undef STRUCT_SPECIAL
7289 
7290     /* we patch the ioctl size if necessary. We rely on the fact that
7291        no ioctl has all the bits at '1' in the size field */
7292     ie = ioctl_entries;
7293     while (ie->target_cmd != 0) {
7294         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7295             TARGET_IOC_SIZEMASK) {
7296             arg_type = ie->arg_type;
7297             if (arg_type[0] != TYPE_PTR) {
7298                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7299                         ie->target_cmd);
7300                 exit(1);
7301             }
7302             arg_type++;
7303             size = thunk_type_size(arg_type, 0);
7304             ie->target_cmd = (ie->target_cmd &
7305                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7306                 (size << TARGET_IOC_SIZESHIFT);
7307         }
7308 
7309         /* automatic consistency check if same arch */
7310 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7311     (defined(__x86_64__) && defined(TARGET_X86_64))
7312         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7313             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7314                     ie->name, ie->target_cmd, ie->host_cmd);
7315         }
7316 #endif
7317         ie++;
7318     }
7319 }
7320 
7321 #ifdef TARGET_NR_truncate64
7322 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7323                                          abi_long arg2,
7324                                          abi_long arg3,
7325                                          abi_long arg4)
7326 {
7327     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7328         arg2 = arg3;
7329         arg3 = arg4;
7330     }
7331     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7332 }
7333 #endif
7334 
7335 #ifdef TARGET_NR_ftruncate64
7336 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7337                                           abi_long arg2,
7338                                           abi_long arg3,
7339                                           abi_long arg4)
7340 {
7341     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7342         arg2 = arg3;
7343         arg3 = arg4;
7344     }
7345     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7346 }
7347 #endif
7348 
7349 #if defined(TARGET_NR_timer_settime) || \
7350     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7351 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7352                                                  abi_ulong target_addr)
7353 {
7354     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7355                                 offsetof(struct target_itimerspec,
7356                                          it_interval)) ||
7357         target_to_host_timespec(&host_its->it_value, target_addr +
7358                                 offsetof(struct target_itimerspec,
7359                                          it_value))) {
7360         return -TARGET_EFAULT;
7361     }
7362 
7363     return 0;
7364 }
7365 #endif
7366 
7367 #if defined(TARGET_NR_timer_settime64) || \
7368     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7369 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7370                                                    abi_ulong target_addr)
7371 {
7372     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7373                                   offsetof(struct target__kernel_itimerspec,
7374                                            it_interval)) ||
7375         target_to_host_timespec64(&host_its->it_value, target_addr +
7376                                   offsetof(struct target__kernel_itimerspec,
7377                                            it_value))) {
7378         return -TARGET_EFAULT;
7379     }
7380 
7381     return 0;
7382 }
7383 #endif
7384 
7385 #if ((defined(TARGET_NR_timerfd_gettime) || \
7386       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7387       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7388 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7389                                                  struct itimerspec *host_its)
7390 {
7391     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7392                                                        it_interval),
7393                                 &host_its->it_interval) ||
7394         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7395                                                        it_value),
7396                                 &host_its->it_value)) {
7397         return -TARGET_EFAULT;
7398     }
7399     return 0;
7400 }
7401 #endif
7402 
7403 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7404       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7405       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7406 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7407                                                    struct itimerspec *host_its)
7408 {
7409     if (host_to_target_timespec64(target_addr +
7410                                   offsetof(struct target__kernel_itimerspec,
7411                                            it_interval),
7412                                   &host_its->it_interval) ||
7413         host_to_target_timespec64(target_addr +
7414                                   offsetof(struct target__kernel_itimerspec,
7415                                            it_value),
7416                                   &host_its->it_value)) {
7417         return -TARGET_EFAULT;
7418     }
7419     return 0;
7420 }
7421 #endif
7422 
7423 #if defined(TARGET_NR_adjtimex) || \
7424     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7425 static inline abi_long target_to_host_timex(struct timex *host_tx,
7426                                             abi_long target_addr)
7427 {
7428     struct target_timex *target_tx;
7429 
7430     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7431         return -TARGET_EFAULT;
7432     }
7433 
7434     __get_user(host_tx->modes, &target_tx->modes);
7435     __get_user(host_tx->offset, &target_tx->offset);
7436     __get_user(host_tx->freq, &target_tx->freq);
7437     __get_user(host_tx->maxerror, &target_tx->maxerror);
7438     __get_user(host_tx->esterror, &target_tx->esterror);
7439     __get_user(host_tx->status, &target_tx->status);
7440     __get_user(host_tx->constant, &target_tx->constant);
7441     __get_user(host_tx->precision, &target_tx->precision);
7442     __get_user(host_tx->tolerance, &target_tx->tolerance);
7443     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7444     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7445     __get_user(host_tx->tick, &target_tx->tick);
7446     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7447     __get_user(host_tx->jitter, &target_tx->jitter);
7448     __get_user(host_tx->shift, &target_tx->shift);
7449     __get_user(host_tx->stabil, &target_tx->stabil);
7450     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7451     __get_user(host_tx->calcnt, &target_tx->calcnt);
7452     __get_user(host_tx->errcnt, &target_tx->errcnt);
7453     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7454     __get_user(host_tx->tai, &target_tx->tai);
7455 
7456     unlock_user_struct(target_tx, target_addr, 0);
7457     return 0;
7458 }
7459 
7460 static inline abi_long host_to_target_timex(abi_long target_addr,
7461                                             struct timex *host_tx)
7462 {
7463     struct target_timex *target_tx;
7464 
7465     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7466         return -TARGET_EFAULT;
7467     }
7468 
7469     __put_user(host_tx->modes, &target_tx->modes);
7470     __put_user(host_tx->offset, &target_tx->offset);
7471     __put_user(host_tx->freq, &target_tx->freq);
7472     __put_user(host_tx->maxerror, &target_tx->maxerror);
7473     __put_user(host_tx->esterror, &target_tx->esterror);
7474     __put_user(host_tx->status, &target_tx->status);
7475     __put_user(host_tx->constant, &target_tx->constant);
7476     __put_user(host_tx->precision, &target_tx->precision);
7477     __put_user(host_tx->tolerance, &target_tx->tolerance);
7478     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7479     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7480     __put_user(host_tx->tick, &target_tx->tick);
7481     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7482     __put_user(host_tx->jitter, &target_tx->jitter);
7483     __put_user(host_tx->shift, &target_tx->shift);
7484     __put_user(host_tx->stabil, &target_tx->stabil);
7485     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7486     __put_user(host_tx->calcnt, &target_tx->calcnt);
7487     __put_user(host_tx->errcnt, &target_tx->errcnt);
7488     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7489     __put_user(host_tx->tai, &target_tx->tai);
7490 
7491     unlock_user_struct(target_tx, target_addr, 1);
7492     return 0;
7493 }
7494 #endif
7495 
7496 
7497 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7498 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7499                                               abi_long target_addr)
7500 {
7501     struct target__kernel_timex *target_tx;
7502 
7503     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7504                                  offsetof(struct target__kernel_timex,
7505                                           time))) {
7506         return -TARGET_EFAULT;
7507     }
7508 
7509     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7510         return -TARGET_EFAULT;
7511     }
7512 
7513     __get_user(host_tx->modes, &target_tx->modes);
7514     __get_user(host_tx->offset, &target_tx->offset);
7515     __get_user(host_tx->freq, &target_tx->freq);
7516     __get_user(host_tx->maxerror, &target_tx->maxerror);
7517     __get_user(host_tx->esterror, &target_tx->esterror);
7518     __get_user(host_tx->status, &target_tx->status);
7519     __get_user(host_tx->constant, &target_tx->constant);
7520     __get_user(host_tx->precision, &target_tx->precision);
7521     __get_user(host_tx->tolerance, &target_tx->tolerance);
7522     __get_user(host_tx->tick, &target_tx->tick);
7523     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7524     __get_user(host_tx->jitter, &target_tx->jitter);
7525     __get_user(host_tx->shift, &target_tx->shift);
7526     __get_user(host_tx->stabil, &target_tx->stabil);
7527     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7528     __get_user(host_tx->calcnt, &target_tx->calcnt);
7529     __get_user(host_tx->errcnt, &target_tx->errcnt);
7530     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7531     __get_user(host_tx->tai, &target_tx->tai);
7532 
7533     unlock_user_struct(target_tx, target_addr, 0);
7534     return 0;
7535 }
7536 
7537 static inline abi_long host_to_target_timex64(abi_long target_addr,
7538                                               struct timex *host_tx)
7539 {
7540     struct target__kernel_timex *target_tx;
7541 
7542    if (copy_to_user_timeval64(target_addr +
7543                               offsetof(struct target__kernel_timex, time),
7544                               &host_tx->time)) {
7545         return -TARGET_EFAULT;
7546     }
7547 
7548     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7549         return -TARGET_EFAULT;
7550     }
7551 
7552     __put_user(host_tx->modes, &target_tx->modes);
7553     __put_user(host_tx->offset, &target_tx->offset);
7554     __put_user(host_tx->freq, &target_tx->freq);
7555     __put_user(host_tx->maxerror, &target_tx->maxerror);
7556     __put_user(host_tx->esterror, &target_tx->esterror);
7557     __put_user(host_tx->status, &target_tx->status);
7558     __put_user(host_tx->constant, &target_tx->constant);
7559     __put_user(host_tx->precision, &target_tx->precision);
7560     __put_user(host_tx->tolerance, &target_tx->tolerance);
7561     __put_user(host_tx->tick, &target_tx->tick);
7562     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7563     __put_user(host_tx->jitter, &target_tx->jitter);
7564     __put_user(host_tx->shift, &target_tx->shift);
7565     __put_user(host_tx->stabil, &target_tx->stabil);
7566     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7567     __put_user(host_tx->calcnt, &target_tx->calcnt);
7568     __put_user(host_tx->errcnt, &target_tx->errcnt);
7569     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7570     __put_user(host_tx->tai, &target_tx->tai);
7571 
7572     unlock_user_struct(target_tx, target_addr, 1);
7573     return 0;
7574 }
7575 #endif
7576 
7577 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7578 #define sigev_notify_thread_id _sigev_un._tid
7579 #endif
7580 
7581 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7582                                                abi_ulong target_addr)
7583 {
7584     struct target_sigevent *target_sevp;
7585 
7586     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7587         return -TARGET_EFAULT;
7588     }
7589 
7590     /* This union is awkward on 64 bit systems because it has a 32 bit
7591      * integer and a pointer in it; we follow the conversion approach
7592      * used for handling sigval types in signal.c so the guest should get
7593      * the correct value back even if we did a 64 bit byteswap and it's
7594      * using the 32 bit integer.
7595      */
7596     host_sevp->sigev_value.sival_ptr =
7597         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7598     host_sevp->sigev_signo =
7599         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7600     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7601     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7602 
7603     unlock_user_struct(target_sevp, target_addr, 1);
7604     return 0;
7605 }
7606 
7607 #if defined(TARGET_NR_mlockall)
7608 static inline int target_to_host_mlockall_arg(int arg)
7609 {
7610     int result = 0;
7611 
7612     if (arg & TARGET_MCL_CURRENT) {
7613         result |= MCL_CURRENT;
7614     }
7615     if (arg & TARGET_MCL_FUTURE) {
7616         result |= MCL_FUTURE;
7617     }
7618 #ifdef MCL_ONFAULT
7619     if (arg & TARGET_MCL_ONFAULT) {
7620         result |= MCL_ONFAULT;
7621     }
7622 #endif
7623 
7624     return result;
7625 }
7626 #endif
7627 
7628 static inline int target_to_host_msync_arg(abi_long arg)
7629 {
7630     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7631            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7632            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7633            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7634 }
7635 
7636 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7637      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7638      defined(TARGET_NR_newfstatat))
7639 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7640                                              abi_ulong target_addr,
7641                                              struct stat *host_st)
7642 {
7643 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7644     if (cpu_env->eabi) {
7645         struct target_eabi_stat64 *target_st;
7646 
7647         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7648             return -TARGET_EFAULT;
7649         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7650         __put_user(host_st->st_dev, &target_st->st_dev);
7651         __put_user(host_st->st_ino, &target_st->st_ino);
7652 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7653         __put_user(host_st->st_ino, &target_st->__st_ino);
7654 #endif
7655         __put_user(host_st->st_mode, &target_st->st_mode);
7656         __put_user(host_st->st_nlink, &target_st->st_nlink);
7657         __put_user(host_st->st_uid, &target_st->st_uid);
7658         __put_user(host_st->st_gid, &target_st->st_gid);
7659         __put_user(host_st->st_rdev, &target_st->st_rdev);
7660         __put_user(host_st->st_size, &target_st->st_size);
7661         __put_user(host_st->st_blksize, &target_st->st_blksize);
7662         __put_user(host_st->st_blocks, &target_st->st_blocks);
7663         __put_user(host_st->st_atime, &target_st->target_st_atime);
7664         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7665         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7666 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7667         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7668         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7669         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7670 #endif
7671         unlock_user_struct(target_st, target_addr, 1);
7672     } else
7673 #endif
7674     {
7675 #if defined(TARGET_HAS_STRUCT_STAT64)
7676         struct target_stat64 *target_st;
7677 #else
7678         struct target_stat *target_st;
7679 #endif
7680 
7681         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7682             return -TARGET_EFAULT;
7683         memset(target_st, 0, sizeof(*target_st));
7684         __put_user(host_st->st_dev, &target_st->st_dev);
7685         __put_user(host_st->st_ino, &target_st->st_ino);
7686 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7687         __put_user(host_st->st_ino, &target_st->__st_ino);
7688 #endif
7689         __put_user(host_st->st_mode, &target_st->st_mode);
7690         __put_user(host_st->st_nlink, &target_st->st_nlink);
7691         __put_user(host_st->st_uid, &target_st->st_uid);
7692         __put_user(host_st->st_gid, &target_st->st_gid);
7693         __put_user(host_st->st_rdev, &target_st->st_rdev);
7694         /* XXX: better use of kernel struct */
7695         __put_user(host_st->st_size, &target_st->st_size);
7696         __put_user(host_st->st_blksize, &target_st->st_blksize);
7697         __put_user(host_st->st_blocks, &target_st->st_blocks);
7698         __put_user(host_st->st_atime, &target_st->target_st_atime);
7699         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7700         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7701 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7702         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7703         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7704         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7705 #endif
7706         unlock_user_struct(target_st, target_addr, 1);
7707     }
7708 
7709     return 0;
7710 }
7711 #endif
7712 
7713 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7714 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7715                                             abi_ulong target_addr)
7716 {
7717     struct target_statx *target_stx;
7718 
7719     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7720         return -TARGET_EFAULT;
7721     }
7722     memset(target_stx, 0, sizeof(*target_stx));
7723 
7724     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7725     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7726     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7727     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7728     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7729     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7730     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7731     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7732     __put_user(host_stx->stx_size, &target_stx->stx_size);
7733     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7734     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7735     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7736     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7737     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7738     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7739     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7740     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7741     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7742     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7743     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7744     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7745     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7746     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7747 
7748     unlock_user_struct(target_stx, target_addr, 1);
7749 
7750     return 0;
7751 }
7752 #endif
7753 
7754 static int do_sys_futex(int *uaddr, int op, int val,
7755                          const struct timespec *timeout, int *uaddr2,
7756                          int val3)
7757 {
7758 #if HOST_LONG_BITS == 64
7759 #if defined(__NR_futex)
7760     /* always a 64-bit time_t, it doesn't define _time64 version  */
7761     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7762 
7763 #endif
7764 #else /* HOST_LONG_BITS == 64 */
7765 #if defined(__NR_futex_time64)
7766     if (sizeof(timeout->tv_sec) == 8) {
7767         /* _time64 function on 32bit arch */
7768         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7769     }
7770 #endif
7771 #if defined(__NR_futex)
7772     /* old function on 32bit arch */
7773     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7774 #endif
7775 #endif /* HOST_LONG_BITS == 64 */
7776     g_assert_not_reached();
7777 }
7778 
7779 static int do_safe_futex(int *uaddr, int op, int val,
7780                          const struct timespec *timeout, int *uaddr2,
7781                          int val3)
7782 {
7783 #if HOST_LONG_BITS == 64
7784 #if defined(__NR_futex)
7785     /* always a 64-bit time_t, it doesn't define _time64 version  */
7786     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7787 #endif
7788 #else /* HOST_LONG_BITS == 64 */
7789 #if defined(__NR_futex_time64)
7790     if (sizeof(timeout->tv_sec) == 8) {
7791         /* _time64 function on 32bit arch */
7792         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7793                                            val3));
7794     }
7795 #endif
7796 #if defined(__NR_futex)
7797     /* old function on 32bit arch */
7798     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7799 #endif
7800 #endif /* HOST_LONG_BITS == 64 */
7801     return -TARGET_ENOSYS;
7802 }
7803 
7804 /* ??? Using host futex calls even when target atomic operations
7805    are not really atomic probably breaks things.  However implementing
7806    futexes locally would make futexes shared between multiple processes
7807    tricky.  However they're probably useless because guest atomic
7808    operations won't work either.  */
7809 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7810 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7811                     int op, int val, target_ulong timeout,
7812                     target_ulong uaddr2, int val3)
7813 {
7814     struct timespec ts, *pts = NULL;
7815     void *haddr2 = NULL;
7816     int base_op;
7817 
7818     /* We assume FUTEX_* constants are the same on both host and target. */
7819 #ifdef FUTEX_CMD_MASK
7820     base_op = op & FUTEX_CMD_MASK;
7821 #else
7822     base_op = op;
7823 #endif
7824     switch (base_op) {
7825     case FUTEX_WAIT:
7826     case FUTEX_WAIT_BITSET:
7827         val = tswap32(val);
7828         break;
7829     case FUTEX_WAIT_REQUEUE_PI:
7830         val = tswap32(val);
7831         haddr2 = g2h(cpu, uaddr2);
7832         break;
7833     case FUTEX_LOCK_PI:
7834     case FUTEX_LOCK_PI2:
7835         break;
7836     case FUTEX_WAKE:
7837     case FUTEX_WAKE_BITSET:
7838     case FUTEX_TRYLOCK_PI:
7839     case FUTEX_UNLOCK_PI:
7840         timeout = 0;
7841         break;
7842     case FUTEX_FD:
7843         val = target_to_host_signal(val);
7844         timeout = 0;
7845         break;
7846     case FUTEX_CMP_REQUEUE:
7847     case FUTEX_CMP_REQUEUE_PI:
7848         val3 = tswap32(val3);
7849         /* fall through */
7850     case FUTEX_REQUEUE:
7851     case FUTEX_WAKE_OP:
7852         /*
7853          * For these, the 4th argument is not TIMEOUT, but VAL2.
7854          * But the prototype of do_safe_futex takes a pointer, so
7855          * insert casts to satisfy the compiler.  We do not need
7856          * to tswap VAL2 since it's not compared to guest memory.
7857           */
7858         pts = (struct timespec *)(uintptr_t)timeout;
7859         timeout = 0;
7860         haddr2 = g2h(cpu, uaddr2);
7861         break;
7862     default:
7863         return -TARGET_ENOSYS;
7864     }
7865     if (timeout) {
7866         pts = &ts;
7867         if (time64
7868             ? target_to_host_timespec64(pts, timeout)
7869             : target_to_host_timespec(pts, timeout)) {
7870             return -TARGET_EFAULT;
7871         }
7872     }
7873     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7874 }
7875 #endif
7876 
7877 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7878 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7879                                      abi_long handle, abi_long mount_id,
7880                                      abi_long flags)
7881 {
7882     struct file_handle *target_fh;
7883     struct file_handle *fh;
7884     int mid = 0;
7885     abi_long ret;
7886     char *name;
7887     unsigned int size, total_size;
7888 
7889     if (get_user_s32(size, handle)) {
7890         return -TARGET_EFAULT;
7891     }
7892 
7893     name = lock_user_string(pathname);
7894     if (!name) {
7895         return -TARGET_EFAULT;
7896     }
7897 
7898     total_size = sizeof(struct file_handle) + size;
7899     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7900     if (!target_fh) {
7901         unlock_user(name, pathname, 0);
7902         return -TARGET_EFAULT;
7903     }
7904 
7905     fh = g_malloc0(total_size);
7906     fh->handle_bytes = size;
7907 
7908     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7909     unlock_user(name, pathname, 0);
7910 
7911     /* man name_to_handle_at(2):
7912      * Other than the use of the handle_bytes field, the caller should treat
7913      * the file_handle structure as an opaque data type
7914      */
7915 
7916     memcpy(target_fh, fh, total_size);
7917     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7918     target_fh->handle_type = tswap32(fh->handle_type);
7919     g_free(fh);
7920     unlock_user(target_fh, handle, total_size);
7921 
7922     if (put_user_s32(mid, mount_id)) {
7923         return -TARGET_EFAULT;
7924     }
7925 
7926     return ret;
7927 
7928 }
7929 #endif
7930 
7931 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7932 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7933                                      abi_long flags)
7934 {
7935     struct file_handle *target_fh;
7936     struct file_handle *fh;
7937     unsigned int size, total_size;
7938     abi_long ret;
7939 
7940     if (get_user_s32(size, handle)) {
7941         return -TARGET_EFAULT;
7942     }
7943 
7944     total_size = sizeof(struct file_handle) + size;
7945     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7946     if (!target_fh) {
7947         return -TARGET_EFAULT;
7948     }
7949 
7950     fh = g_memdup(target_fh, total_size);
7951     fh->handle_bytes = size;
7952     fh->handle_type = tswap32(target_fh->handle_type);
7953 
7954     ret = get_errno(open_by_handle_at(mount_fd, fh,
7955                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7956 
7957     g_free(fh);
7958 
7959     unlock_user(target_fh, handle, total_size);
7960 
7961     return ret;
7962 }
7963 #endif
7964 
7965 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7966 
7967 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7968 {
7969     int host_flags;
7970     target_sigset_t *target_mask;
7971     sigset_t host_mask;
7972     abi_long ret;
7973 
7974     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7975         return -TARGET_EINVAL;
7976     }
7977     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7978         return -TARGET_EFAULT;
7979     }
7980 
7981     target_to_host_sigset(&host_mask, target_mask);
7982 
7983     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7984 
7985     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7986     if (ret >= 0) {
7987         fd_trans_register(ret, &target_signalfd_trans);
7988     }
7989 
7990     unlock_user_struct(target_mask, mask, 0);
7991 
7992     return ret;
7993 }
7994 #endif
7995 
7996 /* Map host to target signal numbers for the wait family of syscalls.
7997    Assume all other status bits are the same.  */
7998 int host_to_target_waitstatus(int status)
7999 {
8000     if (WIFSIGNALED(status)) {
8001         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8002     }
8003     if (WIFSTOPPED(status)) {
8004         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8005                | (status & 0xff);
8006     }
8007     return status;
8008 }
8009 
8010 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8011 {
8012     CPUState *cpu = env_cpu(cpu_env);
8013     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8014     int i;
8015 
8016     for (i = 0; i < bprm->argc; i++) {
8017         size_t len = strlen(bprm->argv[i]) + 1;
8018 
8019         if (write(fd, bprm->argv[i], len) != len) {
8020             return -1;
8021         }
8022     }
8023 
8024     return 0;
8025 }
8026 
8027 struct open_self_maps_data {
8028     TaskState *ts;
8029     IntervalTreeRoot *host_maps;
8030     int fd;
8031     bool smaps;
8032 };
8033 
8034 /*
8035  * Subroutine to output one line of /proc/self/maps,
8036  * or one region of /proc/self/smaps.
8037  */
8038 
8039 #ifdef TARGET_HPPA
8040 # define test_stack(S, E, L)  (E == L)
8041 #else
8042 # define test_stack(S, E, L)  (S == L)
8043 #endif
8044 
8045 static void open_self_maps_4(const struct open_self_maps_data *d,
8046                              const MapInfo *mi, abi_ptr start,
8047                              abi_ptr end, unsigned flags)
8048 {
8049     const struct image_info *info = d->ts->info;
8050     const char *path = mi->path;
8051     uint64_t offset;
8052     int fd = d->fd;
8053     int count;
8054 
8055     if (test_stack(start, end, info->stack_limit)) {
8056         path = "[stack]";
8057     } else if (start == info->brk) {
8058         path = "[heap]";
8059     } else if (start == info->vdso) {
8060         path = "[vdso]";
8061 #ifdef TARGET_X86_64
8062     } else if (start == TARGET_VSYSCALL_PAGE) {
8063         path = "[vsyscall]";
8064 #endif
8065     }
8066 
8067     /* Except null device (MAP_ANON), adjust offset for this fragment. */
8068     offset = mi->offset;
8069     if (mi->dev) {
8070         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8071         offset += hstart - mi->itree.start;
8072     }
8073 
8074     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8075                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8076                     start, end,
8077                     (flags & PAGE_READ) ? 'r' : '-',
8078                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8079                     (flags & PAGE_EXEC) ? 'x' : '-',
8080                     mi->is_priv ? 'p' : 's',
8081                     offset, major(mi->dev), minor(mi->dev),
8082                     (uint64_t)mi->inode);
8083     if (path) {
8084         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8085     } else {
8086         dprintf(fd, "\n");
8087     }
8088 
8089     if (d->smaps) {
8090         unsigned long size = end - start;
8091         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8092         unsigned long size_kb = size >> 10;
8093 
8094         dprintf(fd, "Size:                  %lu kB\n"
8095                 "KernelPageSize:        %lu kB\n"
8096                 "MMUPageSize:           %lu kB\n"
8097                 "Rss:                   0 kB\n"
8098                 "Pss:                   0 kB\n"
8099                 "Pss_Dirty:             0 kB\n"
8100                 "Shared_Clean:          0 kB\n"
8101                 "Shared_Dirty:          0 kB\n"
8102                 "Private_Clean:         0 kB\n"
8103                 "Private_Dirty:         0 kB\n"
8104                 "Referenced:            0 kB\n"
8105                 "Anonymous:             %lu kB\n"
8106                 "LazyFree:              0 kB\n"
8107                 "AnonHugePages:         0 kB\n"
8108                 "ShmemPmdMapped:        0 kB\n"
8109                 "FilePmdMapped:         0 kB\n"
8110                 "Shared_Hugetlb:        0 kB\n"
8111                 "Private_Hugetlb:       0 kB\n"
8112                 "Swap:                  0 kB\n"
8113                 "SwapPss:               0 kB\n"
8114                 "Locked:                0 kB\n"
8115                 "THPeligible:    0\n"
8116                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8117                 size_kb, page_size_kb, page_size_kb,
8118                 (flags & PAGE_ANON ? size_kb : 0),
8119                 (flags & PAGE_READ) ? " rd" : "",
8120                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8121                 (flags & PAGE_EXEC) ? " ex" : "",
8122                 mi->is_priv ? "" : " sh",
8123                 (flags & PAGE_READ) ? " mr" : "",
8124                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8125                 (flags & PAGE_EXEC) ? " me" : "",
8126                 mi->is_priv ? "" : " ms");
8127     }
8128 }
8129 
8130 /*
8131  * Callback for walk_memory_regions, when read_self_maps() fails.
8132  * Proceed without the benefit of host /proc/self/maps cross-check.
8133  */
8134 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8135                             target_ulong guest_end, unsigned long flags)
8136 {
8137     static const MapInfo mi = { .is_priv = true };
8138 
8139     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8140     return 0;
8141 }
8142 
8143 /*
8144  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8145  */
8146 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8147                             target_ulong guest_end, unsigned long flags)
8148 {
8149     const struct open_self_maps_data *d = opaque;
8150     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8151     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8152 
8153 #ifdef TARGET_X86_64
8154     /*
8155      * Because of the extremely high position of the page within the guest
8156      * virtual address space, this is not backed by host memory at all.
8157      * Therefore the loop below would fail.  This is the only instance
8158      * of not having host backing memory.
8159      */
8160     if (guest_start == TARGET_VSYSCALL_PAGE) {
8161         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8162     }
8163 #endif
8164 
8165     while (1) {
8166         IntervalTreeNode *n =
8167             interval_tree_iter_first(d->host_maps, host_start, host_start);
8168         MapInfo *mi = container_of(n, MapInfo, itree);
8169         uintptr_t this_hlast = MIN(host_last, n->last);
8170         target_ulong this_gend = h2g(this_hlast) + 1;
8171 
8172         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8173 
8174         if (this_hlast == host_last) {
8175             return 0;
8176         }
8177         host_start = this_hlast + 1;
8178         guest_start = h2g(host_start);
8179     }
8180 }
8181 
8182 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8183 {
8184     struct open_self_maps_data d = {
8185         .ts = get_task_state(env_cpu(env)),
8186         .fd = fd,
8187         .smaps = smaps
8188     };
8189 
8190     mmap_lock();
8191     d.host_maps = read_self_maps();
8192     if (d.host_maps) {
8193         walk_memory_regions(&d, open_self_maps_2);
8194         free_self_maps(d.host_maps);
8195     } else {
8196         walk_memory_regions(&d, open_self_maps_3);
8197     }
8198     mmap_unlock();
8199     return 0;
8200 }
8201 
8202 static int open_self_maps(CPUArchState *cpu_env, int fd)
8203 {
8204     return open_self_maps_1(cpu_env, fd, false);
8205 }
8206 
8207 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8208 {
8209     return open_self_maps_1(cpu_env, fd, true);
8210 }
8211 
8212 static int open_self_stat(CPUArchState *cpu_env, int fd)
8213 {
8214     CPUState *cpu = env_cpu(cpu_env);
8215     TaskState *ts = get_task_state(cpu);
8216     g_autoptr(GString) buf = g_string_new(NULL);
8217     int i;
8218 
8219     for (i = 0; i < 44; i++) {
8220         if (i == 0) {
8221             /* pid */
8222             g_string_printf(buf, FMT_pid " ", getpid());
8223         } else if (i == 1) {
8224             /* app name */
8225             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8226             bin = bin ? bin + 1 : ts->bprm->argv[0];
8227             g_string_printf(buf, "(%.15s) ", bin);
8228         } else if (i == 2) {
8229             /* task state */
8230             g_string_assign(buf, "R "); /* we are running right now */
8231         } else if (i == 3) {
8232             /* ppid */
8233             g_string_printf(buf, FMT_pid " ", getppid());
8234         } else if (i == 19) {
8235             /* num_threads */
8236             int cpus = 0;
8237             WITH_RCU_READ_LOCK_GUARD() {
8238                 CPUState *cpu_iter;
8239                 CPU_FOREACH(cpu_iter) {
8240                     cpus++;
8241                 }
8242             }
8243             g_string_printf(buf, "%d ", cpus);
8244         } else if (i == 21) {
8245             /* starttime */
8246             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8247         } else if (i == 27) {
8248             /* stack bottom */
8249             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8250         } else {
8251             /* for the rest, there is MasterCard */
8252             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8253         }
8254 
8255         if (write(fd, buf->str, buf->len) != buf->len) {
8256             return -1;
8257         }
8258     }
8259 
8260     return 0;
8261 }
8262 
8263 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8264 {
8265     CPUState *cpu = env_cpu(cpu_env);
8266     TaskState *ts = get_task_state(cpu);
8267     abi_ulong auxv = ts->info->saved_auxv;
8268     abi_ulong len = ts->info->auxv_len;
8269     char *ptr;
8270 
8271     /*
8272      * Auxiliary vector is stored in target process stack.
8273      * read in whole auxv vector and copy it to file
8274      */
8275     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8276     if (ptr != NULL) {
8277         while (len > 0) {
8278             ssize_t r;
8279             r = write(fd, ptr, len);
8280             if (r <= 0) {
8281                 break;
8282             }
8283             len -= r;
8284             ptr += r;
8285         }
8286         lseek(fd, 0, SEEK_SET);
8287         unlock_user(ptr, auxv, len);
8288     }
8289 
8290     return 0;
8291 }
8292 
8293 static int is_proc_myself(const char *filename, const char *entry)
8294 {
8295     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8296         filename += strlen("/proc/");
8297         if (!strncmp(filename, "self/", strlen("self/"))) {
8298             filename += strlen("self/");
8299         } else if (*filename >= '1' && *filename <= '9') {
8300             char myself[80];
8301             snprintf(myself, sizeof(myself), "%d/", getpid());
8302             if (!strncmp(filename, myself, strlen(myself))) {
8303                 filename += strlen(myself);
8304             } else {
8305                 return 0;
8306             }
8307         } else {
8308             return 0;
8309         }
8310         if (!strcmp(filename, entry)) {
8311             return 1;
8312         }
8313     }
8314     return 0;
8315 }
8316 
8317 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8318                       const char *fmt, int code)
8319 {
8320     if (logfile) {
8321         CPUState *cs = env_cpu(env);
8322 
8323         fprintf(logfile, fmt, code);
8324         fprintf(logfile, "Failing executable: %s\n", exec_path);
8325         cpu_dump_state(cs, logfile, 0);
8326         open_self_maps(env, fileno(logfile));
8327     }
8328 }
8329 
8330 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8331 {
8332     /* dump to console */
8333     excp_dump_file(stderr, env, fmt, code);
8334 
8335     /* dump to log file */
8336     if (qemu_log_separate()) {
8337         FILE *logfile = qemu_log_trylock();
8338 
8339         excp_dump_file(logfile, env, fmt, code);
8340         qemu_log_unlock(logfile);
8341     }
8342 }
8343 
8344 #include "target_proc.h"
8345 
8346 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8347     defined(HAVE_ARCH_PROC_CPUINFO) || \
8348     defined(HAVE_ARCH_PROC_HARDWARE)
8349 static int is_proc(const char *filename, const char *entry)
8350 {
8351     return strcmp(filename, entry) == 0;
8352 }
8353 #endif
8354 
8355 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8356 static int open_net_route(CPUArchState *cpu_env, int fd)
8357 {
8358     FILE *fp;
8359     char *line = NULL;
8360     size_t len = 0;
8361     ssize_t read;
8362 
8363     fp = fopen("/proc/net/route", "r");
8364     if (fp == NULL) {
8365         return -1;
8366     }
8367 
8368     /* read header */
8369 
8370     read = getline(&line, &len, fp);
8371     dprintf(fd, "%s", line);
8372 
8373     /* read routes */
8374 
8375     while ((read = getline(&line, &len, fp)) != -1) {
8376         char iface[16];
8377         uint32_t dest, gw, mask;
8378         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8379         int fields;
8380 
8381         fields = sscanf(line,
8382                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8383                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8384                         &mask, &mtu, &window, &irtt);
8385         if (fields != 11) {
8386             continue;
8387         }
8388         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8389                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8390                 metric, tswap32(mask), mtu, window, irtt);
8391     }
8392 
8393     free(line);
8394     fclose(fp);
8395 
8396     return 0;
8397 }
8398 #endif
8399 
8400 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8401                               const char *fname, int flags, mode_t mode,
8402                               int openat2_resolve, bool safe)
8403 {
8404     g_autofree char *proc_name = NULL;
8405     const char *pathname;
8406     struct fake_open {
8407         const char *filename;
8408         int (*fill)(CPUArchState *cpu_env, int fd);
8409         int (*cmp)(const char *s1, const char *s2);
8410     };
8411     const struct fake_open *fake_open;
8412     static const struct fake_open fakes[] = {
8413         { "maps", open_self_maps, is_proc_myself },
8414         { "smaps", open_self_smaps, is_proc_myself },
8415         { "stat", open_self_stat, is_proc_myself },
8416         { "auxv", open_self_auxv, is_proc_myself },
8417         { "cmdline", open_self_cmdline, is_proc_myself },
8418 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8419         { "/proc/net/route", open_net_route, is_proc },
8420 #endif
8421 #if defined(HAVE_ARCH_PROC_CPUINFO)
8422         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8423 #endif
8424 #if defined(HAVE_ARCH_PROC_HARDWARE)
8425         { "/proc/hardware", open_hardware, is_proc },
8426 #endif
8427         { NULL, NULL, NULL }
8428     };
8429 
8430     /* if this is a file from /proc/ filesystem, expand full name */
8431     proc_name = realpath(fname, NULL);
8432     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8433         pathname = proc_name;
8434     } else {
8435         pathname = fname;
8436     }
8437 
8438     if (is_proc_myself(pathname, "exe")) {
8439         /* Honor openat2 resolve flags */
8440         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8441             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8442             errno = ELOOP;
8443             return -1;
8444         }
8445         if (safe) {
8446             return safe_openat(dirfd, exec_path, flags, mode);
8447         } else {
8448             return openat(dirfd, exec_path, flags, mode);
8449         }
8450     }
8451 
8452     for (fake_open = fakes; fake_open->filename; fake_open++) {
8453         if (fake_open->cmp(pathname, fake_open->filename)) {
8454             break;
8455         }
8456     }
8457 
8458     if (fake_open->filename) {
8459         const char *tmpdir;
8460         char filename[PATH_MAX];
8461         int fd, r;
8462 
8463         fd = memfd_create("qemu-open", 0);
8464         if (fd < 0) {
8465             if (errno != ENOSYS) {
8466                 return fd;
8467             }
8468             /* create temporary file to map stat to */
8469             tmpdir = getenv("TMPDIR");
8470             if (!tmpdir)
8471                 tmpdir = "/tmp";
8472             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8473             fd = mkstemp(filename);
8474             if (fd < 0) {
8475                 return fd;
8476             }
8477             unlink(filename);
8478         }
8479 
8480         if ((r = fake_open->fill(cpu_env, fd))) {
8481             int e = errno;
8482             close(fd);
8483             errno = e;
8484             return r;
8485         }
8486         lseek(fd, 0, SEEK_SET);
8487 
8488         return fd;
8489     }
8490 
8491     return -2;
8492 }
8493 
8494 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8495                     int flags, mode_t mode, bool safe)
8496 {
8497     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8498     if (fd > -2) {
8499         return fd;
8500     }
8501 
8502     if (safe) {
8503         return safe_openat(dirfd, path(pathname), flags, mode);
8504     } else {
8505         return openat(dirfd, path(pathname), flags, mode);
8506     }
8507 }
8508 
8509 
8510 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8511                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8512                       abi_ulong guest_size)
8513 {
8514     struct open_how_ver0 how = {0};
8515     char *pathname;
8516     int ret;
8517 
8518     if (guest_size < sizeof(struct target_open_how_ver0)) {
8519         return -TARGET_EINVAL;
8520     }
8521     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8522     if (ret) {
8523         if (ret == -TARGET_E2BIG) {
8524             qemu_log_mask(LOG_UNIMP,
8525                           "Unimplemented openat2 open_how size: "
8526                           TARGET_ABI_FMT_lu "\n", guest_size);
8527         }
8528         return ret;
8529     }
8530     pathname = lock_user_string(guest_pathname);
8531     if (!pathname) {
8532         return -TARGET_EFAULT;
8533     }
8534 
8535     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8536     how.mode = tswap64(how.mode);
8537     how.resolve = tswap64(how.resolve);
8538     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8539                                 how.resolve, true);
8540     if (fd > -2) {
8541         ret = get_errno(fd);
8542     } else {
8543         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8544                                      sizeof(struct open_how_ver0)));
8545     }
8546 
8547     fd_trans_unregister(ret);
8548     unlock_user(pathname, guest_pathname, 0);
8549     return ret;
8550 }
8551 
8552 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8553 {
8554     ssize_t ret;
8555 
8556     if (!pathname || !buf) {
8557         errno = EFAULT;
8558         return -1;
8559     }
8560 
8561     if (!bufsiz) {
8562         /* Short circuit this for the magic exe check. */
8563         errno = EINVAL;
8564         return -1;
8565     }
8566 
8567     if (is_proc_myself((const char *)pathname, "exe")) {
8568         /*
8569          * Don't worry about sign mismatch as earlier mapping
8570          * logic would have thrown a bad address error.
8571          */
8572         ret = MIN(strlen(exec_path), bufsiz);
8573         /* We cannot NUL terminate the string. */
8574         memcpy(buf, exec_path, ret);
8575     } else {
8576         ret = readlink(path(pathname), buf, bufsiz);
8577     }
8578 
8579     return ret;
8580 }
8581 
8582 static int do_execv(CPUArchState *cpu_env, int dirfd,
8583                     abi_long pathname, abi_long guest_argp,
8584                     abi_long guest_envp, int flags, bool is_execveat)
8585 {
8586     int ret;
8587     char **argp, **envp;
8588     int argc, envc;
8589     abi_ulong gp;
8590     abi_ulong addr;
8591     char **q;
8592     void *p;
8593 
8594     argc = 0;
8595 
8596     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8597         if (get_user_ual(addr, gp)) {
8598             return -TARGET_EFAULT;
8599         }
8600         if (!addr) {
8601             break;
8602         }
8603         argc++;
8604     }
8605     envc = 0;
8606     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8607         if (get_user_ual(addr, gp)) {
8608             return -TARGET_EFAULT;
8609         }
8610         if (!addr) {
8611             break;
8612         }
8613         envc++;
8614     }
8615 
8616     argp = g_new0(char *, argc + 1);
8617     envp = g_new0(char *, envc + 1);
8618 
8619     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8620         if (get_user_ual(addr, gp)) {
8621             goto execve_efault;
8622         }
8623         if (!addr) {
8624             break;
8625         }
8626         *q = lock_user_string(addr);
8627         if (!*q) {
8628             goto execve_efault;
8629         }
8630     }
8631     *q = NULL;
8632 
8633     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8634         if (get_user_ual(addr, gp)) {
8635             goto execve_efault;
8636         }
8637         if (!addr) {
8638             break;
8639         }
8640         *q = lock_user_string(addr);
8641         if (!*q) {
8642             goto execve_efault;
8643         }
8644     }
8645     *q = NULL;
8646 
8647     /*
8648      * Although execve() is not an interruptible syscall it is
8649      * a special case where we must use the safe_syscall wrapper:
8650      * if we allow a signal to happen before we make the host
8651      * syscall then we will 'lose' it, because at the point of
8652      * execve the process leaves QEMU's control. So we use the
8653      * safe syscall wrapper to ensure that we either take the
8654      * signal as a guest signal, or else it does not happen
8655      * before the execve completes and makes it the other
8656      * program's problem.
8657      */
8658     p = lock_user_string(pathname);
8659     if (!p) {
8660         goto execve_efault;
8661     }
8662 
8663     const char *exe = p;
8664     if (is_proc_myself(p, "exe")) {
8665         exe = exec_path;
8666     }
8667     ret = is_execveat
8668         ? safe_execveat(dirfd, exe, argp, envp, flags)
8669         : safe_execve(exe, argp, envp);
8670     ret = get_errno(ret);
8671 
8672     unlock_user(p, pathname, 0);
8673 
8674     goto execve_end;
8675 
8676 execve_efault:
8677     ret = -TARGET_EFAULT;
8678 
8679 execve_end:
8680     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8681         if (get_user_ual(addr, gp) || !addr) {
8682             break;
8683         }
8684         unlock_user(*q, addr, 0);
8685     }
8686     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8687         if (get_user_ual(addr, gp) || !addr) {
8688             break;
8689         }
8690         unlock_user(*q, addr, 0);
8691     }
8692 
8693     g_free(argp);
8694     g_free(envp);
8695     return ret;
8696 }
8697 
8698 #define TIMER_MAGIC 0x0caf0000
8699 #define TIMER_MAGIC_MASK 0xffff0000
8700 
8701 /* Convert QEMU provided timer ID back to internal 16bit index format */
8702 static target_timer_t get_timer_id(abi_long arg)
8703 {
8704     target_timer_t timerid = arg;
8705 
8706     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8707         return -TARGET_EINVAL;
8708     }
8709 
8710     timerid &= 0xffff;
8711 
8712     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8713         return -TARGET_EINVAL;
8714     }
8715 
8716     return timerid;
8717 }
8718 
8719 static int target_to_host_cpu_mask(unsigned long *host_mask,
8720                                    size_t host_size,
8721                                    abi_ulong target_addr,
8722                                    size_t target_size)
8723 {
8724     unsigned target_bits = sizeof(abi_ulong) * 8;
8725     unsigned host_bits = sizeof(*host_mask) * 8;
8726     abi_ulong *target_mask;
8727     unsigned i, j;
8728 
8729     assert(host_size >= target_size);
8730 
8731     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8732     if (!target_mask) {
8733         return -TARGET_EFAULT;
8734     }
8735     memset(host_mask, 0, host_size);
8736 
8737     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8738         unsigned bit = i * target_bits;
8739         abi_ulong val;
8740 
8741         __get_user(val, &target_mask[i]);
8742         for (j = 0; j < target_bits; j++, bit++) {
8743             if (val & (1UL << j)) {
8744                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8745             }
8746         }
8747     }
8748 
8749     unlock_user(target_mask, target_addr, 0);
8750     return 0;
8751 }
8752 
8753 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8754                                    size_t host_size,
8755                                    abi_ulong target_addr,
8756                                    size_t target_size)
8757 {
8758     unsigned target_bits = sizeof(abi_ulong) * 8;
8759     unsigned host_bits = sizeof(*host_mask) * 8;
8760     abi_ulong *target_mask;
8761     unsigned i, j;
8762 
8763     assert(host_size >= target_size);
8764 
8765     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8766     if (!target_mask) {
8767         return -TARGET_EFAULT;
8768     }
8769 
8770     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8771         unsigned bit = i * target_bits;
8772         abi_ulong val = 0;
8773 
8774         for (j = 0; j < target_bits; j++, bit++) {
8775             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8776                 val |= 1UL << j;
8777             }
8778         }
8779         __put_user(val, &target_mask[i]);
8780     }
8781 
8782     unlock_user(target_mask, target_addr, target_size);
8783     return 0;
8784 }
8785 
8786 #ifdef TARGET_NR_getdents
8787 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8788 {
8789     g_autofree void *hdirp = NULL;
8790     void *tdirp;
8791     int hlen, hoff, toff;
8792     int hreclen, treclen;
8793     off_t prev_diroff = 0;
8794 
8795     hdirp = g_try_malloc(count);
8796     if (!hdirp) {
8797         return -TARGET_ENOMEM;
8798     }
8799 
8800 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8801     hlen = sys_getdents(dirfd, hdirp, count);
8802 #else
8803     hlen = sys_getdents64(dirfd, hdirp, count);
8804 #endif
8805 
8806     hlen = get_errno(hlen);
8807     if (is_error(hlen)) {
8808         return hlen;
8809     }
8810 
8811     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8812     if (!tdirp) {
8813         return -TARGET_EFAULT;
8814     }
8815 
8816     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8817 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8818         struct linux_dirent *hde = hdirp + hoff;
8819 #else
8820         struct linux_dirent64 *hde = hdirp + hoff;
8821 #endif
8822         struct target_dirent *tde = tdirp + toff;
8823         int namelen;
8824         uint8_t type;
8825 
8826         namelen = strlen(hde->d_name);
8827         hreclen = hde->d_reclen;
8828         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8829         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8830 
8831         if (toff + treclen > count) {
8832             /*
8833              * If the host struct is smaller than the target struct, or
8834              * requires less alignment and thus packs into less space,
8835              * then the host can return more entries than we can pass
8836              * on to the guest.
8837              */
8838             if (toff == 0) {
8839                 toff = -TARGET_EINVAL; /* result buffer is too small */
8840                 break;
8841             }
8842             /*
8843              * Return what we have, resetting the file pointer to the
8844              * location of the first record not returned.
8845              */
8846             lseek(dirfd, prev_diroff, SEEK_SET);
8847             break;
8848         }
8849 
8850         prev_diroff = hde->d_off;
8851         tde->d_ino = tswapal(hde->d_ino);
8852         tde->d_off = tswapal(hde->d_off);
8853         tde->d_reclen = tswap16(treclen);
8854         memcpy(tde->d_name, hde->d_name, namelen + 1);
8855 
8856         /*
8857          * The getdents type is in what was formerly a padding byte at the
8858          * end of the structure.
8859          */
8860 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8861         type = *((uint8_t *)hde + hreclen - 1);
8862 #else
8863         type = hde->d_type;
8864 #endif
8865         *((uint8_t *)tde + treclen - 1) = type;
8866     }
8867 
8868     unlock_user(tdirp, arg2, toff);
8869     return toff;
8870 }
8871 #endif /* TARGET_NR_getdents */
8872 
8873 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8874 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8875 {
8876     g_autofree void *hdirp = NULL;
8877     void *tdirp;
8878     int hlen, hoff, toff;
8879     int hreclen, treclen;
8880     off_t prev_diroff = 0;
8881 
8882     hdirp = g_try_malloc(count);
8883     if (!hdirp) {
8884         return -TARGET_ENOMEM;
8885     }
8886 
8887     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8888     if (is_error(hlen)) {
8889         return hlen;
8890     }
8891 
8892     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8893     if (!tdirp) {
8894         return -TARGET_EFAULT;
8895     }
8896 
8897     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8898         struct linux_dirent64 *hde = hdirp + hoff;
8899         struct target_dirent64 *tde = tdirp + toff;
8900         int namelen;
8901 
8902         namelen = strlen(hde->d_name) + 1;
8903         hreclen = hde->d_reclen;
8904         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8905         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8906 
8907         if (toff + treclen > count) {
8908             /*
8909              * If the host struct is smaller than the target struct, or
8910              * requires less alignment and thus packs into less space,
8911              * then the host can return more entries than we can pass
8912              * on to the guest.
8913              */
8914             if (toff == 0) {
8915                 toff = -TARGET_EINVAL; /* result buffer is too small */
8916                 break;
8917             }
8918             /*
8919              * Return what we have, resetting the file pointer to the
8920              * location of the first record not returned.
8921              */
8922             lseek(dirfd, prev_diroff, SEEK_SET);
8923             break;
8924         }
8925 
8926         prev_diroff = hde->d_off;
8927         tde->d_ino = tswap64(hde->d_ino);
8928         tde->d_off = tswap64(hde->d_off);
8929         tde->d_reclen = tswap16(treclen);
8930         tde->d_type = hde->d_type;
8931         memcpy(tde->d_name, hde->d_name, namelen);
8932     }
8933 
8934     unlock_user(tdirp, arg2, toff);
8935     return toff;
8936 }
8937 #endif /* TARGET_NR_getdents64 */
8938 
8939 #if defined(TARGET_NR_riscv_hwprobe)
8940 
8941 #define RISCV_HWPROBE_KEY_MVENDORID     0
8942 #define RISCV_HWPROBE_KEY_MARCHID       1
8943 #define RISCV_HWPROBE_KEY_MIMPID        2
8944 
8945 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8946 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8947 
8948 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8949 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8950 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8951 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8952 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8953 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8954 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8955 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8956 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8957 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8958 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8959 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8960 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8961 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8962 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8963 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8964 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8965 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8966 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8967 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8968 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8969 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8970 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8971 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8972 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8973 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8974 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8975 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8976 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8977 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8978 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8979 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8980 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8981 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8982 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8983 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8984 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8985 
8986 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8987 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8988 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8989 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8990 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8991 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8992 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8993 
8994 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8995 
8996 struct riscv_hwprobe {
8997     abi_llong  key;
8998     abi_ullong value;
8999 };
9000 
9001 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9002                                     struct riscv_hwprobe *pair,
9003                                     size_t pair_count)
9004 {
9005     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9006 
9007     for (; pair_count > 0; pair_count--, pair++) {
9008         abi_llong key;
9009         abi_ullong value;
9010         __put_user(0, &pair->value);
9011         __get_user(key, &pair->key);
9012         switch (key) {
9013         case RISCV_HWPROBE_KEY_MVENDORID:
9014             __put_user(cfg->mvendorid, &pair->value);
9015             break;
9016         case RISCV_HWPROBE_KEY_MARCHID:
9017             __put_user(cfg->marchid, &pair->value);
9018             break;
9019         case RISCV_HWPROBE_KEY_MIMPID:
9020             __put_user(cfg->mimpid, &pair->value);
9021             break;
9022         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9023             value = riscv_has_ext(env, RVI) &&
9024                     riscv_has_ext(env, RVM) &&
9025                     riscv_has_ext(env, RVA) ?
9026                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9027             __put_user(value, &pair->value);
9028             break;
9029         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9030             value = riscv_has_ext(env, RVF) &&
9031                     riscv_has_ext(env, RVD) ?
9032                     RISCV_HWPROBE_IMA_FD : 0;
9033             value |= riscv_has_ext(env, RVC) ?
9034                      RISCV_HWPROBE_IMA_C : 0;
9035             value |= riscv_has_ext(env, RVV) ?
9036                      RISCV_HWPROBE_IMA_V : 0;
9037             value |= cfg->ext_zba ?
9038                      RISCV_HWPROBE_EXT_ZBA : 0;
9039             value |= cfg->ext_zbb ?
9040                      RISCV_HWPROBE_EXT_ZBB : 0;
9041             value |= cfg->ext_zbs ?
9042                      RISCV_HWPROBE_EXT_ZBS : 0;
9043             value |= cfg->ext_zicboz ?
9044                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9045             value |= cfg->ext_zbc ?
9046                      RISCV_HWPROBE_EXT_ZBC : 0;
9047             value |= cfg->ext_zbkb ?
9048                      RISCV_HWPROBE_EXT_ZBKB : 0;
9049             value |= cfg->ext_zbkc ?
9050                      RISCV_HWPROBE_EXT_ZBKC : 0;
9051             value |= cfg->ext_zbkx ?
9052                      RISCV_HWPROBE_EXT_ZBKX : 0;
9053             value |= cfg->ext_zknd ?
9054                      RISCV_HWPROBE_EXT_ZKND : 0;
9055             value |= cfg->ext_zkne ?
9056                      RISCV_HWPROBE_EXT_ZKNE : 0;
9057             value |= cfg->ext_zknh ?
9058                      RISCV_HWPROBE_EXT_ZKNH : 0;
9059             value |= cfg->ext_zksed ?
9060                      RISCV_HWPROBE_EXT_ZKSED : 0;
9061             value |= cfg->ext_zksh ?
9062                      RISCV_HWPROBE_EXT_ZKSH : 0;
9063             value |= cfg->ext_zkt ?
9064                      RISCV_HWPROBE_EXT_ZKT : 0;
9065             value |= cfg->ext_zvbb ?
9066                      RISCV_HWPROBE_EXT_ZVBB : 0;
9067             value |= cfg->ext_zvbc ?
9068                      RISCV_HWPROBE_EXT_ZVBC : 0;
9069             value |= cfg->ext_zvkb ?
9070                      RISCV_HWPROBE_EXT_ZVKB : 0;
9071             value |= cfg->ext_zvkg ?
9072                      RISCV_HWPROBE_EXT_ZVKG : 0;
9073             value |= cfg->ext_zvkned ?
9074                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9075             value |= cfg->ext_zvknha ?
9076                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9077             value |= cfg->ext_zvknhb ?
9078                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9079             value |= cfg->ext_zvksed ?
9080                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9081             value |= cfg->ext_zvksh ?
9082                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9083             value |= cfg->ext_zvkt ?
9084                      RISCV_HWPROBE_EXT_ZVKT : 0;
9085             value |= cfg->ext_zfh ?
9086                      RISCV_HWPROBE_EXT_ZFH : 0;
9087             value |= cfg->ext_zfhmin ?
9088                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9089             value |= cfg->ext_zihintntl ?
9090                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9091             value |= cfg->ext_zvfh ?
9092                      RISCV_HWPROBE_EXT_ZVFH : 0;
9093             value |= cfg->ext_zvfhmin ?
9094                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9095             value |= cfg->ext_zfa ?
9096                      RISCV_HWPROBE_EXT_ZFA : 0;
9097             value |= cfg->ext_ztso ?
9098                      RISCV_HWPROBE_EXT_ZTSO : 0;
9099             value |= cfg->ext_zacas ?
9100                      RISCV_HWPROBE_EXT_ZACAS : 0;
9101             value |= cfg->ext_zicond ?
9102                      RISCV_HWPROBE_EXT_ZICOND : 0;
9103             __put_user(value, &pair->value);
9104             break;
9105         case RISCV_HWPROBE_KEY_CPUPERF_0:
9106             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9107             break;
9108         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9109             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9110             __put_user(value, &pair->value);
9111             break;
9112         default:
9113             __put_user(-1, &pair->key);
9114             break;
9115         }
9116     }
9117 }
9118 
9119 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9120 {
9121     int ret, i, tmp;
9122     size_t host_mask_size, target_mask_size;
9123     unsigned long *host_mask;
9124 
9125     /*
9126      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9127      * arg3 contains the cpu count.
9128      */
9129     tmp = (8 * sizeof(abi_ulong));
9130     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9131     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9132                      ~(sizeof(*host_mask) - 1);
9133 
9134     host_mask = alloca(host_mask_size);
9135 
9136     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9137                                   arg4, target_mask_size);
9138     if (ret != 0) {
9139         return ret;
9140     }
9141 
9142     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9143         if (host_mask[i] != 0) {
9144             return 0;
9145         }
9146     }
9147     return -TARGET_EINVAL;
9148 }
9149 
9150 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9151                                  abi_long arg2, abi_long arg3,
9152                                  abi_long arg4, abi_long arg5)
9153 {
9154     int ret;
9155     struct riscv_hwprobe *host_pairs;
9156 
9157     /* flags must be 0 */
9158     if (arg5 != 0) {
9159         return -TARGET_EINVAL;
9160     }
9161 
9162     /* check cpu_set */
9163     if (arg3 != 0) {
9164         ret = cpu_set_valid(arg3, arg4);
9165         if (ret != 0) {
9166             return ret;
9167         }
9168     } else if (arg4 != 0) {
9169         return -TARGET_EINVAL;
9170     }
9171 
9172     /* no pairs */
9173     if (arg2 == 0) {
9174         return 0;
9175     }
9176 
9177     host_pairs = lock_user(VERIFY_WRITE, arg1,
9178                            sizeof(*host_pairs) * (size_t)arg2, 0);
9179     if (host_pairs == NULL) {
9180         return -TARGET_EFAULT;
9181     }
9182     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9183     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9184     return 0;
9185 }
9186 #endif /* TARGET_NR_riscv_hwprobe */
9187 
9188 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9189 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9190 #endif
9191 
9192 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9193 #define __NR_sys_open_tree __NR_open_tree
9194 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9195           unsigned int, __flags)
9196 #endif
9197 
9198 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9199 #define __NR_sys_move_mount __NR_move_mount
9200 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9201            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9202 #endif
9203 
9204 /* This is an internal helper for do_syscall so that it is easier
9205  * to have a single return point, so that actions, such as logging
9206  * of syscall results, can be performed.
9207  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9208  */
9209 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9210                             abi_long arg2, abi_long arg3, abi_long arg4,
9211                             abi_long arg5, abi_long arg6, abi_long arg7,
9212                             abi_long arg8)
9213 {
9214     CPUState *cpu = env_cpu(cpu_env);
9215     abi_long ret;
9216 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9217     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9218     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9219     || defined(TARGET_NR_statx)
9220     struct stat st;
9221 #endif
9222 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9223     || defined(TARGET_NR_fstatfs)
9224     struct statfs stfs;
9225 #endif
9226     void *p;
9227 
9228     switch(num) {
9229     case TARGET_NR_exit:
9230         /* In old applications this may be used to implement _exit(2).
9231            However in threaded applications it is used for thread termination,
9232            and _exit_group is used for application termination.
9233            Do thread termination if we have more then one thread.  */
9234 
9235         if (block_signals()) {
9236             return -QEMU_ERESTARTSYS;
9237         }
9238 
9239         pthread_mutex_lock(&clone_lock);
9240 
9241         if (CPU_NEXT(first_cpu)) {
9242             TaskState *ts = get_task_state(cpu);
9243 
9244             if (ts->child_tidptr) {
9245                 put_user_u32(0, ts->child_tidptr);
9246                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9247                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9248             }
9249 
9250             object_unparent(OBJECT(cpu));
9251             object_unref(OBJECT(cpu));
9252             /*
9253              * At this point the CPU should be unrealized and removed
9254              * from cpu lists. We can clean-up the rest of the thread
9255              * data without the lock held.
9256              */
9257 
9258             pthread_mutex_unlock(&clone_lock);
9259 
9260             thread_cpu = NULL;
9261             g_free(ts);
9262             rcu_unregister_thread();
9263             pthread_exit(NULL);
9264         }
9265 
9266         pthread_mutex_unlock(&clone_lock);
9267         preexit_cleanup(cpu_env, arg1);
9268         _exit(arg1);
9269         return 0; /* avoid warning */
9270     case TARGET_NR_read:
9271         if (arg2 == 0 && arg3 == 0) {
9272             return get_errno(safe_read(arg1, 0, 0));
9273         } else {
9274             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9275                 return -TARGET_EFAULT;
9276             ret = get_errno(safe_read(arg1, p, arg3));
9277             if (ret >= 0 &&
9278                 fd_trans_host_to_target_data(arg1)) {
9279                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9280             }
9281             unlock_user(p, arg2, ret);
9282         }
9283         return ret;
9284     case TARGET_NR_write:
9285         if (arg2 == 0 && arg3 == 0) {
9286             return get_errno(safe_write(arg1, 0, 0));
9287         }
9288         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9289             return -TARGET_EFAULT;
9290         if (fd_trans_target_to_host_data(arg1)) {
9291             void *copy = g_malloc(arg3);
9292             memcpy(copy, p, arg3);
9293             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9294             if (ret >= 0) {
9295                 ret = get_errno(safe_write(arg1, copy, ret));
9296             }
9297             g_free(copy);
9298         } else {
9299             ret = get_errno(safe_write(arg1, p, arg3));
9300         }
9301         unlock_user(p, arg2, 0);
9302         return ret;
9303 
9304 #ifdef TARGET_NR_open
9305     case TARGET_NR_open:
9306         if (!(p = lock_user_string(arg1)))
9307             return -TARGET_EFAULT;
9308         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9309                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9310                                   arg3, true));
9311         fd_trans_unregister(ret);
9312         unlock_user(p, arg1, 0);
9313         return ret;
9314 #endif
9315     case TARGET_NR_openat:
9316         if (!(p = lock_user_string(arg2)))
9317             return -TARGET_EFAULT;
9318         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9319                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9320                                   arg4, true));
9321         fd_trans_unregister(ret);
9322         unlock_user(p, arg2, 0);
9323         return ret;
9324     case TARGET_NR_openat2:
9325         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9326         return ret;
9327 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9328     case TARGET_NR_name_to_handle_at:
9329         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9330         return ret;
9331 #endif
9332 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9333     case TARGET_NR_open_by_handle_at:
9334         ret = do_open_by_handle_at(arg1, arg2, arg3);
9335         fd_trans_unregister(ret);
9336         return ret;
9337 #endif
9338 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9339     case TARGET_NR_pidfd_open:
9340         return get_errno(pidfd_open(arg1, arg2));
9341 #endif
9342 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9343     case TARGET_NR_pidfd_send_signal:
9344         {
9345             siginfo_t uinfo, *puinfo;
9346 
9347             if (arg3) {
9348                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9349                 if (!p) {
9350                     return -TARGET_EFAULT;
9351                  }
9352                  target_to_host_siginfo(&uinfo, p);
9353                  unlock_user(p, arg3, 0);
9354                  puinfo = &uinfo;
9355             } else {
9356                  puinfo = NULL;
9357             }
9358             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9359                                               puinfo, arg4));
9360         }
9361         return ret;
9362 #endif
9363 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9364     case TARGET_NR_pidfd_getfd:
9365         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9366 #endif
9367     case TARGET_NR_close:
9368         fd_trans_unregister(arg1);
9369         return get_errno(close(arg1));
9370 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9371     case TARGET_NR_close_range:
9372         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9373         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9374             abi_long fd, maxfd;
9375             maxfd = MIN(arg2, target_fd_max);
9376             for (fd = arg1; fd < maxfd; fd++) {
9377                 fd_trans_unregister(fd);
9378             }
9379         }
9380         return ret;
9381 #endif
9382 
9383     case TARGET_NR_brk:
9384         return do_brk(arg1);
9385 #ifdef TARGET_NR_fork
9386     case TARGET_NR_fork:
9387         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9388 #endif
9389 #ifdef TARGET_NR_waitpid
9390     case TARGET_NR_waitpid:
9391         {
9392             int status;
9393             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9394             if (!is_error(ret) && arg2 && ret
9395                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9396                 return -TARGET_EFAULT;
9397         }
9398         return ret;
9399 #endif
9400 #ifdef TARGET_NR_waitid
9401     case TARGET_NR_waitid:
9402         {
9403             struct rusage ru;
9404             siginfo_t info;
9405 
9406             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9407                                         arg4, (arg5 ? &ru : NULL)));
9408             if (!is_error(ret)) {
9409                 if (arg3) {
9410                     p = lock_user(VERIFY_WRITE, arg3,
9411                                   sizeof(target_siginfo_t), 0);
9412                     if (!p) {
9413                         return -TARGET_EFAULT;
9414                     }
9415                     host_to_target_siginfo(p, &info);
9416                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9417                 }
9418                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9419                     return -TARGET_EFAULT;
9420                 }
9421             }
9422         }
9423         return ret;
9424 #endif
9425 #ifdef TARGET_NR_creat /* not on alpha */
9426     case TARGET_NR_creat:
9427         if (!(p = lock_user_string(arg1)))
9428             return -TARGET_EFAULT;
9429         ret = get_errno(creat(p, arg2));
9430         fd_trans_unregister(ret);
9431         unlock_user(p, arg1, 0);
9432         return ret;
9433 #endif
9434 #ifdef TARGET_NR_link
9435     case TARGET_NR_link:
9436         {
9437             void * p2;
9438             p = lock_user_string(arg1);
9439             p2 = lock_user_string(arg2);
9440             if (!p || !p2)
9441                 ret = -TARGET_EFAULT;
9442             else
9443                 ret = get_errno(link(p, p2));
9444             unlock_user(p2, arg2, 0);
9445             unlock_user(p, arg1, 0);
9446         }
9447         return ret;
9448 #endif
9449 #if defined(TARGET_NR_linkat)
9450     case TARGET_NR_linkat:
9451         {
9452             void * p2 = NULL;
9453             if (!arg2 || !arg4)
9454                 return -TARGET_EFAULT;
9455             p  = lock_user_string(arg2);
9456             p2 = lock_user_string(arg4);
9457             if (!p || !p2)
9458                 ret = -TARGET_EFAULT;
9459             else
9460                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9461             unlock_user(p, arg2, 0);
9462             unlock_user(p2, arg4, 0);
9463         }
9464         return ret;
9465 #endif
9466 #ifdef TARGET_NR_unlink
9467     case TARGET_NR_unlink:
9468         if (!(p = lock_user_string(arg1)))
9469             return -TARGET_EFAULT;
9470         ret = get_errno(unlink(p));
9471         unlock_user(p, arg1, 0);
9472         return ret;
9473 #endif
9474 #if defined(TARGET_NR_unlinkat)
9475     case TARGET_NR_unlinkat:
9476         if (!(p = lock_user_string(arg2)))
9477             return -TARGET_EFAULT;
9478         ret = get_errno(unlinkat(arg1, p, arg3));
9479         unlock_user(p, arg2, 0);
9480         return ret;
9481 #endif
9482     case TARGET_NR_execveat:
9483         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9484     case TARGET_NR_execve:
9485         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9486     case TARGET_NR_chdir:
9487         if (!(p = lock_user_string(arg1)))
9488             return -TARGET_EFAULT;
9489         ret = get_errno(chdir(p));
9490         unlock_user(p, arg1, 0);
9491         return ret;
9492 #ifdef TARGET_NR_time
9493     case TARGET_NR_time:
9494         {
9495             time_t host_time;
9496             ret = get_errno(time(&host_time));
9497             if (!is_error(ret)
9498                 && arg1
9499                 && put_user_sal(host_time, arg1))
9500                 return -TARGET_EFAULT;
9501         }
9502         return ret;
9503 #endif
9504 #ifdef TARGET_NR_mknod
9505     case TARGET_NR_mknod:
9506         if (!(p = lock_user_string(arg1)))
9507             return -TARGET_EFAULT;
9508         ret = get_errno(mknod(p, arg2, arg3));
9509         unlock_user(p, arg1, 0);
9510         return ret;
9511 #endif
9512 #if defined(TARGET_NR_mknodat)
9513     case TARGET_NR_mknodat:
9514         if (!(p = lock_user_string(arg2)))
9515             return -TARGET_EFAULT;
9516         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9517         unlock_user(p, arg2, 0);
9518         return ret;
9519 #endif
9520 #ifdef TARGET_NR_chmod
9521     case TARGET_NR_chmod:
9522         if (!(p = lock_user_string(arg1)))
9523             return -TARGET_EFAULT;
9524         ret = get_errno(chmod(p, arg2));
9525         unlock_user(p, arg1, 0);
9526         return ret;
9527 #endif
9528 #ifdef TARGET_NR_lseek
9529     case TARGET_NR_lseek:
9530         return get_errno(lseek(arg1, arg2, arg3));
9531 #endif
9532 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9533     /* Alpha specific */
9534     case TARGET_NR_getxpid:
9535         cpu_env->ir[IR_A4] = getppid();
9536         return get_errno(getpid());
9537 #endif
9538 #ifdef TARGET_NR_getpid
9539     case TARGET_NR_getpid:
9540         return get_errno(getpid());
9541 #endif
9542     case TARGET_NR_mount:
9543         {
9544             /* need to look at the data field */
9545             void *p2, *p3;
9546 
9547             if (arg1) {
9548                 p = lock_user_string(arg1);
9549                 if (!p) {
9550                     return -TARGET_EFAULT;
9551                 }
9552             } else {
9553                 p = NULL;
9554             }
9555 
9556             p2 = lock_user_string(arg2);
9557             if (!p2) {
9558                 if (arg1) {
9559                     unlock_user(p, arg1, 0);
9560                 }
9561                 return -TARGET_EFAULT;
9562             }
9563 
9564             if (arg3) {
9565                 p3 = lock_user_string(arg3);
9566                 if (!p3) {
9567                     if (arg1) {
9568                         unlock_user(p, arg1, 0);
9569                     }
9570                     unlock_user(p2, arg2, 0);
9571                     return -TARGET_EFAULT;
9572                 }
9573             } else {
9574                 p3 = NULL;
9575             }
9576 
9577             /* FIXME - arg5 should be locked, but it isn't clear how to
9578              * do that since it's not guaranteed to be a NULL-terminated
9579              * string.
9580              */
9581             if (!arg5) {
9582                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9583             } else {
9584                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9585             }
9586             ret = get_errno(ret);
9587 
9588             if (arg1) {
9589                 unlock_user(p, arg1, 0);
9590             }
9591             unlock_user(p2, arg2, 0);
9592             if (arg3) {
9593                 unlock_user(p3, arg3, 0);
9594             }
9595         }
9596         return ret;
9597 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9598 #if defined(TARGET_NR_umount)
9599     case TARGET_NR_umount:
9600 #endif
9601 #if defined(TARGET_NR_oldumount)
9602     case TARGET_NR_oldumount:
9603 #endif
9604         if (!(p = lock_user_string(arg1)))
9605             return -TARGET_EFAULT;
9606         ret = get_errno(umount(p));
9607         unlock_user(p, arg1, 0);
9608         return ret;
9609 #endif
9610 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9611     case TARGET_NR_move_mount:
9612         {
9613             void *p2, *p4;
9614 
9615             if (!arg2 || !arg4) {
9616                 return -TARGET_EFAULT;
9617             }
9618 
9619             p2 = lock_user_string(arg2);
9620             if (!p2) {
9621                 return -TARGET_EFAULT;
9622             }
9623 
9624             p4 = lock_user_string(arg4);
9625             if (!p4) {
9626                 unlock_user(p2, arg2, 0);
9627                 return -TARGET_EFAULT;
9628             }
9629             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9630 
9631             unlock_user(p2, arg2, 0);
9632             unlock_user(p4, arg4, 0);
9633 
9634             return ret;
9635         }
9636 #endif
9637 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9638     case TARGET_NR_open_tree:
9639         {
9640             void *p2;
9641             int host_flags;
9642 
9643             if (!arg2) {
9644                 return -TARGET_EFAULT;
9645             }
9646 
9647             p2 = lock_user_string(arg2);
9648             if (!p2) {
9649                 return -TARGET_EFAULT;
9650             }
9651 
9652             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9653             if (arg3 & TARGET_O_CLOEXEC) {
9654                 host_flags |= O_CLOEXEC;
9655             }
9656 
9657             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9658 
9659             unlock_user(p2, arg2, 0);
9660 
9661             return ret;
9662         }
9663 #endif
9664 #ifdef TARGET_NR_stime /* not on alpha */
9665     case TARGET_NR_stime:
9666         {
9667             struct timespec ts;
9668             ts.tv_nsec = 0;
9669             if (get_user_sal(ts.tv_sec, arg1)) {
9670                 return -TARGET_EFAULT;
9671             }
9672             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9673         }
9674 #endif
9675 #ifdef TARGET_NR_alarm /* not on alpha */
9676     case TARGET_NR_alarm:
9677         return alarm(arg1);
9678 #endif
9679 #ifdef TARGET_NR_pause /* not on alpha */
9680     case TARGET_NR_pause:
9681         if (!block_signals()) {
9682             sigsuspend(&get_task_state(cpu)->signal_mask);
9683         }
9684         return -TARGET_EINTR;
9685 #endif
9686 #ifdef TARGET_NR_utime
9687     case TARGET_NR_utime:
9688         {
9689             struct utimbuf tbuf, *host_tbuf;
9690             struct target_utimbuf *target_tbuf;
9691             if (arg2) {
9692                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9693                     return -TARGET_EFAULT;
9694                 tbuf.actime = tswapal(target_tbuf->actime);
9695                 tbuf.modtime = tswapal(target_tbuf->modtime);
9696                 unlock_user_struct(target_tbuf, arg2, 0);
9697                 host_tbuf = &tbuf;
9698             } else {
9699                 host_tbuf = NULL;
9700             }
9701             if (!(p = lock_user_string(arg1)))
9702                 return -TARGET_EFAULT;
9703             ret = get_errno(utime(p, host_tbuf));
9704             unlock_user(p, arg1, 0);
9705         }
9706         return ret;
9707 #endif
9708 #ifdef TARGET_NR_utimes
9709     case TARGET_NR_utimes:
9710         {
9711             struct timeval *tvp, tv[2];
9712             if (arg2) {
9713                 if (copy_from_user_timeval(&tv[0], arg2)
9714                     || copy_from_user_timeval(&tv[1],
9715                                               arg2 + sizeof(struct target_timeval)))
9716                     return -TARGET_EFAULT;
9717                 tvp = tv;
9718             } else {
9719                 tvp = NULL;
9720             }
9721             if (!(p = lock_user_string(arg1)))
9722                 return -TARGET_EFAULT;
9723             ret = get_errno(utimes(p, tvp));
9724             unlock_user(p, arg1, 0);
9725         }
9726         return ret;
9727 #endif
9728 #if defined(TARGET_NR_futimesat)
9729     case TARGET_NR_futimesat:
9730         {
9731             struct timeval *tvp, tv[2];
9732             if (arg3) {
9733                 if (copy_from_user_timeval(&tv[0], arg3)
9734                     || copy_from_user_timeval(&tv[1],
9735                                               arg3 + sizeof(struct target_timeval)))
9736                     return -TARGET_EFAULT;
9737                 tvp = tv;
9738             } else {
9739                 tvp = NULL;
9740             }
9741             if (!(p = lock_user_string(arg2))) {
9742                 return -TARGET_EFAULT;
9743             }
9744             ret = get_errno(futimesat(arg1, path(p), tvp));
9745             unlock_user(p, arg2, 0);
9746         }
9747         return ret;
9748 #endif
9749 #ifdef TARGET_NR_access
9750     case TARGET_NR_access:
9751         if (!(p = lock_user_string(arg1))) {
9752             return -TARGET_EFAULT;
9753         }
9754         ret = get_errno(access(path(p), arg2));
9755         unlock_user(p, arg1, 0);
9756         return ret;
9757 #endif
9758 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9759     case TARGET_NR_faccessat:
9760         if (!(p = lock_user_string(arg2))) {
9761             return -TARGET_EFAULT;
9762         }
9763         ret = get_errno(faccessat(arg1, p, arg3, 0));
9764         unlock_user(p, arg2, 0);
9765         return ret;
9766 #endif
9767 #if defined(TARGET_NR_faccessat2)
9768     case TARGET_NR_faccessat2:
9769         if (!(p = lock_user_string(arg2))) {
9770             return -TARGET_EFAULT;
9771         }
9772         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9773         unlock_user(p, arg2, 0);
9774         return ret;
9775 #endif
9776 #ifdef TARGET_NR_nice /* not on alpha */
9777     case TARGET_NR_nice:
9778         return get_errno(nice(arg1));
9779 #endif
9780     case TARGET_NR_sync:
9781         sync();
9782         return 0;
9783 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9784     case TARGET_NR_syncfs:
9785         return get_errno(syncfs(arg1));
9786 #endif
9787     case TARGET_NR_kill:
9788         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9789 #ifdef TARGET_NR_rename
9790     case TARGET_NR_rename:
9791         {
9792             void *p2;
9793             p = lock_user_string(arg1);
9794             p2 = lock_user_string(arg2);
9795             if (!p || !p2)
9796                 ret = -TARGET_EFAULT;
9797             else
9798                 ret = get_errno(rename(p, p2));
9799             unlock_user(p2, arg2, 0);
9800             unlock_user(p, arg1, 0);
9801         }
9802         return ret;
9803 #endif
9804 #if defined(TARGET_NR_renameat)
9805     case TARGET_NR_renameat:
9806         {
9807             void *p2;
9808             p  = lock_user_string(arg2);
9809             p2 = lock_user_string(arg4);
9810             if (!p || !p2)
9811                 ret = -TARGET_EFAULT;
9812             else
9813                 ret = get_errno(renameat(arg1, p, arg3, p2));
9814             unlock_user(p2, arg4, 0);
9815             unlock_user(p, arg2, 0);
9816         }
9817         return ret;
9818 #endif
9819 #if defined(TARGET_NR_renameat2)
9820     case TARGET_NR_renameat2:
9821         {
9822             void *p2;
9823             p  = lock_user_string(arg2);
9824             p2 = lock_user_string(arg4);
9825             if (!p || !p2) {
9826                 ret = -TARGET_EFAULT;
9827             } else {
9828                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9829             }
9830             unlock_user(p2, arg4, 0);
9831             unlock_user(p, arg2, 0);
9832         }
9833         return ret;
9834 #endif
9835 #ifdef TARGET_NR_mkdir
9836     case TARGET_NR_mkdir:
9837         if (!(p = lock_user_string(arg1)))
9838             return -TARGET_EFAULT;
9839         ret = get_errno(mkdir(p, arg2));
9840         unlock_user(p, arg1, 0);
9841         return ret;
9842 #endif
9843 #if defined(TARGET_NR_mkdirat)
9844     case TARGET_NR_mkdirat:
9845         if (!(p = lock_user_string(arg2)))
9846             return -TARGET_EFAULT;
9847         ret = get_errno(mkdirat(arg1, p, arg3));
9848         unlock_user(p, arg2, 0);
9849         return ret;
9850 #endif
9851 #ifdef TARGET_NR_rmdir
9852     case TARGET_NR_rmdir:
9853         if (!(p = lock_user_string(arg1)))
9854             return -TARGET_EFAULT;
9855         ret = get_errno(rmdir(p));
9856         unlock_user(p, arg1, 0);
9857         return ret;
9858 #endif
9859     case TARGET_NR_dup:
9860         ret = get_errno(dup(arg1));
9861         if (ret >= 0) {
9862             fd_trans_dup(arg1, ret);
9863         }
9864         return ret;
9865 #ifdef TARGET_NR_pipe
9866     case TARGET_NR_pipe:
9867         return do_pipe(cpu_env, arg1, 0, 0);
9868 #endif
9869 #ifdef TARGET_NR_pipe2
9870     case TARGET_NR_pipe2:
9871         return do_pipe(cpu_env, arg1,
9872                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9873 #endif
9874     case TARGET_NR_times:
9875         {
9876             struct target_tms *tmsp;
9877             struct tms tms;
9878             ret = get_errno(times(&tms));
9879             if (arg1) {
9880                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9881                 if (!tmsp)
9882                     return -TARGET_EFAULT;
9883                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9884                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9885                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9886                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9887             }
9888             if (!is_error(ret))
9889                 ret = host_to_target_clock_t(ret);
9890         }
9891         return ret;
9892     case TARGET_NR_acct:
9893         if (arg1 == 0) {
9894             ret = get_errno(acct(NULL));
9895         } else {
9896             if (!(p = lock_user_string(arg1))) {
9897                 return -TARGET_EFAULT;
9898             }
9899             ret = get_errno(acct(path(p)));
9900             unlock_user(p, arg1, 0);
9901         }
9902         return ret;
9903 #ifdef TARGET_NR_umount2
9904     case TARGET_NR_umount2:
9905         if (!(p = lock_user_string(arg1)))
9906             return -TARGET_EFAULT;
9907         ret = get_errno(umount2(p, arg2));
9908         unlock_user(p, arg1, 0);
9909         return ret;
9910 #endif
9911     case TARGET_NR_ioctl:
9912         return do_ioctl(arg1, arg2, arg3);
9913 #ifdef TARGET_NR_fcntl
9914     case TARGET_NR_fcntl:
9915         return do_fcntl(arg1, arg2, arg3);
9916 #endif
9917     case TARGET_NR_setpgid:
9918         return get_errno(setpgid(arg1, arg2));
9919     case TARGET_NR_umask:
9920         return get_errno(umask(arg1));
9921     case TARGET_NR_chroot:
9922         if (!(p = lock_user_string(arg1)))
9923             return -TARGET_EFAULT;
9924         ret = get_errno(chroot(p));
9925         unlock_user(p, arg1, 0);
9926         return ret;
9927 #ifdef TARGET_NR_dup2
9928     case TARGET_NR_dup2:
9929         ret = get_errno(dup2(arg1, arg2));
9930         if (ret >= 0) {
9931             fd_trans_dup(arg1, arg2);
9932         }
9933         return ret;
9934 #endif
9935 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9936     case TARGET_NR_dup3:
9937     {
9938         int host_flags;
9939 
9940         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9941             return -EINVAL;
9942         }
9943         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9944         ret = get_errno(dup3(arg1, arg2, host_flags));
9945         if (ret >= 0) {
9946             fd_trans_dup(arg1, arg2);
9947         }
9948         return ret;
9949     }
9950 #endif
9951 #ifdef TARGET_NR_getppid /* not on alpha */
9952     case TARGET_NR_getppid:
9953         return get_errno(getppid());
9954 #endif
9955 #ifdef TARGET_NR_getpgrp
9956     case TARGET_NR_getpgrp:
9957         return get_errno(getpgrp());
9958 #endif
9959     case TARGET_NR_setsid:
9960         return get_errno(setsid());
9961 #ifdef TARGET_NR_sigaction
9962     case TARGET_NR_sigaction:
9963         {
9964 #if defined(TARGET_MIPS)
9965 	    struct target_sigaction act, oact, *pact, *old_act;
9966 
9967 	    if (arg2) {
9968                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9969                     return -TARGET_EFAULT;
9970 		act._sa_handler = old_act->_sa_handler;
9971 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9972 		act.sa_flags = old_act->sa_flags;
9973 		unlock_user_struct(old_act, arg2, 0);
9974 		pact = &act;
9975 	    } else {
9976 		pact = NULL;
9977 	    }
9978 
9979         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9980 
9981 	    if (!is_error(ret) && arg3) {
9982                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9983                     return -TARGET_EFAULT;
9984 		old_act->_sa_handler = oact._sa_handler;
9985 		old_act->sa_flags = oact.sa_flags;
9986 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9987 		old_act->sa_mask.sig[1] = 0;
9988 		old_act->sa_mask.sig[2] = 0;
9989 		old_act->sa_mask.sig[3] = 0;
9990 		unlock_user_struct(old_act, arg3, 1);
9991 	    }
9992 #else
9993             struct target_old_sigaction *old_act;
9994             struct target_sigaction act, oact, *pact;
9995             if (arg2) {
9996                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9997                     return -TARGET_EFAULT;
9998                 act._sa_handler = old_act->_sa_handler;
9999                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10000                 act.sa_flags = old_act->sa_flags;
10001 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10002                 act.sa_restorer = old_act->sa_restorer;
10003 #endif
10004                 unlock_user_struct(old_act, arg2, 0);
10005                 pact = &act;
10006             } else {
10007                 pact = NULL;
10008             }
10009             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10010             if (!is_error(ret) && arg3) {
10011                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10012                     return -TARGET_EFAULT;
10013                 old_act->_sa_handler = oact._sa_handler;
10014                 old_act->sa_mask = oact.sa_mask.sig[0];
10015                 old_act->sa_flags = oact.sa_flags;
10016 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10017                 old_act->sa_restorer = oact.sa_restorer;
10018 #endif
10019                 unlock_user_struct(old_act, arg3, 1);
10020             }
10021 #endif
10022         }
10023         return ret;
10024 #endif
10025     case TARGET_NR_rt_sigaction:
10026         {
10027             /*
10028              * For Alpha and SPARC this is a 5 argument syscall, with
10029              * a 'restorer' parameter which must be copied into the
10030              * sa_restorer field of the sigaction struct.
10031              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10032              * and arg5 is the sigsetsize.
10033              */
10034 #if defined(TARGET_ALPHA)
10035             target_ulong sigsetsize = arg4;
10036             target_ulong restorer = arg5;
10037 #elif defined(TARGET_SPARC)
10038             target_ulong restorer = arg4;
10039             target_ulong sigsetsize = arg5;
10040 #else
10041             target_ulong sigsetsize = arg4;
10042             target_ulong restorer = 0;
10043 #endif
10044             struct target_sigaction *act = NULL;
10045             struct target_sigaction *oact = NULL;
10046 
10047             if (sigsetsize != sizeof(target_sigset_t)) {
10048                 return -TARGET_EINVAL;
10049             }
10050             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10051                 return -TARGET_EFAULT;
10052             }
10053             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10054                 ret = -TARGET_EFAULT;
10055             } else {
10056                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10057                 if (oact) {
10058                     unlock_user_struct(oact, arg3, 1);
10059                 }
10060             }
10061             if (act) {
10062                 unlock_user_struct(act, arg2, 0);
10063             }
10064         }
10065         return ret;
10066 #ifdef TARGET_NR_sgetmask /* not on alpha */
10067     case TARGET_NR_sgetmask:
10068         {
10069             sigset_t cur_set;
10070             abi_ulong target_set;
10071             ret = do_sigprocmask(0, NULL, &cur_set);
10072             if (!ret) {
10073                 host_to_target_old_sigset(&target_set, &cur_set);
10074                 ret = target_set;
10075             }
10076         }
10077         return ret;
10078 #endif
10079 #ifdef TARGET_NR_ssetmask /* not on alpha */
10080     case TARGET_NR_ssetmask:
10081         {
10082             sigset_t set, oset;
10083             abi_ulong target_set = arg1;
10084             target_to_host_old_sigset(&set, &target_set);
10085             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10086             if (!ret) {
10087                 host_to_target_old_sigset(&target_set, &oset);
10088                 ret = target_set;
10089             }
10090         }
10091         return ret;
10092 #endif
10093 #ifdef TARGET_NR_sigprocmask
10094     case TARGET_NR_sigprocmask:
10095         {
10096 #if defined(TARGET_ALPHA)
10097             sigset_t set, oldset;
10098             abi_ulong mask;
10099             int how;
10100 
10101             switch (arg1) {
10102             case TARGET_SIG_BLOCK:
10103                 how = SIG_BLOCK;
10104                 break;
10105             case TARGET_SIG_UNBLOCK:
10106                 how = SIG_UNBLOCK;
10107                 break;
10108             case TARGET_SIG_SETMASK:
10109                 how = SIG_SETMASK;
10110                 break;
10111             default:
10112                 return -TARGET_EINVAL;
10113             }
10114             mask = arg2;
10115             target_to_host_old_sigset(&set, &mask);
10116 
10117             ret = do_sigprocmask(how, &set, &oldset);
10118             if (!is_error(ret)) {
10119                 host_to_target_old_sigset(&mask, &oldset);
10120                 ret = mask;
10121                 cpu_env->ir[IR_V0] = 0; /* force no error */
10122             }
10123 #else
10124             sigset_t set, oldset, *set_ptr;
10125             int how;
10126 
10127             if (arg2) {
10128                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10129                 if (!p) {
10130                     return -TARGET_EFAULT;
10131                 }
10132                 target_to_host_old_sigset(&set, p);
10133                 unlock_user(p, arg2, 0);
10134                 set_ptr = &set;
10135                 switch (arg1) {
10136                 case TARGET_SIG_BLOCK:
10137                     how = SIG_BLOCK;
10138                     break;
10139                 case TARGET_SIG_UNBLOCK:
10140                     how = SIG_UNBLOCK;
10141                     break;
10142                 case TARGET_SIG_SETMASK:
10143                     how = SIG_SETMASK;
10144                     break;
10145                 default:
10146                     return -TARGET_EINVAL;
10147                 }
10148             } else {
10149                 how = 0;
10150                 set_ptr = NULL;
10151             }
10152             ret = do_sigprocmask(how, set_ptr, &oldset);
10153             if (!is_error(ret) && arg3) {
10154                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10155                     return -TARGET_EFAULT;
10156                 host_to_target_old_sigset(p, &oldset);
10157                 unlock_user(p, arg3, sizeof(target_sigset_t));
10158             }
10159 #endif
10160         }
10161         return ret;
10162 #endif
10163     case TARGET_NR_rt_sigprocmask:
10164         {
10165             int how = arg1;
10166             sigset_t set, oldset, *set_ptr;
10167 
10168             if (arg4 != sizeof(target_sigset_t)) {
10169                 return -TARGET_EINVAL;
10170             }
10171 
10172             if (arg2) {
10173                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10174                 if (!p) {
10175                     return -TARGET_EFAULT;
10176                 }
10177                 target_to_host_sigset(&set, p);
10178                 unlock_user(p, arg2, 0);
10179                 set_ptr = &set;
10180                 switch(how) {
10181                 case TARGET_SIG_BLOCK:
10182                     how = SIG_BLOCK;
10183                     break;
10184                 case TARGET_SIG_UNBLOCK:
10185                     how = SIG_UNBLOCK;
10186                     break;
10187                 case TARGET_SIG_SETMASK:
10188                     how = SIG_SETMASK;
10189                     break;
10190                 default:
10191                     return -TARGET_EINVAL;
10192                 }
10193             } else {
10194                 how = 0;
10195                 set_ptr = NULL;
10196             }
10197             ret = do_sigprocmask(how, set_ptr, &oldset);
10198             if (!is_error(ret) && arg3) {
10199                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10200                     return -TARGET_EFAULT;
10201                 host_to_target_sigset(p, &oldset);
10202                 unlock_user(p, arg3, sizeof(target_sigset_t));
10203             }
10204         }
10205         return ret;
10206 #ifdef TARGET_NR_sigpending
10207     case TARGET_NR_sigpending:
10208         {
10209             sigset_t set;
10210             ret = get_errno(sigpending(&set));
10211             if (!is_error(ret)) {
10212                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10213                     return -TARGET_EFAULT;
10214                 host_to_target_old_sigset(p, &set);
10215                 unlock_user(p, arg1, sizeof(target_sigset_t));
10216             }
10217         }
10218         return ret;
10219 #endif
10220     case TARGET_NR_rt_sigpending:
10221         {
10222             sigset_t set;
10223 
10224             /* Yes, this check is >, not != like most. We follow the kernel's
10225              * logic and it does it like this because it implements
10226              * NR_sigpending through the same code path, and in that case
10227              * the old_sigset_t is smaller in size.
10228              */
10229             if (arg2 > sizeof(target_sigset_t)) {
10230                 return -TARGET_EINVAL;
10231             }
10232 
10233             ret = get_errno(sigpending(&set));
10234             if (!is_error(ret)) {
10235                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10236                     return -TARGET_EFAULT;
10237                 host_to_target_sigset(p, &set);
10238                 unlock_user(p, arg1, sizeof(target_sigset_t));
10239             }
10240         }
10241         return ret;
10242 #ifdef TARGET_NR_sigsuspend
10243     case TARGET_NR_sigsuspend:
10244         {
10245             sigset_t *set;
10246 
10247 #if defined(TARGET_ALPHA)
10248             TaskState *ts = get_task_state(cpu);
10249             /* target_to_host_old_sigset will bswap back */
10250             abi_ulong mask = tswapal(arg1);
10251             set = &ts->sigsuspend_mask;
10252             target_to_host_old_sigset(set, &mask);
10253 #else
10254             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10255             if (ret != 0) {
10256                 return ret;
10257             }
10258 #endif
10259             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10260             finish_sigsuspend_mask(ret);
10261         }
10262         return ret;
10263 #endif
10264     case TARGET_NR_rt_sigsuspend:
10265         {
10266             sigset_t *set;
10267 
10268             ret = process_sigsuspend_mask(&set, arg1, arg2);
10269             if (ret != 0) {
10270                 return ret;
10271             }
10272             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10273             finish_sigsuspend_mask(ret);
10274         }
10275         return ret;
10276 #ifdef TARGET_NR_rt_sigtimedwait
10277     case TARGET_NR_rt_sigtimedwait:
10278         {
10279             sigset_t set;
10280             struct timespec uts, *puts;
10281             siginfo_t uinfo;
10282 
10283             if (arg4 != sizeof(target_sigset_t)) {
10284                 return -TARGET_EINVAL;
10285             }
10286 
10287             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10288                 return -TARGET_EFAULT;
10289             target_to_host_sigset(&set, p);
10290             unlock_user(p, arg1, 0);
10291             if (arg3) {
10292                 puts = &uts;
10293                 if (target_to_host_timespec(puts, arg3)) {
10294                     return -TARGET_EFAULT;
10295                 }
10296             } else {
10297                 puts = NULL;
10298             }
10299             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10300                                                  SIGSET_T_SIZE));
10301             if (!is_error(ret)) {
10302                 if (arg2) {
10303                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10304                                   0);
10305                     if (!p) {
10306                         return -TARGET_EFAULT;
10307                     }
10308                     host_to_target_siginfo(p, &uinfo);
10309                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10310                 }
10311                 ret = host_to_target_signal(ret);
10312             }
10313         }
10314         return ret;
10315 #endif
10316 #ifdef TARGET_NR_rt_sigtimedwait_time64
10317     case TARGET_NR_rt_sigtimedwait_time64:
10318         {
10319             sigset_t set;
10320             struct timespec uts, *puts;
10321             siginfo_t uinfo;
10322 
10323             if (arg4 != sizeof(target_sigset_t)) {
10324                 return -TARGET_EINVAL;
10325             }
10326 
10327             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10328             if (!p) {
10329                 return -TARGET_EFAULT;
10330             }
10331             target_to_host_sigset(&set, p);
10332             unlock_user(p, arg1, 0);
10333             if (arg3) {
10334                 puts = &uts;
10335                 if (target_to_host_timespec64(puts, arg3)) {
10336                     return -TARGET_EFAULT;
10337                 }
10338             } else {
10339                 puts = NULL;
10340             }
10341             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10342                                                  SIGSET_T_SIZE));
10343             if (!is_error(ret)) {
10344                 if (arg2) {
10345                     p = lock_user(VERIFY_WRITE, arg2,
10346                                   sizeof(target_siginfo_t), 0);
10347                     if (!p) {
10348                         return -TARGET_EFAULT;
10349                     }
10350                     host_to_target_siginfo(p, &uinfo);
10351                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10352                 }
10353                 ret = host_to_target_signal(ret);
10354             }
10355         }
10356         return ret;
10357 #endif
10358     case TARGET_NR_rt_sigqueueinfo:
10359         {
10360             siginfo_t uinfo;
10361 
10362             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10363             if (!p) {
10364                 return -TARGET_EFAULT;
10365             }
10366             target_to_host_siginfo(&uinfo, p);
10367             unlock_user(p, arg3, 0);
10368             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10369         }
10370         return ret;
10371     case TARGET_NR_rt_tgsigqueueinfo:
10372         {
10373             siginfo_t uinfo;
10374 
10375             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10376             if (!p) {
10377                 return -TARGET_EFAULT;
10378             }
10379             target_to_host_siginfo(&uinfo, p);
10380             unlock_user(p, arg4, 0);
10381             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10382         }
10383         return ret;
10384 #ifdef TARGET_NR_sigreturn
10385     case TARGET_NR_sigreturn:
10386         if (block_signals()) {
10387             return -QEMU_ERESTARTSYS;
10388         }
10389         return do_sigreturn(cpu_env);
10390 #endif
10391     case TARGET_NR_rt_sigreturn:
10392         if (block_signals()) {
10393             return -QEMU_ERESTARTSYS;
10394         }
10395         return do_rt_sigreturn(cpu_env);
10396     case TARGET_NR_sethostname:
10397         if (!(p = lock_user_string(arg1)))
10398             return -TARGET_EFAULT;
10399         ret = get_errno(sethostname(p, arg2));
10400         unlock_user(p, arg1, 0);
10401         return ret;
10402 #ifdef TARGET_NR_setrlimit
10403     case TARGET_NR_setrlimit:
10404         {
10405             int resource = target_to_host_resource(arg1);
10406             struct target_rlimit *target_rlim;
10407             struct rlimit rlim;
10408             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10409                 return -TARGET_EFAULT;
10410             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10411             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10412             unlock_user_struct(target_rlim, arg2, 0);
10413             /*
10414              * If we just passed through resource limit settings for memory then
10415              * they would also apply to QEMU's own allocations, and QEMU will
10416              * crash or hang or die if its allocations fail. Ideally we would
10417              * track the guest allocations in QEMU and apply the limits ourselves.
10418              * For now, just tell the guest the call succeeded but don't actually
10419              * limit anything.
10420              */
10421             if (resource != RLIMIT_AS &&
10422                 resource != RLIMIT_DATA &&
10423                 resource != RLIMIT_STACK) {
10424                 return get_errno(setrlimit(resource, &rlim));
10425             } else {
10426                 return 0;
10427             }
10428         }
10429 #endif
10430 #ifdef TARGET_NR_getrlimit
10431     case TARGET_NR_getrlimit:
10432         {
10433             int resource = target_to_host_resource(arg1);
10434             struct target_rlimit *target_rlim;
10435             struct rlimit rlim;
10436 
10437             ret = get_errno(getrlimit(resource, &rlim));
10438             if (!is_error(ret)) {
10439                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10440                     return -TARGET_EFAULT;
10441                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10442                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10443                 unlock_user_struct(target_rlim, arg2, 1);
10444             }
10445         }
10446         return ret;
10447 #endif
10448     case TARGET_NR_getrusage:
10449         {
10450             struct rusage rusage;
10451             ret = get_errno(getrusage(arg1, &rusage));
10452             if (!is_error(ret)) {
10453                 ret = host_to_target_rusage(arg2, &rusage);
10454             }
10455         }
10456         return ret;
10457 #if defined(TARGET_NR_gettimeofday)
10458     case TARGET_NR_gettimeofday:
10459         {
10460             struct timeval tv;
10461             struct timezone tz;
10462 
10463             ret = get_errno(gettimeofday(&tv, &tz));
10464             if (!is_error(ret)) {
10465                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10466                     return -TARGET_EFAULT;
10467                 }
10468                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10469                     return -TARGET_EFAULT;
10470                 }
10471             }
10472         }
10473         return ret;
10474 #endif
10475 #if defined(TARGET_NR_settimeofday)
10476     case TARGET_NR_settimeofday:
10477         {
10478             struct timeval tv, *ptv = NULL;
10479             struct timezone tz, *ptz = NULL;
10480 
10481             if (arg1) {
10482                 if (copy_from_user_timeval(&tv, arg1)) {
10483                     return -TARGET_EFAULT;
10484                 }
10485                 ptv = &tv;
10486             }
10487 
10488             if (arg2) {
10489                 if (copy_from_user_timezone(&tz, arg2)) {
10490                     return -TARGET_EFAULT;
10491                 }
10492                 ptz = &tz;
10493             }
10494 
10495             return get_errno(settimeofday(ptv, ptz));
10496         }
10497 #endif
10498 #if defined(TARGET_NR_select)
10499     case TARGET_NR_select:
10500 #if defined(TARGET_WANT_NI_OLD_SELECT)
10501         /* some architectures used to have old_select here
10502          * but now ENOSYS it.
10503          */
10504         ret = -TARGET_ENOSYS;
10505 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10506         ret = do_old_select(arg1);
10507 #else
10508         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10509 #endif
10510         return ret;
10511 #endif
10512 #ifdef TARGET_NR_pselect6
10513     case TARGET_NR_pselect6:
10514         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10515 #endif
10516 #ifdef TARGET_NR_pselect6_time64
10517     case TARGET_NR_pselect6_time64:
10518         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10519 #endif
10520 #ifdef TARGET_NR_symlink
10521     case TARGET_NR_symlink:
10522         {
10523             void *p2;
10524             p = lock_user_string(arg1);
10525             p2 = lock_user_string(arg2);
10526             if (!p || !p2)
10527                 ret = -TARGET_EFAULT;
10528             else
10529                 ret = get_errno(symlink(p, p2));
10530             unlock_user(p2, arg2, 0);
10531             unlock_user(p, arg1, 0);
10532         }
10533         return ret;
10534 #endif
10535 #if defined(TARGET_NR_symlinkat)
10536     case TARGET_NR_symlinkat:
10537         {
10538             void *p2;
10539             p  = lock_user_string(arg1);
10540             p2 = lock_user_string(arg3);
10541             if (!p || !p2)
10542                 ret = -TARGET_EFAULT;
10543             else
10544                 ret = get_errno(symlinkat(p, arg2, p2));
10545             unlock_user(p2, arg3, 0);
10546             unlock_user(p, arg1, 0);
10547         }
10548         return ret;
10549 #endif
10550 #ifdef TARGET_NR_readlink
10551     case TARGET_NR_readlink:
10552         {
10553             void *p2;
10554             p = lock_user_string(arg1);
10555             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10556             ret = get_errno(do_guest_readlink(p, p2, arg3));
10557             unlock_user(p2, arg2, ret);
10558             unlock_user(p, arg1, 0);
10559         }
10560         return ret;
10561 #endif
10562 #if defined(TARGET_NR_readlinkat)
10563     case TARGET_NR_readlinkat:
10564         {
10565             void *p2;
10566             p  = lock_user_string(arg2);
10567             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10568             if (!p || !p2) {
10569                 ret = -TARGET_EFAULT;
10570             } else if (!arg4) {
10571                 /* Short circuit this for the magic exe check. */
10572                 ret = -TARGET_EINVAL;
10573             } else if (is_proc_myself((const char *)p, "exe")) {
10574                 /*
10575                  * Don't worry about sign mismatch as earlier mapping
10576                  * logic would have thrown a bad address error.
10577                  */
10578                 ret = MIN(strlen(exec_path), arg4);
10579                 /* We cannot NUL terminate the string. */
10580                 memcpy(p2, exec_path, ret);
10581             } else {
10582                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10583             }
10584             unlock_user(p2, arg3, ret);
10585             unlock_user(p, arg2, 0);
10586         }
10587         return ret;
10588 #endif
10589 #ifdef TARGET_NR_swapon
10590     case TARGET_NR_swapon:
10591         if (!(p = lock_user_string(arg1)))
10592             return -TARGET_EFAULT;
10593         ret = get_errno(swapon(p, arg2));
10594         unlock_user(p, arg1, 0);
10595         return ret;
10596 #endif
10597     case TARGET_NR_reboot:
10598         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10599            /* arg4 must be ignored in all other cases */
10600            p = lock_user_string(arg4);
10601            if (!p) {
10602                return -TARGET_EFAULT;
10603            }
10604            ret = get_errno(reboot(arg1, arg2, arg3, p));
10605            unlock_user(p, arg4, 0);
10606         } else {
10607            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10608         }
10609         return ret;
10610 #ifdef TARGET_NR_mmap
10611     case TARGET_NR_mmap:
10612 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10613         {
10614             abi_ulong *v;
10615             abi_ulong v1, v2, v3, v4, v5, v6;
10616             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10617                 return -TARGET_EFAULT;
10618             v1 = tswapal(v[0]);
10619             v2 = tswapal(v[1]);
10620             v3 = tswapal(v[2]);
10621             v4 = tswapal(v[3]);
10622             v5 = tswapal(v[4]);
10623             v6 = tswapal(v[5]);
10624             unlock_user(v, arg1, 0);
10625             return do_mmap(v1, v2, v3, v4, v5, v6);
10626         }
10627 #else
10628         /* mmap pointers are always untagged */
10629         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10630 #endif
10631 #endif
10632 #ifdef TARGET_NR_mmap2
10633     case TARGET_NR_mmap2:
10634 #ifndef MMAP_SHIFT
10635 #define MMAP_SHIFT 12
10636 #endif
10637         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10638                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10639 #endif
10640     case TARGET_NR_munmap:
10641         arg1 = cpu_untagged_addr(cpu, arg1);
10642         return get_errno(target_munmap(arg1, arg2));
10643     case TARGET_NR_mprotect:
10644         arg1 = cpu_untagged_addr(cpu, arg1);
10645         {
10646             TaskState *ts = get_task_state(cpu);
10647             /* Special hack to detect libc making the stack executable.  */
10648             if ((arg3 & PROT_GROWSDOWN)
10649                 && arg1 >= ts->info->stack_limit
10650                 && arg1 <= ts->info->start_stack) {
10651                 arg3 &= ~PROT_GROWSDOWN;
10652                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10653                 arg1 = ts->info->stack_limit;
10654             }
10655         }
10656         return get_errno(target_mprotect(arg1, arg2, arg3));
10657 #ifdef TARGET_NR_mremap
10658     case TARGET_NR_mremap:
10659         arg1 = cpu_untagged_addr(cpu, arg1);
10660         /* mremap new_addr (arg5) is always untagged */
10661         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10662 #endif
10663         /* ??? msync/mlock/munlock are broken for softmmu.  */
10664 #ifdef TARGET_NR_msync
10665     case TARGET_NR_msync:
10666         return get_errno(msync(g2h(cpu, arg1), arg2,
10667                                target_to_host_msync_arg(arg3)));
10668 #endif
10669 #ifdef TARGET_NR_mlock
10670     case TARGET_NR_mlock:
10671         return get_errno(mlock(g2h(cpu, arg1), arg2));
10672 #endif
10673 #ifdef TARGET_NR_munlock
10674     case TARGET_NR_munlock:
10675         return get_errno(munlock(g2h(cpu, arg1), arg2));
10676 #endif
10677 #ifdef TARGET_NR_mlockall
10678     case TARGET_NR_mlockall:
10679         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10680 #endif
10681 #ifdef TARGET_NR_munlockall
10682     case TARGET_NR_munlockall:
10683         return get_errno(munlockall());
10684 #endif
10685 #ifdef TARGET_NR_truncate
10686     case TARGET_NR_truncate:
10687         if (!(p = lock_user_string(arg1)))
10688             return -TARGET_EFAULT;
10689         ret = get_errno(truncate(p, arg2));
10690         unlock_user(p, arg1, 0);
10691         return ret;
10692 #endif
10693 #ifdef TARGET_NR_ftruncate
10694     case TARGET_NR_ftruncate:
10695         return get_errno(ftruncate(arg1, arg2));
10696 #endif
10697     case TARGET_NR_fchmod:
10698         return get_errno(fchmod(arg1, arg2));
10699 #if defined(TARGET_NR_fchmodat)
10700     case TARGET_NR_fchmodat:
10701         if (!(p = lock_user_string(arg2)))
10702             return -TARGET_EFAULT;
10703         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10704         unlock_user(p, arg2, 0);
10705         return ret;
10706 #endif
10707     case TARGET_NR_getpriority:
10708         /* Note that negative values are valid for getpriority, so we must
10709            differentiate based on errno settings.  */
10710         errno = 0;
10711         ret = getpriority(arg1, arg2);
10712         if (ret == -1 && errno != 0) {
10713             return -host_to_target_errno(errno);
10714         }
10715 #ifdef TARGET_ALPHA
10716         /* Return value is the unbiased priority.  Signal no error.  */
10717         cpu_env->ir[IR_V0] = 0;
10718 #else
10719         /* Return value is a biased priority to avoid negative numbers.  */
10720         ret = 20 - ret;
10721 #endif
10722         return ret;
10723     case TARGET_NR_setpriority:
10724         return get_errno(setpriority(arg1, arg2, arg3));
10725 #ifdef TARGET_NR_statfs
10726     case TARGET_NR_statfs:
10727         if (!(p = lock_user_string(arg1))) {
10728             return -TARGET_EFAULT;
10729         }
10730         ret = get_errno(statfs(path(p), &stfs));
10731         unlock_user(p, arg1, 0);
10732     convert_statfs:
10733         if (!is_error(ret)) {
10734             struct target_statfs *target_stfs;
10735 
10736             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10737                 return -TARGET_EFAULT;
10738             __put_user(stfs.f_type, &target_stfs->f_type);
10739             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10740             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10741             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10742             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10743             __put_user(stfs.f_files, &target_stfs->f_files);
10744             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10745             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10746             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10747             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10748             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10749 #ifdef _STATFS_F_FLAGS
10750             __put_user(stfs.f_flags, &target_stfs->f_flags);
10751 #else
10752             __put_user(0, &target_stfs->f_flags);
10753 #endif
10754             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10755             unlock_user_struct(target_stfs, arg2, 1);
10756         }
10757         return ret;
10758 #endif
10759 #ifdef TARGET_NR_fstatfs
10760     case TARGET_NR_fstatfs:
10761         ret = get_errno(fstatfs(arg1, &stfs));
10762         goto convert_statfs;
10763 #endif
10764 #ifdef TARGET_NR_statfs64
10765     case TARGET_NR_statfs64:
10766         if (!(p = lock_user_string(arg1))) {
10767             return -TARGET_EFAULT;
10768         }
10769         ret = get_errno(statfs(path(p), &stfs));
10770         unlock_user(p, arg1, 0);
10771     convert_statfs64:
10772         if (!is_error(ret)) {
10773             struct target_statfs64 *target_stfs;
10774 
10775             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10776                 return -TARGET_EFAULT;
10777             __put_user(stfs.f_type, &target_stfs->f_type);
10778             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10779             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10780             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10781             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10782             __put_user(stfs.f_files, &target_stfs->f_files);
10783             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10784             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10785             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10786             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10787             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10788 #ifdef _STATFS_F_FLAGS
10789             __put_user(stfs.f_flags, &target_stfs->f_flags);
10790 #else
10791             __put_user(0, &target_stfs->f_flags);
10792 #endif
10793             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10794             unlock_user_struct(target_stfs, arg3, 1);
10795         }
10796         return ret;
10797     case TARGET_NR_fstatfs64:
10798         ret = get_errno(fstatfs(arg1, &stfs));
10799         goto convert_statfs64;
10800 #endif
10801 #ifdef TARGET_NR_socketcall
10802     case TARGET_NR_socketcall:
10803         return do_socketcall(arg1, arg2);
10804 #endif
10805 #ifdef TARGET_NR_accept
10806     case TARGET_NR_accept:
10807         return do_accept4(arg1, arg2, arg3, 0);
10808 #endif
10809 #ifdef TARGET_NR_accept4
10810     case TARGET_NR_accept4:
10811         return do_accept4(arg1, arg2, arg3, arg4);
10812 #endif
10813 #ifdef TARGET_NR_bind
10814     case TARGET_NR_bind:
10815         return do_bind(arg1, arg2, arg3);
10816 #endif
10817 #ifdef TARGET_NR_connect
10818     case TARGET_NR_connect:
10819         return do_connect(arg1, arg2, arg3);
10820 #endif
10821 #ifdef TARGET_NR_getpeername
10822     case TARGET_NR_getpeername:
10823         return do_getpeername(arg1, arg2, arg3);
10824 #endif
10825 #ifdef TARGET_NR_getsockname
10826     case TARGET_NR_getsockname:
10827         return do_getsockname(arg1, arg2, arg3);
10828 #endif
10829 #ifdef TARGET_NR_getsockopt
10830     case TARGET_NR_getsockopt:
10831         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10832 #endif
10833 #ifdef TARGET_NR_listen
10834     case TARGET_NR_listen:
10835         return get_errno(listen(arg1, arg2));
10836 #endif
10837 #ifdef TARGET_NR_recv
10838     case TARGET_NR_recv:
10839         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10840 #endif
10841 #ifdef TARGET_NR_recvfrom
10842     case TARGET_NR_recvfrom:
10843         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10844 #endif
10845 #ifdef TARGET_NR_recvmsg
10846     case TARGET_NR_recvmsg:
10847         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10848 #endif
10849 #ifdef TARGET_NR_send
10850     case TARGET_NR_send:
10851         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10852 #endif
10853 #ifdef TARGET_NR_sendmsg
10854     case TARGET_NR_sendmsg:
10855         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10856 #endif
10857 #ifdef TARGET_NR_sendmmsg
10858     case TARGET_NR_sendmmsg:
10859         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10860 #endif
10861 #ifdef TARGET_NR_recvmmsg
10862     case TARGET_NR_recvmmsg:
10863         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10864 #endif
10865 #ifdef TARGET_NR_sendto
10866     case TARGET_NR_sendto:
10867         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10868 #endif
10869 #ifdef TARGET_NR_shutdown
10870     case TARGET_NR_shutdown:
10871         return get_errno(shutdown(arg1, arg2));
10872 #endif
10873 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10874     case TARGET_NR_getrandom:
10875         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10876         if (!p) {
10877             return -TARGET_EFAULT;
10878         }
10879         ret = get_errno(getrandom(p, arg2, arg3));
10880         unlock_user(p, arg1, ret);
10881         return ret;
10882 #endif
10883 #ifdef TARGET_NR_socket
10884     case TARGET_NR_socket:
10885         return do_socket(arg1, arg2, arg3);
10886 #endif
10887 #ifdef TARGET_NR_socketpair
10888     case TARGET_NR_socketpair:
10889         return do_socketpair(arg1, arg2, arg3, arg4);
10890 #endif
10891 #ifdef TARGET_NR_setsockopt
10892     case TARGET_NR_setsockopt:
10893         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10894 #endif
10895 #if defined(TARGET_NR_syslog)
10896     case TARGET_NR_syslog:
10897         {
10898             int len = arg2;
10899 
10900             switch (arg1) {
10901             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10902             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10903             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10904             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10905             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10906             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10907             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10908             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10909                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10910             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10911             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10912             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10913                 {
10914                     if (len < 0) {
10915                         return -TARGET_EINVAL;
10916                     }
10917                     if (len == 0) {
10918                         return 0;
10919                     }
10920                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10921                     if (!p) {
10922                         return -TARGET_EFAULT;
10923                     }
10924                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10925                     unlock_user(p, arg2, arg3);
10926                 }
10927                 return ret;
10928             default:
10929                 return -TARGET_EINVAL;
10930             }
10931         }
10932         break;
10933 #endif
10934     case TARGET_NR_setitimer:
10935         {
10936             struct itimerval value, ovalue, *pvalue;
10937 
10938             if (arg2) {
10939                 pvalue = &value;
10940                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10941                     || copy_from_user_timeval(&pvalue->it_value,
10942                                               arg2 + sizeof(struct target_timeval)))
10943                     return -TARGET_EFAULT;
10944             } else {
10945                 pvalue = NULL;
10946             }
10947             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10948             if (!is_error(ret) && arg3) {
10949                 if (copy_to_user_timeval(arg3,
10950                                          &ovalue.it_interval)
10951                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10952                                             &ovalue.it_value))
10953                     return -TARGET_EFAULT;
10954             }
10955         }
10956         return ret;
10957     case TARGET_NR_getitimer:
10958         {
10959             struct itimerval value;
10960 
10961             ret = get_errno(getitimer(arg1, &value));
10962             if (!is_error(ret) && arg2) {
10963                 if (copy_to_user_timeval(arg2,
10964                                          &value.it_interval)
10965                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10966                                             &value.it_value))
10967                     return -TARGET_EFAULT;
10968             }
10969         }
10970         return ret;
10971 #ifdef TARGET_NR_stat
10972     case TARGET_NR_stat:
10973         if (!(p = lock_user_string(arg1))) {
10974             return -TARGET_EFAULT;
10975         }
10976         ret = get_errno(stat(path(p), &st));
10977         unlock_user(p, arg1, 0);
10978         goto do_stat;
10979 #endif
10980 #ifdef TARGET_NR_lstat
10981     case TARGET_NR_lstat:
10982         if (!(p = lock_user_string(arg1))) {
10983             return -TARGET_EFAULT;
10984         }
10985         ret = get_errno(lstat(path(p), &st));
10986         unlock_user(p, arg1, 0);
10987         goto do_stat;
10988 #endif
10989 #ifdef TARGET_NR_fstat
10990     case TARGET_NR_fstat:
10991         {
10992             ret = get_errno(fstat(arg1, &st));
10993 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10994         do_stat:
10995 #endif
10996             if (!is_error(ret)) {
10997                 struct target_stat *target_st;
10998 
10999                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11000                     return -TARGET_EFAULT;
11001                 memset(target_st, 0, sizeof(*target_st));
11002                 __put_user(st.st_dev, &target_st->st_dev);
11003                 __put_user(st.st_ino, &target_st->st_ino);
11004                 __put_user(st.st_mode, &target_st->st_mode);
11005                 __put_user(st.st_uid, &target_st->st_uid);
11006                 __put_user(st.st_gid, &target_st->st_gid);
11007                 __put_user(st.st_nlink, &target_st->st_nlink);
11008                 __put_user(st.st_rdev, &target_st->st_rdev);
11009                 __put_user(st.st_size, &target_st->st_size);
11010                 __put_user(st.st_blksize, &target_st->st_blksize);
11011                 __put_user(st.st_blocks, &target_st->st_blocks);
11012                 __put_user(st.st_atime, &target_st->target_st_atime);
11013                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11014                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11015 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11016                 __put_user(st.st_atim.tv_nsec,
11017                            &target_st->target_st_atime_nsec);
11018                 __put_user(st.st_mtim.tv_nsec,
11019                            &target_st->target_st_mtime_nsec);
11020                 __put_user(st.st_ctim.tv_nsec,
11021                            &target_st->target_st_ctime_nsec);
11022 #endif
11023                 unlock_user_struct(target_st, arg2, 1);
11024             }
11025         }
11026         return ret;
11027 #endif
11028     case TARGET_NR_vhangup:
11029         return get_errno(vhangup());
11030 #ifdef TARGET_NR_syscall
11031     case TARGET_NR_syscall:
11032         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11033                           arg6, arg7, arg8, 0);
11034 #endif
11035 #if defined(TARGET_NR_wait4)
11036     case TARGET_NR_wait4:
11037         {
11038             int status;
11039             abi_long status_ptr = arg2;
11040             struct rusage rusage, *rusage_ptr;
11041             abi_ulong target_rusage = arg4;
11042             abi_long rusage_err;
11043             if (target_rusage)
11044                 rusage_ptr = &rusage;
11045             else
11046                 rusage_ptr = NULL;
11047             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11048             if (!is_error(ret)) {
11049                 if (status_ptr && ret) {
11050                     status = host_to_target_waitstatus(status);
11051                     if (put_user_s32(status, status_ptr))
11052                         return -TARGET_EFAULT;
11053                 }
11054                 if (target_rusage) {
11055                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11056                     if (rusage_err) {
11057                         ret = rusage_err;
11058                     }
11059                 }
11060             }
11061         }
11062         return ret;
11063 #endif
11064 #ifdef TARGET_NR_swapoff
11065     case TARGET_NR_swapoff:
11066         if (!(p = lock_user_string(arg1)))
11067             return -TARGET_EFAULT;
11068         ret = get_errno(swapoff(p));
11069         unlock_user(p, arg1, 0);
11070         return ret;
11071 #endif
11072     case TARGET_NR_sysinfo:
11073         {
11074             struct target_sysinfo *target_value;
11075             struct sysinfo value;
11076             ret = get_errno(sysinfo(&value));
11077             if (!is_error(ret) && arg1)
11078             {
11079                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11080                     return -TARGET_EFAULT;
11081                 __put_user(value.uptime, &target_value->uptime);
11082                 __put_user(value.loads[0], &target_value->loads[0]);
11083                 __put_user(value.loads[1], &target_value->loads[1]);
11084                 __put_user(value.loads[2], &target_value->loads[2]);
11085                 __put_user(value.totalram, &target_value->totalram);
11086                 __put_user(value.freeram, &target_value->freeram);
11087                 __put_user(value.sharedram, &target_value->sharedram);
11088                 __put_user(value.bufferram, &target_value->bufferram);
11089                 __put_user(value.totalswap, &target_value->totalswap);
11090                 __put_user(value.freeswap, &target_value->freeswap);
11091                 __put_user(value.procs, &target_value->procs);
11092                 __put_user(value.totalhigh, &target_value->totalhigh);
11093                 __put_user(value.freehigh, &target_value->freehigh);
11094                 __put_user(value.mem_unit, &target_value->mem_unit);
11095                 unlock_user_struct(target_value, arg1, 1);
11096             }
11097         }
11098         return ret;
11099 #ifdef TARGET_NR_ipc
11100     case TARGET_NR_ipc:
11101         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11102 #endif
11103 #ifdef TARGET_NR_semget
11104     case TARGET_NR_semget:
11105         return get_errno(semget(arg1, arg2, arg3));
11106 #endif
11107 #ifdef TARGET_NR_semop
11108     case TARGET_NR_semop:
11109         return do_semtimedop(arg1, arg2, arg3, 0, false);
11110 #endif
11111 #ifdef TARGET_NR_semtimedop
11112     case TARGET_NR_semtimedop:
11113         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11114 #endif
11115 #ifdef TARGET_NR_semtimedop_time64
11116     case TARGET_NR_semtimedop_time64:
11117         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11118 #endif
11119 #ifdef TARGET_NR_semctl
11120     case TARGET_NR_semctl:
11121         return do_semctl(arg1, arg2, arg3, arg4);
11122 #endif
11123 #ifdef TARGET_NR_msgctl
11124     case TARGET_NR_msgctl:
11125         return do_msgctl(arg1, arg2, arg3);
11126 #endif
11127 #ifdef TARGET_NR_msgget
11128     case TARGET_NR_msgget:
11129         return get_errno(msgget(arg1, arg2));
11130 #endif
11131 #ifdef TARGET_NR_msgrcv
11132     case TARGET_NR_msgrcv:
11133         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11134 #endif
11135 #ifdef TARGET_NR_msgsnd
11136     case TARGET_NR_msgsnd:
11137         return do_msgsnd(arg1, arg2, arg3, arg4);
11138 #endif
11139 #ifdef TARGET_NR_shmget
11140     case TARGET_NR_shmget:
11141         return get_errno(shmget(arg1, arg2, arg3));
11142 #endif
11143 #ifdef TARGET_NR_shmctl
11144     case TARGET_NR_shmctl:
11145         return do_shmctl(arg1, arg2, arg3);
11146 #endif
11147 #ifdef TARGET_NR_shmat
11148     case TARGET_NR_shmat:
11149         return target_shmat(cpu_env, arg1, arg2, arg3);
11150 #endif
11151 #ifdef TARGET_NR_shmdt
11152     case TARGET_NR_shmdt:
11153         return target_shmdt(arg1);
11154 #endif
11155     case TARGET_NR_fsync:
11156         return get_errno(fsync(arg1));
11157     case TARGET_NR_clone:
11158         /* Linux manages to have three different orderings for its
11159          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11160          * match the kernel's CONFIG_CLONE_* settings.
11161          * Microblaze is further special in that it uses a sixth
11162          * implicit argument to clone for the TLS pointer.
11163          */
11164 #if defined(TARGET_MICROBLAZE)
11165         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11166 #elif defined(TARGET_CLONE_BACKWARDS)
11167         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11168 #elif defined(TARGET_CLONE_BACKWARDS2)
11169         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11170 #else
11171         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11172 #endif
11173         return ret;
11174 #ifdef __NR_exit_group
11175         /* new thread calls */
11176     case TARGET_NR_exit_group:
11177         preexit_cleanup(cpu_env, arg1);
11178         return get_errno(exit_group(arg1));
11179 #endif
11180     case TARGET_NR_setdomainname:
11181         if (!(p = lock_user_string(arg1)))
11182             return -TARGET_EFAULT;
11183         ret = get_errno(setdomainname(p, arg2));
11184         unlock_user(p, arg1, 0);
11185         return ret;
11186     case TARGET_NR_uname:
11187         /* no need to transcode because we use the linux syscall */
11188         {
11189             struct new_utsname * buf;
11190 
11191             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11192                 return -TARGET_EFAULT;
11193             ret = get_errno(sys_uname(buf));
11194             if (!is_error(ret)) {
11195                 /* Overwrite the native machine name with whatever is being
11196                    emulated. */
11197                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11198                           sizeof(buf->machine));
11199                 /* Allow the user to override the reported release.  */
11200                 if (qemu_uname_release && *qemu_uname_release) {
11201                     g_strlcpy(buf->release, qemu_uname_release,
11202                               sizeof(buf->release));
11203                 }
11204             }
11205             unlock_user_struct(buf, arg1, 1);
11206         }
11207         return ret;
11208 #ifdef TARGET_I386
11209     case TARGET_NR_modify_ldt:
11210         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11211 #if !defined(TARGET_X86_64)
11212     case TARGET_NR_vm86:
11213         return do_vm86(cpu_env, arg1, arg2);
11214 #endif
11215 #endif
11216 #if defined(TARGET_NR_adjtimex)
11217     case TARGET_NR_adjtimex:
11218         {
11219             struct timex host_buf;
11220 
11221             if (target_to_host_timex(&host_buf, arg1) != 0) {
11222                 return -TARGET_EFAULT;
11223             }
11224             ret = get_errno(adjtimex(&host_buf));
11225             if (!is_error(ret)) {
11226                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11227                     return -TARGET_EFAULT;
11228                 }
11229             }
11230         }
11231         return ret;
11232 #endif
11233 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11234     case TARGET_NR_clock_adjtime:
11235         {
11236             struct timex htx;
11237 
11238             if (target_to_host_timex(&htx, arg2) != 0) {
11239                 return -TARGET_EFAULT;
11240             }
11241             ret = get_errno(clock_adjtime(arg1, &htx));
11242             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11243                 return -TARGET_EFAULT;
11244             }
11245         }
11246         return ret;
11247 #endif
11248 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11249     case TARGET_NR_clock_adjtime64:
11250         {
11251             struct timex htx;
11252 
11253             if (target_to_host_timex64(&htx, arg2) != 0) {
11254                 return -TARGET_EFAULT;
11255             }
11256             ret = get_errno(clock_adjtime(arg1, &htx));
11257             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11258                     return -TARGET_EFAULT;
11259             }
11260         }
11261         return ret;
11262 #endif
11263     case TARGET_NR_getpgid:
11264         return get_errno(getpgid(arg1));
11265     case TARGET_NR_fchdir:
11266         return get_errno(fchdir(arg1));
11267     case TARGET_NR_personality:
11268         return get_errno(personality(arg1));
11269 #ifdef TARGET_NR__llseek /* Not on alpha */
11270     case TARGET_NR__llseek:
11271         {
11272             int64_t res;
11273 #if !defined(__NR_llseek)
11274             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11275             if (res == -1) {
11276                 ret = get_errno(res);
11277             } else {
11278                 ret = 0;
11279             }
11280 #else
11281             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11282 #endif
11283             if ((ret == 0) && put_user_s64(res, arg4)) {
11284                 return -TARGET_EFAULT;
11285             }
11286         }
11287         return ret;
11288 #endif
11289 #ifdef TARGET_NR_getdents
11290     case TARGET_NR_getdents:
11291         return do_getdents(arg1, arg2, arg3);
11292 #endif /* TARGET_NR_getdents */
11293 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11294     case TARGET_NR_getdents64:
11295         return do_getdents64(arg1, arg2, arg3);
11296 #endif /* TARGET_NR_getdents64 */
11297 #if defined(TARGET_NR__newselect)
11298     case TARGET_NR__newselect:
11299         return do_select(arg1, arg2, arg3, arg4, arg5);
11300 #endif
11301 #ifdef TARGET_NR_poll
11302     case TARGET_NR_poll:
11303         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11304 #endif
11305 #ifdef TARGET_NR_ppoll
11306     case TARGET_NR_ppoll:
11307         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11308 #endif
11309 #ifdef TARGET_NR_ppoll_time64
11310     case TARGET_NR_ppoll_time64:
11311         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11312 #endif
11313     case TARGET_NR_flock:
11314         /* NOTE: the flock constant seems to be the same for every
11315            Linux platform */
11316         return get_errno(safe_flock(arg1, arg2));
11317     case TARGET_NR_readv:
11318         {
11319             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11320             if (vec != NULL) {
11321                 ret = get_errno(safe_readv(arg1, vec, arg3));
11322                 unlock_iovec(vec, arg2, arg3, 1);
11323             } else {
11324                 ret = -host_to_target_errno(errno);
11325             }
11326         }
11327         return ret;
11328     case TARGET_NR_writev:
11329         {
11330             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11331             if (vec != NULL) {
11332                 ret = get_errno(safe_writev(arg1, vec, arg3));
11333                 unlock_iovec(vec, arg2, arg3, 0);
11334             } else {
11335                 ret = -host_to_target_errno(errno);
11336             }
11337         }
11338         return ret;
11339 #if defined(TARGET_NR_preadv)
11340     case TARGET_NR_preadv:
11341         {
11342             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11343             if (vec != NULL) {
11344                 unsigned long low, high;
11345 
11346                 target_to_host_low_high(arg4, arg5, &low, &high);
11347                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11348                 unlock_iovec(vec, arg2, arg3, 1);
11349             } else {
11350                 ret = -host_to_target_errno(errno);
11351            }
11352         }
11353         return ret;
11354 #endif
11355 #if defined(TARGET_NR_pwritev)
11356     case TARGET_NR_pwritev:
11357         {
11358             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11359             if (vec != NULL) {
11360                 unsigned long low, high;
11361 
11362                 target_to_host_low_high(arg4, arg5, &low, &high);
11363                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11364                 unlock_iovec(vec, arg2, arg3, 0);
11365             } else {
11366                 ret = -host_to_target_errno(errno);
11367            }
11368         }
11369         return ret;
11370 #endif
11371     case TARGET_NR_getsid:
11372         return get_errno(getsid(arg1));
11373 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11374     case TARGET_NR_fdatasync:
11375         return get_errno(fdatasync(arg1));
11376 #endif
11377     case TARGET_NR_sched_getaffinity:
11378         {
11379             unsigned int mask_size;
11380             unsigned long *mask;
11381 
11382             /*
11383              * sched_getaffinity needs multiples of ulong, so need to take
11384              * care of mismatches between target ulong and host ulong sizes.
11385              */
11386             if (arg2 & (sizeof(abi_ulong) - 1)) {
11387                 return -TARGET_EINVAL;
11388             }
11389             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11390 
11391             mask = alloca(mask_size);
11392             memset(mask, 0, mask_size);
11393             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11394 
11395             if (!is_error(ret)) {
11396                 if (ret > arg2) {
11397                     /* More data returned than the caller's buffer will fit.
11398                      * This only happens if sizeof(abi_long) < sizeof(long)
11399                      * and the caller passed us a buffer holding an odd number
11400                      * of abi_longs. If the host kernel is actually using the
11401                      * extra 4 bytes then fail EINVAL; otherwise we can just
11402                      * ignore them and only copy the interesting part.
11403                      */
11404                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11405                     if (numcpus > arg2 * 8) {
11406                         return -TARGET_EINVAL;
11407                     }
11408                     ret = arg2;
11409                 }
11410 
11411                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11412                     return -TARGET_EFAULT;
11413                 }
11414             }
11415         }
11416         return ret;
11417     case TARGET_NR_sched_setaffinity:
11418         {
11419             unsigned int mask_size;
11420             unsigned long *mask;
11421 
11422             /*
11423              * sched_setaffinity needs multiples of ulong, so need to take
11424              * care of mismatches between target ulong and host ulong sizes.
11425              */
11426             if (arg2 & (sizeof(abi_ulong) - 1)) {
11427                 return -TARGET_EINVAL;
11428             }
11429             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11430             mask = alloca(mask_size);
11431 
11432             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11433             if (ret) {
11434                 return ret;
11435             }
11436 
11437             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11438         }
11439     case TARGET_NR_getcpu:
11440         {
11441             unsigned cpuid, node;
11442             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11443                                        arg2 ? &node : NULL,
11444                                        NULL));
11445             if (is_error(ret)) {
11446                 return ret;
11447             }
11448             if (arg1 && put_user_u32(cpuid, arg1)) {
11449                 return -TARGET_EFAULT;
11450             }
11451             if (arg2 && put_user_u32(node, arg2)) {
11452                 return -TARGET_EFAULT;
11453             }
11454         }
11455         return ret;
11456     case TARGET_NR_sched_setparam:
11457         {
11458             struct target_sched_param *target_schp;
11459             struct sched_param schp;
11460 
11461             if (arg2 == 0) {
11462                 return -TARGET_EINVAL;
11463             }
11464             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11465                 return -TARGET_EFAULT;
11466             }
11467             schp.sched_priority = tswap32(target_schp->sched_priority);
11468             unlock_user_struct(target_schp, arg2, 0);
11469             return get_errno(sys_sched_setparam(arg1, &schp));
11470         }
11471     case TARGET_NR_sched_getparam:
11472         {
11473             struct target_sched_param *target_schp;
11474             struct sched_param schp;
11475 
11476             if (arg2 == 0) {
11477                 return -TARGET_EINVAL;
11478             }
11479             ret = get_errno(sys_sched_getparam(arg1, &schp));
11480             if (!is_error(ret)) {
11481                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11482                     return -TARGET_EFAULT;
11483                 }
11484                 target_schp->sched_priority = tswap32(schp.sched_priority);
11485                 unlock_user_struct(target_schp, arg2, 1);
11486             }
11487         }
11488         return ret;
11489     case TARGET_NR_sched_setscheduler:
11490         {
11491             struct target_sched_param *target_schp;
11492             struct sched_param schp;
11493             if (arg3 == 0) {
11494                 return -TARGET_EINVAL;
11495             }
11496             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11497                 return -TARGET_EFAULT;
11498             }
11499             schp.sched_priority = tswap32(target_schp->sched_priority);
11500             unlock_user_struct(target_schp, arg3, 0);
11501             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11502         }
11503     case TARGET_NR_sched_getscheduler:
11504         return get_errno(sys_sched_getscheduler(arg1));
11505     case TARGET_NR_sched_getattr:
11506         {
11507             struct target_sched_attr *target_scha;
11508             struct sched_attr scha;
11509             if (arg2 == 0) {
11510                 return -TARGET_EINVAL;
11511             }
11512             if (arg3 > sizeof(scha)) {
11513                 arg3 = sizeof(scha);
11514             }
11515             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11516             if (!is_error(ret)) {
11517                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11518                 if (!target_scha) {
11519                     return -TARGET_EFAULT;
11520                 }
11521                 target_scha->size = tswap32(scha.size);
11522                 target_scha->sched_policy = tswap32(scha.sched_policy);
11523                 target_scha->sched_flags = tswap64(scha.sched_flags);
11524                 target_scha->sched_nice = tswap32(scha.sched_nice);
11525                 target_scha->sched_priority = tswap32(scha.sched_priority);
11526                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11527                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11528                 target_scha->sched_period = tswap64(scha.sched_period);
11529                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11530                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11531                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11532                 }
11533                 unlock_user(target_scha, arg2, arg3);
11534             }
11535             return ret;
11536         }
11537     case TARGET_NR_sched_setattr:
11538         {
11539             struct target_sched_attr *target_scha;
11540             struct sched_attr scha;
11541             uint32_t size;
11542             int zeroed;
11543             if (arg2 == 0) {
11544                 return -TARGET_EINVAL;
11545             }
11546             if (get_user_u32(size, arg2)) {
11547                 return -TARGET_EFAULT;
11548             }
11549             if (!size) {
11550                 size = offsetof(struct target_sched_attr, sched_util_min);
11551             }
11552             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11553                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11554                     return -TARGET_EFAULT;
11555                 }
11556                 return -TARGET_E2BIG;
11557             }
11558 
11559             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11560             if (zeroed < 0) {
11561                 return zeroed;
11562             } else if (zeroed == 0) {
11563                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11564                     return -TARGET_EFAULT;
11565                 }
11566                 return -TARGET_E2BIG;
11567             }
11568             if (size > sizeof(struct target_sched_attr)) {
11569                 size = sizeof(struct target_sched_attr);
11570             }
11571 
11572             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11573             if (!target_scha) {
11574                 return -TARGET_EFAULT;
11575             }
11576             scha.size = size;
11577             scha.sched_policy = tswap32(target_scha->sched_policy);
11578             scha.sched_flags = tswap64(target_scha->sched_flags);
11579             scha.sched_nice = tswap32(target_scha->sched_nice);
11580             scha.sched_priority = tswap32(target_scha->sched_priority);
11581             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11582             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11583             scha.sched_period = tswap64(target_scha->sched_period);
11584             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11585                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11586                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11587             }
11588             unlock_user(target_scha, arg2, 0);
11589             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11590         }
11591     case TARGET_NR_sched_yield:
11592         return get_errno(sched_yield());
11593     case TARGET_NR_sched_get_priority_max:
11594         return get_errno(sched_get_priority_max(arg1));
11595     case TARGET_NR_sched_get_priority_min:
11596         return get_errno(sched_get_priority_min(arg1));
11597 #ifdef TARGET_NR_sched_rr_get_interval
11598     case TARGET_NR_sched_rr_get_interval:
11599         {
11600             struct timespec ts;
11601             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11602             if (!is_error(ret)) {
11603                 ret = host_to_target_timespec(arg2, &ts);
11604             }
11605         }
11606         return ret;
11607 #endif
11608 #ifdef TARGET_NR_sched_rr_get_interval_time64
11609     case TARGET_NR_sched_rr_get_interval_time64:
11610         {
11611             struct timespec ts;
11612             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11613             if (!is_error(ret)) {
11614                 ret = host_to_target_timespec64(arg2, &ts);
11615             }
11616         }
11617         return ret;
11618 #endif
11619 #if defined(TARGET_NR_nanosleep)
11620     case TARGET_NR_nanosleep:
11621         {
11622             struct timespec req, rem;
11623             target_to_host_timespec(&req, arg1);
11624             ret = get_errno(safe_nanosleep(&req, &rem));
11625             if (is_error(ret) && arg2) {
11626                 host_to_target_timespec(arg2, &rem);
11627             }
11628         }
11629         return ret;
11630 #endif
11631     case TARGET_NR_prctl:
11632         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11633         break;
11634 #ifdef TARGET_NR_arch_prctl
11635     case TARGET_NR_arch_prctl:
11636         return do_arch_prctl(cpu_env, arg1, arg2);
11637 #endif
11638 #ifdef TARGET_NR_pread64
11639     case TARGET_NR_pread64:
11640         if (regpairs_aligned(cpu_env, num)) {
11641             arg4 = arg5;
11642             arg5 = arg6;
11643         }
11644         if (arg2 == 0 && arg3 == 0) {
11645             /* Special-case NULL buffer and zero length, which should succeed */
11646             p = 0;
11647         } else {
11648             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11649             if (!p) {
11650                 return -TARGET_EFAULT;
11651             }
11652         }
11653         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11654         unlock_user(p, arg2, ret);
11655         return ret;
11656     case TARGET_NR_pwrite64:
11657         if (regpairs_aligned(cpu_env, num)) {
11658             arg4 = arg5;
11659             arg5 = arg6;
11660         }
11661         if (arg2 == 0 && arg3 == 0) {
11662             /* Special-case NULL buffer and zero length, which should succeed */
11663             p = 0;
11664         } else {
11665             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11666             if (!p) {
11667                 return -TARGET_EFAULT;
11668             }
11669         }
11670         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11671         unlock_user(p, arg2, 0);
11672         return ret;
11673 #endif
11674     case TARGET_NR_getcwd:
11675         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11676             return -TARGET_EFAULT;
11677         ret = get_errno(sys_getcwd1(p, arg2));
11678         unlock_user(p, arg1, ret);
11679         return ret;
11680     case TARGET_NR_capget:
11681     case TARGET_NR_capset:
11682     {
11683         struct target_user_cap_header *target_header;
11684         struct target_user_cap_data *target_data = NULL;
11685         struct __user_cap_header_struct header;
11686         struct __user_cap_data_struct data[2];
11687         struct __user_cap_data_struct *dataptr = NULL;
11688         int i, target_datalen;
11689         int data_items = 1;
11690 
11691         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11692             return -TARGET_EFAULT;
11693         }
11694         header.version = tswap32(target_header->version);
11695         header.pid = tswap32(target_header->pid);
11696 
11697         if (header.version != _LINUX_CAPABILITY_VERSION) {
11698             /* Version 2 and up takes pointer to two user_data structs */
11699             data_items = 2;
11700         }
11701 
11702         target_datalen = sizeof(*target_data) * data_items;
11703 
11704         if (arg2) {
11705             if (num == TARGET_NR_capget) {
11706                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11707             } else {
11708                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11709             }
11710             if (!target_data) {
11711                 unlock_user_struct(target_header, arg1, 0);
11712                 return -TARGET_EFAULT;
11713             }
11714 
11715             if (num == TARGET_NR_capset) {
11716                 for (i = 0; i < data_items; i++) {
11717                     data[i].effective = tswap32(target_data[i].effective);
11718                     data[i].permitted = tswap32(target_data[i].permitted);
11719                     data[i].inheritable = tswap32(target_data[i].inheritable);
11720                 }
11721             }
11722 
11723             dataptr = data;
11724         }
11725 
11726         if (num == TARGET_NR_capget) {
11727             ret = get_errno(capget(&header, dataptr));
11728         } else {
11729             ret = get_errno(capset(&header, dataptr));
11730         }
11731 
11732         /* The kernel always updates version for both capget and capset */
11733         target_header->version = tswap32(header.version);
11734         unlock_user_struct(target_header, arg1, 1);
11735 
11736         if (arg2) {
11737             if (num == TARGET_NR_capget) {
11738                 for (i = 0; i < data_items; i++) {
11739                     target_data[i].effective = tswap32(data[i].effective);
11740                     target_data[i].permitted = tswap32(data[i].permitted);
11741                     target_data[i].inheritable = tswap32(data[i].inheritable);
11742                 }
11743                 unlock_user(target_data, arg2, target_datalen);
11744             } else {
11745                 unlock_user(target_data, arg2, 0);
11746             }
11747         }
11748         return ret;
11749     }
11750     case TARGET_NR_sigaltstack:
11751         return do_sigaltstack(arg1, arg2, cpu_env);
11752 
11753 #ifdef CONFIG_SENDFILE
11754 #ifdef TARGET_NR_sendfile
11755     case TARGET_NR_sendfile:
11756     {
11757         off_t *offp = NULL;
11758         off_t off;
11759         if (arg3) {
11760             ret = get_user_sal(off, arg3);
11761             if (is_error(ret)) {
11762                 return ret;
11763             }
11764             offp = &off;
11765         }
11766         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11767         if (!is_error(ret) && arg3) {
11768             abi_long ret2 = put_user_sal(off, arg3);
11769             if (is_error(ret2)) {
11770                 ret = ret2;
11771             }
11772         }
11773         return ret;
11774     }
11775 #endif
11776 #ifdef TARGET_NR_sendfile64
11777     case TARGET_NR_sendfile64:
11778     {
11779         off_t *offp = NULL;
11780         off_t off;
11781         if (arg3) {
11782             ret = get_user_s64(off, arg3);
11783             if (is_error(ret)) {
11784                 return ret;
11785             }
11786             offp = &off;
11787         }
11788         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11789         if (!is_error(ret) && arg3) {
11790             abi_long ret2 = put_user_s64(off, arg3);
11791             if (is_error(ret2)) {
11792                 ret = ret2;
11793             }
11794         }
11795         return ret;
11796     }
11797 #endif
11798 #endif
11799 #ifdef TARGET_NR_vfork
11800     case TARGET_NR_vfork:
11801         return get_errno(do_fork(cpu_env,
11802                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11803                          0, 0, 0, 0));
11804 #endif
11805 #ifdef TARGET_NR_ugetrlimit
11806     case TARGET_NR_ugetrlimit:
11807     {
11808 	struct rlimit rlim;
11809 	int resource = target_to_host_resource(arg1);
11810 	ret = get_errno(getrlimit(resource, &rlim));
11811 	if (!is_error(ret)) {
11812 	    struct target_rlimit *target_rlim;
11813             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11814                 return -TARGET_EFAULT;
11815 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11816 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11817             unlock_user_struct(target_rlim, arg2, 1);
11818 	}
11819         return ret;
11820     }
11821 #endif
11822 #ifdef TARGET_NR_truncate64
11823     case TARGET_NR_truncate64:
11824         if (!(p = lock_user_string(arg1)))
11825             return -TARGET_EFAULT;
11826 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11827         unlock_user(p, arg1, 0);
11828         return ret;
11829 #endif
11830 #ifdef TARGET_NR_ftruncate64
11831     case TARGET_NR_ftruncate64:
11832         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11833 #endif
11834 #ifdef TARGET_NR_stat64
11835     case TARGET_NR_stat64:
11836         if (!(p = lock_user_string(arg1))) {
11837             return -TARGET_EFAULT;
11838         }
11839         ret = get_errno(stat(path(p), &st));
11840         unlock_user(p, arg1, 0);
11841         if (!is_error(ret))
11842             ret = host_to_target_stat64(cpu_env, arg2, &st);
11843         return ret;
11844 #endif
11845 #ifdef TARGET_NR_lstat64
11846     case TARGET_NR_lstat64:
11847         if (!(p = lock_user_string(arg1))) {
11848             return -TARGET_EFAULT;
11849         }
11850         ret = get_errno(lstat(path(p), &st));
11851         unlock_user(p, arg1, 0);
11852         if (!is_error(ret))
11853             ret = host_to_target_stat64(cpu_env, arg2, &st);
11854         return ret;
11855 #endif
11856 #ifdef TARGET_NR_fstat64
11857     case TARGET_NR_fstat64:
11858         ret = get_errno(fstat(arg1, &st));
11859         if (!is_error(ret))
11860             ret = host_to_target_stat64(cpu_env, arg2, &st);
11861         return ret;
11862 #endif
11863 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11864 #ifdef TARGET_NR_fstatat64
11865     case TARGET_NR_fstatat64:
11866 #endif
11867 #ifdef TARGET_NR_newfstatat
11868     case TARGET_NR_newfstatat:
11869 #endif
11870         if (!(p = lock_user_string(arg2))) {
11871             return -TARGET_EFAULT;
11872         }
11873         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11874         unlock_user(p, arg2, 0);
11875         if (!is_error(ret))
11876             ret = host_to_target_stat64(cpu_env, arg3, &st);
11877         return ret;
11878 #endif
11879 #if defined(TARGET_NR_statx)
11880     case TARGET_NR_statx:
11881         {
11882             struct target_statx *target_stx;
11883             int dirfd = arg1;
11884             int flags = arg3;
11885 
11886             p = lock_user_string(arg2);
11887             if (p == NULL) {
11888                 return -TARGET_EFAULT;
11889             }
11890 #if defined(__NR_statx)
11891             {
11892                 /*
11893                  * It is assumed that struct statx is architecture independent.
11894                  */
11895                 struct target_statx host_stx;
11896                 int mask = arg4;
11897 
11898                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11899                 if (!is_error(ret)) {
11900                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11901                         unlock_user(p, arg2, 0);
11902                         return -TARGET_EFAULT;
11903                     }
11904                 }
11905 
11906                 if (ret != -TARGET_ENOSYS) {
11907                     unlock_user(p, arg2, 0);
11908                     return ret;
11909                 }
11910             }
11911 #endif
11912             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11913             unlock_user(p, arg2, 0);
11914 
11915             if (!is_error(ret)) {
11916                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11917                     return -TARGET_EFAULT;
11918                 }
11919                 memset(target_stx, 0, sizeof(*target_stx));
11920                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11921                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11922                 __put_user(st.st_ino, &target_stx->stx_ino);
11923                 __put_user(st.st_mode, &target_stx->stx_mode);
11924                 __put_user(st.st_uid, &target_stx->stx_uid);
11925                 __put_user(st.st_gid, &target_stx->stx_gid);
11926                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11927                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11928                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11929                 __put_user(st.st_size, &target_stx->stx_size);
11930                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11931                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11932                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11933                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11934                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11935                 unlock_user_struct(target_stx, arg5, 1);
11936             }
11937         }
11938         return ret;
11939 #endif
11940 #ifdef TARGET_NR_lchown
11941     case TARGET_NR_lchown:
11942         if (!(p = lock_user_string(arg1)))
11943             return -TARGET_EFAULT;
11944         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11945         unlock_user(p, arg1, 0);
11946         return ret;
11947 #endif
11948 #ifdef TARGET_NR_getuid
11949     case TARGET_NR_getuid:
11950         return get_errno(high2lowuid(getuid()));
11951 #endif
11952 #ifdef TARGET_NR_getgid
11953     case TARGET_NR_getgid:
11954         return get_errno(high2lowgid(getgid()));
11955 #endif
11956 #ifdef TARGET_NR_geteuid
11957     case TARGET_NR_geteuid:
11958         return get_errno(high2lowuid(geteuid()));
11959 #endif
11960 #ifdef TARGET_NR_getegid
11961     case TARGET_NR_getegid:
11962         return get_errno(high2lowgid(getegid()));
11963 #endif
11964     case TARGET_NR_setreuid:
11965         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11966     case TARGET_NR_setregid:
11967         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11968     case TARGET_NR_getgroups:
11969         { /* the same code as for TARGET_NR_getgroups32 */
11970             int gidsetsize = arg1;
11971             target_id *target_grouplist;
11972             g_autofree gid_t *grouplist = NULL;
11973             int i;
11974 
11975             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11976                 return -TARGET_EINVAL;
11977             }
11978             if (gidsetsize > 0) {
11979                 grouplist = g_try_new(gid_t, gidsetsize);
11980                 if (!grouplist) {
11981                     return -TARGET_ENOMEM;
11982                 }
11983             }
11984             ret = get_errno(getgroups(gidsetsize, grouplist));
11985             if (!is_error(ret) && gidsetsize > 0) {
11986                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11987                                              gidsetsize * sizeof(target_id), 0);
11988                 if (!target_grouplist) {
11989                     return -TARGET_EFAULT;
11990                 }
11991                 for (i = 0; i < ret; i++) {
11992                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11993                 }
11994                 unlock_user(target_grouplist, arg2,
11995                             gidsetsize * sizeof(target_id));
11996             }
11997             return ret;
11998         }
11999     case TARGET_NR_setgroups:
12000         { /* the same code as for TARGET_NR_setgroups32 */
12001             int gidsetsize = arg1;
12002             target_id *target_grouplist;
12003             g_autofree gid_t *grouplist = NULL;
12004             int i;
12005 
12006             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12007                 return -TARGET_EINVAL;
12008             }
12009             if (gidsetsize > 0) {
12010                 grouplist = g_try_new(gid_t, gidsetsize);
12011                 if (!grouplist) {
12012                     return -TARGET_ENOMEM;
12013                 }
12014                 target_grouplist = lock_user(VERIFY_READ, arg2,
12015                                              gidsetsize * sizeof(target_id), 1);
12016                 if (!target_grouplist) {
12017                     return -TARGET_EFAULT;
12018                 }
12019                 for (i = 0; i < gidsetsize; i++) {
12020                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12021                 }
12022                 unlock_user(target_grouplist, arg2,
12023                             gidsetsize * sizeof(target_id));
12024             }
12025             return get_errno(sys_setgroups(gidsetsize, grouplist));
12026         }
12027     case TARGET_NR_fchown:
12028         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12029 #if defined(TARGET_NR_fchownat)
12030     case TARGET_NR_fchownat:
12031         if (!(p = lock_user_string(arg2)))
12032             return -TARGET_EFAULT;
12033         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12034                                  low2highgid(arg4), arg5));
12035         unlock_user(p, arg2, 0);
12036         return ret;
12037 #endif
12038 #ifdef TARGET_NR_setresuid
12039     case TARGET_NR_setresuid:
12040         return get_errno(sys_setresuid(low2highuid(arg1),
12041                                        low2highuid(arg2),
12042                                        low2highuid(arg3)));
12043 #endif
12044 #ifdef TARGET_NR_getresuid
12045     case TARGET_NR_getresuid:
12046         {
12047             uid_t ruid, euid, suid;
12048             ret = get_errno(getresuid(&ruid, &euid, &suid));
12049             if (!is_error(ret)) {
12050                 if (put_user_id(high2lowuid(ruid), arg1)
12051                     || put_user_id(high2lowuid(euid), arg2)
12052                     || put_user_id(high2lowuid(suid), arg3))
12053                     return -TARGET_EFAULT;
12054             }
12055         }
12056         return ret;
12057 #endif
12058 #ifdef TARGET_NR_getresgid
12059     case TARGET_NR_setresgid:
12060         return get_errno(sys_setresgid(low2highgid(arg1),
12061                                        low2highgid(arg2),
12062                                        low2highgid(arg3)));
12063 #endif
12064 #ifdef TARGET_NR_getresgid
12065     case TARGET_NR_getresgid:
12066         {
12067             gid_t rgid, egid, sgid;
12068             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12069             if (!is_error(ret)) {
12070                 if (put_user_id(high2lowgid(rgid), arg1)
12071                     || put_user_id(high2lowgid(egid), arg2)
12072                     || put_user_id(high2lowgid(sgid), arg3))
12073                     return -TARGET_EFAULT;
12074             }
12075         }
12076         return ret;
12077 #endif
12078 #ifdef TARGET_NR_chown
12079     case TARGET_NR_chown:
12080         if (!(p = lock_user_string(arg1)))
12081             return -TARGET_EFAULT;
12082         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12083         unlock_user(p, arg1, 0);
12084         return ret;
12085 #endif
12086     case TARGET_NR_setuid:
12087         return get_errno(sys_setuid(low2highuid(arg1)));
12088     case TARGET_NR_setgid:
12089         return get_errno(sys_setgid(low2highgid(arg1)));
12090     case TARGET_NR_setfsuid:
12091         return get_errno(setfsuid(arg1));
12092     case TARGET_NR_setfsgid:
12093         return get_errno(setfsgid(arg1));
12094 
12095 #ifdef TARGET_NR_lchown32
12096     case TARGET_NR_lchown32:
12097         if (!(p = lock_user_string(arg1)))
12098             return -TARGET_EFAULT;
12099         ret = get_errno(lchown(p, arg2, arg3));
12100         unlock_user(p, arg1, 0);
12101         return ret;
12102 #endif
12103 #ifdef TARGET_NR_getuid32
12104     case TARGET_NR_getuid32:
12105         return get_errno(getuid());
12106 #endif
12107 
12108 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12109    /* Alpha specific */
12110     case TARGET_NR_getxuid:
12111          {
12112             uid_t euid;
12113             euid=geteuid();
12114             cpu_env->ir[IR_A4]=euid;
12115          }
12116         return get_errno(getuid());
12117 #endif
12118 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12119    /* Alpha specific */
12120     case TARGET_NR_getxgid:
12121          {
12122             uid_t egid;
12123             egid=getegid();
12124             cpu_env->ir[IR_A4]=egid;
12125          }
12126         return get_errno(getgid());
12127 #endif
12128 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12129     /* Alpha specific */
12130     case TARGET_NR_osf_getsysinfo:
12131         ret = -TARGET_EOPNOTSUPP;
12132         switch (arg1) {
12133           case TARGET_GSI_IEEE_FP_CONTROL:
12134             {
12135                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12136                 uint64_t swcr = cpu_env->swcr;
12137 
12138                 swcr &= ~SWCR_STATUS_MASK;
12139                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12140 
12141                 if (put_user_u64 (swcr, arg2))
12142                         return -TARGET_EFAULT;
12143                 ret = 0;
12144             }
12145             break;
12146 
12147           /* case GSI_IEEE_STATE_AT_SIGNAL:
12148              -- Not implemented in linux kernel.
12149              case GSI_UACPROC:
12150              -- Retrieves current unaligned access state; not much used.
12151              case GSI_PROC_TYPE:
12152              -- Retrieves implver information; surely not used.
12153              case GSI_GET_HWRPB:
12154              -- Grabs a copy of the HWRPB; surely not used.
12155           */
12156         }
12157         return ret;
12158 #endif
12159 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12160     /* Alpha specific */
12161     case TARGET_NR_osf_setsysinfo:
12162         ret = -TARGET_EOPNOTSUPP;
12163         switch (arg1) {
12164           case TARGET_SSI_IEEE_FP_CONTROL:
12165             {
12166                 uint64_t swcr, fpcr;
12167 
12168                 if (get_user_u64 (swcr, arg2)) {
12169                     return -TARGET_EFAULT;
12170                 }
12171 
12172                 /*
12173                  * The kernel calls swcr_update_status to update the
12174                  * status bits from the fpcr at every point that it
12175                  * could be queried.  Therefore, we store the status
12176                  * bits only in FPCR.
12177                  */
12178                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12179 
12180                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12181                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12182                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12183                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12184                 ret = 0;
12185             }
12186             break;
12187 
12188           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12189             {
12190                 uint64_t exc, fpcr, fex;
12191 
12192                 if (get_user_u64(exc, arg2)) {
12193                     return -TARGET_EFAULT;
12194                 }
12195                 exc &= SWCR_STATUS_MASK;
12196                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12197 
12198                 /* Old exceptions are not signaled.  */
12199                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12200                 fex = exc & ~fex;
12201                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12202                 fex &= (cpu_env)->swcr;
12203 
12204                 /* Update the hardware fpcr.  */
12205                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12206                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12207 
12208                 if (fex) {
12209                     int si_code = TARGET_FPE_FLTUNK;
12210                     target_siginfo_t info;
12211 
12212                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12213                         si_code = TARGET_FPE_FLTUND;
12214                     }
12215                     if (fex & SWCR_TRAP_ENABLE_INE) {
12216                         si_code = TARGET_FPE_FLTRES;
12217                     }
12218                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12219                         si_code = TARGET_FPE_FLTUND;
12220                     }
12221                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12222                         si_code = TARGET_FPE_FLTOVF;
12223                     }
12224                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12225                         si_code = TARGET_FPE_FLTDIV;
12226                     }
12227                     if (fex & SWCR_TRAP_ENABLE_INV) {
12228                         si_code = TARGET_FPE_FLTINV;
12229                     }
12230 
12231                     info.si_signo = SIGFPE;
12232                     info.si_errno = 0;
12233                     info.si_code = si_code;
12234                     info._sifields._sigfault._addr = (cpu_env)->pc;
12235                     queue_signal(cpu_env, info.si_signo,
12236                                  QEMU_SI_FAULT, &info);
12237                 }
12238                 ret = 0;
12239             }
12240             break;
12241 
12242           /* case SSI_NVPAIRS:
12243              -- Used with SSIN_UACPROC to enable unaligned accesses.
12244              case SSI_IEEE_STATE_AT_SIGNAL:
12245              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12246              -- Not implemented in linux kernel
12247           */
12248         }
12249         return ret;
12250 #endif
12251 #ifdef TARGET_NR_osf_sigprocmask
12252     /* Alpha specific.  */
12253     case TARGET_NR_osf_sigprocmask:
12254         {
12255             abi_ulong mask;
12256             int how;
12257             sigset_t set, oldset;
12258 
12259             switch(arg1) {
12260             case TARGET_SIG_BLOCK:
12261                 how = SIG_BLOCK;
12262                 break;
12263             case TARGET_SIG_UNBLOCK:
12264                 how = SIG_UNBLOCK;
12265                 break;
12266             case TARGET_SIG_SETMASK:
12267                 how = SIG_SETMASK;
12268                 break;
12269             default:
12270                 return -TARGET_EINVAL;
12271             }
12272             mask = arg2;
12273             target_to_host_old_sigset(&set, &mask);
12274             ret = do_sigprocmask(how, &set, &oldset);
12275             if (!ret) {
12276                 host_to_target_old_sigset(&mask, &oldset);
12277                 ret = mask;
12278             }
12279         }
12280         return ret;
12281 #endif
12282 
12283 #ifdef TARGET_NR_getgid32
12284     case TARGET_NR_getgid32:
12285         return get_errno(getgid());
12286 #endif
12287 #ifdef TARGET_NR_geteuid32
12288     case TARGET_NR_geteuid32:
12289         return get_errno(geteuid());
12290 #endif
12291 #ifdef TARGET_NR_getegid32
12292     case TARGET_NR_getegid32:
12293         return get_errno(getegid());
12294 #endif
12295 #ifdef TARGET_NR_setreuid32
12296     case TARGET_NR_setreuid32:
12297         return get_errno(sys_setreuid(arg1, arg2));
12298 #endif
12299 #ifdef TARGET_NR_setregid32
12300     case TARGET_NR_setregid32:
12301         return get_errno(sys_setregid(arg1, arg2));
12302 #endif
12303 #ifdef TARGET_NR_getgroups32
12304     case TARGET_NR_getgroups32:
12305         { /* the same code as for TARGET_NR_getgroups */
12306             int gidsetsize = arg1;
12307             uint32_t *target_grouplist;
12308             g_autofree gid_t *grouplist = NULL;
12309             int i;
12310 
12311             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12312                 return -TARGET_EINVAL;
12313             }
12314             if (gidsetsize > 0) {
12315                 grouplist = g_try_new(gid_t, gidsetsize);
12316                 if (!grouplist) {
12317                     return -TARGET_ENOMEM;
12318                 }
12319             }
12320             ret = get_errno(getgroups(gidsetsize, grouplist));
12321             if (!is_error(ret) && gidsetsize > 0) {
12322                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12323                                              gidsetsize * 4, 0);
12324                 if (!target_grouplist) {
12325                     return -TARGET_EFAULT;
12326                 }
12327                 for (i = 0; i < ret; i++) {
12328                     target_grouplist[i] = tswap32(grouplist[i]);
12329                 }
12330                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12331             }
12332             return ret;
12333         }
12334 #endif
12335 #ifdef TARGET_NR_setgroups32
12336     case TARGET_NR_setgroups32:
12337         { /* the same code as for TARGET_NR_setgroups */
12338             int gidsetsize = arg1;
12339             uint32_t *target_grouplist;
12340             g_autofree gid_t *grouplist = NULL;
12341             int i;
12342 
12343             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12344                 return -TARGET_EINVAL;
12345             }
12346             if (gidsetsize > 0) {
12347                 grouplist = g_try_new(gid_t, gidsetsize);
12348                 if (!grouplist) {
12349                     return -TARGET_ENOMEM;
12350                 }
12351                 target_grouplist = lock_user(VERIFY_READ, arg2,
12352                                              gidsetsize * 4, 1);
12353                 if (!target_grouplist) {
12354                     return -TARGET_EFAULT;
12355                 }
12356                 for (i = 0; i < gidsetsize; i++) {
12357                     grouplist[i] = tswap32(target_grouplist[i]);
12358                 }
12359                 unlock_user(target_grouplist, arg2, 0);
12360             }
12361             return get_errno(sys_setgroups(gidsetsize, grouplist));
12362         }
12363 #endif
12364 #ifdef TARGET_NR_fchown32
12365     case TARGET_NR_fchown32:
12366         return get_errno(fchown(arg1, arg2, arg3));
12367 #endif
12368 #ifdef TARGET_NR_setresuid32
12369     case TARGET_NR_setresuid32:
12370         return get_errno(sys_setresuid(arg1, arg2, arg3));
12371 #endif
12372 #ifdef TARGET_NR_getresuid32
12373     case TARGET_NR_getresuid32:
12374         {
12375             uid_t ruid, euid, suid;
12376             ret = get_errno(getresuid(&ruid, &euid, &suid));
12377             if (!is_error(ret)) {
12378                 if (put_user_u32(ruid, arg1)
12379                     || put_user_u32(euid, arg2)
12380                     || put_user_u32(suid, arg3))
12381                     return -TARGET_EFAULT;
12382             }
12383         }
12384         return ret;
12385 #endif
12386 #ifdef TARGET_NR_setresgid32
12387     case TARGET_NR_setresgid32:
12388         return get_errno(sys_setresgid(arg1, arg2, arg3));
12389 #endif
12390 #ifdef TARGET_NR_getresgid32
12391     case TARGET_NR_getresgid32:
12392         {
12393             gid_t rgid, egid, sgid;
12394             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12395             if (!is_error(ret)) {
12396                 if (put_user_u32(rgid, arg1)
12397                     || put_user_u32(egid, arg2)
12398                     || put_user_u32(sgid, arg3))
12399                     return -TARGET_EFAULT;
12400             }
12401         }
12402         return ret;
12403 #endif
12404 #ifdef TARGET_NR_chown32
12405     case TARGET_NR_chown32:
12406         if (!(p = lock_user_string(arg1)))
12407             return -TARGET_EFAULT;
12408         ret = get_errno(chown(p, arg2, arg3));
12409         unlock_user(p, arg1, 0);
12410         return ret;
12411 #endif
12412 #ifdef TARGET_NR_setuid32
12413     case TARGET_NR_setuid32:
12414         return get_errno(sys_setuid(arg1));
12415 #endif
12416 #ifdef TARGET_NR_setgid32
12417     case TARGET_NR_setgid32:
12418         return get_errno(sys_setgid(arg1));
12419 #endif
12420 #ifdef TARGET_NR_setfsuid32
12421     case TARGET_NR_setfsuid32:
12422         return get_errno(setfsuid(arg1));
12423 #endif
12424 #ifdef TARGET_NR_setfsgid32
12425     case TARGET_NR_setfsgid32:
12426         return get_errno(setfsgid(arg1));
12427 #endif
12428 #ifdef TARGET_NR_mincore
12429     case TARGET_NR_mincore:
12430         {
12431             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12432             if (!a) {
12433                 return -TARGET_ENOMEM;
12434             }
12435             p = lock_user_string(arg3);
12436             if (!p) {
12437                 ret = -TARGET_EFAULT;
12438             } else {
12439                 ret = get_errno(mincore(a, arg2, p));
12440                 unlock_user(p, arg3, ret);
12441             }
12442             unlock_user(a, arg1, 0);
12443         }
12444         return ret;
12445 #endif
12446 #ifdef TARGET_NR_arm_fadvise64_64
12447     case TARGET_NR_arm_fadvise64_64:
12448         /* arm_fadvise64_64 looks like fadvise64_64 but
12449          * with different argument order: fd, advice, offset, len
12450          * rather than the usual fd, offset, len, advice.
12451          * Note that offset and len are both 64-bit so appear as
12452          * pairs of 32-bit registers.
12453          */
12454         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12455                             target_offset64(arg5, arg6), arg2);
12456         return -host_to_target_errno(ret);
12457 #endif
12458 
12459 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12460 
12461 #ifdef TARGET_NR_fadvise64_64
12462     case TARGET_NR_fadvise64_64:
12463 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12464         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12465         ret = arg2;
12466         arg2 = arg3;
12467         arg3 = arg4;
12468         arg4 = arg5;
12469         arg5 = arg6;
12470         arg6 = ret;
12471 #else
12472         /* 6 args: fd, offset (high, low), len (high, low), advice */
12473         if (regpairs_aligned(cpu_env, num)) {
12474             /* offset is in (3,4), len in (5,6) and advice in 7 */
12475             arg2 = arg3;
12476             arg3 = arg4;
12477             arg4 = arg5;
12478             arg5 = arg6;
12479             arg6 = arg7;
12480         }
12481 #endif
12482         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12483                             target_offset64(arg4, arg5), arg6);
12484         return -host_to_target_errno(ret);
12485 #endif
12486 
12487 #ifdef TARGET_NR_fadvise64
12488     case TARGET_NR_fadvise64:
12489         /* 5 args: fd, offset (high, low), len, advice */
12490         if (regpairs_aligned(cpu_env, num)) {
12491             /* offset is in (3,4), len in 5 and advice in 6 */
12492             arg2 = arg3;
12493             arg3 = arg4;
12494             arg4 = arg5;
12495             arg5 = arg6;
12496         }
12497         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12498         return -host_to_target_errno(ret);
12499 #endif
12500 
12501 #else /* not a 32-bit ABI */
12502 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12503 #ifdef TARGET_NR_fadvise64_64
12504     case TARGET_NR_fadvise64_64:
12505 #endif
12506 #ifdef TARGET_NR_fadvise64
12507     case TARGET_NR_fadvise64:
12508 #endif
12509 #ifdef TARGET_S390X
12510         switch (arg4) {
12511         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12512         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12513         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12514         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12515         default: break;
12516         }
12517 #endif
12518         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12519 #endif
12520 #endif /* end of 64-bit ABI fadvise handling */
12521 
12522 #ifdef TARGET_NR_madvise
12523     case TARGET_NR_madvise:
12524         return target_madvise(arg1, arg2, arg3);
12525 #endif
12526 #ifdef TARGET_NR_fcntl64
12527     case TARGET_NR_fcntl64:
12528     {
12529         int cmd;
12530         struct flock fl;
12531         from_flock64_fn *copyfrom = copy_from_user_flock64;
12532         to_flock64_fn *copyto = copy_to_user_flock64;
12533 
12534 #ifdef TARGET_ARM
12535         if (!cpu_env->eabi) {
12536             copyfrom = copy_from_user_oabi_flock64;
12537             copyto = copy_to_user_oabi_flock64;
12538         }
12539 #endif
12540 
12541         cmd = target_to_host_fcntl_cmd(arg2);
12542         if (cmd == -TARGET_EINVAL) {
12543             return cmd;
12544         }
12545 
12546         switch(arg2) {
12547         case TARGET_F_GETLK64:
12548             ret = copyfrom(&fl, arg3);
12549             if (ret) {
12550                 break;
12551             }
12552             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12553             if (ret == 0) {
12554                 ret = copyto(arg3, &fl);
12555             }
12556 	    break;
12557 
12558         case TARGET_F_SETLK64:
12559         case TARGET_F_SETLKW64:
12560             ret = copyfrom(&fl, arg3);
12561             if (ret) {
12562                 break;
12563             }
12564             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12565 	    break;
12566         default:
12567             ret = do_fcntl(arg1, arg2, arg3);
12568             break;
12569         }
12570         return ret;
12571     }
12572 #endif
12573 #ifdef TARGET_NR_cacheflush
12574     case TARGET_NR_cacheflush:
12575         /* self-modifying code is handled automatically, so nothing needed */
12576         return 0;
12577 #endif
12578 #ifdef TARGET_NR_getpagesize
12579     case TARGET_NR_getpagesize:
12580         return TARGET_PAGE_SIZE;
12581 #endif
12582     case TARGET_NR_gettid:
12583         return get_errno(sys_gettid());
12584 #ifdef TARGET_NR_readahead
12585     case TARGET_NR_readahead:
12586 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12587         if (regpairs_aligned(cpu_env, num)) {
12588             arg2 = arg3;
12589             arg3 = arg4;
12590             arg4 = arg5;
12591         }
12592         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12593 #else
12594         ret = get_errno(readahead(arg1, arg2, arg3));
12595 #endif
12596         return ret;
12597 #endif
12598 #ifdef CONFIG_ATTR
12599 #ifdef TARGET_NR_setxattr
12600     case TARGET_NR_listxattr:
12601     case TARGET_NR_llistxattr:
12602     {
12603         void *b = 0;
12604         if (arg2) {
12605             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12606             if (!b) {
12607                 return -TARGET_EFAULT;
12608             }
12609         }
12610         p = lock_user_string(arg1);
12611         if (p) {
12612             if (num == TARGET_NR_listxattr) {
12613                 ret = get_errno(listxattr(p, b, arg3));
12614             } else {
12615                 ret = get_errno(llistxattr(p, b, arg3));
12616             }
12617         } else {
12618             ret = -TARGET_EFAULT;
12619         }
12620         unlock_user(p, arg1, 0);
12621         unlock_user(b, arg2, arg3);
12622         return ret;
12623     }
12624     case TARGET_NR_flistxattr:
12625     {
12626         void *b = 0;
12627         if (arg2) {
12628             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12629             if (!b) {
12630                 return -TARGET_EFAULT;
12631             }
12632         }
12633         ret = get_errno(flistxattr(arg1, b, arg3));
12634         unlock_user(b, arg2, arg3);
12635         return ret;
12636     }
12637     case TARGET_NR_setxattr:
12638     case TARGET_NR_lsetxattr:
12639         {
12640             void *n, *v = 0;
12641             if (arg3) {
12642                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12643                 if (!v) {
12644                     return -TARGET_EFAULT;
12645                 }
12646             }
12647             p = lock_user_string(arg1);
12648             n = lock_user_string(arg2);
12649             if (p && n) {
12650                 if (num == TARGET_NR_setxattr) {
12651                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12652                 } else {
12653                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12654                 }
12655             } else {
12656                 ret = -TARGET_EFAULT;
12657             }
12658             unlock_user(p, arg1, 0);
12659             unlock_user(n, arg2, 0);
12660             unlock_user(v, arg3, 0);
12661         }
12662         return ret;
12663     case TARGET_NR_fsetxattr:
12664         {
12665             void *n, *v = 0;
12666             if (arg3) {
12667                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12668                 if (!v) {
12669                     return -TARGET_EFAULT;
12670                 }
12671             }
12672             n = lock_user_string(arg2);
12673             if (n) {
12674                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12675             } else {
12676                 ret = -TARGET_EFAULT;
12677             }
12678             unlock_user(n, arg2, 0);
12679             unlock_user(v, arg3, 0);
12680         }
12681         return ret;
12682     case TARGET_NR_getxattr:
12683     case TARGET_NR_lgetxattr:
12684         {
12685             void *n, *v = 0;
12686             if (arg3) {
12687                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12688                 if (!v) {
12689                     return -TARGET_EFAULT;
12690                 }
12691             }
12692             p = lock_user_string(arg1);
12693             n = lock_user_string(arg2);
12694             if (p && n) {
12695                 if (num == TARGET_NR_getxattr) {
12696                     ret = get_errno(getxattr(p, n, v, arg4));
12697                 } else {
12698                     ret = get_errno(lgetxattr(p, n, v, arg4));
12699                 }
12700             } else {
12701                 ret = -TARGET_EFAULT;
12702             }
12703             unlock_user(p, arg1, 0);
12704             unlock_user(n, arg2, 0);
12705             unlock_user(v, arg3, arg4);
12706         }
12707         return ret;
12708     case TARGET_NR_fgetxattr:
12709         {
12710             void *n, *v = 0;
12711             if (arg3) {
12712                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12713                 if (!v) {
12714                     return -TARGET_EFAULT;
12715                 }
12716             }
12717             n = lock_user_string(arg2);
12718             if (n) {
12719                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12720             } else {
12721                 ret = -TARGET_EFAULT;
12722             }
12723             unlock_user(n, arg2, 0);
12724             unlock_user(v, arg3, arg4);
12725         }
12726         return ret;
12727     case TARGET_NR_removexattr:
12728     case TARGET_NR_lremovexattr:
12729         {
12730             void *n;
12731             p = lock_user_string(arg1);
12732             n = lock_user_string(arg2);
12733             if (p && n) {
12734                 if (num == TARGET_NR_removexattr) {
12735                     ret = get_errno(removexattr(p, n));
12736                 } else {
12737                     ret = get_errno(lremovexattr(p, n));
12738                 }
12739             } else {
12740                 ret = -TARGET_EFAULT;
12741             }
12742             unlock_user(p, arg1, 0);
12743             unlock_user(n, arg2, 0);
12744         }
12745         return ret;
12746     case TARGET_NR_fremovexattr:
12747         {
12748             void *n;
12749             n = lock_user_string(arg2);
12750             if (n) {
12751                 ret = get_errno(fremovexattr(arg1, n));
12752             } else {
12753                 ret = -TARGET_EFAULT;
12754             }
12755             unlock_user(n, arg2, 0);
12756         }
12757         return ret;
12758 #endif
12759 #endif /* CONFIG_ATTR */
12760 #ifdef TARGET_NR_set_thread_area
12761     case TARGET_NR_set_thread_area:
12762 #if defined(TARGET_MIPS)
12763       cpu_env->active_tc.CP0_UserLocal = arg1;
12764       return 0;
12765 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12766       return do_set_thread_area(cpu_env, arg1);
12767 #elif defined(TARGET_M68K)
12768       {
12769           TaskState *ts = get_task_state(cpu);
12770           ts->tp_value = arg1;
12771           return 0;
12772       }
12773 #else
12774       return -TARGET_ENOSYS;
12775 #endif
12776 #endif
12777 #ifdef TARGET_NR_get_thread_area
12778     case TARGET_NR_get_thread_area:
12779 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12780         return do_get_thread_area(cpu_env, arg1);
12781 #elif defined(TARGET_M68K)
12782         {
12783             TaskState *ts = get_task_state(cpu);
12784             return ts->tp_value;
12785         }
12786 #else
12787         return -TARGET_ENOSYS;
12788 #endif
12789 #endif
12790 #ifdef TARGET_NR_getdomainname
12791     case TARGET_NR_getdomainname:
12792         return -TARGET_ENOSYS;
12793 #endif
12794 
12795 #ifdef TARGET_NR_clock_settime
12796     case TARGET_NR_clock_settime:
12797     {
12798         struct timespec ts;
12799 
12800         ret = target_to_host_timespec(&ts, arg2);
12801         if (!is_error(ret)) {
12802             ret = get_errno(clock_settime(arg1, &ts));
12803         }
12804         return ret;
12805     }
12806 #endif
12807 #ifdef TARGET_NR_clock_settime64
12808     case TARGET_NR_clock_settime64:
12809     {
12810         struct timespec ts;
12811 
12812         ret = target_to_host_timespec64(&ts, arg2);
12813         if (!is_error(ret)) {
12814             ret = get_errno(clock_settime(arg1, &ts));
12815         }
12816         return ret;
12817     }
12818 #endif
12819 #ifdef TARGET_NR_clock_gettime
12820     case TARGET_NR_clock_gettime:
12821     {
12822         struct timespec ts;
12823         ret = get_errno(clock_gettime(arg1, &ts));
12824         if (!is_error(ret)) {
12825             ret = host_to_target_timespec(arg2, &ts);
12826         }
12827         return ret;
12828     }
12829 #endif
12830 #ifdef TARGET_NR_clock_gettime64
12831     case TARGET_NR_clock_gettime64:
12832     {
12833         struct timespec ts;
12834         ret = get_errno(clock_gettime(arg1, &ts));
12835         if (!is_error(ret)) {
12836             ret = host_to_target_timespec64(arg2, &ts);
12837         }
12838         return ret;
12839     }
12840 #endif
12841 #ifdef TARGET_NR_clock_getres
12842     case TARGET_NR_clock_getres:
12843     {
12844         struct timespec ts;
12845         ret = get_errno(clock_getres(arg1, &ts));
12846         if (!is_error(ret)) {
12847             host_to_target_timespec(arg2, &ts);
12848         }
12849         return ret;
12850     }
12851 #endif
12852 #ifdef TARGET_NR_clock_getres_time64
12853     case TARGET_NR_clock_getres_time64:
12854     {
12855         struct timespec ts;
12856         ret = get_errno(clock_getres(arg1, &ts));
12857         if (!is_error(ret)) {
12858             host_to_target_timespec64(arg2, &ts);
12859         }
12860         return ret;
12861     }
12862 #endif
12863 #ifdef TARGET_NR_clock_nanosleep
12864     case TARGET_NR_clock_nanosleep:
12865     {
12866         struct timespec ts;
12867         if (target_to_host_timespec(&ts, arg3)) {
12868             return -TARGET_EFAULT;
12869         }
12870         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12871                                              &ts, arg4 ? &ts : NULL));
12872         /*
12873          * if the call is interrupted by a signal handler, it fails
12874          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12875          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12876          */
12877         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12878             host_to_target_timespec(arg4, &ts)) {
12879               return -TARGET_EFAULT;
12880         }
12881 
12882         return ret;
12883     }
12884 #endif
12885 #ifdef TARGET_NR_clock_nanosleep_time64
12886     case TARGET_NR_clock_nanosleep_time64:
12887     {
12888         struct timespec ts;
12889 
12890         if (target_to_host_timespec64(&ts, arg3)) {
12891             return -TARGET_EFAULT;
12892         }
12893 
12894         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12895                                              &ts, arg4 ? &ts : NULL));
12896 
12897         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12898             host_to_target_timespec64(arg4, &ts)) {
12899             return -TARGET_EFAULT;
12900         }
12901         return ret;
12902     }
12903 #endif
12904 
12905 #if defined(TARGET_NR_set_tid_address)
12906     case TARGET_NR_set_tid_address:
12907     {
12908         TaskState *ts = get_task_state(cpu);
12909         ts->child_tidptr = arg1;
12910         /* do not call host set_tid_address() syscall, instead return tid() */
12911         return get_errno(sys_gettid());
12912     }
12913 #endif
12914 
12915     case TARGET_NR_tkill:
12916         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12917 
12918     case TARGET_NR_tgkill:
12919         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12920                          target_to_host_signal(arg3)));
12921 
12922 #ifdef TARGET_NR_set_robust_list
12923     case TARGET_NR_set_robust_list:
12924     case TARGET_NR_get_robust_list:
12925         /* The ABI for supporting robust futexes has userspace pass
12926          * the kernel a pointer to a linked list which is updated by
12927          * userspace after the syscall; the list is walked by the kernel
12928          * when the thread exits. Since the linked list in QEMU guest
12929          * memory isn't a valid linked list for the host and we have
12930          * no way to reliably intercept the thread-death event, we can't
12931          * support these. Silently return ENOSYS so that guest userspace
12932          * falls back to a non-robust futex implementation (which should
12933          * be OK except in the corner case of the guest crashing while
12934          * holding a mutex that is shared with another process via
12935          * shared memory).
12936          */
12937         return -TARGET_ENOSYS;
12938 #endif
12939 
12940 #if defined(TARGET_NR_utimensat)
12941     case TARGET_NR_utimensat:
12942         {
12943             struct timespec *tsp, ts[2];
12944             if (!arg3) {
12945                 tsp = NULL;
12946             } else {
12947                 if (target_to_host_timespec(ts, arg3)) {
12948                     return -TARGET_EFAULT;
12949                 }
12950                 if (target_to_host_timespec(ts + 1, arg3 +
12951                                             sizeof(struct target_timespec))) {
12952                     return -TARGET_EFAULT;
12953                 }
12954                 tsp = ts;
12955             }
12956             if (!arg2)
12957                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12958             else {
12959                 if (!(p = lock_user_string(arg2))) {
12960                     return -TARGET_EFAULT;
12961                 }
12962                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12963                 unlock_user(p, arg2, 0);
12964             }
12965         }
12966         return ret;
12967 #endif
12968 #ifdef TARGET_NR_utimensat_time64
12969     case TARGET_NR_utimensat_time64:
12970         {
12971             struct timespec *tsp, ts[2];
12972             if (!arg3) {
12973                 tsp = NULL;
12974             } else {
12975                 if (target_to_host_timespec64(ts, arg3)) {
12976                     return -TARGET_EFAULT;
12977                 }
12978                 if (target_to_host_timespec64(ts + 1, arg3 +
12979                                      sizeof(struct target__kernel_timespec))) {
12980                     return -TARGET_EFAULT;
12981                 }
12982                 tsp = ts;
12983             }
12984             if (!arg2)
12985                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12986             else {
12987                 p = lock_user_string(arg2);
12988                 if (!p) {
12989                     return -TARGET_EFAULT;
12990                 }
12991                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12992                 unlock_user(p, arg2, 0);
12993             }
12994         }
12995         return ret;
12996 #endif
12997 #ifdef TARGET_NR_futex
12998     case TARGET_NR_futex:
12999         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13000 #endif
13001 #ifdef TARGET_NR_futex_time64
13002     case TARGET_NR_futex_time64:
13003         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13004 #endif
13005 #ifdef CONFIG_INOTIFY
13006 #if defined(TARGET_NR_inotify_init)
13007     case TARGET_NR_inotify_init:
13008         ret = get_errno(inotify_init());
13009         if (ret >= 0) {
13010             fd_trans_register(ret, &target_inotify_trans);
13011         }
13012         return ret;
13013 #endif
13014 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13015     case TARGET_NR_inotify_init1:
13016         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13017                                           fcntl_flags_tbl)));
13018         if (ret >= 0) {
13019             fd_trans_register(ret, &target_inotify_trans);
13020         }
13021         return ret;
13022 #endif
13023 #if defined(TARGET_NR_inotify_add_watch)
13024     case TARGET_NR_inotify_add_watch:
13025         p = lock_user_string(arg2);
13026         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13027         unlock_user(p, arg2, 0);
13028         return ret;
13029 #endif
13030 #if defined(TARGET_NR_inotify_rm_watch)
13031     case TARGET_NR_inotify_rm_watch:
13032         return get_errno(inotify_rm_watch(arg1, arg2));
13033 #endif
13034 #endif
13035 
13036 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13037     case TARGET_NR_mq_open:
13038         {
13039             struct mq_attr posix_mq_attr;
13040             struct mq_attr *pposix_mq_attr;
13041             int host_flags;
13042 
13043             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13044             pposix_mq_attr = NULL;
13045             if (arg4) {
13046                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13047                     return -TARGET_EFAULT;
13048                 }
13049                 pposix_mq_attr = &posix_mq_attr;
13050             }
13051             p = lock_user_string(arg1 - 1);
13052             if (!p) {
13053                 return -TARGET_EFAULT;
13054             }
13055             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13056             unlock_user (p, arg1, 0);
13057         }
13058         return ret;
13059 
13060     case TARGET_NR_mq_unlink:
13061         p = lock_user_string(arg1 - 1);
13062         if (!p) {
13063             return -TARGET_EFAULT;
13064         }
13065         ret = get_errno(mq_unlink(p));
13066         unlock_user (p, arg1, 0);
13067         return ret;
13068 
13069 #ifdef TARGET_NR_mq_timedsend
13070     case TARGET_NR_mq_timedsend:
13071         {
13072             struct timespec ts;
13073 
13074             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13075             if (arg5 != 0) {
13076                 if (target_to_host_timespec(&ts, arg5)) {
13077                     return -TARGET_EFAULT;
13078                 }
13079                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13080                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13081                     return -TARGET_EFAULT;
13082                 }
13083             } else {
13084                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13085             }
13086             unlock_user (p, arg2, arg3);
13087         }
13088         return ret;
13089 #endif
13090 #ifdef TARGET_NR_mq_timedsend_time64
13091     case TARGET_NR_mq_timedsend_time64:
13092         {
13093             struct timespec ts;
13094 
13095             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13096             if (arg5 != 0) {
13097                 if (target_to_host_timespec64(&ts, arg5)) {
13098                     return -TARGET_EFAULT;
13099                 }
13100                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13101                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13102                     return -TARGET_EFAULT;
13103                 }
13104             } else {
13105                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13106             }
13107             unlock_user(p, arg2, arg3);
13108         }
13109         return ret;
13110 #endif
13111 
13112 #ifdef TARGET_NR_mq_timedreceive
13113     case TARGET_NR_mq_timedreceive:
13114         {
13115             struct timespec ts;
13116             unsigned int prio;
13117 
13118             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13119             if (arg5 != 0) {
13120                 if (target_to_host_timespec(&ts, arg5)) {
13121                     return -TARGET_EFAULT;
13122                 }
13123                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13124                                                      &prio, &ts));
13125                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13126                     return -TARGET_EFAULT;
13127                 }
13128             } else {
13129                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13130                                                      &prio, NULL));
13131             }
13132             unlock_user (p, arg2, arg3);
13133             if (arg4 != 0)
13134                 put_user_u32(prio, arg4);
13135         }
13136         return ret;
13137 #endif
13138 #ifdef TARGET_NR_mq_timedreceive_time64
13139     case TARGET_NR_mq_timedreceive_time64:
13140         {
13141             struct timespec ts;
13142             unsigned int prio;
13143 
13144             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13145             if (arg5 != 0) {
13146                 if (target_to_host_timespec64(&ts, arg5)) {
13147                     return -TARGET_EFAULT;
13148                 }
13149                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13150                                                      &prio, &ts));
13151                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13152                     return -TARGET_EFAULT;
13153                 }
13154             } else {
13155                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13156                                                      &prio, NULL));
13157             }
13158             unlock_user(p, arg2, arg3);
13159             if (arg4 != 0) {
13160                 put_user_u32(prio, arg4);
13161             }
13162         }
13163         return ret;
13164 #endif
13165 
13166     /* Not implemented for now... */
13167 /*     case TARGET_NR_mq_notify: */
13168 /*         break; */
13169 
13170     case TARGET_NR_mq_getsetattr:
13171         {
13172             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13173             ret = 0;
13174             if (arg2 != 0) {
13175                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13176                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13177                                            &posix_mq_attr_out));
13178             } else if (arg3 != 0) {
13179                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13180             }
13181             if (ret == 0 && arg3 != 0) {
13182                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13183             }
13184         }
13185         return ret;
13186 #endif
13187 
13188 #ifdef CONFIG_SPLICE
13189 #ifdef TARGET_NR_tee
13190     case TARGET_NR_tee:
13191         {
13192             ret = get_errno(tee(arg1,arg2,arg3,arg4));
13193         }
13194         return ret;
13195 #endif
13196 #ifdef TARGET_NR_splice
13197     case TARGET_NR_splice:
13198         {
13199             loff_t loff_in, loff_out;
13200             loff_t *ploff_in = NULL, *ploff_out = NULL;
13201             if (arg2) {
13202                 if (get_user_u64(loff_in, arg2)) {
13203                     return -TARGET_EFAULT;
13204                 }
13205                 ploff_in = &loff_in;
13206             }
13207             if (arg4) {
13208                 if (get_user_u64(loff_out, arg4)) {
13209                     return -TARGET_EFAULT;
13210                 }
13211                 ploff_out = &loff_out;
13212             }
13213             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13214             if (arg2) {
13215                 if (put_user_u64(loff_in, arg2)) {
13216                     return -TARGET_EFAULT;
13217                 }
13218             }
13219             if (arg4) {
13220                 if (put_user_u64(loff_out, arg4)) {
13221                     return -TARGET_EFAULT;
13222                 }
13223             }
13224         }
13225         return ret;
13226 #endif
13227 #ifdef TARGET_NR_vmsplice
13228 	case TARGET_NR_vmsplice:
13229         {
13230             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13231             if (vec != NULL) {
13232                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13233                 unlock_iovec(vec, arg2, arg3, 0);
13234             } else {
13235                 ret = -host_to_target_errno(errno);
13236             }
13237         }
13238         return ret;
13239 #endif
13240 #endif /* CONFIG_SPLICE */
13241 #ifdef CONFIG_EVENTFD
13242 #if defined(TARGET_NR_eventfd)
13243     case TARGET_NR_eventfd:
13244         ret = get_errno(eventfd(arg1, 0));
13245         if (ret >= 0) {
13246             fd_trans_register(ret, &target_eventfd_trans);
13247         }
13248         return ret;
13249 #endif
13250 #if defined(TARGET_NR_eventfd2)
13251     case TARGET_NR_eventfd2:
13252     {
13253         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13254         if (arg2 & TARGET_O_NONBLOCK) {
13255             host_flags |= O_NONBLOCK;
13256         }
13257         if (arg2 & TARGET_O_CLOEXEC) {
13258             host_flags |= O_CLOEXEC;
13259         }
13260         ret = get_errno(eventfd(arg1, host_flags));
13261         if (ret >= 0) {
13262             fd_trans_register(ret, &target_eventfd_trans);
13263         }
13264         return ret;
13265     }
13266 #endif
13267 #endif /* CONFIG_EVENTFD  */
13268 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13269     case TARGET_NR_fallocate:
13270 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13271         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13272                                   target_offset64(arg5, arg6)));
13273 #else
13274         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13275 #endif
13276         return ret;
13277 #endif
13278 #if defined(CONFIG_SYNC_FILE_RANGE)
13279 #if defined(TARGET_NR_sync_file_range)
13280     case TARGET_NR_sync_file_range:
13281 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13282 #if defined(TARGET_MIPS)
13283         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13284                                         target_offset64(arg5, arg6), arg7));
13285 #else
13286         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13287                                         target_offset64(arg4, arg5), arg6));
13288 #endif /* !TARGET_MIPS */
13289 #else
13290         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13291 #endif
13292         return ret;
13293 #endif
13294 #if defined(TARGET_NR_sync_file_range2) || \
13295     defined(TARGET_NR_arm_sync_file_range)
13296 #if defined(TARGET_NR_sync_file_range2)
13297     case TARGET_NR_sync_file_range2:
13298 #endif
13299 #if defined(TARGET_NR_arm_sync_file_range)
13300     case TARGET_NR_arm_sync_file_range:
13301 #endif
13302         /* This is like sync_file_range but the arguments are reordered */
13303 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13304         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13305                                         target_offset64(arg5, arg6), arg2));
13306 #else
13307         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13308 #endif
13309         return ret;
13310 #endif
13311 #endif
13312 #if defined(TARGET_NR_signalfd4)
13313     case TARGET_NR_signalfd4:
13314         return do_signalfd4(arg1, arg2, arg4);
13315 #endif
13316 #if defined(TARGET_NR_signalfd)
13317     case TARGET_NR_signalfd:
13318         return do_signalfd4(arg1, arg2, 0);
13319 #endif
13320 #if defined(CONFIG_EPOLL)
13321 #if defined(TARGET_NR_epoll_create)
13322     case TARGET_NR_epoll_create:
13323         return get_errno(epoll_create(arg1));
13324 #endif
13325 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13326     case TARGET_NR_epoll_create1:
13327         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13328 #endif
13329 #if defined(TARGET_NR_epoll_ctl)
13330     case TARGET_NR_epoll_ctl:
13331     {
13332         struct epoll_event ep;
13333         struct epoll_event *epp = 0;
13334         if (arg4) {
13335             if (arg2 != EPOLL_CTL_DEL) {
13336                 struct target_epoll_event *target_ep;
13337                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13338                     return -TARGET_EFAULT;
13339                 }
13340                 ep.events = tswap32(target_ep->events);
13341                 /*
13342                  * The epoll_data_t union is just opaque data to the kernel,
13343                  * so we transfer all 64 bits across and need not worry what
13344                  * actual data type it is.
13345                  */
13346                 ep.data.u64 = tswap64(target_ep->data.u64);
13347                 unlock_user_struct(target_ep, arg4, 0);
13348             }
13349             /*
13350              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13351              * non-null pointer, even though this argument is ignored.
13352              *
13353              */
13354             epp = &ep;
13355         }
13356         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13357     }
13358 #endif
13359 
13360 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13361 #if defined(TARGET_NR_epoll_wait)
13362     case TARGET_NR_epoll_wait:
13363 #endif
13364 #if defined(TARGET_NR_epoll_pwait)
13365     case TARGET_NR_epoll_pwait:
13366 #endif
13367     {
13368         struct target_epoll_event *target_ep;
13369         struct epoll_event *ep;
13370         int epfd = arg1;
13371         int maxevents = arg3;
13372         int timeout = arg4;
13373 
13374         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13375             return -TARGET_EINVAL;
13376         }
13377 
13378         target_ep = lock_user(VERIFY_WRITE, arg2,
13379                               maxevents * sizeof(struct target_epoll_event), 1);
13380         if (!target_ep) {
13381             return -TARGET_EFAULT;
13382         }
13383 
13384         ep = g_try_new(struct epoll_event, maxevents);
13385         if (!ep) {
13386             unlock_user(target_ep, arg2, 0);
13387             return -TARGET_ENOMEM;
13388         }
13389 
13390         switch (num) {
13391 #if defined(TARGET_NR_epoll_pwait)
13392         case TARGET_NR_epoll_pwait:
13393         {
13394             sigset_t *set = NULL;
13395 
13396             if (arg5) {
13397                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13398                 if (ret != 0) {
13399                     break;
13400                 }
13401             }
13402 
13403             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13404                                              set, SIGSET_T_SIZE));
13405 
13406             if (set) {
13407                 finish_sigsuspend_mask(ret);
13408             }
13409             break;
13410         }
13411 #endif
13412 #if defined(TARGET_NR_epoll_wait)
13413         case TARGET_NR_epoll_wait:
13414             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13415                                              NULL, 0));
13416             break;
13417 #endif
13418         default:
13419             ret = -TARGET_ENOSYS;
13420         }
13421         if (!is_error(ret)) {
13422             int i;
13423             for (i = 0; i < ret; i++) {
13424                 target_ep[i].events = tswap32(ep[i].events);
13425                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13426             }
13427             unlock_user(target_ep, arg2,
13428                         ret * sizeof(struct target_epoll_event));
13429         } else {
13430             unlock_user(target_ep, arg2, 0);
13431         }
13432         g_free(ep);
13433         return ret;
13434     }
13435 #endif
13436 #endif
13437 #ifdef TARGET_NR_prlimit64
13438     case TARGET_NR_prlimit64:
13439     {
13440         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13441         struct target_rlimit64 *target_rnew, *target_rold;
13442         struct host_rlimit64 rnew, rold, *rnewp = 0;
13443         int resource = target_to_host_resource(arg2);
13444 
13445         if (arg3 && (resource != RLIMIT_AS &&
13446                      resource != RLIMIT_DATA &&
13447                      resource != RLIMIT_STACK)) {
13448             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13449                 return -TARGET_EFAULT;
13450             }
13451             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13452             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13453             unlock_user_struct(target_rnew, arg3, 0);
13454             rnewp = &rnew;
13455         }
13456 
13457         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13458         if (!is_error(ret) && arg4) {
13459             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13460                 return -TARGET_EFAULT;
13461             }
13462             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13463             __put_user(rold.rlim_max, &target_rold->rlim_max);
13464             unlock_user_struct(target_rold, arg4, 1);
13465         }
13466         return ret;
13467     }
13468 #endif
13469 #ifdef TARGET_NR_gethostname
13470     case TARGET_NR_gethostname:
13471     {
13472         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13473         if (name) {
13474             ret = get_errno(gethostname(name, arg2));
13475             unlock_user(name, arg1, arg2);
13476         } else {
13477             ret = -TARGET_EFAULT;
13478         }
13479         return ret;
13480     }
13481 #endif
13482 #ifdef TARGET_NR_atomic_cmpxchg_32
13483     case TARGET_NR_atomic_cmpxchg_32:
13484     {
13485         /* should use start_exclusive from main.c */
13486         abi_ulong mem_value;
13487         if (get_user_u32(mem_value, arg6)) {
13488             target_siginfo_t info;
13489             info.si_signo = SIGSEGV;
13490             info.si_errno = 0;
13491             info.si_code = TARGET_SEGV_MAPERR;
13492             info._sifields._sigfault._addr = arg6;
13493             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13494             ret = 0xdeadbeef;
13495 
13496         }
13497         if (mem_value == arg2)
13498             put_user_u32(arg1, arg6);
13499         return mem_value;
13500     }
13501 #endif
13502 #ifdef TARGET_NR_atomic_barrier
13503     case TARGET_NR_atomic_barrier:
13504         /* Like the kernel implementation and the
13505            qemu arm barrier, no-op this? */
13506         return 0;
13507 #endif
13508 
13509 #ifdef TARGET_NR_timer_create
13510     case TARGET_NR_timer_create:
13511     {
13512         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13513 
13514         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13515 
13516         int clkid = arg1;
13517         int timer_index = next_free_host_timer();
13518 
13519         if (timer_index < 0) {
13520             ret = -TARGET_EAGAIN;
13521         } else {
13522             timer_t *phtimer = g_posix_timers  + timer_index;
13523 
13524             if (arg2) {
13525                 phost_sevp = &host_sevp;
13526                 ret = target_to_host_sigevent(phost_sevp, arg2);
13527                 if (ret != 0) {
13528                     free_host_timer_slot(timer_index);
13529                     return ret;
13530                 }
13531             }
13532 
13533             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13534             if (ret) {
13535                 free_host_timer_slot(timer_index);
13536             } else {
13537                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13538                     timer_delete(*phtimer);
13539                     free_host_timer_slot(timer_index);
13540                     return -TARGET_EFAULT;
13541                 }
13542             }
13543         }
13544         return ret;
13545     }
13546 #endif
13547 
13548 #ifdef TARGET_NR_timer_settime
13549     case TARGET_NR_timer_settime:
13550     {
13551         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13552          * struct itimerspec * old_value */
13553         target_timer_t timerid = get_timer_id(arg1);
13554 
13555         if (timerid < 0) {
13556             ret = timerid;
13557         } else if (arg3 == 0) {
13558             ret = -TARGET_EINVAL;
13559         } else {
13560             timer_t htimer = g_posix_timers[timerid];
13561             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13562 
13563             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13564                 return -TARGET_EFAULT;
13565             }
13566             ret = get_errno(
13567                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13568             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13569                 return -TARGET_EFAULT;
13570             }
13571         }
13572         return ret;
13573     }
13574 #endif
13575 
13576 #ifdef TARGET_NR_timer_settime64
13577     case TARGET_NR_timer_settime64:
13578     {
13579         target_timer_t timerid = get_timer_id(arg1);
13580 
13581         if (timerid < 0) {
13582             ret = timerid;
13583         } else if (arg3 == 0) {
13584             ret = -TARGET_EINVAL;
13585         } else {
13586             timer_t htimer = g_posix_timers[timerid];
13587             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13588 
13589             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13590                 return -TARGET_EFAULT;
13591             }
13592             ret = get_errno(
13593                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13594             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13595                 return -TARGET_EFAULT;
13596             }
13597         }
13598         return ret;
13599     }
13600 #endif
13601 
13602 #ifdef TARGET_NR_timer_gettime
13603     case TARGET_NR_timer_gettime:
13604     {
13605         /* args: timer_t timerid, struct itimerspec *curr_value */
13606         target_timer_t timerid = get_timer_id(arg1);
13607 
13608         if (timerid < 0) {
13609             ret = timerid;
13610         } else if (!arg2) {
13611             ret = -TARGET_EFAULT;
13612         } else {
13613             timer_t htimer = g_posix_timers[timerid];
13614             struct itimerspec hspec;
13615             ret = get_errno(timer_gettime(htimer, &hspec));
13616 
13617             if (host_to_target_itimerspec(arg2, &hspec)) {
13618                 ret = -TARGET_EFAULT;
13619             }
13620         }
13621         return ret;
13622     }
13623 #endif
13624 
13625 #ifdef TARGET_NR_timer_gettime64
13626     case TARGET_NR_timer_gettime64:
13627     {
13628         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13629         target_timer_t timerid = get_timer_id(arg1);
13630 
13631         if (timerid < 0) {
13632             ret = timerid;
13633         } else if (!arg2) {
13634             ret = -TARGET_EFAULT;
13635         } else {
13636             timer_t htimer = g_posix_timers[timerid];
13637             struct itimerspec hspec;
13638             ret = get_errno(timer_gettime(htimer, &hspec));
13639 
13640             if (host_to_target_itimerspec64(arg2, &hspec)) {
13641                 ret = -TARGET_EFAULT;
13642             }
13643         }
13644         return ret;
13645     }
13646 #endif
13647 
13648 #ifdef TARGET_NR_timer_getoverrun
13649     case TARGET_NR_timer_getoverrun:
13650     {
13651         /* args: timer_t timerid */
13652         target_timer_t timerid = get_timer_id(arg1);
13653 
13654         if (timerid < 0) {
13655             ret = timerid;
13656         } else {
13657             timer_t htimer = g_posix_timers[timerid];
13658             ret = get_errno(timer_getoverrun(htimer));
13659         }
13660         return ret;
13661     }
13662 #endif
13663 
13664 #ifdef TARGET_NR_timer_delete
13665     case TARGET_NR_timer_delete:
13666     {
13667         /* args: timer_t timerid */
13668         target_timer_t timerid = get_timer_id(arg1);
13669 
13670         if (timerid < 0) {
13671             ret = timerid;
13672         } else {
13673             timer_t htimer = g_posix_timers[timerid];
13674             ret = get_errno(timer_delete(htimer));
13675             free_host_timer_slot(timerid);
13676         }
13677         return ret;
13678     }
13679 #endif
13680 
13681 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13682     case TARGET_NR_timerfd_create:
13683         ret = get_errno(timerfd_create(arg1,
13684                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13685         if (ret >= 0) {
13686             fd_trans_register(ret, &target_timerfd_trans);
13687         }
13688         return ret;
13689 #endif
13690 
13691 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13692     case TARGET_NR_timerfd_gettime:
13693         {
13694             struct itimerspec its_curr;
13695 
13696             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13697 
13698             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13699                 return -TARGET_EFAULT;
13700             }
13701         }
13702         return ret;
13703 #endif
13704 
13705 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13706     case TARGET_NR_timerfd_gettime64:
13707         {
13708             struct itimerspec its_curr;
13709 
13710             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13711 
13712             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13713                 return -TARGET_EFAULT;
13714             }
13715         }
13716         return ret;
13717 #endif
13718 
13719 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13720     case TARGET_NR_timerfd_settime:
13721         {
13722             struct itimerspec its_new, its_old, *p_new;
13723 
13724             if (arg3) {
13725                 if (target_to_host_itimerspec(&its_new, arg3)) {
13726                     return -TARGET_EFAULT;
13727                 }
13728                 p_new = &its_new;
13729             } else {
13730                 p_new = NULL;
13731             }
13732 
13733             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13734 
13735             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13736                 return -TARGET_EFAULT;
13737             }
13738         }
13739         return ret;
13740 #endif
13741 
13742 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13743     case TARGET_NR_timerfd_settime64:
13744         {
13745             struct itimerspec its_new, its_old, *p_new;
13746 
13747             if (arg3) {
13748                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13749                     return -TARGET_EFAULT;
13750                 }
13751                 p_new = &its_new;
13752             } else {
13753                 p_new = NULL;
13754             }
13755 
13756             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13757 
13758             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13759                 return -TARGET_EFAULT;
13760             }
13761         }
13762         return ret;
13763 #endif
13764 
13765 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13766     case TARGET_NR_ioprio_get:
13767         return get_errno(ioprio_get(arg1, arg2));
13768 #endif
13769 
13770 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13771     case TARGET_NR_ioprio_set:
13772         return get_errno(ioprio_set(arg1, arg2, arg3));
13773 #endif
13774 
13775 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13776     case TARGET_NR_setns:
13777         return get_errno(setns(arg1, arg2));
13778 #endif
13779 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13780     case TARGET_NR_unshare:
13781         return get_errno(unshare(arg1));
13782 #endif
13783 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13784     case TARGET_NR_kcmp:
13785         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13786 #endif
13787 #ifdef TARGET_NR_swapcontext
13788     case TARGET_NR_swapcontext:
13789         /* PowerPC specific.  */
13790         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13791 #endif
13792 #ifdef TARGET_NR_memfd_create
13793     case TARGET_NR_memfd_create:
13794         p = lock_user_string(arg1);
13795         if (!p) {
13796             return -TARGET_EFAULT;
13797         }
13798         ret = get_errno(memfd_create(p, arg2));
13799         fd_trans_unregister(ret);
13800         unlock_user(p, arg1, 0);
13801         return ret;
13802 #endif
13803 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13804     case TARGET_NR_membarrier:
13805         return get_errno(membarrier(arg1, arg2));
13806 #endif
13807 
13808 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13809     case TARGET_NR_copy_file_range:
13810         {
13811             loff_t inoff, outoff;
13812             loff_t *pinoff = NULL, *poutoff = NULL;
13813 
13814             if (arg2) {
13815                 if (get_user_u64(inoff, arg2)) {
13816                     return -TARGET_EFAULT;
13817                 }
13818                 pinoff = &inoff;
13819             }
13820             if (arg4) {
13821                 if (get_user_u64(outoff, arg4)) {
13822                     return -TARGET_EFAULT;
13823                 }
13824                 poutoff = &outoff;
13825             }
13826             /* Do not sign-extend the count parameter. */
13827             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13828                                                  (abi_ulong)arg5, arg6));
13829             if (!is_error(ret) && ret > 0) {
13830                 if (arg2) {
13831                     if (put_user_u64(inoff, arg2)) {
13832                         return -TARGET_EFAULT;
13833                     }
13834                 }
13835                 if (arg4) {
13836                     if (put_user_u64(outoff, arg4)) {
13837                         return -TARGET_EFAULT;
13838                     }
13839                 }
13840             }
13841         }
13842         return ret;
13843 #endif
13844 
13845 #if defined(TARGET_NR_pivot_root)
13846     case TARGET_NR_pivot_root:
13847         {
13848             void *p2;
13849             p = lock_user_string(arg1); /* new_root */
13850             p2 = lock_user_string(arg2); /* put_old */
13851             if (!p || !p2) {
13852                 ret = -TARGET_EFAULT;
13853             } else {
13854                 ret = get_errno(pivot_root(p, p2));
13855             }
13856             unlock_user(p2, arg2, 0);
13857             unlock_user(p, arg1, 0);
13858         }
13859         return ret;
13860 #endif
13861 
13862 #if defined(TARGET_NR_riscv_hwprobe)
13863     case TARGET_NR_riscv_hwprobe:
13864         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13865 #endif
13866 
13867     default:
13868         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13869         return -TARGET_ENOSYS;
13870     }
13871     return ret;
13872 }
13873 
13874 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13875                     abi_long arg2, abi_long arg3, abi_long arg4,
13876                     abi_long arg5, abi_long arg6, abi_long arg7,
13877                     abi_long arg8)
13878 {
13879     CPUState *cpu = env_cpu(cpu_env);
13880     abi_long ret;
13881 
13882 #ifdef DEBUG_ERESTARTSYS
13883     /* Debug-only code for exercising the syscall-restart code paths
13884      * in the per-architecture cpu main loops: restart every syscall
13885      * the guest makes once before letting it through.
13886      */
13887     {
13888         static bool flag;
13889         flag = !flag;
13890         if (flag) {
13891             return -QEMU_ERESTARTSYS;
13892         }
13893     }
13894 #endif
13895 
13896     record_syscall_start(cpu, num, arg1,
13897                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13898 
13899     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13900         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13901     }
13902 
13903     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13904                       arg5, arg6, arg7, arg8);
13905 
13906     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13907         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13908                           arg3, arg4, arg5, arg6);
13909     }
13910 
13911     record_syscall_return(cpu, num, ret);
13912     return ret;
13913 }
13914