xref: /qemu/linux-user/syscall.c (revision f26137893b98c6e1fd6819d5f13cb74fafcdcff9)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/translation-block.h"
30 #include <elf.h>
31 #include <endian.h>
32 #include <grp.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/mount.h>
37 #include <sys/file.h>
38 #include <sys/fsuid.h>
39 #include <sys/personality.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
42 #include <sys/swap.h>
43 #include <linux/capability.h>
44 #include <sched.h>
45 #include <sys/timex.h>
46 #include <sys/socket.h>
47 #include <linux/sockios.h>
48 #include <sys/un.h>
49 #include <sys/uio.h>
50 #include <poll.h>
51 #include <sys/times.h>
52 #include <sys/shm.h>
53 #include <sys/sem.h>
54 #include <sys/statfs.h>
55 #include <utime.h>
56 #include <sys/sysinfo.h>
57 #include <sys/signalfd.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
61 #include <netinet/udp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include <linux/icmpv6.h>
65 #include <linux/if_tun.h>
66 #include <linux/in6.h>
67 #include <linux/errqueue.h>
68 #include <linux/random.h>
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84 #ifdef HAVE_SYS_KCOV_H
85 #include <sys/kcov.h>
86 #endif
87 
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
94 
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #include <linux/fd.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #if defined(CONFIG_USBFS)
109 #include <linux/usbdevice_fs.h>
110 #include <linux/usb/ch9.h>
111 #endif
112 #include <linux/vt.h>
113 #include <linux/dm-ioctl.h>
114 #include <linux/reboot.h>
115 #include <linux/route.h>
116 #include <linux/filter.h>
117 #include <linux/blkpg.h>
118 #include <netpacket/packet.h>
119 #include <linux/netlink.h>
120 #include <linux/if_alg.h>
121 #include <linux/rtc.h>
122 #include <sound/asound.h>
123 #ifdef HAVE_BTRFS_H
124 #include <linux/btrfs.h>
125 #endif
126 #ifdef HAVE_DRM_H
127 #include <libdrm/drm.h>
128 #include <libdrm/i915_drm.h>
129 #endif
130 #include "linux_loop.h"
131 #include "uname.h"
132 
133 #include "qemu.h"
134 #include "user-internals.h"
135 #include "strace.h"
136 #include "signal-common.h"
137 #include "loader.h"
138 #include "user-mmap.h"
139 #include "user/page-protection.h"
140 #include "user/safe-syscall.h"
141 #include "qemu/guest-random.h"
142 #include "qemu/selfmap.h"
143 #include "user/syscall-trace.h"
144 #include "special-errno.h"
145 #include "qapi/error.h"
146 #include "fd-trans.h"
147 #include "user/cpu_loop.h"
148 
149 #ifndef CLONE_IO
150 #define CLONE_IO                0x80000000      /* Clone io context */
151 #endif
152 
153 /* We can't directly call the host clone syscall, because this will
154  * badly confuse libc (breaking mutexes, for example). So we must
155  * divide clone flags into:
156  *  * flag combinations that look like pthread_create()
157  *  * flag combinations that look like fork()
158  *  * flags we can implement within QEMU itself
159  *  * flags we can't support and will return an error for
160  */
161 /* For thread creation, all these flags must be present; for
162  * fork, none must be present.
163  */
164 #define CLONE_THREAD_FLAGS                              \
165     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
166      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
167 
168 /* These flags are ignored:
169  * CLONE_DETACHED is now ignored by the kernel;
170  * CLONE_IO is just an optimisation hint to the I/O scheduler
171  */
172 #define CLONE_IGNORED_FLAGS                     \
173     (CLONE_DETACHED | CLONE_IO)
174 
175 #ifndef CLONE_PIDFD
176 # define CLONE_PIDFD 0x00001000
177 #endif
178 
179 /* Flags for fork which we can implement within QEMU itself */
180 #define CLONE_OPTIONAL_FORK_FLAGS               \
181     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
182      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
183 
184 /* Flags for thread creation which we can implement within QEMU itself */
185 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
186     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
187      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
188 
189 #define CLONE_INVALID_FORK_FLAGS                                        \
190     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
191 
192 #define CLONE_INVALID_THREAD_FLAGS                                      \
193     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
194        CLONE_IGNORED_FLAGS))
195 
196 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
197  * have almost all been allocated. We cannot support any of
198  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
199  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
200  * The checks against the invalid thread masks above will catch these.
201  * (The bit 0x1000, which used to be CLONE_PID, is now CLONE_PIDFD, defined above.)
202  */
203 
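/*
 * Illustrative example (editor's sketch, not part of the original source):
 * a typical glibc pthread_create() issues clone() with roughly
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * which contains every bit of CLONE_THREAD_FLAGS and otherwise only bits
 * from CLONE_OPTIONAL_THREAD_FLAGS, so it is classified as thread creation.
 * A fork()-style clone carries none of CLONE_THREAD_FLAGS and only
 * CSIGNAL, CLONE_OPTIONAL_FORK_FLAGS or CLONE_IGNORED_FLAGS bits; anything
 * else trips the CLONE_INVALID_*_FLAGS masks above and is rejected.
 */
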
204 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
205  * once. This exercises the codepaths for restart.
206  */
207 //#define DEBUG_ERESTARTSYS
208 
209 //#include <linux/msdos_fs.h>
210 #define VFAT_IOCTL_READDIR_BOTH \
211     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
212 #define VFAT_IOCTL_READDIR_SHORT \
213     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
214 
215 #undef _syscall0
216 #undef _syscall1
217 #undef _syscall2
218 #undef _syscall3
219 #undef _syscall4
220 #undef _syscall5
221 #undef _syscall6
222 
223 #define _syscall0(type,name)		\
224 static type name (void)			\
225 {					\
226 	return syscall(__NR_##name);	\
227 }
228 
229 #define _syscall1(type,name,type1,arg1)		\
230 static type name (type1 arg1)			\
231 {						\
232 	return syscall(__NR_##name, arg1);	\
233 }
234 
235 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
236 static type name (type1 arg1,type2 arg2)		\
237 {							\
238 	return syscall(__NR_##name, arg1, arg2);	\
239 }
240 
241 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
242 static type name (type1 arg1,type2 arg2,type3 arg3)		\
243 {								\
244 	return syscall(__NR_##name, arg1, arg2, arg3);		\
245 }
246 
247 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
248 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
249 {										\
250 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
251 }
252 
253 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
254 		  type5,arg5)							\
255 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
258 }
259 
260 
261 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
262 		  type5,arg5,type6,arg6)					\
263 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
264                   type6 arg6)							\
265 {										\
266 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
267 }
268 
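/*
 * Illustrative expansion (editor's note): together with the aliasing
 * defines below, an invocation such as
 *
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 *
 * expands to approximately
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * i.e. a thin static wrapper that issues the host syscall number directly
 * (here __NR_getcwd), bypassing the glibc wrapper.
 */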
269 
270 #define __NR_sys_uname __NR_uname
271 #define __NR_sys_getcwd1 __NR_getcwd
272 #define __NR_sys_getdents __NR_getdents
273 #define __NR_sys_getdents64 __NR_getdents64
274 #define __NR_sys_getpriority __NR_getpriority
275 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
276 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
277 #define __NR_sys_syslog __NR_syslog
278 #if defined(__NR_futex)
279 # define __NR_sys_futex __NR_futex
280 #endif
281 #if defined(__NR_futex_time64)
282 # define __NR_sys_futex_time64 __NR_futex_time64
283 #endif
284 #define __NR_sys_statx __NR_statx
285 
286 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
287 #define __NR__llseek __NR_lseek
288 #endif
289 
290 /* Newer kernel ports have llseek() instead of _llseek() */
291 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
292 #define TARGET_NR__llseek TARGET_NR_llseek
293 #endif
294 
295 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
296 #ifndef TARGET_O_NONBLOCK_MASK
297 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
298 #endif
299 
300 #define __NR_sys_gettid __NR_gettid
301 _syscall0(int, sys_gettid)
302 
303 /* For the 64-bit guest on 32-bit host case we must emulate
304  * getdents using getdents64, because otherwise the host
305  * might hand us back more dirent records than we can fit
306  * into the guest buffer after structure format conversion.
307  * Otherwise we emulate getdents with the host's getdents if it is available.
308  */
309 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
310 #define EMULATE_GETDENTS_WITH_GETDENTS
311 #endif
312 
313 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
314 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
315 #endif
316 #if (defined(TARGET_NR_getdents) && \
317       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
318     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
319 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
320 #endif
321 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
322 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
323           loff_t *, res, unsigned int, wh);
324 #endif
325 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
326 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
327           siginfo_t *, uinfo)
328 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
329 #ifdef __NR_exit_group
330 _syscall1(int,exit_group,int,error_code)
331 #endif
332 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
333 #define __NR_sys_close_range __NR_close_range
334 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
335 #ifndef CLOSE_RANGE_CLOEXEC
336 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
337 #endif
338 #endif
339 #if defined(__NR_futex)
340 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
341           const struct timespec *,timeout,int *,uaddr2,int,val3)
342 #endif
343 #if defined(__NR_futex_time64)
344 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
345           const struct timespec *,timeout,int *,uaddr2,int,val3)
346 #endif
347 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
348 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
349 #endif
350 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
351 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
352                              unsigned int, flags);
353 #endif
354 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
355 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
356 #endif
357 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
358 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
359           unsigned long *, user_mask_ptr);
360 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
361 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
362           unsigned long *, user_mask_ptr);
363 /* sched_attr is not defined in glibc */
364 struct sched_attr {
365     uint32_t size;
366     uint32_t sched_policy;
367     uint64_t sched_flags;
368     int32_t sched_nice;
369     uint32_t sched_priority;
370     uint64_t sched_runtime;
371     uint64_t sched_deadline;
372     uint64_t sched_period;
373     uint32_t sched_util_min;
374     uint32_t sched_util_max;
375 };
376 #define __NR_sys_sched_getattr __NR_sched_getattr
377 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
378           unsigned int, size, unsigned int, flags);
379 #define __NR_sys_sched_setattr __NR_sched_setattr
380 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
381           unsigned int, flags);
382 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
383 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
384 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
385 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
386           const struct sched_param *, param);
387 #define __NR_sys_sched_getparam __NR_sched_getparam
388 _syscall2(int, sys_sched_getparam, pid_t, pid,
389           struct sched_param *, param);
390 #define __NR_sys_sched_setparam __NR_sched_setparam
391 _syscall2(int, sys_sched_setparam, pid_t, pid,
392           const struct sched_param *, param);
393 #define __NR_sys_getcpu __NR_getcpu
394 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
395 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
396           void *, arg);
397 _syscall2(int, capget, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 _syscall2(int, capset, struct __user_cap_header_struct *, header,
400           struct __user_cap_data_struct *, data);
401 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
402 _syscall2(int, ioprio_get, int, which, int, who)
403 #endif
404 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
405 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
406 #endif
407 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
408 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
409 #endif
410 
411 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
412 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
413           unsigned long, idx1, unsigned long, idx2)
414 #endif
415 
416 /*
417  * It is assumed that struct statx is architecture independent.
418  */
419 #if defined(TARGET_NR_statx) && defined(__NR_statx)
420 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
421           unsigned int, mask, struct target_statx *, statxbuf)
422 #endif
423 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
424 _syscall2(int, membarrier, int, cmd, int, flags)
425 #endif
426 
427 static const bitmask_transtbl fcntl_flags_tbl[] = {
428   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
429   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
430   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
431   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
432   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
433   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
434   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
435   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
436   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
437   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
438   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
439   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
440   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
441 #if defined(O_DIRECT)
442   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
443 #endif
444 #if defined(O_NOATIME)
445   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
446 #endif
447 #if defined(O_CLOEXEC)
448   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
449 #endif
450 #if defined(O_PATH)
451   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
452 #endif
453 #if defined(O_TMPFILE)
454   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
455 #endif
456   /* Don't terminate the list prematurely on 64-bit host+guest.  */
457 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
458   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
459 #endif
460 };
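
/*
 * Editor's note on reading the table above (an assumed description of the
 * bitmask_transtbl convention, not text from the original source): each
 * entry is { target_mask, target_bits, host_mask, host_bits }.  A guest
 * flags word for which (flags & target_mask) == target_bits contributes
 * host_bits to the translated host word, and symmetrically in the other
 * direction.  For example, a guest open() passing
 *
 *     TARGET_O_WRONLY | TARGET_O_APPEND | TARGET_O_NONBLOCK
 *
 * would be handed to the host as O_WRONLY | O_APPEND | O_NONBLOCK.
 */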
461 
462 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
463 
464 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
465 #if defined(__NR_utimensat)
466 #define __NR_sys_utimensat __NR_utimensat
467 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
468           const struct timespec *,tsp,int,flags)
469 #else
470 static int sys_utimensat(int dirfd, const char *pathname,
471                          const struct timespec times[2], int flags)
472 {
473     errno = ENOSYS;
474     return -1;
475 }
476 #endif
477 #endif /* TARGET_NR_utimensat */
478 
479 #ifdef TARGET_NR_renameat2
480 #if defined(__NR_renameat2)
481 #define __NR_sys_renameat2 __NR_renameat2
482 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
483           const char *, new, unsigned int, flags)
484 #else
485 static int sys_renameat2(int oldfd, const char *old,
486                          int newfd, const char *new, int flags)
487 {
488     if (flags == 0) {
489         return renameat(oldfd, old, newfd, new);
490     }
491     errno = ENOSYS;
492     return -1;
493 }
494 #endif
495 #endif /* TARGET_NR_renameat2 */
496 
497 #ifdef CONFIG_INOTIFY
498 #include <sys/inotify.h>
499 #else
500 /* Userspace can usually survive runtime without inotify */
501 #undef TARGET_NR_inotify_init
502 #undef TARGET_NR_inotify_init1
503 #undef TARGET_NR_inotify_add_watch
504 #undef TARGET_NR_inotify_rm_watch
505 #endif /* CONFIG_INOTIFY  */
506 
507 #if defined(TARGET_NR_prlimit64)
508 #ifndef __NR_prlimit64
509 # define __NR_prlimit64 -1
510 #endif
511 #define __NR_sys_prlimit64 __NR_prlimit64
512 /* The glibc rlimit structure may not be that used by the underlying syscall */
513 struct host_rlimit64 {
514     uint64_t rlim_cur;
515     uint64_t rlim_max;
516 };
517 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
518           const struct host_rlimit64 *, new_limit,
519           struct host_rlimit64 *, old_limit)
520 #endif
521 
522 
523 #if defined(TARGET_NR_timer_create)
524 /* Maximum of 32 active POSIX timers allowed at any one time. */
525 #define GUEST_TIMER_MAX 32
526 static timer_t g_posix_timers[GUEST_TIMER_MAX];
527 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
528 
529 static inline int next_free_host_timer(void)
530 {
531     int k;
532     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
533         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
534             return k;
535         }
536     }
537     return -1;
538 }
539 
540 static inline void free_host_timer_slot(int id)
541 {
542     qatomic_store_release(g_posix_timer_allocated + id, 0);
543 }
544 #endif
545 
546 static inline int host_to_target_errno(int host_errno)
547 {
548     switch (host_errno) {
549 #define E(X)  case X: return TARGET_##X;
550 #include "errnos.c.inc"
551 #undef E
552     default:
553         return host_errno;
554     }
555 }
556 
557 static inline int target_to_host_errno(int target_errno)
558 {
559     switch (target_errno) {
560 #define E(X)  case TARGET_##X: return X;
561 #include "errnos.c.inc"
562 #undef E
563     default:
564         return target_errno;
565     }
566 }
567 
568 abi_long get_errno(abi_long ret)
569 {
570     if (ret == -1)
571         return -host_to_target_errno(errno);
572     else
573         return ret;
574 }
575 
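/*
 * Usage sketch (editor's addition): emulation code typically wraps host
 * calls as
 *
 *     ret = get_errno(safe_openat(dirfd, path, host_flags, mode));
 *
 * (safe_openat() is generated further below), so that a host failure such
 * as errno == ENOENT becomes the guest-visible raw return value
 * -TARGET_ENOENT, while a successful result is passed through unchanged.
 */
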
576 const char *target_strerror(int err)
577 {
578     if (err == QEMU_ERESTARTSYS) {
579         return "To be restarted";
580     }
581     if (err == QEMU_ESIGRETURN) {
582         return "Successful exit from sigreturn";
583     }
584 
585     return strerror(target_to_host_errno(err));
586 }
587 
588 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
589 {
590     int i;
591     uint8_t b;
592     if (usize <= ksize) {
593         return 1;
594     }
595     for (i = ksize; i < usize; i++) {
596         if (get_user_u8(b, addr + i)) {
597             return -TARGET_EFAULT;
598         }
599         if (b != 0) {
600             return 0;
601         }
602     }
603     return 1;
604 }
605 
606 /*
607  * Copies a target struct to a host struct, in a way that guarantees
608  * backwards-compatibility for struct syscall arguments.
609  *
610  * Similar to the kernel's uaccess.h:copy_struct_from_user()
611  */
612 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
613 {
614     size_t size = MIN(ksize, usize);
615     size_t rest = MAX(ksize, usize) - size;
616 
617     /* Deal with trailing bytes. */
618     if (usize < ksize) {
619         memset(dst + size, 0, rest);
620     } else if (usize > ksize) {
621         int ret = check_zeroed_user(src, ksize, usize);
622         if (ret <= 0) {
623             return ret ?: -TARGET_E2BIG;
624         }
625     }
626     /* Copy the interoperable parts of the struct. */
627     if (copy_from_user(dst, src, size)) {
628         return -TARGET_EFAULT;
629     }
630     return 0;
631 }
632 
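/*
 * Editor's summary of the cases handled above, with a hypothetical caller
 * for illustration (the particular struct chosen here is an assumption):
 *
 *     struct sched_attr attr = { 0 };
 *     ret = copy_struct_from_user(&attr, sizeof(attr), guest_addr, usize);
 *
 * If usize == sizeof(attr) this is a plain copy.  If usize is smaller, the
 * missing tail of attr is zero-filled, so older guests keep working against
 * a larger host-side struct.  If usize is larger, every extra guest byte
 * must be zero (verified by check_zeroed_user()), otherwise -TARGET_E2BIG
 * is returned; an unreadable guest buffer yields -TARGET_EFAULT.
 */
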
633 #define safe_syscall0(type, name) \
634 static type safe_##name(void) \
635 { \
636     return safe_syscall(__NR_##name); \
637 }
638 
639 #define safe_syscall1(type, name, type1, arg1) \
640 static type safe_##name(type1 arg1) \
641 { \
642     return safe_syscall(__NR_##name, arg1); \
643 }
644 
645 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
646 static type safe_##name(type1 arg1, type2 arg2) \
647 { \
648     return safe_syscall(__NR_##name, arg1, arg2); \
649 }
650 
651 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
652 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
653 { \
654     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
655 }
656 
657 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
658     type4, arg4) \
659 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
660 { \
661     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
662 }
663 
664 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
665     type4, arg4, type5, arg5) \
666 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
667     type5 arg5) \
668 { \
669     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
670 }
671 
672 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
673     type4, arg4, type5, arg5, type6, arg6) \
674 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
675     type5 arg5, type6 arg6) \
676 { \
677     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
678 }
679 
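/*
 * Editor's note: each safe_syscallN() line below generates a static
 * safe_<name>() wrapper, e.g.
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * produces safe_read(fd, buff, count), which funnels through
 * safe_syscall(__NR_read, ...).  These wrappers exist so that a pending
 * guest signal can safely interrupt a blocking host call (see
 * user/safe-syscall.h, included above); call sites normally combine them
 * with get_errno(), e.g. ret = get_errno(safe_read(fd, host_buf, count)).
 */
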
680 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
681 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
682 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
683               int, flags, mode_t, mode)
684 
685 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
686               const struct open_how_ver0 *, how, size_t, size)
687 
688 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
689 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
690               struct rusage *, rusage)
691 #endif
692 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
693               int, options, struct rusage *, rusage)
694 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
695 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
696               char **, argv, char **, envp, int, flags)
697 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
698     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
699 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
700               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
701 #endif
702 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
703 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
704               struct timespec *, tsp, const sigset_t *, sigmask,
705               size_t, sigsetsize)
706 #endif
707 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
708               int, maxevents, int, timeout, const sigset_t *, sigmask,
709               size_t, sigsetsize)
710 #if defined(__NR_futex)
711 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
712               const struct timespec *,timeout,int *,uaddr2,int,val3)
713 #endif
714 #if defined(__NR_futex_time64)
715 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
716               const struct timespec *,timeout,int *,uaddr2,int,val3)
717 #endif
718 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
719 safe_syscall2(int, kill, pid_t, pid, int, sig)
720 safe_syscall2(int, tkill, int, tid, int, sig)
721 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
722 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
723 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
724 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
725               unsigned long, pos_l, unsigned long, pos_h)
726 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
727               unsigned long, pos_l, unsigned long, pos_h)
728 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
729               socklen_t, addrlen)
730 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
731               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
732 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
733               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
734 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
735 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
736 safe_syscall2(int, flock, int, fd, int, operation)
737 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
738 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
739               const struct timespec *, uts, size_t, sigsetsize)
740 #endif
741 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
742               int, flags)
743 #if defined(TARGET_NR_nanosleep)
744 safe_syscall2(int, nanosleep, const struct timespec *, req,
745               struct timespec *, rem)
746 #endif
747 #if defined(TARGET_NR_clock_nanosleep) || \
748     defined(TARGET_NR_clock_nanosleep_time64)
749 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
750               const struct timespec *, req, struct timespec *, rem)
751 #endif
752 #ifdef __NR_ipc
753 #ifdef __s390x__
754 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
755               void *, ptr)
756 #else
757 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
758               void *, ptr, long, fifth)
759 #endif
760 #endif
761 #ifdef __NR_msgsnd
762 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
763               int, flags)
764 #endif
765 #ifdef __NR_msgrcv
766 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
767               long, msgtype, int, flags)
768 #endif
769 #ifdef __NR_semtimedop
770 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
771               unsigned, nsops, const struct timespec *, timeout)
772 #endif
773 #if defined(TARGET_NR_mq_timedsend) || \
774     defined(TARGET_NR_mq_timedsend_time64)
775 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
776               size_t, len, unsigned, prio, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedreceive) || \
779     defined(TARGET_NR_mq_timedreceive_time64)
780 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
781               size_t, len, unsigned *, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
784 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
785               int, outfd, loff_t *, poutoff, size_t, length,
786               unsigned int, flags)
787 #endif
788 
789 /* We do ioctl like this rather than via safe_syscall3 to preserve the
790  * "third argument might be integer or pointer or not present" behaviour of
791  * the libc function.
792  */
793 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
794 /* Similarly for fcntl. Since we always build with LFS enabled,
795  * we should be using the 64-bit structures automatically.
796  */
797 #ifdef __NR_fcntl64
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
799 #else
800 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
801 #endif
802 
803 static inline int host_to_target_sock_type(int host_type)
804 {
805     int target_type;
806 
807     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
808     case SOCK_DGRAM:
809         target_type = TARGET_SOCK_DGRAM;
810         break;
811     case SOCK_STREAM:
812         target_type = TARGET_SOCK_STREAM;
813         break;
814     default:
815         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
816         break;
817     }
818 
819 #if defined(SOCK_CLOEXEC)
820     if (host_type & SOCK_CLOEXEC) {
821         target_type |= TARGET_SOCK_CLOEXEC;
822     }
823 #endif
824 
825 #if defined(SOCK_NONBLOCK)
826     if (host_type & SOCK_NONBLOCK) {
827         target_type |= TARGET_SOCK_NONBLOCK;
828     }
829 #endif
830 
831     return target_type;
832 }
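
/*
 * Worked example (editor's addition): a host value of
 * SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC translates to
 * TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK | TARGET_SOCK_CLOEXEC, while an
 * unrecognised base type in the low nibble is passed through numerically
 * unchanged.
 */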
833 
834 static abi_ulong target_brk, initial_target_brk;
835 
836 void target_set_brk(abi_ulong new_brk)
837 {
838     target_brk = TARGET_PAGE_ALIGN(new_brk);
839     initial_target_brk = target_brk;
840 }
841 
842 /* do_brk() must return target values and target errnos. */
843 abi_long do_brk(abi_ulong brk_val)
844 {
845     abi_long mapped_addr;
846     abi_ulong new_brk;
847     abi_ulong old_brk;
848 
849     /* brk pointers are always untagged */
850 
851     /* do not allow the brk to shrink below its initial value */
852     if (brk_val < initial_target_brk) {
853         return target_brk;
854     }
855 
856     new_brk = TARGET_PAGE_ALIGN(brk_val);
857     old_brk = TARGET_PAGE_ALIGN(target_brk);
858 
859     /* new and old target_brk might be on the same page */
860     if (new_brk == old_brk) {
861         target_brk = brk_val;
862         return target_brk;
863     }
864 
865     /* Release heap if necessary */
866     if (new_brk < old_brk) {
867         target_munmap(new_brk, old_brk - new_brk);
868 
869         target_brk = brk_val;
870         return target_brk;
871     }
872 
873     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
874                               PROT_READ | PROT_WRITE,
875                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
876                               -1, 0);
877 
878     if (mapped_addr == old_brk) {
879         target_brk = brk_val;
880         return target_brk;
881     }
882 
883 #if defined(TARGET_ALPHA)
884     /* We (partially) emulate OSF/1 on Alpha, which requires we
885        return a proper errno, not an unchanged brk value.  */
886     return -TARGET_ENOMEM;
887 #endif
888     /* For everything else, return the previous break. */
889     return target_brk;
890 }
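
/*
 * Worked example (editor's sketch, assuming 4 KiB target pages): if
 * target_brk is 0x500000 and the guest calls brk(0x502345), then
 * new_brk = 0x503000 and old_brk = 0x500000, so the range
 * [0x500000, 0x503000) is mapped anonymously with MAP_FIXED_NOREPLACE.
 * If the mapping lands at old_brk, target_brk becomes exactly 0x502345;
 * otherwise the old break is returned (or -TARGET_ENOMEM on Alpha/OSF/1).
 */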
891 
892 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
893     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
894 static inline abi_long copy_from_user_fdset(fd_set *fds,
895                                             abi_ulong target_fds_addr,
896                                             int n)
897 {
898     int i, nw, j, k;
899     abi_ulong b, *target_fds;
900 
901     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
902     if (!(target_fds = lock_user(VERIFY_READ,
903                                  target_fds_addr,
904                                  sizeof(abi_ulong) * nw,
905                                  1)))
906         return -TARGET_EFAULT;
907 
908     FD_ZERO(fds);
909     k = 0;
910     for (i = 0; i < nw; i++) {
911         /* grab the abi_ulong */
912         __get_user(b, &target_fds[i]);
913         for (j = 0; j < TARGET_ABI_BITS; j++) {
914             /* check the bit inside the abi_ulong */
915             if ((b >> j) & 1)
916                 FD_SET(k, fds);
917             k++;
918         }
919     }
920 
921     unlock_user(target_fds, target_fds_addr, 0);
922 
923     return 0;
924 }
925 
926 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
927                                                  abi_ulong target_fds_addr,
928                                                  int n)
929 {
930     if (target_fds_addr) {
931         if (copy_from_user_fdset(fds, target_fds_addr, n))
932             return -TARGET_EFAULT;
933         *fds_ptr = fds;
934     } else {
935         *fds_ptr = NULL;
936     }
937     return 0;
938 }
939 
940 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
941                                           const fd_set *fds,
942                                           int n)
943 {
944     int i, nw, j, k;
945     abi_long v;
946     abi_ulong *target_fds;
947 
948     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
949     if (!(target_fds = lock_user(VERIFY_WRITE,
950                                  target_fds_addr,
951                                  sizeof(abi_ulong) * nw,
952                                  0)))
953         return -TARGET_EFAULT;
954 
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         v = 0;
958         for (j = 0; j < TARGET_ABI_BITS; j++) {
959             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
960             k++;
961         }
962         __put_user(v, &target_fds[i]);
963     }
964 
965     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
966 
967     return 0;
968 }
969 #endif
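
/*
 * Editor's note on the packing used above: guest fd_set words are
 * abi_ulongs, so descriptor k lives in word k / TARGET_ABI_BITS at bit
 * k % TARGET_ABI_BITS.  For a 32-bit guest, fd 33 is bit 1 of
 * target_fds[1]; the host-side layout is left entirely to the host's
 * FD_SET()/FD_ISSET() macros.
 */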
970 
971 #if defined(__alpha__)
972 #define HOST_HZ 1024
973 #else
974 #define HOST_HZ 100
975 #endif
976 
977 static inline abi_long host_to_target_clock_t(long ticks)
978 {
979 #if HOST_HZ == TARGET_HZ
980     return ticks;
981 #else
982     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
983 #endif
984 }
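
/*
 * Worked example (editor's addition): on an Alpha host (HOST_HZ == 1024)
 * emulating a 100 Hz target, 2048 host ticks convert to
 * (2048 * 100) / 1024 = 200 target ticks; when HOST_HZ equals TARGET_HZ
 * the value is passed through untouched.
 */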
985 
986 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
987                                              const struct rusage *rusage)
988 {
989     struct target_rusage *target_rusage;
990 
991     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
992         return -TARGET_EFAULT;
993     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
994     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
995     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
996     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
997     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
998     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
999     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1000     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1001     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1002     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1003     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1004     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1005     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1006     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1007     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1008     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1009     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1010     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1011     unlock_user_struct(target_rusage, target_addr, 1);
1012 
1013     return 0;
1014 }
1015 
1016 #ifdef TARGET_NR_setrlimit
1017 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1018 {
1019     abi_ulong target_rlim_swap;
1020     rlim_t result;
1021 
1022     target_rlim_swap = tswapal(target_rlim);
1023     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1024         return RLIM_INFINITY;
1025 
1026     result = target_rlim_swap;
1027     if (target_rlim_swap != (rlim_t)result)
1028         return RLIM_INFINITY;
1029 
1030     return result;
1031 }
1032 #endif
1033 
1034 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1035 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1036 {
1037     abi_ulong target_rlim_swap;
1038     abi_ulong result;
1039 
1040     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1041         target_rlim_swap = TARGET_RLIM_INFINITY;
1042     else
1043         target_rlim_swap = rlim;
1044     result = tswapal(target_rlim_swap);
1045 
1046     return result;
1047 }
1048 #endif
1049 
1050 static inline int target_to_host_resource(int code)
1051 {
1052     switch (code) {
1053     case TARGET_RLIMIT_AS:
1054         return RLIMIT_AS;
1055     case TARGET_RLIMIT_CORE:
1056         return RLIMIT_CORE;
1057     case TARGET_RLIMIT_CPU:
1058         return RLIMIT_CPU;
1059     case TARGET_RLIMIT_DATA:
1060         return RLIMIT_DATA;
1061     case TARGET_RLIMIT_FSIZE:
1062         return RLIMIT_FSIZE;
1063     case TARGET_RLIMIT_LOCKS:
1064         return RLIMIT_LOCKS;
1065     case TARGET_RLIMIT_MEMLOCK:
1066         return RLIMIT_MEMLOCK;
1067     case TARGET_RLIMIT_MSGQUEUE:
1068         return RLIMIT_MSGQUEUE;
1069     case TARGET_RLIMIT_NICE:
1070         return RLIMIT_NICE;
1071     case TARGET_RLIMIT_NOFILE:
1072         return RLIMIT_NOFILE;
1073     case TARGET_RLIMIT_NPROC:
1074         return RLIMIT_NPROC;
1075     case TARGET_RLIMIT_RSS:
1076         return RLIMIT_RSS;
1077     case TARGET_RLIMIT_RTPRIO:
1078         return RLIMIT_RTPRIO;
1079 #ifdef RLIMIT_RTTIME
1080     case TARGET_RLIMIT_RTTIME:
1081         return RLIMIT_RTTIME;
1082 #endif
1083     case TARGET_RLIMIT_SIGPENDING:
1084         return RLIMIT_SIGPENDING;
1085     case TARGET_RLIMIT_STACK:
1086         return RLIMIT_STACK;
1087     default:
1088         return code;
1089     }
1090 }
1091 
1092 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1093                                               abi_ulong target_tv_addr)
1094 {
1095     struct target_timeval *target_tv;
1096 
1097     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1098         return -TARGET_EFAULT;
1099     }
1100 
1101     __get_user(tv->tv_sec, &target_tv->tv_sec);
1102     __get_user(tv->tv_usec, &target_tv->tv_usec);
1103 
1104     unlock_user_struct(target_tv, target_tv_addr, 0);
1105 
1106     return 0;
1107 }
1108 
1109 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1110                                             const struct timeval *tv)
1111 {
1112     struct target_timeval *target_tv;
1113 
1114     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1115         return -TARGET_EFAULT;
1116     }
1117 
1118     __put_user(tv->tv_sec, &target_tv->tv_sec);
1119     __put_user(tv->tv_usec, &target_tv->tv_usec);
1120 
1121     unlock_user_struct(target_tv, target_tv_addr, 1);
1122 
1123     return 0;
1124 }
1125 
1126 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1127 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1128                                                 abi_ulong target_tv_addr)
1129 {
1130     struct target__kernel_sock_timeval *target_tv;
1131 
1132     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1133         return -TARGET_EFAULT;
1134     }
1135 
1136     __get_user(tv->tv_sec, &target_tv->tv_sec);
1137     __get_user(tv->tv_usec, &target_tv->tv_usec);
1138 
1139     unlock_user_struct(target_tv, target_tv_addr, 0);
1140 
1141     return 0;
1142 }
1143 #endif
1144 
1145 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1146                                               const struct timeval *tv)
1147 {
1148     struct target__kernel_sock_timeval *target_tv;
1149 
1150     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151         return -TARGET_EFAULT;
1152     }
1153 
1154     __put_user(tv->tv_sec, &target_tv->tv_sec);
1155     __put_user(tv->tv_usec, &target_tv->tv_usec);
1156 
1157     unlock_user_struct(target_tv, target_tv_addr, 1);
1158 
1159     return 0;
1160 }
1161 
1162 #if defined(TARGET_NR_futex) || \
1163     defined(TARGET_NR_rt_sigtimedwait) || \
1164     defined(TARGET_NR_pselect6) || \
1165     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1166     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1167     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1168     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1169     defined(TARGET_NR_timer_settime) || \
1170     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1171 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1172                                                abi_ulong target_addr)
1173 {
1174     struct target_timespec *target_ts;
1175 
1176     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1177         return -TARGET_EFAULT;
1178     }
1179     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1180     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1181     unlock_user_struct(target_ts, target_addr, 0);
1182     return 0;
1183 }
1184 #endif
1185 
1186 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1187     defined(TARGET_NR_timer_settime64) || \
1188     defined(TARGET_NR_mq_timedsend_time64) || \
1189     defined(TARGET_NR_mq_timedreceive_time64) || \
1190     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1191     defined(TARGET_NR_clock_nanosleep_time64) || \
1192     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1193     defined(TARGET_NR_utimensat) || \
1194     defined(TARGET_NR_utimensat_time64) || \
1195     defined(TARGET_NR_semtimedop_time64) || \
1196     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1197 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1198                                                  abi_ulong target_addr)
1199 {
1200     struct target__kernel_timespec *target_ts;
1201 
1202     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1203         return -TARGET_EFAULT;
1204     }
1205     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1206     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1207     /* in 32bit mode, this drops the padding */
1208     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1209     unlock_user_struct(target_ts, target_addr, 0);
1210     return 0;
1211 }
1212 #endif
1213 
1214 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1215                                                struct timespec *host_ts)
1216 {
1217     struct target_timespec *target_ts;
1218 
1219     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1220         return -TARGET_EFAULT;
1221     }
1222     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1223     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1224     unlock_user_struct(target_ts, target_addr, 1);
1225     return 0;
1226 }
1227 
1228 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1229                                                  struct timespec *host_ts)
1230 {
1231     struct target__kernel_timespec *target_ts;
1232 
1233     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1234         return -TARGET_EFAULT;
1235     }
1236     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1237     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1238     unlock_user_struct(target_ts, target_addr, 1);
1239     return 0;
1240 }
1241 
1242 #if defined(TARGET_NR_gettimeofday)
1243 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1244                                              struct timezone *tz)
1245 {
1246     struct target_timezone *target_tz;
1247 
1248     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1249         return -TARGET_EFAULT;
1250     }
1251 
1252     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1253     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1254 
1255     unlock_user_struct(target_tz, target_tz_addr, 1);
1256 
1257     return 0;
1258 }
1259 #endif
1260 
1261 #if defined(TARGET_NR_settimeofday)
1262 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1263                                                abi_ulong target_tz_addr)
1264 {
1265     struct target_timezone *target_tz;
1266 
1267     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1268         return -TARGET_EFAULT;
1269     }
1270 
1271     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1272     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1273 
1274     unlock_user_struct(target_tz, target_tz_addr, 0);
1275 
1276     return 0;
1277 }
1278 #endif
1279 
1280 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1281 #include <mqueue.h>
1282 
1283 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1284                                               abi_ulong target_mq_attr_addr)
1285 {
1286     struct target_mq_attr *target_mq_attr;
1287 
1288     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1289                           target_mq_attr_addr, 1))
1290         return -TARGET_EFAULT;
1291 
1292     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1293     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1294     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1295     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1296 
1297     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1298 
1299     return 0;
1300 }
1301 
1302 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1303                                             const struct mq_attr *attr)
1304 {
1305     struct target_mq_attr *target_mq_attr;
1306 
1307     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1308                           target_mq_attr_addr, 0))
1309         return -TARGET_EFAULT;
1310 
1311     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1312     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1313     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1314     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1315 
1316     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1317 
1318     return 0;
1319 }
1320 #endif
1321 
1322 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1323 /* do_select() must return target values and target errnos. */
1324 static abi_long do_select(int n,
1325                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1326                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1327 {
1328     fd_set rfds, wfds, efds;
1329     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1330     struct timeval tv;
1331     struct timespec ts, *ts_ptr;
1332     abi_long ret;
1333 
1334     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1335     if (ret) {
1336         return ret;
1337     }
1338     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1339     if (ret) {
1340         return ret;
1341     }
1342     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346 
1347     if (target_tv_addr) {
1348         if (copy_from_user_timeval(&tv, target_tv_addr))
1349             return -TARGET_EFAULT;
1350         ts.tv_sec = tv.tv_sec;
1351         ts.tv_nsec = tv.tv_usec * 1000;
1352         ts_ptr = &ts;
1353     } else {
1354         ts_ptr = NULL;
1355     }
1356 
1357     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1358                                   ts_ptr, NULL));
1359 
1360     if (!is_error(ret)) {
1361         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1362             return -TARGET_EFAULT;
1363         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1364             return -TARGET_EFAULT;
1365         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1366             return -TARGET_EFAULT;
1367 
1368         if (target_tv_addr) {
1369             tv.tv_sec = ts.tv_sec;
1370             tv.tv_usec = ts.tv_nsec / 1000;
1371             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1372                 return -TARGET_EFAULT;
1373             }
1374         }
1375     }
1376 
1377     return ret;
1378 }
1379 
1380 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1381 static abi_long do_old_select(abi_ulong arg1)
1382 {
1383     struct target_sel_arg_struct *sel;
1384     abi_ulong inp, outp, exp, tvp;
1385     long nsel;
1386 
1387     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1388         return -TARGET_EFAULT;
1389     }
1390 
1391     nsel = tswapal(sel->n);
1392     inp = tswapal(sel->inp);
1393     outp = tswapal(sel->outp);
1394     exp = tswapal(sel->exp);
1395     tvp = tswapal(sel->tvp);
1396 
1397     unlock_user_struct(sel, arg1, 0);
1398 
1399     return do_select(nsel, inp, outp, exp, tvp);
1400 }
1401 #endif
1402 #endif
1403 
1404 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1405 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1406                             abi_long arg4, abi_long arg5, abi_long arg6,
1407                             bool time64)
1408 {
1409     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1410     fd_set rfds, wfds, efds;
1411     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1412     struct timespec ts, *ts_ptr;
1413     abi_long ret;
1414 
1415     /*
1416      * The 6th arg is actually two args smashed together,
1417      * so we cannot use the C library.
1418      */
1419     struct {
1420         sigset_t *set;
1421         size_t size;
1422     } sig, *sig_ptr;
1423 
1424     abi_ulong arg_sigset, arg_sigsize, *arg7;
1425 
1426     n = arg1;
1427     rfd_addr = arg2;
1428     wfd_addr = arg3;
1429     efd_addr = arg4;
1430     ts_addr = arg5;
1431 
1432     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1433     if (ret) {
1434         return ret;
1435     }
1436     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1437     if (ret) {
1438         return ret;
1439     }
1440     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444 
1445     /*
1446      * This takes a timespec, and not a timeval, so we cannot
1447      * use the do_select() helper ...
1448      */
1449     if (ts_addr) {
1450         if (time64) {
1451             if (target_to_host_timespec64(&ts, ts_addr)) {
1452                 return -TARGET_EFAULT;
1453             }
1454         } else {
1455             if (target_to_host_timespec(&ts, ts_addr)) {
1456                 return -TARGET_EFAULT;
1457             }
1458         }
1459         ts_ptr = &ts;
1460     } else {
1461         ts_ptr = NULL;
1462     }
1463 
1464     /* Extract the two packed args for the sigset */
1465     sig_ptr = NULL;
1466     if (arg6) {
1467         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1468         if (!arg7) {
1469             return -TARGET_EFAULT;
1470         }
1471         arg_sigset = tswapal(arg7[0]);
1472         arg_sigsize = tswapal(arg7[1]);
1473         unlock_user(arg7, arg6, 0);
1474 
1475         if (arg_sigset) {
1476             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1477             if (ret != 0) {
1478                 return ret;
1479             }
1480             sig_ptr = &sig;
1481             sig.size = SIGSET_T_SIZE;
1482         }
1483     }
1484 
1485     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1486                                   ts_ptr, sig_ptr));
1487 
1488     if (sig_ptr) {
1489         finish_sigsuspend_mask(ret);
1490     }
1491 
1492     if (!is_error(ret)) {
1493         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1494             return -TARGET_EFAULT;
1495         }
1496         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1497             return -TARGET_EFAULT;
1498         }
1499         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1500             return -TARGET_EFAULT;
1501         }
1502         if (time64) {
1503             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1504                 return -TARGET_EFAULT;
1505             }
1506         } else {
1507             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1508                 return -TARGET_EFAULT;
1509             }
1510         }
1511     }
1512     return ret;
1513 }
1514 #endif
1515 
1516 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1517     defined(TARGET_NR_ppoll_time64)
1518 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1519                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1520 {
1521     struct target_pollfd *target_pfd;
1522     unsigned int nfds = arg2;
1523     struct pollfd *pfd;
1524     unsigned int i;
1525     abi_long ret;
1526 
1527     pfd = NULL;
1528     target_pfd = NULL;
1529     if (nfds) {
1530         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1531             return -TARGET_EINVAL;
1532         }
1533         target_pfd = lock_user(VERIFY_WRITE, arg1,
1534                                sizeof(struct target_pollfd) * nfds, 1);
1535         if (!target_pfd) {
1536             return -TARGET_EFAULT;
1537         }
1538 
1539         pfd = alloca(sizeof(struct pollfd) * nfds);
1540         for (i = 0; i < nfds; i++) {
1541             pfd[i].fd = tswap32(target_pfd[i].fd);
1542             pfd[i].events = tswap16(target_pfd[i].events);
1543         }
1544     }
1545     if (ppoll) {
1546         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1547         sigset_t *set = NULL;
1548 
1549         if (arg3) {
1550             if (time64) {
1551                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1552                     unlock_user(target_pfd, arg1, 0);
1553                     return -TARGET_EFAULT;
1554                 }
1555             } else {
1556                 if (target_to_host_timespec(timeout_ts, arg3)) {
1557                     unlock_user(target_pfd, arg1, 0);
1558                     return -TARGET_EFAULT;
1559                 }
1560             }
1561         } else {
1562             timeout_ts = NULL;
1563         }
1564 
1565         if (arg4) {
1566             ret = process_sigsuspend_mask(&set, arg4, arg5);
1567             if (ret != 0) {
1568                 unlock_user(target_pfd, arg1, 0);
1569                 return ret;
1570             }
1571         }
1572 
1573         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1574                                    set, SIGSET_T_SIZE));
1575 
1576         if (set) {
1577             finish_sigsuspend_mask(ret);
1578         }
1579         if (!is_error(ret) && arg3) {
1580             if (time64) {
1581                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1582                     return -TARGET_EFAULT;
1583                 }
1584             } else {
1585                 if (host_to_target_timespec(arg3, timeout_ts)) {
1586                     return -TARGET_EFAULT;
1587                 }
1588             }
1589         }
1590     } else {
1591         struct timespec ts, *pts;
1592 
1593         if (arg3 >= 0) {
1594             /* Convert milliseconds to seconds + nanoseconds */
1595             ts.tv_sec = arg3 / 1000;
1596             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1597             pts = &ts;
1598         } else {
1599             /* A negative poll() timeout means "infinite" */
1600             pts = NULL;
1601         }
1602         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1603     }
1604 
1605     if (!is_error(ret)) {
1606         for (i = 0; i < nfds; i++) {
1607             target_pfd[i].revents = tswap16(pfd[i].revents);
1608         }
1609     }
1610     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1611     return ret;
1612 }
1613 #endif
1614 
1615 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1616                         int flags, int is_pipe2)
1617 {
1618     int host_pipe[2];
1619     abi_long ret;
1620     ret = pipe2(host_pipe, flags);
1621 
1622     if (is_error(ret))
1623         return get_errno(ret);
1624 
1625     /* Several targets have special calling conventions for the original
1626        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1627     if (!is_pipe2) {
1628 #if defined(TARGET_ALPHA)
1629         cpu_env->ir[IR_A4] = host_pipe[1];
1630         return host_pipe[0];
1631 #elif defined(TARGET_MIPS)
1632         cpu_env->active_tc.gpr[3] = host_pipe[1];
1633         return host_pipe[0];
1634 #elif defined(TARGET_SH4)
1635         cpu_env->gregs[1] = host_pipe[1];
1636         return host_pipe[0];
1637 #elif defined(TARGET_SPARC)
1638         cpu_env->regwptr[1] = host_pipe[1];
1639         return host_pipe[0];
1640 #endif
1641     }
1642 
1643     if (put_user_s32(host_pipe[0], pipedes)
1644         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1645         return -TARGET_EFAULT;
1646     return get_errno(ret);
1647 }
1648 
1649 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1650                                                abi_ulong target_addr,
1651                                                socklen_t len)
1652 {
1653     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1654     sa_family_t sa_family;
1655     struct target_sockaddr *target_saddr;
1656 
1657     if (fd_trans_target_to_host_addr(fd)) {
1658         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1659     }
1660 
1661     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1662     if (!target_saddr)
1663         return -TARGET_EFAULT;
1664 
1665     sa_family = tswap16(target_saddr->sa_family);
1666 
1667     /* The caller might send an incomplete sun_path; sun_path
1668      * must be terminated by \0 (see the manual page), but
1669      * unfortunately it is quite common to specify the sockaddr_un
1670      * length as "strlen(x->sun_path)" when it should be
1671      * "strlen(...) + 1". We fix that here if needed.
1672      * The Linux kernel applies the same fixup.
1673      */
1674 
1675     if (sa_family == AF_UNIX) {
1676         if (len < unix_maxlen && len > 0) {
1677             char *cp = (char *)target_saddr;
1678 
1679             if (cp[len - 1] && !cp[len])
1680                 len++;
1681         }
1682         if (len > unix_maxlen)
1683             len = unix_maxlen;
1684     }
1685 
1686     memcpy(addr, target_saddr, len);
1687     addr->sa_family = sa_family;
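    /*
     * Some address families carry multi-byte fields in guest byte order
     * that must be swapped individually; everything else in the sockaddr
     * payload is already correct after the plain copy above.
     */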
1688     if (sa_family == AF_NETLINK) {
1689         struct sockaddr_nl *nladdr;
1690 
1691         nladdr = (struct sockaddr_nl *)addr;
1692         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1693         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1694     } else if (sa_family == AF_PACKET) {
1695         struct target_sockaddr_ll *lladdr;
1696 
1697         lladdr = (struct target_sockaddr_ll *)addr;
1698         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1699         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1700     } else if (sa_family == AF_INET6) {
1701         struct sockaddr_in6 *in6addr;
1702 
1703         in6addr = (struct sockaddr_in6 *)addr;
1704         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1705     }
1706     unlock_user(target_saddr, target_addr, 0);
1707 
1708     return 0;
1709 }
1710 
1711 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1712                                                struct sockaddr *addr,
1713                                                socklen_t len)
1714 {
1715     struct target_sockaddr *target_saddr;
1716 
1717     if (len == 0) {
1718         return 0;
1719     }
1720     assert(addr);
1721 
1722     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1723     if (!target_saddr)
1724         return -TARGET_EFAULT;
1725     memcpy(target_saddr, addr, len);
1726     if (len >= offsetof(struct target_sockaddr, sa_family) +
1727         sizeof(target_saddr->sa_family)) {
1728         target_saddr->sa_family = tswap16(addr->sa_family);
1729     }
1730     if (addr->sa_family == AF_NETLINK &&
1731         len >= sizeof(struct target_sockaddr_nl)) {
1732         struct target_sockaddr_nl *target_nl =
1733                (struct target_sockaddr_nl *)target_saddr;
1734         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1735         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1736     } else if (addr->sa_family == AF_PACKET) {
1737         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1738         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1739         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1740     } else if (addr->sa_family == AF_INET6 &&
1741                len >= sizeof(struct target_sockaddr_in6)) {
1742         struct target_sockaddr_in6 *target_in6 =
1743                (struct target_sockaddr_in6 *)target_saddr;
1744         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1745     }
1746     unlock_user(target_saddr, target_addr, len);
1747 
1748     return 0;
1749 }
1750 
1751 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1752                                            struct target_msghdr *target_msgh)
1753 {
1754     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1755     abi_long msg_controllen;
1756     abi_ulong target_cmsg_addr;
1757     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1758     socklen_t space = 0;
1759 
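    /*
     * Walk the guest control-message buffer and rebuild it as host cmsgs:
     * SCM_RIGHTS file descriptors, SCM_CREDENTIALS and SOL_ALG payloads
     * get their fields converted; anything else is copied verbatim with
     * a LOG_UNIMP warning.
     */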
1760     msg_controllen = tswapal(target_msgh->msg_controllen);
1761     if (msg_controllen < sizeof (struct target_cmsghdr))
1762         goto the_end;
1763     target_cmsg_addr = tswapal(target_msgh->msg_control);
1764     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1765     target_cmsg_start = target_cmsg;
1766     if (!target_cmsg)
1767         return -TARGET_EFAULT;
1768 
1769     while (cmsg && target_cmsg) {
1770         void *data = CMSG_DATA(cmsg);
1771         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1772 
1773         int len = tswapal(target_cmsg->cmsg_len)
1774             - sizeof(struct target_cmsghdr);
1775 
1776         space += CMSG_SPACE(len);
1777         if (space > msgh->msg_controllen) {
1778             space -= CMSG_SPACE(len);
1779             /* This is a QEMU bug, since we allocated the payload
1780              * area ourselves (unlike overflow in host-to-target
1781              * conversion, which is just the guest giving us a buffer
1782              * that's too small). It can't happen for the payload types
1783              * we currently support; if it becomes an issue in future
1784              * we would need to improve our allocation strategy to
1785              * something more intelligent than "twice the size of the
1786              * target buffer we're reading from".
1787              */
1788             qemu_log_mask(LOG_UNIMP,
1789                           ("Unsupported ancillary data %d/%d: "
1790                            "unhandled msg size\n"),
1791                           tswap32(target_cmsg->cmsg_level),
1792                           tswap32(target_cmsg->cmsg_type));
1793             break;
1794         }
1795 
1796         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1797             cmsg->cmsg_level = SOL_SOCKET;
1798         } else {
1799             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1800         }
1801         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1802         cmsg->cmsg_len = CMSG_LEN(len);
1803 
1804         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1805             int *fd = (int *)data;
1806             int *target_fd = (int *)target_data;
1807             int i, numfds = len / sizeof(int);
1808 
1809             for (i = 0; i < numfds; i++) {
1810                 __get_user(fd[i], target_fd + i);
1811             }
1812         } else if (cmsg->cmsg_level == SOL_SOCKET
1813                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1814             struct ucred *cred = (struct ucred *)data;
1815             struct target_ucred *target_cred =
1816                 (struct target_ucred *)target_data;
1817 
1818             __get_user(cred->pid, &target_cred->pid);
1819             __get_user(cred->uid, &target_cred->uid);
1820             __get_user(cred->gid, &target_cred->gid);
1821         } else if (cmsg->cmsg_level == SOL_ALG) {
1822             uint32_t *dst = (uint32_t *)data;
1823 
1824             memcpy(dst, target_data, len);
1825             /* fix endianness of first 32-bit word */
1826             if (len >= sizeof(uint32_t)) {
1827                 *dst = tswap32(*dst);
1828             }
1829         } else {
1830             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1831                           cmsg->cmsg_level, cmsg->cmsg_type);
1832             memcpy(data, target_data, len);
1833         }
1834 
1835         cmsg = CMSG_NXTHDR(msgh, cmsg);
1836         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1837                                          target_cmsg_start);
1838     }
1839     unlock_user(target_cmsg, target_cmsg_addr, 0);
1840  the_end:
1841     msgh->msg_controllen = space;
1842     return 0;
1843 }
1844 
1845 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1846                                            struct msghdr *msgh)
1847 {
1848     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1849     abi_long msg_controllen;
1850     abi_ulong target_cmsg_addr;
1851     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1852     socklen_t space = 0;
1853 
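    /*
     * Inverse of target_to_host_cmsg: repack host control messages into
     * the guest's buffer, converting known payload types and reporting
     * truncation to the guest via MSG_CTRUNC when its buffer is too
     * small.
     */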
1854     msg_controllen = tswapal(target_msgh->msg_controllen);
1855     if (msg_controllen < sizeof (struct target_cmsghdr))
1856         goto the_end;
1857     target_cmsg_addr = tswapal(target_msgh->msg_control);
1858     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1859     target_cmsg_start = target_cmsg;
1860     if (!target_cmsg)
1861         return -TARGET_EFAULT;
1862 
1863     while (cmsg && target_cmsg) {
1864         void *data = CMSG_DATA(cmsg);
1865         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1866 
1867         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1868         int tgt_len, tgt_space;
1869 
1870         /* We never copy a half-header but may copy half-data;
1871          * this is Linux's behaviour in put_cmsg(). Note that
1872          * truncation here is a guest problem (which we report
1873          * to the guest via the CTRUNC bit), unlike truncation
1874          * in target_to_host_cmsg, which is a QEMU bug.
1875          */
1876         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1877             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1878             break;
1879         }
1880 
1881         if (cmsg->cmsg_level == SOL_SOCKET) {
1882             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1883         } else {
1884             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1885         }
1886         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1887 
1888         /* Payload types which need a different size of payload on
1889          * the target must adjust tgt_len here.
1890          */
1891         tgt_len = len;
1892         switch (cmsg->cmsg_level) {
1893         case SOL_SOCKET:
1894             switch (cmsg->cmsg_type) {
1895             case SO_TIMESTAMP:
1896                 tgt_len = sizeof(struct target_timeval);
1897                 break;
1898             default:
1899                 break;
1900             }
1901             break;
1902         default:
1903             break;
1904         }
1905 
1906         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1907             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1908             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1909         }
1910 
1911         /* We must now copy-and-convert len bytes of payload
1912          * into tgt_len bytes of destination space. Bear in mind
1913          * that in both source and destination we may be dealing
1914          * with a truncated value!
1915          */
1916         switch (cmsg->cmsg_level) {
1917         case SOL_SOCKET:
1918             switch (cmsg->cmsg_type) {
1919             case SCM_RIGHTS:
1920             {
1921                 int *fd = (int *)data;
1922                 int *target_fd = (int *)target_data;
1923                 int i, numfds = tgt_len / sizeof(int);
1924 
1925                 for (i = 0; i < numfds; i++) {
1926                     __put_user(fd[i], target_fd + i);
1927                 }
1928                 break;
1929             }
1930             case SO_TIMESTAMP:
1931             {
1932                 struct timeval *tv = (struct timeval *)data;
1933                 struct target_timeval *target_tv =
1934                     (struct target_timeval *)target_data;
1935 
1936                 if (len != sizeof(struct timeval) ||
1937                     tgt_len != sizeof(struct target_timeval)) {
1938                     goto unimplemented;
1939                 }
1940 
1941                 /* copy struct timeval to target */
1942                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1943                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1944                 break;
1945             }
1946             case SCM_CREDENTIALS:
1947             {
1948                 struct ucred *cred = (struct ucred *)data;
1949                 struct target_ucred *target_cred =
1950                     (struct target_ucred *)target_data;
1951 
1952                 __put_user(cred->pid, &target_cred->pid);
1953                 __put_user(cred->uid, &target_cred->uid);
1954                 __put_user(cred->gid, &target_cred->gid);
1955                 break;
1956             }
1957             default:
1958                 goto unimplemented;
1959             }
1960             break;
1961 
1962         case SOL_IP:
1963             switch (cmsg->cmsg_type) {
1964             case IP_TTL:
1965             {
1966                 uint32_t *v = (uint32_t *)data;
1967                 uint32_t *t_int = (uint32_t *)target_data;
1968 
1969                 if (len != sizeof(uint32_t) ||
1970                     tgt_len != sizeof(uint32_t)) {
1971                     goto unimplemented;
1972                 }
1973                 __put_user(*v, t_int);
1974                 break;
1975             }
1976             case IP_RECVERR:
1977             {
1978                 struct errhdr_t {
1979                    struct sock_extended_err ee;
1980                    struct sockaddr_in offender;
1981                 };
1982                 struct errhdr_t *errh = (struct errhdr_t *)data;
1983                 struct errhdr_t *target_errh =
1984                     (struct errhdr_t *)target_data;
1985 
1986                 if (len != sizeof(struct errhdr_t) ||
1987                     tgt_len != sizeof(struct errhdr_t)) {
1988                     goto unimplemented;
1989                 }
1990                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1991                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1992                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1993                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1994                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1995                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1996                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1997                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1998                     (void *) &errh->offender, sizeof(errh->offender));
1999                 break;
2000             }
2001             case IP_PKTINFO:
2002             {
2003                 struct in_pktinfo *pkti = data;
2004                 struct target_in_pktinfo *target_pi = target_data;
2005 
2006                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2007                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2008                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2009                 break;
2010             }
2011             default:
2012                 goto unimplemented;
2013             }
2014             break;
2015 
2016         case SOL_IPV6:
2017             switch (cmsg->cmsg_type) {
2018             case IPV6_HOPLIMIT:
2019             {
2020                 uint32_t *v = (uint32_t *)data;
2021                 uint32_t *t_int = (uint32_t *)target_data;
2022 
2023                 if (len != sizeof(uint32_t) ||
2024                     tgt_len != sizeof(uint32_t)) {
2025                     goto unimplemented;
2026                 }
2027                 __put_user(*v, t_int);
2028                 break;
2029             }
2030             case IPV6_RECVERR:
2031             {
2032                 struct errhdr6_t {
2033                    struct sock_extended_err ee;
2034                    struct sockaddr_in6 offender;
2035                 };
2036                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2037                 struct errhdr6_t *target_errh =
2038                     (struct errhdr6_t *)target_data;
2039 
2040                 if (len != sizeof(struct errhdr6_t) ||
2041                     tgt_len != sizeof(struct errhdr6_t)) {
2042                     goto unimplemented;
2043                 }
2044                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2045                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2046                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2047                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2048                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2049                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2050                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2051                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2052                     (void *) &errh->offender, sizeof(errh->offender));
2053                 break;
2054             }
2055             default:
2056                 goto unimplemented;
2057             }
2058             break;
2059 
2060         default:
2061         unimplemented:
2062             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2063                           cmsg->cmsg_level, cmsg->cmsg_type);
2064             memcpy(target_data, data, MIN(len, tgt_len));
2065             if (tgt_len > len) {
2066                 memset(target_data + len, 0, tgt_len - len);
2067             }
2068         }
2069 
2070         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2071         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2072         if (msg_controllen < tgt_space) {
2073             tgt_space = msg_controllen;
2074         }
2075         msg_controllen -= tgt_space;
2076         space += tgt_space;
2077         cmsg = CMSG_NXTHDR(msgh, cmsg);
2078         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2079                                          target_cmsg_start);
2080     }
2081     unlock_user(target_cmsg, target_cmsg_addr, space);
2082  the_end:
2083     target_msgh->msg_controllen = tswapal(space);
2084     return 0;
2085 }
2086 
2087 /* do_setsockopt() must return target values and target errnos. */
2088 static abi_long do_setsockopt(int sockfd, int level, int optname,
2089                               abi_ulong optval_addr, socklen_t optlen)
2090 {
2091     abi_long ret;
2092     int val;
2093 
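    /*
     * Most options take a plain int and only need the value read from
     * guest memory; structured options (multicast requests, socket
     * filters, timeouts, ...) are converted field by field below.
     */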
2094     switch(level) {
2095     case SOL_TCP:
2096     case SOL_UDP:
2097         /* TCP and UDP options all take an 'int' value.  */
2098         if (optlen < sizeof(uint32_t))
2099             return -TARGET_EINVAL;
2100 
2101         if (get_user_u32(val, optval_addr))
2102             return -TARGET_EFAULT;
2103         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2104         break;
2105     case SOL_IP:
2106         switch(optname) {
2107         case IP_TOS:
2108         case IP_TTL:
2109         case IP_HDRINCL:
2110         case IP_ROUTER_ALERT:
2111         case IP_RECVOPTS:
2112         case IP_RETOPTS:
2113         case IP_PKTINFO:
2114         case IP_MTU_DISCOVER:
2115         case IP_RECVERR:
2116         case IP_RECVTTL:
2117         case IP_RECVTOS:
2118 #ifdef IP_FREEBIND
2119         case IP_FREEBIND:
2120 #endif
2121         case IP_MULTICAST_TTL:
2122         case IP_MULTICAST_LOOP:
2123             val = 0;
2124             if (optlen >= sizeof(uint32_t)) {
2125                 if (get_user_u32(val, optval_addr))
2126                     return -TARGET_EFAULT;
2127             } else if (optlen >= 1) {
2128                 if (get_user_u8(val, optval_addr))
2129                     return -TARGET_EFAULT;
2130             }
2131             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2132             break;
2133         case IP_MULTICAST_IF:
2134         case IP_ADD_MEMBERSHIP:
2135         case IP_DROP_MEMBERSHIP:
2136         {
2137             struct ip_mreqn ip_mreq;
2138             struct target_ip_mreqn *target_smreqn;
2139             int min_size;
2140 
2141             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2142                               sizeof(struct target_ip_mreq));
2143 
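            /*
             * The guest may pass a struct ip_mreq, a struct ip_mreqn or,
             * for IP_MULTICAST_IF, just a struct in_addr; accept any
             * length between the minimum for the option and
             * sizeof(struct ip_mreqn).
             */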
2144             if (optname == IP_MULTICAST_IF) {
2145                 min_size = sizeof(struct in_addr);
2146             } else {
2147                 min_size = sizeof(struct target_ip_mreq);
2148             }
2149             if (optlen < min_size ||
2150                 optlen > sizeof (struct target_ip_mreqn)) {
2151                 return -TARGET_EINVAL;
2152             }
2153 
2154             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2155             if (!target_smreqn) {
2156                 return -TARGET_EFAULT;
2157             }
2158             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2159             if (optlen >= sizeof(struct target_ip_mreq)) {
2160                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2161                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2162                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2163                     optlen = sizeof(struct ip_mreqn);
2164                 }
2165             }
2166             unlock_user(target_smreqn, optval_addr, 0);
2167             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2168             break;
2169         }
2170         case IP_BLOCK_SOURCE:
2171         case IP_UNBLOCK_SOURCE:
2172         case IP_ADD_SOURCE_MEMBERSHIP:
2173         case IP_DROP_SOURCE_MEMBERSHIP:
2174         {
2175             struct ip_mreq_source *ip_mreq_source;
2176 
2177             if (optlen != sizeof (struct target_ip_mreq_source))
2178                 return -TARGET_EINVAL;
2179 
2180             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2181             if (!ip_mreq_source) {
2182                 return -TARGET_EFAULT;
2183             }
2184             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2185             unlock_user(ip_mreq_source, optval_addr, 0);
2186             break;
2187         }
2188         default:
2189             goto unimplemented;
2190         }
2191         break;
2192     case SOL_IPV6:
2193         switch (optname) {
2194         case IPV6_MTU_DISCOVER:
2195         case IPV6_MTU:
2196         case IPV6_V6ONLY:
2197         case IPV6_RECVPKTINFO:
2198         case IPV6_UNICAST_HOPS:
2199         case IPV6_MULTICAST_HOPS:
2200         case IPV6_MULTICAST_LOOP:
2201         case IPV6_RECVERR:
2202         case IPV6_RECVHOPLIMIT:
2203         case IPV6_2292HOPLIMIT:
2204         case IPV6_CHECKSUM:
2205         case IPV6_ADDRFORM:
2206         case IPV6_2292PKTINFO:
2207         case IPV6_RECVTCLASS:
2208         case IPV6_RECVRTHDR:
2209         case IPV6_2292RTHDR:
2210         case IPV6_RECVHOPOPTS:
2211         case IPV6_2292HOPOPTS:
2212         case IPV6_RECVDSTOPTS:
2213         case IPV6_2292DSTOPTS:
2214         case IPV6_TCLASS:
2215         case IPV6_ADDR_PREFERENCES:
2216 #ifdef IPV6_RECVPATHMTU
2217         case IPV6_RECVPATHMTU:
2218 #endif
2219 #ifdef IPV6_TRANSPARENT
2220         case IPV6_TRANSPARENT:
2221 #endif
2222 #ifdef IPV6_FREEBIND
2223         case IPV6_FREEBIND:
2224 #endif
2225 #ifdef IPV6_RECVORIGDSTADDR
2226         case IPV6_RECVORIGDSTADDR:
2227 #endif
2228             val = 0;
2229             if (optlen < sizeof(uint32_t)) {
2230                 return -TARGET_EINVAL;
2231             }
2232             if (get_user_u32(val, optval_addr)) {
2233                 return -TARGET_EFAULT;
2234             }
2235             ret = get_errno(setsockopt(sockfd, level, optname,
2236                                        &val, sizeof(val)));
2237             break;
2238         case IPV6_PKTINFO:
2239         {
2240             struct in6_pktinfo pki;
2241 
2242             if (optlen < sizeof(pki)) {
2243                 return -TARGET_EINVAL;
2244             }
2245 
2246             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2247                 return -TARGET_EFAULT;
2248             }
2249 
2250             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2251 
2252             ret = get_errno(setsockopt(sockfd, level, optname,
2253                                        &pki, sizeof(pki)));
2254             break;
2255         }
2256         case IPV6_ADD_MEMBERSHIP:
2257         case IPV6_DROP_MEMBERSHIP:
2258         {
2259             struct ipv6_mreq ipv6mreq;
2260 
2261             if (optlen < sizeof(ipv6mreq)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2266                 return -TARGET_EFAULT;
2267             }
2268 
2269             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2270 
2271             ret = get_errno(setsockopt(sockfd, level, optname,
2272                                        &ipv6mreq, sizeof(ipv6mreq)));
2273             break;
2274         }
2275         default:
2276             goto unimplemented;
2277         }
2278         break;
2279     case SOL_ICMPV6:
2280         switch (optname) {
2281         case ICMPV6_FILTER:
2282         {
2283             struct icmp6_filter icmp6f;
2284 
2285             if (optlen > sizeof(icmp6f)) {
2286                 optlen = sizeof(icmp6f);
2287             }
2288 
2289             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2290                 return -TARGET_EFAULT;
2291             }
2292 
2293             for (val = 0; val < 8; val++) {
2294                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2295             }
2296 
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        &icmp6f, optlen));
2299             break;
2300         }
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305     case SOL_RAW:
2306         switch (optname) {
2307         case ICMP_FILTER:
2308         case IPV6_CHECKSUM:
2309             /* these take a u32 value */
2310             if (optlen < sizeof(uint32_t)) {
2311                 return -TARGET_EINVAL;
2312             }
2313 
2314             if (get_user_u32(val, optval_addr)) {
2315                 return -TARGET_EFAULT;
2316             }
2317             ret = get_errno(setsockopt(sockfd, level, optname,
2318                                        &val, sizeof(val)));
2319             break;
2320 
2321         default:
2322             goto unimplemented;
2323         }
2324         break;
2325 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2326     case SOL_ALG:
2327         switch (optname) {
2328         case ALG_SET_KEY:
2329         {
2330             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2331             if (!alg_key) {
2332                 return -TARGET_EFAULT;
2333             }
2334             ret = get_errno(setsockopt(sockfd, level, optname,
2335                                        alg_key, optlen));
2336             unlock_user(alg_key, optval_addr, optlen);
2337             break;
2338         }
2339         case ALG_SET_AEAD_AUTHSIZE:
2340         {
2341             ret = get_errno(setsockopt(sockfd, level, optname,
2342                                        NULL, optlen));
2343             break;
2344         }
2345         default:
2346             goto unimplemented;
2347         }
2348         break;
2349 #endif
2350     case TARGET_SOL_SOCKET:
2351         switch (optname) {
2352         case TARGET_SO_RCVTIMEO:
2353         case TARGET_SO_SNDTIMEO:
2354         {
2355                 struct timeval tv;
2356 
2357                 if (optlen != sizeof(struct target_timeval)) {
2358                     return -TARGET_EINVAL;
2359                 }
2360 
2361                 if (copy_from_user_timeval(&tv, optval_addr)) {
2362                     return -TARGET_EFAULT;
2363                 }
2364 
2365                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2366                                 optname == TARGET_SO_RCVTIMEO ?
2367                                     SO_RCVTIMEO : SO_SNDTIMEO,
2368                                 &tv, sizeof(tv)));
2369                 return ret;
2370         }
2371         case TARGET_SO_ATTACH_FILTER:
2372         {
2373                 struct target_sock_fprog *tfprog;
2374                 struct target_sock_filter *tfilter;
2375                 struct sock_fprog fprog;
2376                 struct sock_filter *filter;
2377                 int i;
2378 
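                /*
                 * Copy the guest's BPF program into host sock_filter
                 * structures, byte-swapping each instruction, before
                 * attaching it with SO_ATTACH_FILTER.
                 */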
2379                 if (optlen != sizeof(*tfprog)) {
2380                     return -TARGET_EINVAL;
2381                 }
2382                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2383                     return -TARGET_EFAULT;
2384                 }
2385                 if (!lock_user_struct(VERIFY_READ, tfilter,
2386                                       tswapal(tfprog->filter), 0)) {
2387                     unlock_user_struct(tfprog, optval_addr, 1);
2388                     return -TARGET_EFAULT;
2389                 }
2390 
2391                 fprog.len = tswap16(tfprog->len);
2392                 filter = g_try_new(struct sock_filter, fprog.len);
2393                 if (filter == NULL) {
2394                     unlock_user_struct(tfilter, tfprog->filter, 1);
2395                     unlock_user_struct(tfprog, optval_addr, 1);
2396                     return -TARGET_ENOMEM;
2397                 }
2398                 for (i = 0; i < fprog.len; i++) {
2399                     filter[i].code = tswap16(tfilter[i].code);
2400                     filter[i].jt = tfilter[i].jt;
2401                     filter[i].jf = tfilter[i].jf;
2402                     filter[i].k = tswap32(tfilter[i].k);
2403                 }
2404                 fprog.filter = filter;
2405 
2406                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2407                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2408                 g_free(filter);
2409 
2410                 unlock_user_struct(tfilter, tfprog->filter, 1);
2411                 unlock_user_struct(tfprog, optval_addr, 1);
2412                 return ret;
2413         }
2414         case TARGET_SO_BINDTODEVICE:
2415         {
2416                 char *dev_ifname, *addr_ifname;
2417 
2418                 if (optlen > IFNAMSIZ - 1) {
2419                     optlen = IFNAMSIZ - 1;
2420                 }
2421                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2422                 if (!dev_ifname) {
2423                     return -TARGET_EFAULT;
2424                 }
2425                 optname = SO_BINDTODEVICE;
2426                 addr_ifname = alloca(IFNAMSIZ);
2427                 memcpy(addr_ifname, dev_ifname, optlen);
2428                 addr_ifname[optlen] = 0;
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2430                                            addr_ifname, optlen));
2431                 unlock_user(dev_ifname, optval_addr, 0);
2432                 return ret;
2433         }
2434         case TARGET_SO_LINGER:
2435         {
2436                 struct linger lg;
2437                 struct target_linger *tlg;
2438 
2439                 if (optlen != sizeof(struct target_linger)) {
2440                     return -TARGET_EINVAL;
2441                 }
2442                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2443                     return -TARGET_EFAULT;
2444                 }
2445                 __get_user(lg.l_onoff, &tlg->l_onoff);
2446                 __get_user(lg.l_linger, &tlg->l_linger);
2447                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2448                                 &lg, sizeof(lg)));
2449                 unlock_user_struct(tlg, optval_addr, 0);
2450                 return ret;
2451         }
2452             /* Options with 'int' argument.  */
2453         case TARGET_SO_DEBUG:
2454 		optname = SO_DEBUG;
2455 		break;
2456         case TARGET_SO_REUSEADDR:
2457 		optname = SO_REUSEADDR;
2458 		break;
2459 #ifdef SO_REUSEPORT
2460         case TARGET_SO_REUSEPORT:
2461                 optname = SO_REUSEPORT;
2462                 break;
2463 #endif
2464         case TARGET_SO_TYPE:
2465 		optname = SO_TYPE;
2466 		break;
2467         case TARGET_SO_ERROR:
2468 		optname = SO_ERROR;
2469 		break;
2470         case TARGET_SO_DONTROUTE:
2471 		optname = SO_DONTROUTE;
2472 		break;
2473         case TARGET_SO_BROADCAST:
2474 		optname = SO_BROADCAST;
2475 		break;
2476         case TARGET_SO_SNDBUF:
2477 		optname = SO_SNDBUF;
2478 		break;
2479         case TARGET_SO_SNDBUFFORCE:
2480                 optname = SO_SNDBUFFORCE;
2481                 break;
2482         case TARGET_SO_RCVBUF:
2483 		optname = SO_RCVBUF;
2484 		break;
2485         case TARGET_SO_RCVBUFFORCE:
2486                 optname = SO_RCVBUFFORCE;
2487                 break;
2488         case TARGET_SO_KEEPALIVE:
2489 		optname = SO_KEEPALIVE;
2490 		break;
2491         case TARGET_SO_OOBINLINE:
2492 		optname = SO_OOBINLINE;
2493 		break;
2494         case TARGET_SO_NO_CHECK:
2495 		optname = SO_NO_CHECK;
2496 		break;
2497         case TARGET_SO_PRIORITY:
2498 		optname = SO_PRIORITY;
2499 		break;
2500 #ifdef SO_BSDCOMPAT
2501         case TARGET_SO_BSDCOMPAT:
2502 		optname = SO_BSDCOMPAT;
2503 		break;
2504 #endif
2505         case TARGET_SO_PASSCRED:
2506 		optname = SO_PASSCRED;
2507 		break;
2508         case TARGET_SO_PASSSEC:
2509                 optname = SO_PASSSEC;
2510                 break;
2511         case TARGET_SO_TIMESTAMP:
2512 		optname = SO_TIMESTAMP;
2513 		break;
2514         case TARGET_SO_RCVLOWAT:
2515 		optname = SO_RCVLOWAT;
2516 		break;
2517         default:
2518             goto unimplemented;
2519         }
2520 	if (optlen < sizeof(uint32_t))
2521             return -TARGET_EINVAL;
2522 
2523 	if (get_user_u32(val, optval_addr))
2524             return -TARGET_EFAULT;
2525 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2526         break;
2527 #ifdef SOL_NETLINK
2528     case SOL_NETLINK:
2529         switch (optname) {
2530         case NETLINK_PKTINFO:
2531         case NETLINK_ADD_MEMBERSHIP:
2532         case NETLINK_DROP_MEMBERSHIP:
2533         case NETLINK_BROADCAST_ERROR:
2534         case NETLINK_NO_ENOBUFS:
2535 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2536         case NETLINK_LISTEN_ALL_NSID:
2537         case NETLINK_CAP_ACK:
2538 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2539 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2540         case NETLINK_EXT_ACK:
2541 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2542 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2543         case NETLINK_GET_STRICT_CHK:
2544 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2545             break;
2546         default:
2547             goto unimplemented;
2548         }
2549         val = 0;
2550         if (optlen < sizeof(uint32_t)) {
2551             return -TARGET_EINVAL;
2552         }
2553         if (get_user_u32(val, optval_addr)) {
2554             return -TARGET_EFAULT;
2555         }
2556         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2557                                    sizeof(val)));
2558         break;
2559 #endif /* SOL_NETLINK */
2560     default:
2561     unimplemented:
2562         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2563                       level, optname);
2564         ret = -TARGET_ENOPROTOOPT;
2565     }
2566     return ret;
2567 }
2568 
2569 /* do_getsockopt() must return target values and target errnos. */
2570 static abi_long do_getsockopt(int sockfd, int level, int optname,
2571                               abi_ulong optval_addr, abi_ulong optlen)
2572 {
2573     abi_long ret;
2574     int len, val;
2575     socklen_t lv;
2576 
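    /*
     * The result length is negotiated in both directions: read the size
     * of the guest's buffer from optlen, clamp whatever the host returns
     * to that size, and write the final length back to optlen.
     */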
2577     switch(level) {
2578     case TARGET_SOL_SOCKET:
2579         level = SOL_SOCKET;
2580         switch (optname) {
2581         /* These don't just return a single integer */
2582         case TARGET_SO_PEERNAME:
2583             goto unimplemented;
2584         case TARGET_SO_RCVTIMEO: {
2585             struct timeval tv;
2586             socklen_t tvlen;
2587 
2588             optname = SO_RCVTIMEO;
2589 
2590 get_timeout:
2591             if (get_user_u32(len, optlen)) {
2592                 return -TARGET_EFAULT;
2593             }
2594             if (len < 0) {
2595                 return -TARGET_EINVAL;
2596             }
2597 
2598             tvlen = sizeof(tv);
2599             ret = get_errno(getsockopt(sockfd, level, optname,
2600                                        &tv, &tvlen));
2601             if (ret < 0) {
2602                 return ret;
2603             }
2604             if (len > sizeof(struct target_timeval)) {
2605                 len = sizeof(struct target_timeval);
2606             }
2607             if (copy_to_user_timeval(optval_addr, &tv)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             if (put_user_u32(len, optlen)) {
2611                 return -TARGET_EFAULT;
2612             }
2613             break;
2614         }
2615         case TARGET_SO_SNDTIMEO:
2616             optname = SO_SNDTIMEO;
2617             goto get_timeout;
2618         case TARGET_SO_PEERCRED: {
2619             struct ucred cr;
2620             socklen_t crlen;
2621             struct target_ucred *tcr;
2622 
2623             if (get_user_u32(len, optlen)) {
2624                 return -TARGET_EFAULT;
2625             }
2626             if (len < 0) {
2627                 return -TARGET_EINVAL;
2628             }
2629 
2630             crlen = sizeof(cr);
2631             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2632                                        &cr, &crlen));
2633             if (ret < 0) {
2634                 return ret;
2635             }
2636             if (len > crlen) {
2637                 len = crlen;
2638             }
2639             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             __put_user(cr.pid, &tcr->pid);
2643             __put_user(cr.uid, &tcr->uid);
2644             __put_user(cr.gid, &tcr->gid);
2645             unlock_user_struct(tcr, optval_addr, 1);
2646             if (put_user_u32(len, optlen)) {
2647                 return -TARGET_EFAULT;
2648             }
2649             break;
2650         }
2651         case TARGET_SO_PEERSEC: {
2652             char *name;
2653 
2654             if (get_user_u32(len, optlen)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             if (len < 0) {
2658                 return -TARGET_EINVAL;
2659             }
2660             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2661             if (!name) {
2662                 return -TARGET_EFAULT;
2663             }
2664             lv = len;
2665             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2666                                        name, &lv));
2667             if (put_user_u32(lv, optlen)) {
2668                 ret = -TARGET_EFAULT;
2669             }
2670             unlock_user(name, optval_addr, lv);
2671             break;
2672         }
2673         case TARGET_SO_LINGER:
2674         {
2675             struct linger lg;
2676             socklen_t lglen;
2677             struct target_linger *tlg;
2678 
2679             if (get_user_u32(len, optlen)) {
2680                 return -TARGET_EFAULT;
2681             }
2682             if (len < 0) {
2683                 return -TARGET_EINVAL;
2684             }
2685 
2686             lglen = sizeof(lg);
2687             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2688                                        &lg, &lglen));
2689             if (ret < 0) {
2690                 return ret;
2691             }
2692             if (len > lglen) {
2693                 len = lglen;
2694             }
2695             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2696                 return -TARGET_EFAULT;
2697             }
2698             __put_user(lg.l_onoff, &tlg->l_onoff);
2699             __put_user(lg.l_linger, &tlg->l_linger);
2700             unlock_user_struct(tlg, optval_addr, 1);
2701             if (put_user_u32(len, optlen)) {
2702                 return -TARGET_EFAULT;
2703             }
2704             break;
2705         }
2706         /* Options with 'int' argument.  */
2707         case TARGET_SO_DEBUG:
2708             optname = SO_DEBUG;
2709             goto int_case;
2710         case TARGET_SO_REUSEADDR:
2711             optname = SO_REUSEADDR;
2712             goto int_case;
2713 #ifdef SO_REUSEPORT
2714         case TARGET_SO_REUSEPORT:
2715             optname = SO_REUSEPORT;
2716             goto int_case;
2717 #endif
2718         case TARGET_SO_TYPE:
2719             optname = SO_TYPE;
2720             goto int_case;
2721         case TARGET_SO_ERROR:
2722             optname = SO_ERROR;
2723             goto int_case;
2724         case TARGET_SO_DONTROUTE:
2725             optname = SO_DONTROUTE;
2726             goto int_case;
2727         case TARGET_SO_BROADCAST:
2728             optname = SO_BROADCAST;
2729             goto int_case;
2730         case TARGET_SO_SNDBUF:
2731             optname = SO_SNDBUF;
2732             goto int_case;
2733         case TARGET_SO_RCVBUF:
2734             optname = SO_RCVBUF;
2735             goto int_case;
2736         case TARGET_SO_KEEPALIVE:
2737             optname = SO_KEEPALIVE;
2738             goto int_case;
2739         case TARGET_SO_OOBINLINE:
2740             optname = SO_OOBINLINE;
2741             goto int_case;
2742         case TARGET_SO_NO_CHECK:
2743             optname = SO_NO_CHECK;
2744             goto int_case;
2745         case TARGET_SO_PRIORITY:
2746             optname = SO_PRIORITY;
2747             goto int_case;
2748 #ifdef SO_BSDCOMPAT
2749         case TARGET_SO_BSDCOMPAT:
2750             optname = SO_BSDCOMPAT;
2751             goto int_case;
2752 #endif
2753         case TARGET_SO_PASSCRED:
2754             optname = SO_PASSCRED;
2755             goto int_case;
2756         case TARGET_SO_TIMESTAMP:
2757             optname = SO_TIMESTAMP;
2758             goto int_case;
2759         case TARGET_SO_RCVLOWAT:
2760             optname = SO_RCVLOWAT;
2761             goto int_case;
2762         case TARGET_SO_ACCEPTCONN:
2763             optname = SO_ACCEPTCONN;
2764             goto int_case;
2765         case TARGET_SO_PROTOCOL:
2766             optname = SO_PROTOCOL;
2767             goto int_case;
2768         case TARGET_SO_DOMAIN:
2769             optname = SO_DOMAIN;
2770             goto int_case;
2771         default:
2772             goto int_case;
2773         }
2774         break;
2775     case SOL_TCP:
2776     case SOL_UDP:
2777         /* TCP and UDP options all take an 'int' value.  */
2778     int_case:
2779         if (get_user_u32(len, optlen))
2780             return -TARGET_EFAULT;
2781         if (len < 0)
2782             return -TARGET_EINVAL;
2783         lv = sizeof(lv);
2784         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2785         if (ret < 0)
2786             return ret;
2787         switch (optname) {
2788         case SO_TYPE:
2789             val = host_to_target_sock_type(val);
2790             break;
2791         case SO_ERROR:
2792             val = host_to_target_errno(val);
2793             break;
2794         }
2795         if (len > lv)
2796             len = lv;
2797         if (len == 4) {
2798             if (put_user_u32(val, optval_addr))
2799                 return -TARGET_EFAULT;
2800         } else {
2801             if (put_user_u8(val, optval_addr))
2802                 return -TARGET_EFAULT;
2803         }
2804         if (put_user_u32(len, optlen))
2805             return -TARGET_EFAULT;
2806         break;
2807     case SOL_IP:
2808         switch(optname) {
2809         case IP_TOS:
2810         case IP_TTL:
2811         case IP_HDRINCL:
2812         case IP_ROUTER_ALERT:
2813         case IP_RECVOPTS:
2814         case IP_RETOPTS:
2815         case IP_PKTINFO:
2816         case IP_MTU_DISCOVER:
2817         case IP_RECVERR:
2818         case IP_RECVTOS:
2819 #ifdef IP_FREEBIND
2820         case IP_FREEBIND:
2821 #endif
2822         case IP_MULTICAST_TTL:
2823         case IP_MULTICAST_LOOP:
2824             if (get_user_u32(len, optlen))
2825                 return -TARGET_EFAULT;
2826             if (len < 0)
2827                 return -TARGET_EINVAL;
2828             lv = sizeof(lv);
2829             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2830             if (ret < 0)
2831                 return ret;
2832             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2833                 len = 1;
2834                 if (put_user_u32(len, optlen)
2835                     || put_user_u8(val, optval_addr))
2836                     return -TARGET_EFAULT;
2837             } else {
2838                 if (len > sizeof(int))
2839                     len = sizeof(int);
2840                 if (put_user_u32(len, optlen)
2841                     || put_user_u32(val, optval_addr))
2842                     return -TARGET_EFAULT;
2843             }
2844             break;
2845         default:
2846             ret = -TARGET_ENOPROTOOPT;
2847             break;
2848         }
2849         break;
2850     case SOL_IPV6:
2851         switch (optname) {
2852         case IPV6_MTU_DISCOVER:
2853         case IPV6_MTU:
2854         case IPV6_V6ONLY:
2855         case IPV6_RECVPKTINFO:
2856         case IPV6_UNICAST_HOPS:
2857         case IPV6_MULTICAST_HOPS:
2858         case IPV6_MULTICAST_LOOP:
2859         case IPV6_RECVERR:
2860         case IPV6_RECVHOPLIMIT:
2861         case IPV6_2292HOPLIMIT:
2862         case IPV6_CHECKSUM:
2863         case IPV6_ADDRFORM:
2864         case IPV6_2292PKTINFO:
2865         case IPV6_RECVTCLASS:
2866         case IPV6_RECVRTHDR:
2867         case IPV6_2292RTHDR:
2868         case IPV6_RECVHOPOPTS:
2869         case IPV6_2292HOPOPTS:
2870         case IPV6_RECVDSTOPTS:
2871         case IPV6_2292DSTOPTS:
2872         case IPV6_TCLASS:
2873         case IPV6_ADDR_PREFERENCES:
2874 #ifdef IPV6_RECVPATHMTU
2875         case IPV6_RECVPATHMTU:
2876 #endif
2877 #ifdef IPV6_TRANSPARENT
2878         case IPV6_TRANSPARENT:
2879 #endif
2880 #ifdef IPV6_FREEBIND
2881         case IPV6_FREEBIND:
2882 #endif
2883 #ifdef IPV6_RECVORIGDSTADDR
2884         case IPV6_RECVORIGDSTADDR:
2885 #endif
2886             if (get_user_u32(len, optlen))
2887                 return -TARGET_EFAULT;
2888             if (len < 0)
2889                 return -TARGET_EINVAL;
2890             lv = sizeof(lv);
2891             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2892             if (ret < 0)
2893                 return ret;
2894             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2895                 len = 1;
2896                 if (put_user_u32(len, optlen)
2897                     || put_user_u8(val, optval_addr))
2898                     return -TARGET_EFAULT;
2899             } else {
2900                 if (len > sizeof(int))
2901                     len = sizeof(int);
2902                 if (put_user_u32(len, optlen)
2903                     || put_user_u32(val, optval_addr))
2904                     return -TARGET_EFAULT;
2905             }
2906             break;
2907         default:
2908             ret = -TARGET_ENOPROTOOPT;
2909             break;
2910         }
2911         break;
2912 #ifdef SOL_NETLINK
2913     case SOL_NETLINK:
2914         switch (optname) {
2915         case NETLINK_PKTINFO:
2916         case NETLINK_BROADCAST_ERROR:
2917         case NETLINK_NO_ENOBUFS:
2918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2919         case NETLINK_LISTEN_ALL_NSID:
2920         case NETLINK_CAP_ACK:
2921 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2922 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2923         case NETLINK_EXT_ACK:
2924 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2926         case NETLINK_GET_STRICT_CHK:
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2928             if (get_user_u32(len, optlen)) {
2929                 return -TARGET_EFAULT;
2930             }
2931             if (len != sizeof(val)) {
2932                 return -TARGET_EINVAL;
2933             }
2934             lv = len;
2935             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2936             if (ret < 0) {
2937                 return ret;
2938             }
2939             if (put_user_u32(lv, optlen)
2940                 || put_user_u32(val, optval_addr)) {
2941                 return -TARGET_EFAULT;
2942             }
2943             break;
2944 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2945         case NETLINK_LIST_MEMBERSHIPS:
2946         {
2947             uint32_t *results;
2948             int i;
2949             if (get_user_u32(len, optlen)) {
2950                 return -TARGET_EFAULT;
2951             }
2952             if (len < 0) {
2953                 return -TARGET_EINVAL;
2954             }
2955             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2956             if (!results && len > 0) {
2957                 return -TARGET_EFAULT;
2958             }
2959             lv = len;
2960             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2961             if (ret < 0) {
2962                 unlock_user(results, optval_addr, 0);
2963                 return ret;
2964             }
2965             /* Convert the returned values from host to target byte order. */
2966             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2967                 results[i] = tswap32(results[i]);
2968             }
2969             if (put_user_u32(lv, optlen)) {
2970                 return -TARGET_EFAULT;
2971             }
2972             unlock_user(results, optval_addr, 0);
2973             break;
2974         }
2975 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2976         default:
2977             goto unimplemented;
2978         }
2979         break;
2980 #endif /* SOL_NETLINK */
2981     default:
2982     unimplemented:
2983         qemu_log_mask(LOG_UNIMP,
2984                       "getsockopt level=%d optname=%d not yet supported\n",
2985                       level, optname);
2986         ret = -TARGET_EOPNOTSUPP;
2987         break;
2988     }
2989     return ret;
2990 }
2991 
2992 /* Convert target low/high pair representing file offset into the host
2993  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2994  * as the kernel doesn't handle them either.
2995  */
2996 static void target_to_host_low_high(abi_ulong tlow,
2997                                     abi_ulong thigh,
2998                                     unsigned long *hlow,
2999                                     unsigned long *hhigh)
3000 {
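    /*
     * For example, on a 32-bit target with a 64-bit host, tlow 0x00001000
     * and thigh 0x2 combine into off = 0x200001000; *hlow then receives
     * the whole offset and *hhigh ends up as 0.
     */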
3001     uint64_t off = tlow |
3002         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3003         TARGET_LONG_BITS / 2;
3004 
3005     *hlow = off;
3006     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3007 }
3008 
3009 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3010                                 abi_ulong count, int copy)
3011 {
3012     struct target_iovec *target_vec;
3013     struct iovec *vec;
3014     abi_ulong total_len, max_len;
3015     int i;
3016     int err = 0;
3017     bool bad_address = false;
3018 
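    /*
     * Convert a guest iovec array into locked host iovecs, mirroring the
     * kernel's checks: reject bad counts and negative lengths, fault only
     * if the first buffer is bad, and turn later bad buffers into
     * zero-length entries so the syscall performs a partial transfer.
     */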
3019     if (count == 0) {
3020         errno = 0;
3021         return NULL;
3022     }
3023     if (count > IOV_MAX) {
3024         errno = EINVAL;
3025         return NULL;
3026     }
3027 
3028     vec = g_try_new0(struct iovec, count);
3029     if (vec == NULL) {
3030         errno = ENOMEM;
3031         return NULL;
3032     }
3033 
3034     target_vec = lock_user(VERIFY_READ, target_addr,
3035                            count * sizeof(struct target_iovec), 1);
3036     if (target_vec == NULL) {
3037         err = EFAULT;
3038         goto fail2;
3039     }
3040 
3041     /* ??? If host page size > target page size, this will result in a
3042        value larger than what we can actually support.  */
3043     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3044     total_len = 0;
3045 
3046     for (i = 0; i < count; i++) {
3047         abi_ulong base = tswapal(target_vec[i].iov_base);
3048         abi_long len = tswapal(target_vec[i].iov_len);
3049 
3050         if (len < 0) {
3051             err = EINVAL;
3052             goto fail;
3053         } else if (len == 0) {
3054             /* Zero length pointer is ignored.  */
3055             vec[i].iov_base = 0;
3056         } else {
3057             vec[i].iov_base = lock_user(type, base, len, copy);
3058             /* If the first buffer pointer is bad, this is a fault.  But
3059              * subsequent bad buffers will result in a partial write; this
3060              * is realized by filling the vector with null pointers and
3061              * zero lengths. */
3062             if (!vec[i].iov_base) {
3063                 if (i == 0) {
3064                     err = EFAULT;
3065                     goto fail;
3066                 } else {
3067                     bad_address = true;
3068                 }
3069             }
3070             if (bad_address) {
3071                 len = 0;
3072             }
3073             if (len > max_len - total_len) {
3074                 len = max_len - total_len;
3075             }
3076         }
3077         vec[i].iov_len = len;
3078         total_len += len;
3079     }
3080 
3081     unlock_user(target_vec, target_addr, 0);
3082     return vec;
3083 
3084  fail:
3085     while (--i >= 0) {
3086         if (tswapal(target_vec[i].iov_len) > 0) {
3087             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3088         }
3089     }
3090     unlock_user(target_vec, target_addr, 0);
3091  fail2:
3092     g_free(vec);
3093     errno = err;
3094     return NULL;
3095 }
3096 
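/*
 * Release an iovec obtained from lock_iovec().  The "copy" flag selects
 * whether the locked buffers are copied back to guest memory (non-zero for
 * read-style operations) or simply released (zero for write-style ones).
 */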
3097 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3098                          abi_ulong count, int copy)
3099 {
3100     struct target_iovec *target_vec;
3101     int i;
3102 
3103     target_vec = lock_user(VERIFY_READ, target_addr,
3104                            count * sizeof(struct target_iovec), 1);
3105     if (target_vec) {
3106         for (i = 0; i < count; i++) {
3107             abi_ulong base = tswapal(target_vec[i].iov_base);
3108             abi_long len = tswapal(target_vec[i].iov_len);
3109             if (len < 0) {
3110                 break;
3111             }
3112             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3113         }
3114         unlock_user(target_vec, target_addr, 0);
3115     }
3116 
3117     g_free(vec);
3118 }
3119 
3120 static inline int target_to_host_sock_type(int *type)
3121 {
3122     int host_type = 0;
3123     int target_type = *type;
3124 
3125     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3126     case TARGET_SOCK_DGRAM:
3127         host_type = SOCK_DGRAM;
3128         break;
3129     case TARGET_SOCK_STREAM:
3130         host_type = SOCK_STREAM;
3131         break;
3132     default:
3133         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3134         break;
3135     }
3136     if (target_type & TARGET_SOCK_CLOEXEC) {
3137 #if defined(SOCK_CLOEXEC)
3138         host_type |= SOCK_CLOEXEC;
3139 #else
3140         return -TARGET_EINVAL;
3141 #endif
3142     }
3143     if (target_type & TARGET_SOCK_NONBLOCK) {
3144 #if defined(SOCK_NONBLOCK)
3145         host_type |= SOCK_NONBLOCK;
3146 #elif !defined(O_NONBLOCK)
3147         return -TARGET_EINVAL;
3148 #endif
3149     }
3150     *type = host_type;
3151     return 0;
3152 }
3153 
3154 /* Try to emulate socket type flags after socket creation.  */
3155 static int sock_flags_fixup(int fd, int target_type)
3156 {
3157 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3158     if (target_type & TARGET_SOCK_NONBLOCK) {
3159         int flags = fcntl(fd, F_GETFL);
3160         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3161             close(fd);
3162             return -TARGET_EINVAL;
3163         }
3164     }
3165 #endif
3166     return fd;
3167 }
3168 
3169 /* do_socket() Must return target values and target errnos. */
3170 static abi_long do_socket(int domain, int type, int protocol)
3171 {
3172     int target_type = type;
3173     int ret;
3174 
3175     ret = target_to_host_sock_type(&type);
3176     if (ret) {
3177         return ret;
3178     }
3179 
3180     if (domain == PF_NETLINK && !(
3181 #ifdef CONFIG_RTNETLINK
3182          protocol == NETLINK_ROUTE ||
3183 #endif
3184          protocol == NETLINK_KOBJECT_UEVENT ||
3185          protocol == NETLINK_AUDIT)) {
3186         return -TARGET_EPROTONOSUPPORT;
3187     }
3188 
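    /*
     * For packet sockets the guest supplies the protocol already in network
     * byte order (htons(ETH_P_xxx)); tswap16() re-swaps it only when target
     * and host endianness differ, so the host kernel sees the representation
     * it expects.
     */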
3189     if (domain == AF_PACKET ||
3190         (domain == AF_INET && type == SOCK_PACKET)) {
3191         protocol = tswap16(protocol);
3192     }
3193 
3194     ret = get_errno(socket(domain, type, protocol));
3195     if (ret >= 0) {
3196         ret = sock_flags_fixup(ret, target_type);
3197         if (type == SOCK_PACKET) {
3198             /* Handle an obsolete case:
3199              * if the socket type is SOCK_PACKET, bind by name.
3200              */
3201             fd_trans_register(ret, &target_packet_trans);
3202         } else if (domain == PF_NETLINK) {
3203             switch (protocol) {
3204 #ifdef CONFIG_RTNETLINK
3205             case NETLINK_ROUTE:
3206                 fd_trans_register(ret, &target_netlink_route_trans);
3207                 break;
3208 #endif
3209             case NETLINK_KOBJECT_UEVENT:
3210                 /* nothing to do: messages are strings */
3211                 break;
3212             case NETLINK_AUDIT:
3213                 fd_trans_register(ret, &target_netlink_audit_trans);
3214                 break;
3215             default:
3216                 g_assert_not_reached();
3217             }
3218         }
3219     }
3220     return ret;
3221 }
3222 
3223 /* do_bind() Must return target values and target errnos. */
3224 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3225                         socklen_t addrlen)
3226 {
3227     void *addr;
3228     abi_long ret;
3229 
3230     if ((int)addrlen < 0) {
3231         return -TARGET_EINVAL;
3232     }
3233 
3234     addr = alloca(addrlen+1);
3235 
3236     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3237     if (ret)
3238         return ret;
3239 
3240     return get_errno(bind(sockfd, addr, addrlen));
3241 }
3242 
3243 /* do_connect() Must return target values and target errnos. */
3244 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3245                            socklen_t addrlen)
3246 {
3247     void *addr;
3248     abi_long ret;
3249 
3250     if ((int)addrlen < 0) {
3251         return -TARGET_EINVAL;
3252     }
3253 
3254     addr = alloca(addrlen+1);
3255 
3256     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3257     if (ret)
3258         return ret;
3259 
3260     return get_errno(safe_connect(sockfd, addr, addrlen));
3261 }
3262 
3263 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3264 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3265                                       int flags, int send)
3266 {
3267     abi_long ret, len;
3268     struct msghdr msg;
3269     abi_ulong count;
3270     struct iovec *vec;
3271     abi_ulong target_vec;
3272 
3273     if (msgp->msg_name) {
3274         msg.msg_namelen = tswap32(msgp->msg_namelen);
3275         msg.msg_name = alloca(msg.msg_namelen+1);
3276         ret = target_to_host_sockaddr(fd, msg.msg_name,
3277                                       tswapal(msgp->msg_name),
3278                                       msg.msg_namelen);
3279         if (ret == -TARGET_EFAULT) {
3280             /* For connected sockets msg_name and msg_namelen must
3281              * be ignored, so returning EFAULT immediately is wrong.
3282              * Instead, pass a bad msg_name to the host kernel, and
3283              * let it decide whether to return EFAULT or not.
3284              */
3285             msg.msg_name = (void *)-1;
3286         } else if (ret) {
3287             goto out2;
3288         }
3289     } else {
3290         msg.msg_name = NULL;
3291         msg.msg_namelen = 0;
3292     }
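    /*
     * The host control-message buffer is sized at twice the target's
     * msg_controllen, presumably to leave room for host cmsg headers and
     * alignment that can be larger than the target's (e.g. a 64-bit host
     * serving a 32-bit guest).
     */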
3293     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3294     msg.msg_control = alloca(msg.msg_controllen);
3295     memset(msg.msg_control, 0, msg.msg_controllen);
3296 
3297     msg.msg_flags = tswap32(msgp->msg_flags);
3298 
3299     count = tswapal(msgp->msg_iovlen);
3300     target_vec = tswapal(msgp->msg_iov);
3301 
3302     if (count > IOV_MAX) {
3303         /* sendmsg/recvmsg return a different errno for this condition than
3304          * readv/writev, so we must catch it here before lock_iovec() does.
3305          */
3306         ret = -TARGET_EMSGSIZE;
3307         goto out2;
3308     }
3309 
3310     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3311                      target_vec, count, send);
3312     if (vec == NULL) {
3313         ret = -host_to_target_errno(errno);
3314         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3315         if (!send || ret) {
3316             goto out2;
3317         }
3318     }
3319     msg.msg_iovlen = count;
3320     msg.msg_iov = vec;
3321 
3322     if (send) {
3323         if (fd_trans_target_to_host_data(fd)) {
3324             void *host_msg;
3325 
3326             host_msg = g_malloc(msg.msg_iov->iov_len);
3327             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3328             ret = fd_trans_target_to_host_data(fd)(host_msg,
3329                                                    msg.msg_iov->iov_len);
3330             if (ret >= 0) {
3331                 msg.msg_iov->iov_base = host_msg;
3332                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3333             }
3334             g_free(host_msg);
3335         } else {
3336             ret = target_to_host_cmsg(&msg, msgp);
3337             if (ret == 0) {
3338                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3339             }
3340         }
3341     } else {
3342         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3343         if (!is_error(ret)) {
3344             len = ret;
3345             if (fd_trans_host_to_target_data(fd)) {
3346                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3347                                                MIN(msg.msg_iov->iov_len, len));
3348             }
3349             if (!is_error(ret)) {
3350                 ret = host_to_target_cmsg(msgp, &msg);
3351             }
3352             if (!is_error(ret)) {
3353                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3354                 msgp->msg_flags = tswap32(msg.msg_flags);
3355                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3356                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3357                                     msg.msg_name, msg.msg_namelen);
3358                     if (ret) {
3359                         goto out;
3360                     }
3361                 }
3362 
3363                 ret = len;
3364             }
3365         }
3366     }
3367 
3368 out:
3369     if (vec) {
3370         unlock_iovec(vec, target_vec, count, !send);
3371     }
3372 out2:
3373     return ret;
3374 }
3375 
3376 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3377                                int flags, int send)
3378 {
3379     abi_long ret;
3380     struct target_msghdr *msgp;
3381 
3382     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3383                           msgp,
3384                           target_msg,
3385                           send ? 1 : 0)) {
3386         return -TARGET_EFAULT;
3387     }
3388     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3389     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3390     return ret;
3391 }
3392 
3393 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3394  * so it might not have this *mmsg-specific flag either.
3395  */
3396 #ifndef MSG_WAITFORONE
3397 #define MSG_WAITFORONE 0x10000
3398 #endif
3399 
3400 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3401                                 unsigned int vlen, unsigned int flags,
3402                                 int send)
3403 {
3404     struct target_mmsghdr *mmsgp;
3405     abi_long ret = 0;
3406     int i;
3407 
3408     if (vlen > UIO_MAXIOV) {
3409         vlen = UIO_MAXIOV;
3410     }
3411 
3412     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3413     if (!mmsgp) {
3414         return -TARGET_EFAULT;
3415     }
3416 
3417     for (i = 0; i < vlen; i++) {
3418         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3419         if (is_error(ret)) {
3420             break;
3421         }
3422         mmsgp[i].msg_len = tswap32(ret);
3423         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3424         if (flags & MSG_WAITFORONE) {
3425             flags |= MSG_DONTWAIT;
3426         }
3427     }
3428 
3429     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3430 
3431     /* Return number of datagrams sent if we sent any at all;
3432      * otherwise return the error.
3433      */
3434     if (i) {
3435         return i;
3436     }
3437     return ret;
3438 }
3439 
3440 /* do_accept4() Must return target values and target errnos. */
3441 static abi_long do_accept4(int fd, abi_ulong target_addr,
3442                            abi_ulong target_addrlen_addr, int flags)
3443 {
3444     socklen_t addrlen, ret_addrlen;
3445     void *addr;
3446     abi_long ret;
3447     int host_flags;
3448 
3449     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3450         return -TARGET_EINVAL;
3451     }
3452 
3453     host_flags = 0;
3454     if (flags & TARGET_SOCK_NONBLOCK) {
3455         host_flags |= SOCK_NONBLOCK;
3456     }
3457     if (flags & TARGET_SOCK_CLOEXEC) {
3458         host_flags |= SOCK_CLOEXEC;
3459     }
3460 
3461     if (target_addr == 0) {
3462         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3463     }
3464 
3465     /* linux returns EFAULT if addrlen pointer is invalid */
3466     if (get_user_u32(addrlen, target_addrlen_addr))
3467         return -TARGET_EFAULT;
3468 
3469     if ((int)addrlen < 0) {
3470         return -TARGET_EINVAL;
3471     }
3472 
3473     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3474         return -TARGET_EFAULT;
3475     }
3476 
3477     addr = alloca(addrlen);
3478 
3479     ret_addrlen = addrlen;
3480     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3481     if (!is_error(ret)) {
3482         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3483         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3484             ret = -TARGET_EFAULT;
3485         }
3486     }
3487     return ret;
3488 }
3489 
3490 /* do_getpeername() Must return target values and target errnos. */
3491 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3492                                abi_ulong target_addrlen_addr)
3493 {
3494     socklen_t addrlen, ret_addrlen;
3495     void *addr;
3496     abi_long ret;
3497 
3498     if (get_user_u32(addrlen, target_addrlen_addr))
3499         return -TARGET_EFAULT;
3500 
3501     if ((int)addrlen < 0) {
3502         return -TARGET_EINVAL;
3503     }
3504 
3505     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3506         return -TARGET_EFAULT;
3507     }
3508 
3509     addr = alloca(addrlen);
3510 
3511     ret_addrlen = addrlen;
3512     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3513     if (!is_error(ret)) {
3514         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3515         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3516             ret = -TARGET_EFAULT;
3517         }
3518     }
3519     return ret;
3520 }
3521 
3522 /* do_getsockname() Must return target values and target errnos. */
3523 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3524                                abi_ulong target_addrlen_addr)
3525 {
3526     socklen_t addrlen, ret_addrlen;
3527     void *addr;
3528     abi_long ret;
3529 
3530     if (get_user_u32(addrlen, target_addrlen_addr))
3531         return -TARGET_EFAULT;
3532 
3533     if ((int)addrlen < 0) {
3534         return -TARGET_EINVAL;
3535     }
3536 
3537     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3538         return -TARGET_EFAULT;
3539     }
3540 
3541     addr = alloca(addrlen);
3542 
3543     ret_addrlen = addrlen;
3544     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3545     if (!is_error(ret)) {
3546         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3547         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3548             ret = -TARGET_EFAULT;
3549         }
3550     }
3551     return ret;
3552 }
3553 
3554 /* do_socketpair() Must return target values and target errnos. */
3555 static abi_long do_socketpair(int domain, int type, int protocol,
3556                               abi_ulong target_tab_addr)
3557 {
3558     int tab[2];
3559     abi_long ret;
3560 
3561     target_to_host_sock_type(&type);
3562 
3563     ret = get_errno(socketpair(domain, type, protocol, tab));
3564     if (!is_error(ret)) {
3565         if (put_user_s32(tab[0], target_tab_addr)
3566             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3567             ret = -TARGET_EFAULT;
3568     }
3569     return ret;
3570 }
3571 
3572 /* do_sendto() Must return target values and target errnos. */
3573 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3574                           abi_ulong target_addr, socklen_t addrlen)
3575 {
3576     void *addr;
3577     void *host_msg;
3578     void *copy_msg = NULL;
3579     abi_long ret;
3580 
3581     if ((int)addrlen < 0) {
3582         return -TARGET_EINVAL;
3583     }
3584 
3585     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3586     if (!host_msg)
3587         return -TARGET_EFAULT;
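    /*
     * If this fd has a data translator, translate into a scratch buffer;
     * copy_msg keeps the original locked guest pointer so that it can be
     * restored (and properly unlocked) on the way out.
     */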
3588     if (fd_trans_target_to_host_data(fd)) {
3589         copy_msg = host_msg;
3590         host_msg = g_malloc(len);
3591         memcpy(host_msg, copy_msg, len);
3592         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3593         if (ret < 0) {
3594             goto fail;
3595         }
3596     }
3597     if (target_addr) {
3598         addr = alloca(addrlen+1);
3599         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3600         if (ret) {
3601             goto fail;
3602         }
3603         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3604     } else {
3605         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3606     }
3607 fail:
3608     if (copy_msg) {
3609         g_free(host_msg);
3610         host_msg = copy_msg;
3611     }
3612     unlock_user(host_msg, msg, 0);
3613     return ret;
3614 }
3615 
3616 /* do_recvfrom() Must return target values and target errnos. */
3617 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3618                             abi_ulong target_addr,
3619                             abi_ulong target_addrlen)
3620 {
3621     socklen_t addrlen, ret_addrlen;
3622     void *addr;
3623     void *host_msg;
3624     abi_long ret;
3625 
3626     if (!msg) {
3627         host_msg = NULL;
3628     } else {
3629         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3630         if (!host_msg) {
3631             return -TARGET_EFAULT;
3632         }
3633     }
3634     if (target_addr) {
3635         if (get_user_u32(addrlen, target_addrlen)) {
3636             ret = -TARGET_EFAULT;
3637             goto fail;
3638         }
3639         if ((int)addrlen < 0) {
3640             ret = -TARGET_EINVAL;
3641             goto fail;
3642         }
3643         addr = alloca(addrlen);
3644         ret_addrlen = addrlen;
3645         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3646                                       addr, &ret_addrlen));
3647     } else {
3648         addr = NULL; /* To keep compiler quiet.  */
3649         addrlen = 0; /* To keep compiler quiet.  */
3650         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3651     }
3652     if (!is_error(ret)) {
3653         if (fd_trans_host_to_target_data(fd)) {
3654             abi_long trans;
3655             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3656             if (is_error(trans)) {
3657                 ret = trans;
3658                 goto fail;
3659             }
3660         }
3661         if (target_addr) {
3662             host_to_target_sockaddr(target_addr, addr,
3663                                     MIN(addrlen, ret_addrlen));
3664             if (put_user_u32(ret_addrlen, target_addrlen)) {
3665                 ret = -TARGET_EFAULT;
3666                 goto fail;
3667             }
3668         }
3669         unlock_user(host_msg, msg, len);
3670     } else {
3671 fail:
3672         unlock_user(host_msg, msg, 0);
3673     }
3674     return ret;
3675 }
3676 
3677 #ifdef TARGET_NR_socketcall
3678 /* do_socketcall() must return target values and target errnos. */
3679 static abi_long do_socketcall(int num, abi_ulong vptr)
3680 {
3681     static const unsigned nargs[] = { /* number of arguments per operation */
3682         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3683         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3684         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3685         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3686         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3687         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3688         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3689         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3690         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3691         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3692         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3693         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3694         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3695         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3696         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3697         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3698         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3699         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3700         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3701         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3702     };
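    /*
     * nargs[] is indexed directly by the TARGET_SYS_* call numbers, which
     * start at 1, so entry 0 is intentionally left unused (zero).
     */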
3703     abi_long a[6]; /* max 6 args */
3704     unsigned i;
3705 
3706     /* check the range of the first argument num */
3707     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3708     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3709         return -TARGET_EINVAL;
3710     }
3711     /* ensure we have space for args */
3712     if (nargs[num] > ARRAY_SIZE(a)) {
3713         return -TARGET_EINVAL;
3714     }
3715     /* collect the arguments in a[] according to nargs[] */
3716     for (i = 0; i < nargs[num]; ++i) {
3717         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3718             return -TARGET_EFAULT;
3719         }
3720     }
3721     /* now when we have the args, invoke the appropriate underlying function */
3722     switch (num) {
3723     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3724         return do_socket(a[0], a[1], a[2]);
3725     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3726         return do_bind(a[0], a[1], a[2]);
3727     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3728         return do_connect(a[0], a[1], a[2]);
3729     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3730         return get_errno(listen(a[0], a[1]));
3731     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3732         return do_accept4(a[0], a[1], a[2], 0);
3733     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3734         return do_getsockname(a[0], a[1], a[2]);
3735     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3736         return do_getpeername(a[0], a[1], a[2]);
3737     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3738         return do_socketpair(a[0], a[1], a[2], a[3]);
3739     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3740         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3741     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3742         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3743     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3744         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3745     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3746         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3747     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3748         return get_errno(shutdown(a[0], a[1]));
3749     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3750         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3751     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3752         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3753     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3754         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3755     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3756         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3757     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3758         return do_accept4(a[0], a[1], a[2], a[3]);
3759     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3760         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3761     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3762         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3763     default:
3764         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3765         return -TARGET_EINVAL;
3766     }
3767 }
3768 #endif
3769 
3770 #ifndef TARGET_SEMID64_DS
3771 /* asm-generic version of this struct */
3772 struct target_semid64_ds
3773 {
3774   struct target_ipc_perm sem_perm;
3775   abi_ulong sem_otime;
3776 #if TARGET_ABI_BITS == 32
3777   abi_ulong __unused1;
3778 #endif
3779   abi_ulong sem_ctime;
3780 #if TARGET_ABI_BITS == 32
3781   abi_ulong __unused2;
3782 #endif
3783   abi_ulong sem_nsems;
3784   abi_ulong __unused3;
3785   abi_ulong __unused4;
3786 };
3787 #endif
3788 
3789 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3790                                                abi_ulong target_addr)
3791 {
3792     struct target_ipc_perm *target_ip;
3793     struct target_semid64_ds *target_sd;
3794 
3795     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3796         return -TARGET_EFAULT;
3797     target_ip = &(target_sd->sem_perm);
3798     host_ip->__key = tswap32(target_ip->__key);
3799     host_ip->uid = tswap32(target_ip->uid);
3800     host_ip->gid = tswap32(target_ip->gid);
3801     host_ip->cuid = tswap32(target_ip->cuid);
3802     host_ip->cgid = tswap32(target_ip->cgid);
3803 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3804     host_ip->mode = tswap32(target_ip->mode);
3805 #else
3806     host_ip->mode = tswap16(target_ip->mode);
3807 #endif
3808 #if defined(TARGET_PPC)
3809     host_ip->__seq = tswap32(target_ip->__seq);
3810 #else
3811     host_ip->__seq = tswap16(target_ip->__seq);
3812 #endif
3813     unlock_user_struct(target_sd, target_addr, 0);
3814     return 0;
3815 }
3816 
3817 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3818                                                struct ipc_perm *host_ip)
3819 {
3820     struct target_ipc_perm *target_ip;
3821     struct target_semid64_ds *target_sd;
3822 
3823     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3824         return -TARGET_EFAULT;
3825     target_ip = &(target_sd->sem_perm);
3826     target_ip->__key = tswap32(host_ip->__key);
3827     target_ip->uid = tswap32(host_ip->uid);
3828     target_ip->gid = tswap32(host_ip->gid);
3829     target_ip->cuid = tswap32(host_ip->cuid);
3830     target_ip->cgid = tswap32(host_ip->cgid);
3831 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3832     target_ip->mode = tswap32(host_ip->mode);
3833 #else
3834     target_ip->mode = tswap16(host_ip->mode);
3835 #endif
3836 #if defined(TARGET_PPC)
3837     target_ip->__seq = tswap32(host_ip->__seq);
3838 #else
3839     target_ip->__seq = tswap16(host_ip->__seq);
3840 #endif
3841     unlock_user_struct(target_sd, target_addr, 1);
3842     return 0;
3843 }
3844 
3845 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3846                                                abi_ulong target_addr)
3847 {
3848     struct target_semid64_ds *target_sd;
3849 
3850     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3851         return -TARGET_EFAULT;
3852     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3853         return -TARGET_EFAULT;
3854     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3855     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3856     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3857     unlock_user_struct(target_sd, target_addr, 0);
3858     return 0;
3859 }
3860 
3861 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3862                                                struct semid_ds *host_sd)
3863 {
3864     struct target_semid64_ds *target_sd;
3865 
3866     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3867         return -TARGET_EFAULT;
3868     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3869         return -TARGET_EFAULT;
3870     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3871     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3872     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3873     unlock_user_struct(target_sd, target_addr, 1);
3874     return 0;
3875 }
3876 
3877 struct target_seminfo {
3878     int semmap;
3879     int semmni;
3880     int semmns;
3881     int semmnu;
3882     int semmsl;
3883     int semopm;
3884     int semume;
3885     int semusz;
3886     int semvmx;
3887     int semaem;
3888 };
3889 
3890 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3891                                               struct seminfo *host_seminfo)
3892 {
3893     struct target_seminfo *target_seminfo;
3894     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3895         return -TARGET_EFAULT;
3896     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3897     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3898     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3899     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3900     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3901     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3902     __put_user(host_seminfo->semume, &target_seminfo->semume);
3903     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3904     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3905     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3906     unlock_user_struct(target_seminfo, target_addr, 1);
3907     return 0;
3908 }
3909 
3910 union semun {
3911     int val;
3912     struct semid_ds *buf;
3913     unsigned short *array;
3914     struct seminfo *__buf;
3915 };
3916 
3917 union target_semun {
3918     int val;
3919     abi_ulong buf;
3920     abi_ulong array;
3921     abi_ulong __buf;
3922 };
3923 
3924 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3925                                                abi_ulong target_addr)
3926 {
3927     int nsems;
3928     unsigned short *array;
3929     union semun semun;
3930     struct semid_ds semid_ds;
3931     int i, ret;
3932 
3933     semun.buf = &semid_ds;
3934 
3935     ret = semctl(semid, 0, IPC_STAT, semun);
3936     if (ret == -1)
3937         return get_errno(ret);
3938 
3939     nsems = semid_ds.sem_nsems;
3940 
3941     *host_array = g_try_new(unsigned short, nsems);
3942     if (!*host_array) {
3943         return -TARGET_ENOMEM;
3944     }
3945     array = lock_user(VERIFY_READ, target_addr,
3946                       nsems*sizeof(unsigned short), 1);
3947     if (!array) {
3948         g_free(*host_array);
3949         return -TARGET_EFAULT;
3950     }
3951 
3952     for(i=0; i<nsems; i++) {
3953         __get_user((*host_array)[i], &array[i]);
3954     }
3955     unlock_user(array, target_addr, 0);
3956 
3957     return 0;
3958 }
3959 
3960 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3961                                                unsigned short **host_array)
3962 {
3963     int nsems;
3964     unsigned short *array;
3965     union semun semun;
3966     struct semid_ds semid_ds;
3967     int i, ret;
3968 
3969     semun.buf = &semid_ds;
3970 
3971     ret = semctl(semid, 0, IPC_STAT, semun);
3972     if (ret == -1)
3973         return get_errno(ret);
3974 
3975     nsems = semid_ds.sem_nsems;
3976 
3977     array = lock_user(VERIFY_WRITE, target_addr,
3978                       nsems*sizeof(unsigned short), 0);
3979     if (!array)
3980         return -TARGET_EFAULT;
3981 
3982     for(i=0; i<nsems; i++) {
3983         __put_user((*host_array)[i], &array[i]);
3984     }
3985     g_free(*host_array);
3986     unlock_user(array, target_addr, 1);
3987 
3988     return 0;
3989 }
3990 
3991 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3992                                  abi_ulong target_arg)
3993 {
3994     union target_semun target_su = { .buf = target_arg };
3995     union semun arg;
3996     struct semid_ds dsarg;
3997     unsigned short *array = NULL;
3998     struct seminfo seminfo;
3999     abi_long ret = -TARGET_EINVAL;
4000     abi_long err;
4001     cmd &= 0xff;
4002 
4003     switch (cmd) {
4004     case GETVAL:
4005     case SETVAL:
4006         /* In 64 bit cross-endian situations, we will erroneously pick up
4007          * the wrong half of the union for the "val" element.  To rectify
4008          * this, the entire 8-byte structure is byteswapped, followed by
4009          * a swap of the 4 byte val field. In other cases, the data is
4010          * already in proper host byte order. */
4011         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4012             target_su.buf = tswapal(target_su.buf);
4013             arg.val = tswap32(target_su.val);
4014         } else {
4015             arg.val = target_su.val;
4016         }
4017         ret = get_errno(semctl(semid, semnum, cmd, arg));
4018         break;
4019     case GETALL:
4020     case SETALL:
4021         err = target_to_host_semarray(semid, &array, target_su.array);
4022         if (err)
4023             return err;
4024         arg.array = array;
4025         ret = get_errno(semctl(semid, semnum, cmd, arg));
4026         err = host_to_target_semarray(semid, target_su.array, &array);
4027         if (err)
4028             return err;
4029         break;
4030     case IPC_STAT:
4031     case IPC_SET:
4032     case SEM_STAT:
4033         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4034         if (err)
4035             return err;
4036         arg.buf = &dsarg;
4037         ret = get_errno(semctl(semid, semnum, cmd, arg));
4038         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4039         if (err)
4040             return err;
4041         break;
4042     case IPC_INFO:
4043     case SEM_INFO:
4044         arg.__buf = &seminfo;
4045         ret = get_errno(semctl(semid, semnum, cmd, arg));
4046         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4047         if (err)
4048             return err;
4049         break;
4050     case IPC_RMID:
4051     case GETPID:
4052     case GETNCNT:
4053     case GETZCNT:
4054         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4055         break;
4056     }
4057 
4058     return ret;
4059 }
4060 
4061 struct target_sembuf {
4062     unsigned short sem_num;
4063     short sem_op;
4064     short sem_flg;
4065 };
4066 
4067 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4068                                              abi_ulong target_addr,
4069                                              unsigned nsops)
4070 {
4071     struct target_sembuf *target_sembuf;
4072     int i;
4073 
4074     target_sembuf = lock_user(VERIFY_READ, target_addr,
4075                               nsops*sizeof(struct target_sembuf), 1);
4076     if (!target_sembuf)
4077         return -TARGET_EFAULT;
4078 
4079     for(i=0; i<nsops; i++) {
4080         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4081         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4082         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4083     }
4084 
4085     unlock_user(target_sembuf, target_addr, 0);
4086 
4087     return 0;
4088 }
4089 
4090 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4091     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4092 
4093 /*
4094  * This macro is required to handle the s390 variant, which passes the
4095  * arguments in a different order than the default.
4096  */
4097 #ifdef __s390x__
4098 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4099   (__nsops), (__timeout), (__sops)
4100 #else
4101 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4102   (__nsops), 0, (__sops), (__timeout)
4103 #endif
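/*
 * For illustration, the safe_ipc() fallback in do_semtimedop() below expands
 * roughly to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)    (generic)
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, timeout, sops)       (s390x)
 */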
4104 
4105 static inline abi_long do_semtimedop(int semid,
4106                                      abi_long ptr,
4107                                      unsigned nsops,
4108                                      abi_long timeout, bool time64)
4109 {
4110     struct sembuf *sops;
4111     struct timespec ts, *pts = NULL;
4112     abi_long ret;
4113 
4114     if (timeout) {
4115         pts = &ts;
4116         if (time64) {
4117             if (target_to_host_timespec64(pts, timeout)) {
4118                 return -TARGET_EFAULT;
4119             }
4120         } else {
4121             if (target_to_host_timespec(pts, timeout)) {
4122                 return -TARGET_EFAULT;
4123             }
4124         }
4125     }
4126 
4127     if (nsops > TARGET_SEMOPM) {
4128         return -TARGET_E2BIG;
4129     }
4130 
4131     sops = g_new(struct sembuf, nsops);
4132 
4133     if (target_to_host_sembuf(sops, ptr, nsops)) {
4134         g_free(sops);
4135         return -TARGET_EFAULT;
4136     }
4137 
4138     ret = -TARGET_ENOSYS;
4139 #ifdef __NR_semtimedop
4140     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4141 #endif
4142 #ifdef __NR_ipc
4143     if (ret == -TARGET_ENOSYS) {
4144         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4145                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4146     }
4147 #endif
4148     g_free(sops);
4149     return ret;
4150 }
4151 #endif
4152 
4153 struct target_msqid_ds
4154 {
4155     struct target_ipc_perm msg_perm;
4156     abi_ulong msg_stime;
4157 #if TARGET_ABI_BITS == 32
4158     abi_ulong __unused1;
4159 #endif
4160     abi_ulong msg_rtime;
4161 #if TARGET_ABI_BITS == 32
4162     abi_ulong __unused2;
4163 #endif
4164     abi_ulong msg_ctime;
4165 #if TARGET_ABI_BITS == 32
4166     abi_ulong __unused3;
4167 #endif
4168     abi_ulong __msg_cbytes;
4169     abi_ulong msg_qnum;
4170     abi_ulong msg_qbytes;
4171     abi_ulong msg_lspid;
4172     abi_ulong msg_lrpid;
4173     abi_ulong __unused4;
4174     abi_ulong __unused5;
4175 };
4176 
4177 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4178                                                abi_ulong target_addr)
4179 {
4180     struct target_msqid_ds *target_md;
4181 
4182     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4183         return -TARGET_EFAULT;
4184     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4185         return -TARGET_EFAULT;
4186     host_md->msg_stime = tswapal(target_md->msg_stime);
4187     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4188     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4189     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4190     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4191     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4192     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4193     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4194     unlock_user_struct(target_md, target_addr, 0);
4195     return 0;
4196 }
4197 
4198 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4199                                                struct msqid_ds *host_md)
4200 {
4201     struct target_msqid_ds *target_md;
4202 
4203     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4204         return -TARGET_EFAULT;
4205     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4206         return -TARGET_EFAULT;
4207     target_md->msg_stime = tswapal(host_md->msg_stime);
4208     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4209     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4210     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4211     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4212     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4213     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4214     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4215     unlock_user_struct(target_md, target_addr, 1);
4216     return 0;
4217 }
4218 
4219 struct target_msginfo {
4220     int msgpool;
4221     int msgmap;
4222     int msgmax;
4223     int msgmnb;
4224     int msgmni;
4225     int msgssz;
4226     int msgtql;
4227     unsigned short int msgseg;
4228 };
4229 
4230 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4231                                               struct msginfo *host_msginfo)
4232 {
4233     struct target_msginfo *target_msginfo;
4234     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4235         return -TARGET_EFAULT;
4236     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4237     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4238     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4239     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4240     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4241     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4242     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4243     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4244     unlock_user_struct(target_msginfo, target_addr, 1);
4245     return 0;
4246 }
4247 
4248 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4249 {
4250     struct msqid_ds dsarg;
4251     struct msginfo msginfo;
4252     abi_long ret = -TARGET_EINVAL;
4253 
4254     cmd &= 0xff;
4255 
4256     switch (cmd) {
4257     case IPC_STAT:
4258     case IPC_SET:
4259     case MSG_STAT:
4260         if (target_to_host_msqid_ds(&dsarg,ptr))
4261             return -TARGET_EFAULT;
4262         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4263         if (host_to_target_msqid_ds(ptr,&dsarg))
4264             return -TARGET_EFAULT;
4265         break;
4266     case IPC_RMID:
4267         ret = get_errno(msgctl(msgid, cmd, NULL));
4268         break;
4269     case IPC_INFO:
4270     case MSG_INFO:
4271         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4272         if (host_to_target_msginfo(ptr, &msginfo))
4273             return -TARGET_EFAULT;
4274         break;
4275     }
4276 
4277     return ret;
4278 }
4279 
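/*
 * The mtext[1] member is the traditional placeholder for a trailing
 * variable-length array: the actual message text of msgsz bytes follows
 * the mtype field in guest memory.
 */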
4280 struct target_msgbuf {
4281     abi_long mtype;
4282     char mtext[1];
4283 };
4284 
4285 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4286                                  ssize_t msgsz, int msgflg)
4287 {
4288     struct target_msgbuf *target_mb;
4289     struct msgbuf *host_mb;
4290     abi_long ret = 0;
4291 
4292     if (msgsz < 0) {
4293         return -TARGET_EINVAL;
4294     }
4295 
4296     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4297         return -TARGET_EFAULT;
4298     host_mb = g_try_malloc(msgsz + sizeof(long));
4299     if (!host_mb) {
4300         unlock_user_struct(target_mb, msgp, 0);
4301         return -TARGET_ENOMEM;
4302     }
4303     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4304     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4305     ret = -TARGET_ENOSYS;
4306 #ifdef __NR_msgsnd
4307     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4308 #endif
4309 #ifdef __NR_ipc
4310     if (ret == -TARGET_ENOSYS) {
4311 #ifdef __s390x__
4312         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4313                                  host_mb));
4314 #else
4315         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4316                                  host_mb, 0));
4317 #endif
4318     }
4319 #endif
4320     g_free(host_mb);
4321     unlock_user_struct(target_mb, msgp, 0);
4322 
4323     return ret;
4324 }
4325 
4326 #ifdef __NR_ipc
4327 #if defined(__sparc__)
4328 /* On SPARC, msgrcv does not use the kludge for the final 2 arguments.  */
4329 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4330 #elif defined(__s390x__)
4331 /* The s390 sys_ipc variant has only five parameters.  */
4332 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4333     ((long int[]){(long int)__msgp, __msgtyp})
4334 #else
4335 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4336     ((long int[]){(long int)__msgp, __msgtyp}), 0
4337 #endif
4338 #endif
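/*
 * For illustration: the generic variant packs msgp and msgtyp into a small
 * array (the historical "ipc kludge") followed by a trailing 0 argument,
 * SPARC passes them as two separate arguments, and s390x passes the array
 * without the trailing argument.
 */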
4339 
4340 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4341                                  ssize_t msgsz, abi_long msgtyp,
4342                                  int msgflg)
4343 {
4344     struct target_msgbuf *target_mb;
4345     char *target_mtext;
4346     struct msgbuf *host_mb;
4347     abi_long ret = 0;
4348 
4349     if (msgsz < 0) {
4350         return -TARGET_EINVAL;
4351     }
4352 
4353     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4354         return -TARGET_EFAULT;
4355 
4356     host_mb = g_try_malloc(msgsz + sizeof(long));
4357     if (!host_mb) {
4358         ret = -TARGET_ENOMEM;
4359         goto end;
4360     }
4361     ret = -TARGET_ENOSYS;
4362 #ifdef __NR_msgrcv
4363     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4364 #endif
4365 #ifdef __NR_ipc
4366     if (ret == -TARGET_ENOSYS) {
4367         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4368                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4369     }
4370 #endif
4371 
4372     if (ret > 0) {
4373         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4374         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4375         if (!target_mtext) {
4376             ret = -TARGET_EFAULT;
4377             goto end;
4378         }
4379         memcpy(target_mb->mtext, host_mb->mtext, ret);
4380         unlock_user(target_mtext, target_mtext_addr, ret);
4381     }
4382 
4383     target_mb->mtype = tswapal(host_mb->mtype);
4384 
4385 end:
4386     if (target_mb)
4387         unlock_user_struct(target_mb, msgp, 1);
4388     g_free(host_mb);
4389     return ret;
4390 }
4391 
4392 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4393                                                abi_ulong target_addr)
4394 {
4395     struct target_shmid_ds *target_sd;
4396 
4397     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4398         return -TARGET_EFAULT;
4399     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4400         return -TARGET_EFAULT;
4401     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4402     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4403     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4404     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4405     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4406     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4407     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4408     unlock_user_struct(target_sd, target_addr, 0);
4409     return 0;
4410 }
4411 
4412 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4413                                                struct shmid_ds *host_sd)
4414 {
4415     struct target_shmid_ds *target_sd;
4416 
4417     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4418         return -TARGET_EFAULT;
4419     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4420         return -TARGET_EFAULT;
4421     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4422     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4423     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4424     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4425     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4426     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4427     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4428     unlock_user_struct(target_sd, target_addr, 1);
4429     return 0;
4430 }
4431 
4432 struct  target_shminfo {
4433     abi_ulong shmmax;
4434     abi_ulong shmmin;
4435     abi_ulong shmmni;
4436     abi_ulong shmseg;
4437     abi_ulong shmall;
4438 };
4439 
4440 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4441                                               struct shminfo *host_shminfo)
4442 {
4443     struct target_shminfo *target_shminfo;
4444     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4445         return -TARGET_EFAULT;
4446     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4447     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4448     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4449     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4450     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4451     unlock_user_struct(target_shminfo, target_addr, 1);
4452     return 0;
4453 }
4454 
4455 struct target_shm_info {
4456     int used_ids;
4457     abi_ulong shm_tot;
4458     abi_ulong shm_rss;
4459     abi_ulong shm_swp;
4460     abi_ulong swap_attempts;
4461     abi_ulong swap_successes;
4462 };
4463 
4464 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4465                                                struct shm_info *host_shm_info)
4466 {
4467     struct target_shm_info *target_shm_info;
4468     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4469         return -TARGET_EFAULT;
4470     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4471     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4472     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4473     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4474     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4475     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4476     unlock_user_struct(target_shm_info, target_addr, 1);
4477     return 0;
4478 }
4479 
4480 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4481 {
4482     struct shmid_ds dsarg;
4483     struct shminfo shminfo;
4484     struct shm_info shm_info;
4485     abi_long ret = -TARGET_EINVAL;
4486 
4487     cmd &= 0xff;
4488 
4489     switch(cmd) {
4490     case IPC_STAT:
4491     case IPC_SET:
4492     case SHM_STAT:
4493         if (target_to_host_shmid_ds(&dsarg, buf))
4494             return -TARGET_EFAULT;
4495         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4496         if (host_to_target_shmid_ds(buf, &dsarg))
4497             return -TARGET_EFAULT;
4498         break;
4499     case IPC_INFO:
4500         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4501         if (host_to_target_shminfo(buf, &shminfo))
4502             return -TARGET_EFAULT;
4503         break;
4504     case SHM_INFO:
4505         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4506         if (host_to_target_shm_info(buf, &shm_info))
4507             return -TARGET_EFAULT;
4508         break;
4509     case IPC_RMID:
4510     case SHM_LOCK:
4511     case SHM_UNLOCK:
4512         ret = get_errno(shmctl(shmid, cmd, NULL));
4513         break;
4514     }
4515 
4516     return ret;
4517 }
4518 
4519 #ifdef TARGET_NR_ipc
4520 /* ??? This only works with linear mappings.  */
4521 /* do_ipc() must return target values and target errnos. */
4522 static abi_long do_ipc(CPUArchState *cpu_env,
4523                        unsigned int call, abi_long first,
4524                        abi_long second, abi_long third,
4525                        abi_long ptr, abi_long fifth)
4526 {
4527     int version;
4528     abi_long ret = 0;
4529 
4530     version = call >> 16;
4531     call &= 0xffff;
4532 
4533     switch (call) {
4534     case IPCOP_semop:
4535         ret = do_semtimedop(first, ptr, second, 0, false);
4536         break;
4537     case IPCOP_semtimedop:
4538     /*
4539      * The s390 sys_ipc variant has only five parameters instead of six
4540      * (as in the default variant); the only difference is the handling of
4541      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4542      * to a struct timespec while the generic variant uses the fifth parameter.
4543      */
4544 #if defined(TARGET_S390X)
4545         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4546 #else
4547         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4548 #endif
4549         break;
4550 
4551     case IPCOP_semget:
4552         ret = get_errno(semget(first, second, third));
4553         break;
4554 
4555     case IPCOP_semctl: {
4556         /* The semun argument to semctl is passed by value, so dereference the
4557          * ptr argument. */
4558         abi_ulong atptr;
4559         get_user_ual(atptr, ptr);
4560         ret = do_semctl(first, second, third, atptr);
4561         break;
4562     }
4563 
4564     case IPCOP_msgget:
4565         ret = get_errno(msgget(first, second));
4566         break;
4567 
4568     case IPCOP_msgsnd:
4569         ret = do_msgsnd(first, ptr, second, third);
4570         break;
4571 
4572     case IPCOP_msgctl:
4573         ret = do_msgctl(first, second, ptr);
4574         break;
4575 
4576     case IPCOP_msgrcv:
4577         switch (version) {
4578         case 0:
4579             {
4580                 struct target_ipc_kludge {
4581                     abi_long msgp;
4582                     abi_long msgtyp;
4583                 } *tmp;
4584 
4585                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4586                     ret = -TARGET_EFAULT;
4587                     break;
4588                 }
4589 
4590                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4591 
4592                 unlock_user_struct(tmp, ptr, 0);
4593                 break;
4594             }
4595         default:
4596             ret = do_msgrcv(first, ptr, second, fifth, third);
4597         }
4598         break;
4599 
4600     case IPCOP_shmat:
4601         switch (version) {
4602         default:
4603         {
4604             abi_ulong raddr;
4605             raddr = target_shmat(cpu_env, first, ptr, second);
4606             if (is_error(raddr))
4607                 return get_errno(raddr);
4608             if (put_user_ual(raddr, third))
4609                 return -TARGET_EFAULT;
4610             break;
4611         }
4612         case 1:
4613             ret = -TARGET_EINVAL;
4614             break;
4615         }
4616         break;
4617     case IPCOP_shmdt:
4618         ret = target_shmdt(ptr);
4619         break;
4620 
4621     case IPCOP_shmget:
4622         /* IPC_* flag values are the same on all linux platforms */
4623         ret = get_errno(shmget(first, second, third));
4624         break;
4625 
4626     /* IPC_* and SHM_* command values are the same on all linux platforms */
4627     case IPCOP_shmctl:
4628         ret = do_shmctl(first, second, ptr);
4629         break;
4630     default:
4631         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4632                       call, version);
4633         ret = -TARGET_ENOSYS;
4634         break;
4635     }
4636     return ret;
4637 }
4638 #endif
4639 
4640 /* kernel structure types definitions */
4641 
4642 #define STRUCT(name, ...) STRUCT_ ## name,
4643 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4644 enum {
4645 #include "syscall_types.h"
4646 STRUCT_MAX
4647 };
4648 #undef STRUCT
4649 #undef STRUCT_SPECIAL
4650 
4651 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4652 #define STRUCT_SPECIAL(name)
4653 #include "syscall_types.h"
4654 #undef STRUCT
4655 #undef STRUCT_SPECIAL
4656 
4657 #define MAX_STRUCT_SIZE 4096
4658 
4659 #ifdef CONFIG_FIEMAP
4660 /* So fiemap access checks don't overflow on 32 bit systems.
4661  * This is very slightly smaller than the limit imposed by
4662  * the underlying kernel.
4663  */
4664 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4665                             / sizeof(struct fiemap_extent))
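/*
 * Sketch of the bound being enforced: the reply buffer needed is
 *     sizeof(struct fiemap) + fm_extent_count * sizeof(struct fiemap_extent)
 * and it is computed in a uint32_t below, so fm_extent_count is capped at
 * FIEMAP_MAX_EXTENTS to keep that sum from wrapping around.
 */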
4666 
4667 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4668                                        int fd, int cmd, abi_long arg)
4669 {
4670     /* The parameter for this ioctl is a struct fiemap followed
4671      * by an array of struct fiemap_extent whose size is set
4672      * in fiemap->fm_extent_count. The array is filled in by the
4673      * ioctl.
4674      */
4675     int target_size_in, target_size_out;
4676     struct fiemap *fm;
4677     const argtype *arg_type = ie->arg_type;
4678     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4679     void *argptr, *p;
4680     abi_long ret;
4681     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4682     uint32_t outbufsz;
4683     int free_fm = 0;
4684 
4685     assert(arg_type[0] == TYPE_PTR);
4686     assert(ie->access == IOC_RW);
4687     arg_type++;
4688     target_size_in = thunk_type_size(arg_type, 0);
4689     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4690     if (!argptr) {
4691         return -TARGET_EFAULT;
4692     }
4693     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4694     unlock_user(argptr, arg, 0);
4695     fm = (struct fiemap *)buf_temp;
4696     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4697         return -TARGET_EINVAL;
4698     }
4699 
4700     outbufsz = sizeof (*fm) +
4701         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4702 
4703     if (outbufsz > MAX_STRUCT_SIZE) {
4704         /* We can't fit all the extents into the fixed size buffer.
4705          * Allocate one that is large enough and use it instead.
4706          */
4707         fm = g_try_malloc(outbufsz);
4708         if (!fm) {
4709             return -TARGET_ENOMEM;
4710         }
4711         memcpy(fm, buf_temp, sizeof(struct fiemap));
4712         free_fm = 1;
4713     }
4714     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4715     if (!is_error(ret)) {
4716         target_size_out = target_size_in;
4717         /* An extent_count of 0 means we were only counting the extents
4718          * so there are no structs to copy
4719          */
4720         if (fm->fm_extent_count != 0) {
4721             target_size_out += fm->fm_mapped_extents * extent_size;
4722         }
4723         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4724         if (!argptr) {
4725             ret = -TARGET_EFAULT;
4726         } else {
4727             /* Convert the struct fiemap */
4728             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4729             if (fm->fm_extent_count != 0) {
4730                 p = argptr + target_size_in;
4731                 /* ...and then all the struct fiemap_extents */
4732                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4733                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4734                                   THUNK_TARGET);
4735                     p += extent_size;
4736                 }
4737             }
4738             unlock_user(argptr, arg, target_size_out);
4739         }
4740     }
4741     if (free_fm) {
4742         g_free(fm);
4743     }
4744     return ret;
4745 }
4746 #endif
4747 
4748 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4749                                 int fd, int cmd, abi_long arg)
4750 {
4751     const argtype *arg_type = ie->arg_type;
4752     int target_size;
4753     void *argptr;
4754     int ret;
4755     struct ifconf *host_ifconf;
4756     uint32_t outbufsz;
4757     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4758     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4759     int target_ifreq_size;
4760     int nb_ifreq;
4761     int free_buf = 0;
4762     int i;
4763     int target_ifc_len;
4764     abi_long target_ifc_buf;
4765     int host_ifc_len;
4766     char *host_ifc_buf;
4767 
4768     assert(arg_type[0] == TYPE_PTR);
4769     assert(ie->access == IOC_RW);
4770 
4771     arg_type++;
4772     target_size = thunk_type_size(arg_type, 0);
4773 
4774     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4775     if (!argptr)
4776         return -TARGET_EFAULT;
4777     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4778     unlock_user(argptr, arg, 0);
4779 
4780     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4781     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4782     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4783 
4784     if (target_ifc_buf != 0) {
4785         target_ifc_len = host_ifconf->ifc_len;
4786         nb_ifreq = target_ifc_len / target_ifreq_size;
4787         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
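        /*
         * The target and host struct ifreq may differ in size, so ifc_len is
         * treated as a count of entries: divide by the target entry size
         * here, and rescale back to target units after the ioctl returns.
         */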
4788 
4789         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4790         if (outbufsz > MAX_STRUCT_SIZE) {
4791             /*
4792              * We can't fit all the ifreq entries into the fixed size buffer.
4793              * Allocate one that is large enough and use it instead.
4794              */
4795             host_ifconf = g_try_malloc(outbufsz);
4796             if (!host_ifconf) {
4797                 return -TARGET_ENOMEM;
4798             }
4799             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4800             free_buf = 1;
4801         }
4802         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4803 
4804         host_ifconf->ifc_len = host_ifc_len;
4805     } else {
4806         host_ifc_buf = NULL;
4807     }
4808     host_ifconf->ifc_buf = host_ifc_buf;
4809 
4810     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4811     if (!is_error(ret)) {
4812         /* convert host ifc_len to target ifc_len */
4813 
4814         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4815         target_ifc_len = nb_ifreq * target_ifreq_size;
4816         host_ifconf->ifc_len = target_ifc_len;
4817 
4818         /* restore target ifc_buf */
4819 
4820         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4821 
4822         /* copy struct ifconf to target user */
4823 
4824         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4825         if (!argptr)
4826             return -TARGET_EFAULT;
4827         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4828         unlock_user(argptr, arg, target_size);
4829 
4830         if (target_ifc_buf != 0) {
4831             /* copy ifreq[] to target user */
4832             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4833             for (i = 0; i < nb_ifreq ; i++) {
4834                 thunk_convert(argptr + i * target_ifreq_size,
4835                               host_ifc_buf + i * sizeof(struct ifreq),
4836                               ifreq_arg_type, THUNK_TARGET);
4837             }
4838             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4839         }
4840     }
4841 
4842     if (free_buf) {
4843         g_free(host_ifconf);
4844     }
4845 
4846     return ret;
4847 }
4848 
4849 #if defined(CONFIG_USBFS)
4850 #if HOST_LONG_BITS > 64
4851 #error USBDEVFS thunks do not support >64 bit hosts yet.
4852 #endif
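/*
 * Per-URB bookkeeping.  The address of the embedded host_urb is what gets
 * handed to the host kernel as the URB pointer; when USBDEVFS_REAPURB
 * returns it, the enclosing live_urb (and with it the saved guest
 * addresses) is recovered container_of-style via offsetof().
 */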
4853 struct live_urb {
4854     uint64_t target_urb_adr;
4855     uint64_t target_buf_adr;
4856     char *target_buf_ptr;
4857     struct usbdevfs_urb host_urb;
4858 };
4859 
4860 static GHashTable *usbdevfs_urb_hashtable(void)
4861 {
4862     static GHashTable *urb_hashtable;
4863 
4864     if (!urb_hashtable) {
4865         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4866     }
4867     return urb_hashtable;
4868 }
4869 
4870 static void urb_hashtable_insert(struct live_urb *urb)
4871 {
4872     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4873     g_hash_table_insert(urb_hashtable, urb, urb);
4874 }
4875 
4876 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4877 {
4878     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4879     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4880 }
4881 
4882 static void urb_hashtable_remove(struct live_urb *urb)
4883 {
4884     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4885     g_hash_table_remove(urb_hashtable, urb);
4886 }
4887 
4888 static abi_long
4889 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4890                           int fd, int cmd, abi_long arg)
4891 {
4892     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4893     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4894     struct live_urb *lurb;
4895     void *argptr;
4896     uint64_t hurb;
4897     int target_size;
4898     uintptr_t target_urb_adr;
4899     abi_long ret;
4900 
4901     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4902 
4903     memset(buf_temp, 0, sizeof(uint64_t));
4904     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4905     if (is_error(ret)) {
4906         return ret;
4907     }
4908 
4909     memcpy(&hurb, buf_temp, sizeof(uint64_t));
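    /*
     * hurb is the pointer we originally handed to the kernel, i.e. the
     * address of a live_urb's host_urb member; step back to the enclosing
     * live_urb to reach the guest addresses recorded at submit time.
     */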
4910     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4911     if (!lurb->target_urb_adr) {
4912         return -TARGET_EFAULT;
4913     }
4914     urb_hashtable_remove(lurb);
4915     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4916         lurb->host_urb.buffer_length);
4917     lurb->target_buf_ptr = NULL;
4918 
4919     /* restore the guest buffer pointer */
4920     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4921 
4922     /* update the guest urb struct */
4923     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4924     if (!argptr) {
4925         g_free(lurb);
4926         return -TARGET_EFAULT;
4927     }
4928     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4929     unlock_user(argptr, lurb->target_urb_adr, target_size);
4930 
4931     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4932     /* write back the urb handle */
4933     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4934     if (!argptr) {
4935         g_free(lurb);
4936         return -TARGET_EFAULT;
4937     }
4938 
4939     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4940     target_urb_adr = lurb->target_urb_adr;
4941     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4942     unlock_user(argptr, arg, target_size);
4943 
4944     g_free(lurb);
4945     return ret;
4946 }
4947 
4948 static abi_long
4949 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4950                              uint8_t *buf_temp __attribute__((unused)),
4951                              int fd, int cmd, abi_long arg)
4952 {
4953     struct live_urb *lurb;
4954 
4955     /* map target address back to host URB with metadata. */
4956     lurb = urb_hashtable_lookup(arg);
4957     if (!lurb) {
4958         return -TARGET_EFAULT;
4959     }
4960     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4961 }
4962 
4963 static abi_long
4964 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4965                             int fd, int cmd, abi_long arg)
4966 {
4967     const argtype *arg_type = ie->arg_type;
4968     int target_size;
4969     abi_long ret;
4970     void *argptr;
4971     int rw_dir;
4972     struct live_urb *lurb;
4973 
4974     /*
4975      * each submitted URB needs to map to a unique ID for the
4976      * kernel, and that unique ID needs to be a pointer to
4977      * host memory.  Hence, we need to malloc for each URB.
4978      * isochronous transfers have a variable length struct.
4979      */
4980     arg_type++;
4981     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4982 
4983     /* construct host copy of urb and metadata */
4984     lurb = g_try_new0(struct live_urb, 1);
4985     if (!lurb) {
4986         return -TARGET_ENOMEM;
4987     }
4988 
4989     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4990     if (!argptr) {
4991         g_free(lurb);
4992         return -TARGET_EFAULT;
4993     }
4994     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4995     unlock_user(argptr, arg, 0);
4996 
4997     lurb->target_urb_adr = arg;
4998     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4999 
5000     /* buffer space used depends on endpoint type so lock the entire buffer */
5001     /* control type urbs should check the buffer contents for true direction */
5002     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5003     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5004         lurb->host_urb.buffer_length, 1);
5005     if (lurb->target_buf_ptr == NULL) {
5006         g_free(lurb);
5007         return -TARGET_EFAULT;
5008     }
5009 
5010     /* update buffer pointer in host copy */
5011     lurb->host_urb.buffer = lurb->target_buf_ptr;
5012 
5013     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5014     if (is_error(ret)) {
5015         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5016         g_free(lurb);
5017     } else {
5018         urb_hashtable_insert(lurb);
5019     }
5020 
5021     return ret;
5022 }
5023 #endif /* CONFIG_USBFS */
5024 
5025 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5026                             int cmd, abi_long arg)
5027 {
5028     void *argptr;
5029     struct dm_ioctl *host_dm;
5030     abi_long guest_data;
5031     uint32_t guest_data_size;
5032     int target_size;
5033     const argtype *arg_type = ie->arg_type;
5034     abi_long ret;
5035     void *big_buf = NULL;
5036     char *host_data;
5037 
5038     arg_type++;
5039     target_size = thunk_type_size(arg_type, 0);
5040     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5041     if (!argptr) {
5042         ret = -TARGET_EFAULT;
5043         goto out;
5044     }
5045     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5046     unlock_user(argptr, arg, 0);
5047 
5048     /* buf_temp is too small, so fetch things into a bigger buffer */
5049     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5050     memcpy(big_buf, buf_temp, target_size);
5051     buf_temp = big_buf;
5052     host_dm = big_buf;
5053 
5054     guest_data = arg + host_dm->data_start;
5055     if ((guest_data - arg) < 0) {
5056         ret = -TARGET_EINVAL;
5057         goto out;
5058     }
5059     guest_data_size = host_dm->data_size - host_dm->data_start;
5060     host_data = (char*)host_dm + host_dm->data_start;
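    /*
     * Layout reminder for struct dm_ioctl: a fixed header followed by a
     * variable payload, where data_start is the payload's offset from the
     * start of the header and data_size is the total size including the
     * header.
     */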
5061 
5062     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5063     if (!argptr) {
5064         ret = -TARGET_EFAULT;
5065         goto out;
5066     }
5067 
5068     switch (ie->host_cmd) {
5069     case DM_REMOVE_ALL:
5070     case DM_LIST_DEVICES:
5071     case DM_DEV_CREATE:
5072     case DM_DEV_REMOVE:
5073     case DM_DEV_SUSPEND:
5074     case DM_DEV_STATUS:
5075     case DM_DEV_WAIT:
5076     case DM_TABLE_STATUS:
5077     case DM_TABLE_CLEAR:
5078     case DM_TABLE_DEPS:
5079     case DM_LIST_VERSIONS:
5080         /* no input data */
5081         break;
5082     case DM_DEV_RENAME:
5083     case DM_DEV_SET_GEOMETRY:
5084         /* data contains only strings */
5085         memcpy(host_data, argptr, guest_data_size);
5086         break;
5087     case DM_TARGET_MSG:
5088         memcpy(host_data, argptr, guest_data_size);
5089         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5090         break;
5091     case DM_TABLE_LOAD:
5092     {
5093         void *gspec = argptr;
5094         void *cur_data = host_data;
5095         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5096         int spec_size = thunk_type_size(dm_arg_type, 0);
5097         int i;
5098 
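        /*
         * A table load is a sequence of dm_target_spec structures, each
         * followed by its parameter string; 'next' is the offset from the
         * current spec to the following one.  The guest and host specs can
         * be laid out differently, so 'next' is recomputed for the host
         * copy while walking the guest list.
         */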
5099         for (i = 0; i < host_dm->target_count; i++) {
5100             struct dm_target_spec *spec = cur_data;
5101             uint32_t next;
5102             int slen;
5103 
5104             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5105             slen = strlen((char*)gspec + spec_size) + 1;
5106             next = spec->next;
5107             spec->next = sizeof(*spec) + slen;
5108             strcpy((char*)&spec[1], gspec + spec_size);
5109             gspec += next;
5110             cur_data += spec->next;
5111         }
5112         break;
5113     }
5114     default:
5115         ret = -TARGET_EINVAL;
5116         unlock_user(argptr, guest_data, 0);
5117         goto out;
5118     }
5119     unlock_user(argptr, guest_data, 0);
5120 
5121     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5122     if (!is_error(ret)) {
5123         guest_data = arg + host_dm->data_start;
5124         guest_data_size = host_dm->data_size - host_dm->data_start;
5125         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5126         switch (ie->host_cmd) {
5127         case DM_REMOVE_ALL:
5128         case DM_DEV_CREATE:
5129         case DM_DEV_REMOVE:
5130         case DM_DEV_RENAME:
5131         case DM_DEV_SUSPEND:
5132         case DM_DEV_STATUS:
5133         case DM_TABLE_LOAD:
5134         case DM_TABLE_CLEAR:
5135         case DM_TARGET_MSG:
5136         case DM_DEV_SET_GEOMETRY:
5137             /* no return data */
5138             break;
5139         case DM_LIST_DEVICES:
5140         {
5141             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5142             uint32_t remaining_data = guest_data_size;
5143             void *cur_data = argptr;
5144             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5145             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5146 
5147             while (1) {
5148                 uint32_t next = nl->next;
5149                 if (next) {
5150                     nl->next = nl_size + (strlen(nl->name) + 1);
5151                 }
5152                 if (remaining_data < nl->next) {
5153                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5154                     break;
5155                 }
5156                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5157                 strcpy(cur_data + nl_size, nl->name);
5158                 cur_data += nl->next;
5159                 remaining_data -= nl->next;
5160                 if (!next) {
5161                     break;
5162                 }
5163                 nl = (void*)nl + next;
5164             }
5165             break;
5166         }
5167         case DM_DEV_WAIT:
5168         case DM_TABLE_STATUS:
5169         {
5170             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5171             void *cur_data = argptr;
5172             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5173             int spec_size = thunk_type_size(dm_arg_type, 0);
5174             int i;
5175 
5176             for (i = 0; i < host_dm->target_count; i++) {
5177                 uint32_t next = spec->next;
5178                 int slen = strlen((char*)&spec[1]) + 1;
5179                 spec->next = (cur_data - argptr) + spec_size + slen;
5180                 if (guest_data_size < spec->next) {
5181                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5182                     break;
5183                 }
5184                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5185                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5186                 cur_data = argptr + spec->next;
5187                 spec = (void*)host_dm + host_dm->data_start + next;
5188             }
5189             break;
5190         }
5191         case DM_TABLE_DEPS:
5192         {
5193             void *hdata = (void*)host_dm + host_dm->data_start;
5194             int count = *(uint32_t*)hdata;
5195             uint64_t *hdev = hdata + 8;
5196             uint64_t *gdev = argptr + 8;
5197             int i;
5198 
5199             *(uint32_t*)argptr = tswap32(count);
5200             for (i = 0; i < count; i++) {
5201                 *gdev = tswap64(*hdev);
5202                 gdev++;
5203                 hdev++;
5204             }
5205             break;
5206         }
5207         case DM_LIST_VERSIONS:
5208         {
5209             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5210             uint32_t remaining_data = guest_data_size;
5211             void *cur_data = argptr;
5212             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5213             int vers_size = thunk_type_size(dm_arg_type, 0);
5214 
5215             while (1) {
5216                 uint32_t next = vers->next;
5217                 if (next) {
5218                     vers->next = vers_size + (strlen(vers->name) + 1);
5219                 }
5220                 if (remaining_data < vers->next) {
5221                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5222                     break;
5223                 }
5224                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5225                 strcpy(cur_data + vers_size, vers->name);
5226                 cur_data += vers->next;
5227                 remaining_data -= vers->next;
5228                 if (!next) {
5229                     break;
5230                 }
5231                 vers = (void*)vers + next;
5232             }
5233             break;
5234         }
5235         default:
5236             unlock_user(argptr, guest_data, 0);
5237             ret = -TARGET_EINVAL;
5238             goto out;
5239         }
5240         unlock_user(argptr, guest_data, guest_data_size);
5241 
5242         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5243         if (!argptr) {
5244             ret = -TARGET_EFAULT;
5245             goto out;
5246         }
5247         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5248         unlock_user(argptr, arg, target_size);
5249     }
5250 out:
5251     g_free(big_buf);
5252     return ret;
5253 }
5254 
5255 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5256                                int cmd, abi_long arg)
5257 {
5258     void *argptr;
5259     int target_size;
5260     const argtype *arg_type = ie->arg_type;
5261     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5262     abi_long ret;
5263 
5264     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5265     struct blkpg_partition host_part;
5266 
5267     /* Read and convert blkpg */
5268     arg_type++;
5269     target_size = thunk_type_size(arg_type, 0);
5270     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5271     if (!argptr) {
5272         ret = -TARGET_EFAULT;
5273         goto out;
5274     }
5275     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5276     unlock_user(argptr, arg, 0);
5277 
5278     switch (host_blkpg->op) {
5279     case BLKPG_ADD_PARTITION:
5280     case BLKPG_DEL_PARTITION:
5281         /* payload is struct blkpg_partition */
5282         break;
5283     default:
5284         /* Unknown opcode */
5285         ret = -TARGET_EINVAL;
5286         goto out;
5287     }
5288 
5289     /* Read and convert blkpg->data */
5290     arg = (abi_long)(uintptr_t)host_blkpg->data;
5291     target_size = thunk_type_size(part_arg_type, 0);
5292     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5293     if (!argptr) {
5294         ret = -TARGET_EFAULT;
5295         goto out;
5296     }
5297     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5298     unlock_user(argptr, arg, 0);
5299 
5300     /* Swizzle the data pointer to our local copy and call! */
5301     host_blkpg->data = &host_part;
5302     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5303 
5304 out:
5305     return ret;
5306 }
5307 
5308 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5309                                 int fd, int cmd, abi_long arg)
5310 {
5311     const argtype *arg_type = ie->arg_type;
5312     const StructEntry *se;
5313     const argtype *field_types;
5314     const int *dst_offsets, *src_offsets;
5315     int target_size;
5316     void *argptr;
5317     abi_ulong *target_rt_dev_ptr = NULL;
5318     unsigned long *host_rt_dev_ptr = NULL;
5319     abi_long ret;
5320     int i;
5321 
5322     assert(ie->access == IOC_W);
5323     assert(*arg_type == TYPE_PTR);
5324     arg_type++;
5325     assert(*arg_type == TYPE_STRUCT);
5326     target_size = thunk_type_size(arg_type, 0);
5327     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5328     if (!argptr) {
5329         return -TARGET_EFAULT;
5330     }
5331     arg_type++;
5332     assert(*arg_type == (int)STRUCT_rtentry);
5333     se = struct_entries + *arg_type++;
5334     assert(se->convert[0] == NULL);
5335     /* convert struct here to be able to catch rt_dev string */
5336     field_types = se->field_types;
5337     dst_offsets = se->field_offsets[THUNK_HOST];
5338     src_offsets = se->field_offsets[THUNK_TARGET];
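    /*
     * Convert the rtentry field by field rather than with a single
     * thunk_convert() call: rt_dev is a pointer to a device-name string in
     * guest memory, which has to be locked and passed to the host ioctl as
     * a host pointer instead of being converted as an integer field.
     */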
5339     for (i = 0; i < se->nb_fields; i++) {
5340         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5341             assert(*field_types == TYPE_PTRVOID);
5342             target_rt_dev_ptr = argptr + src_offsets[i];
5343             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5344             if (*target_rt_dev_ptr != 0) {
5345                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5346                                                   tswapal(*target_rt_dev_ptr));
5347                 if (!*host_rt_dev_ptr) {
5348                     unlock_user(argptr, arg, 0);
5349                     return -TARGET_EFAULT;
5350                 }
5351             } else {
5352                 *host_rt_dev_ptr = 0;
5353             }
5354             field_types++;
5355             continue;
5356         }
5357         field_types = thunk_convert(buf_temp + dst_offsets[i],
5358                                     argptr + src_offsets[i],
5359                                     field_types, THUNK_HOST);
5360     }
5361     unlock_user(argptr, arg, 0);
5362 
5363     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5364 
5365     assert(host_rt_dev_ptr != NULL);
5366     assert(target_rt_dev_ptr != NULL);
5367     if (*host_rt_dev_ptr != 0) {
5368         unlock_user((void *)*host_rt_dev_ptr,
5369                     *target_rt_dev_ptr, 0);
5370     }
5371     return ret;
5372 }
5373 
5374 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5375                                      int fd, int cmd, abi_long arg)
5376 {
5377     int sig = target_to_host_signal(arg);
5378     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5379 }
5380 
5381 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5382                                     int fd, int cmd, abi_long arg)
5383 {
5384     struct timeval tv;
5385     abi_long ret;
5386 
5387     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5388     if (is_error(ret)) {
5389         return ret;
5390     }
5391 
5392     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5393         if (copy_to_user_timeval(arg, &tv)) {
5394             return -TARGET_EFAULT;
5395         }
5396     } else {
5397         if (copy_to_user_timeval64(arg, &tv)) {
5398             return -TARGET_EFAULT;
5399         }
5400     }
5401 
5402     return ret;
5403 }
5404 
5405 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5406                                       int fd, int cmd, abi_long arg)
5407 {
5408     struct timespec ts;
5409     abi_long ret;
5410 
5411     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5412     if (is_error(ret)) {
5413         return ret;
5414     }
5415 
5416     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5417         if (host_to_target_timespec(arg, &ts)) {
5418             return -TARGET_EFAULT;
5419         }
5420     } else {
5421         if (host_to_target_timespec64(arg, &ts)) {
5422             return -TARGET_EFAULT;
5423         }
5424     }
5425 
5426     return ret;
5427 }
5428 
5429 #ifdef TIOCGPTPEER
5430 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5431                                      int fd, int cmd, abi_long arg)
5432 {
5433     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5434     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5435 }
5436 #endif
5437 
5438 #ifdef HAVE_DRM_H
5439 
5440 static void unlock_drm_version(struct drm_version *host_ver,
5441                                struct target_drm_version *target_ver,
5442                                bool copy)
5443 {
5444     unlock_user(host_ver->name, target_ver->name,
5445                                 copy ? host_ver->name_len : 0);
5446     unlock_user(host_ver->date, target_ver->date,
5447                                 copy ? host_ver->date_len : 0);
5448     unlock_user(host_ver->desc, target_ver->desc,
5449                                 copy ? host_ver->desc_len : 0);
5450 }
5451 
5452 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5453                                           struct target_drm_version *target_ver)
5454 {
5455     memset(host_ver, 0, sizeof(*host_ver));
5456 
5457     __get_user(host_ver->name_len, &target_ver->name_len);
5458     if (host_ver->name_len) {
5459         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5460                                    target_ver->name_len, 0);
5461         if (!host_ver->name) {
5462             return -EFAULT;
5463         }
5464     }
5465 
5466     __get_user(host_ver->date_len, &target_ver->date_len);
5467     if (host_ver->date_len) {
5468         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5469                                    target_ver->date_len, 0);
5470         if (!host_ver->date) {
5471             goto err;
5472         }
5473     }
5474 
5475     __get_user(host_ver->desc_len, &target_ver->desc_len);
5476     if (host_ver->desc_len) {
5477         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5478                                    target_ver->desc_len, 0);
5479         if (!host_ver->desc) {
5480             goto err;
5481         }
5482     }
5483 
5484     return 0;
5485 err:
5486     unlock_drm_version(host_ver, target_ver, false);
5487     return -EFAULT;
5488 }
5489 
5490 static inline void host_to_target_drmversion(
5491                                           struct target_drm_version *target_ver,
5492                                           struct drm_version *host_ver)
5493 {
5494     __put_user(host_ver->version_major, &target_ver->version_major);
5495     __put_user(host_ver->version_minor, &target_ver->version_minor);
5496     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5497     __put_user(host_ver->name_len, &target_ver->name_len);
5498     __put_user(host_ver->date_len, &target_ver->date_len);
5499     __put_user(host_ver->desc_len, &target_ver->desc_len);
5500     unlock_drm_version(host_ver, target_ver, true);
5501 }
5502 
5503 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5504                              int fd, int cmd, abi_long arg)
5505 {
5506     struct drm_version *ver;
5507     struct target_drm_version *target_ver;
5508     abi_long ret;
5509 
5510     switch (ie->host_cmd) {
5511     case DRM_IOCTL_VERSION:
5512         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5513             return -TARGET_EFAULT;
5514         }
5515         ver = (struct drm_version *)buf_temp;
5516         ret = target_to_host_drmversion(ver, target_ver);
5517         if (!is_error(ret)) {
5518             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5519             if (is_error(ret)) {
5520                 unlock_drm_version(ver, target_ver, false);
5521             } else {
5522                 host_to_target_drmversion(target_ver, ver);
5523             }
5524         }
5525         unlock_user_struct(target_ver, arg, 0);
5526         return ret;
5527     }
5528     return -TARGET_ENOSYS;
5529 }
5530 
5531 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5532                                            struct drm_i915_getparam *gparam,
5533                                            int fd, abi_long arg)
5534 {
5535     abi_long ret;
5536     int value;
5537     struct target_drm_i915_getparam *target_gparam;
5538 
5539     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5540         return -TARGET_EFAULT;
5541     }
5542 
5543     __get_user(gparam->param, &target_gparam->param);
5544     gparam->value = &value;
5545     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5546     put_user_s32(value, target_gparam->value);
5547 
5548     unlock_user_struct(target_gparam, arg, 0);
5549     return ret;
5550 }
5551 
5552 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5553                                   int fd, int cmd, abi_long arg)
5554 {
5555     switch (ie->host_cmd) {
5556     case DRM_IOCTL_I915_GETPARAM:
5557         return do_ioctl_drm_i915_getparam(ie,
5558                                           (struct drm_i915_getparam *)buf_temp,
5559                                           fd, arg);
5560     default:
5561         return -TARGET_ENOSYS;
5562     }
5563 }
5564 
5565 #endif
5566 
5567 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5568                                         int fd, int cmd, abi_long arg)
5569 {
5570     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5571     struct tun_filter *target_filter;
5572     char *target_addr;
5573 
5574     assert(ie->access == IOC_W);
5575 
5576     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5577     if (!target_filter) {
5578         return -TARGET_EFAULT;
5579     }
5580     filter->flags = tswap16(target_filter->flags);
5581     filter->count = tswap16(target_filter->count);
5582     unlock_user(target_filter, arg, 0);
5583 
5584     if (filter->count) {
5585         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5586             MAX_STRUCT_SIZE) {
5587             return -TARGET_EFAULT;
5588         }
5589 
5590         target_addr = lock_user(VERIFY_READ,
5591                                 arg + offsetof(struct tun_filter, addr),
5592                                 filter->count * ETH_ALEN, 1);
5593         if (!target_addr) {
5594             return -TARGET_EFAULT;
5595         }
5596         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5597         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5598     }
5599 
5600     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5601 }
5602 
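/*
 * The ioctl dispatch table, generated from ioctls.h.  Each entry pairs a
 * target command number with the host command, an access mode and either a
 * thunked argument description or a do_ioctl_*() helper for commands that
 * need hand-written conversion; IOCTL_IGNORE entries keep host_cmd == 0 and
 * are rejected with -TARGET_ENOTTY in do_ioctl() below.
 *
 * As an illustration (assuming the usual entry for this command), a line
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * in ioctls.h expands here to
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 */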
5603 IOCTLEntry ioctl_entries[] = {
5604 #define IOCTL(cmd, access, ...) \
5605     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5606 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5607     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5608 #define IOCTL_IGNORE(cmd) \
5609     { TARGET_ ## cmd, 0, #cmd },
5610 #include "ioctls.h"
5611     { 0, 0, },
5612 };
5613 
5614 /* ??? Implement proper locking for ioctls.  */
5615 /* do_ioctl() must return target values and target errnos. */
5616 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5617 {
5618     const IOCTLEntry *ie;
5619     const argtype *arg_type;
5620     abi_long ret;
5621     uint8_t buf_temp[MAX_STRUCT_SIZE];
5622     int target_size;
5623     void *argptr;
5624 
5625     ie = ioctl_entries;
5626     for (;;) {
5627         if (ie->target_cmd == 0) {
5628             qemu_log_mask(
5629                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5630             return -TARGET_ENOTTY;
5631         }
5632         if (ie->target_cmd == cmd)
5633             break;
5634         ie++;
5635     }
5636     arg_type = ie->arg_type;
5637     if (ie->do_ioctl) {
5638         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5639     } else if (!ie->host_cmd) {
5640         /* Some architectures define BSD ioctls in their headers
5641            that are not implemented in Linux.  */
5642         return -TARGET_ENOTTY;
5643     }
5644 
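    /*
     * Generic path, driven by the argument description from ioctls.h:
     * IOC_W arguments are converted target->host before the ioctl,
     * IOC_R arguments host->target afterwards, and IOC_RW both ways.
     */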
5645     switch (arg_type[0]) {
5646     case TYPE_NULL:
5647         /* no argument */
5648         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5649         break;
5650     case TYPE_PTRVOID:
5651     case TYPE_INT:
5652     case TYPE_LONG:
5653     case TYPE_ULONG:
5654         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5655         break;
5656     case TYPE_PTR:
5657         arg_type++;
5658         target_size = thunk_type_size(arg_type, 0);
5659         switch (ie->access) {
5660         case IOC_R:
5661             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5662             if (!is_error(ret)) {
5663                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5664                 if (!argptr)
5665                     return -TARGET_EFAULT;
5666                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5667                 unlock_user(argptr, arg, target_size);
5668             }
5669             break;
5670         case IOC_W:
5671             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5672             if (!argptr)
5673                 return -TARGET_EFAULT;
5674             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5675             unlock_user(argptr, arg, 0);
5676             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5677             break;
5678         default:
5679         case IOC_RW:
5680             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5681             if (!argptr)
5682                 return -TARGET_EFAULT;
5683             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5684             unlock_user(argptr, arg, 0);
5685             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5686             if (!is_error(ret)) {
5687                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5688                 if (!argptr)
5689                     return -TARGET_EFAULT;
5690                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5691                 unlock_user(argptr, arg, target_size);
5692             }
5693             break;
5694         }
5695         break;
5696     default:
5697         qemu_log_mask(LOG_UNIMP,
5698                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5699                       (long)cmd, arg_type[0]);
5700         ret = -TARGET_ENOTTY;
5701         break;
5702     }
5703     return ret;
5704 }
5705 
5706 static const bitmask_transtbl iflag_tbl[] = {
5707         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5708         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5709         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5710         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5711         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5712         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5713         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5714         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5715         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5716         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5717         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5718         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5719         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5720         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5721         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5722 };
5723 
5724 static const bitmask_transtbl oflag_tbl[] = {
5725 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5726 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5727 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5728 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5729 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5730 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5731 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5732 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5733 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5734 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5735 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5736 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5737 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5738 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5739 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5740 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5741 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5742 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5743 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5744 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5745 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5746 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5747 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5748 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5749 };
5750 
5751 static const bitmask_transtbl cflag_tbl[] = {
5752 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5753 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5754 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5755 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5756 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5757 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5758 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5759 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5760 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5761 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5762 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5763 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5764 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5765 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5766 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5767 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5768 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5769 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5770 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5771 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5772 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5773 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5774 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5775 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5776 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5777 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5778 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5779 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5780 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5781 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5782 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5783 };
5784 
5785 static const bitmask_transtbl lflag_tbl[] = {
5786   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5787   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5788   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5789   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5790   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5791   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5792   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5793   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5794   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5795   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5796   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5797   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5798   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5799   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5800   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5801   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5802 };
5803 
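/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() ORs host_bits into the result
 * whenever (value & target_mask) == target_bits, and host_to_target_bitmask()
 * does the same in the opposite direction.  This copes with termios flag
 * bits whose numeric values differ between target and host.
 */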
5804 static void target_to_host_termios (void *dst, const void *src)
5805 {
5806     struct host_termios *host = dst;
5807     const struct target_termios *target = src;
5808 
5809     host->c_iflag =
5810         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5811     host->c_oflag =
5812         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5813     host->c_cflag =
5814         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5815     host->c_lflag =
5816         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5817     host->c_line = target->c_line;
5818 
5819     memset(host->c_cc, 0, sizeof(host->c_cc));
5820     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5821     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5822     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5823     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5824     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5825     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5826     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5827     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5828     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5829     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5830     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5831     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5832     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5833     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5834     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5835     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5836     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5837 }
5838 
5839 static void host_to_target_termios (void *dst, const void *src)
5840 {
5841     struct target_termios *target = dst;
5842     const struct host_termios *host = src;
5843 
5844     target->c_iflag =
5845         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5846     target->c_oflag =
5847         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5848     target->c_cflag =
5849         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5850     target->c_lflag =
5851         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5852     target->c_line = host->c_line;
5853 
5854     memset(target->c_cc, 0, sizeof(target->c_cc));
5855     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5856     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5857     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5858     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5859     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5860     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5861     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5862     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5863     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5864     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5865     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5866     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5867     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5868     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5869     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5870     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5871     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5872 }
5873 
5874 static const StructEntry struct_termios_def = {
5875     .convert = { host_to_target_termios, target_to_host_termios },
5876     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5877     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5878     .print = print_termios,
5879 };
5880 
5881 /* If the host does not provide these bits, they may be safely discarded. */
5882 #ifndef MAP_SYNC
5883 #define MAP_SYNC 0
5884 #endif
5885 #ifndef MAP_UNINITIALIZED
5886 #define MAP_UNINITIALIZED 0
5887 #endif
5888 
5889 static const bitmask_transtbl mmap_flags_tbl[] = {
5890     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5891     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5892       MAP_ANONYMOUS, MAP_ANONYMOUS },
5893     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5894       MAP_GROWSDOWN, MAP_GROWSDOWN },
5895     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5896       MAP_DENYWRITE, MAP_DENYWRITE },
5897     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5898       MAP_EXECUTABLE, MAP_EXECUTABLE },
5899     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5900     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5901       MAP_NORESERVE, MAP_NORESERVE },
5902     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5903     /* MAP_STACK had been ignored by the kernel for quite some time.
5904        Recognize it for the target insofar as we do not want to pass
5905        it through to the host.  */
5906     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5907     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5908     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5909     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5910       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5911     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5912       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5913 };
5914 
5915 /*
5916  * Arrange for legacy / undefined architecture specific flags to be
5917  * ignored by mmap handling code.
5918  */
5919 #ifndef TARGET_MAP_32BIT
5920 #define TARGET_MAP_32BIT 0
5921 #endif
5922 #ifndef TARGET_MAP_HUGE_2MB
5923 #define TARGET_MAP_HUGE_2MB 0
5924 #endif
5925 #ifndef TARGET_MAP_HUGE_1GB
5926 #define TARGET_MAP_HUGE_1GB 0
5927 #endif
5928 
5929 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5930                         int target_flags, int fd, off_t offset)
5931 {
5932     /*
5933      * The historical set of flags that all mmap types implicitly support.
5934      */
5935     enum {
5936         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5937                                | TARGET_MAP_PRIVATE
5938                                | TARGET_MAP_FIXED
5939                                | TARGET_MAP_ANONYMOUS
5940                                | TARGET_MAP_DENYWRITE
5941                                | TARGET_MAP_EXECUTABLE
5942                                | TARGET_MAP_UNINITIALIZED
5943                                | TARGET_MAP_GROWSDOWN
5944                                | TARGET_MAP_LOCKED
5945                                | TARGET_MAP_NORESERVE
5946                                | TARGET_MAP_POPULATE
5947                                | TARGET_MAP_NONBLOCK
5948                                | TARGET_MAP_STACK
5949                                | TARGET_MAP_HUGETLB
5950                                | TARGET_MAP_32BIT
5951                                | TARGET_MAP_HUGE_2MB
5952                                | TARGET_MAP_HUGE_1GB
5953     };
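    /*
     * Anything outside this mask is only meaningful for particular mapping
     * types and is validated per type below; for MAP_SHARED_VALIDATE the
     * kernel rejects unknown flags, so unknown bits are refused here too.
     */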
5954     int host_flags;
5955 
5956     switch (target_flags & TARGET_MAP_TYPE) {
5957     case TARGET_MAP_PRIVATE:
5958         host_flags = MAP_PRIVATE;
5959         break;
5960     case TARGET_MAP_SHARED:
5961         host_flags = MAP_SHARED;
5962         break;
5963     case TARGET_MAP_SHARED_VALIDATE:
5964         /*
5965          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5966          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5967          */
5968         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5969             return -TARGET_EOPNOTSUPP;
5970         }
5971         host_flags = MAP_SHARED_VALIDATE;
5972         if (target_flags & TARGET_MAP_SYNC) {
5973             host_flags |= MAP_SYNC;
5974         }
5975         break;
5976     default:
5977         return -TARGET_EINVAL;
5978     }
5979     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5980 
5981     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5982 }
5983 
5984 /*
5985  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5986  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5987  */
5988 #if defined(TARGET_I386)
5989 
5990 /* NOTE: there is really one LDT for all the threads */
5991 static uint8_t *ldt_table;
5992 
5993 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5994 {
5995     int size;
5996     void *p;
5997 
5998     if (!ldt_table)
5999         return 0;
6000     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6001     if (size > bytecount)
6002         size = bytecount;
6003     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6004     if (!p)
6005         return -TARGET_EFAULT;
6006     /* ??? Should this be byteswapped?  */
6007     memcpy(p, ldt_table, size);
6008     unlock_user(p, ptr, size);
6009     return size;
6010 }
6011 
6012 /* XXX: add locking support */
6013 static abi_long write_ldt(CPUX86State *env,
6014                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6015 {
6016     struct target_modify_ldt_ldt_s ldt_info;
6017     struct target_modify_ldt_ldt_s *target_ldt_info;
6018     int seg_32bit, contents, read_exec_only, limit_in_pages;
6019     int seg_not_present, useable, lm;
6020     uint32_t *lp, entry_1, entry_2;
6021 
6022     if (bytecount != sizeof(ldt_info))
6023         return -TARGET_EINVAL;
6024     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6025         return -TARGET_EFAULT;
6026     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6027     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6028     ldt_info.limit = tswap32(target_ldt_info->limit);
6029     ldt_info.flags = tswap32(target_ldt_info->flags);
6030     unlock_user_struct(target_ldt_info, ptr, 0);
6031 
6032     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6033         return -TARGET_EINVAL;
6034     seg_32bit = ldt_info.flags & 1;
6035     contents = (ldt_info.flags >> 1) & 3;
6036     read_exec_only = (ldt_info.flags >> 3) & 1;
6037     limit_in_pages = (ldt_info.flags >> 4) & 1;
6038     seg_not_present = (ldt_info.flags >> 5) & 1;
6039     useable = (ldt_info.flags >> 6) & 1;
6040 #ifdef TARGET_ABI32
6041     lm = 0;
6042 #else
6043     lm = (ldt_info.flags >> 7) & 1;
6044 #endif
6045     if (contents == 3) {
6046         if (oldmode)
6047             return -TARGET_EINVAL;
6048         if (seg_not_present == 0)
6049             return -TARGET_EINVAL;
6050     }
6051     /* allocate the LDT */
6052     if (!ldt_table) {
6053         env->ldt.base = target_mmap(0,
6054                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6055                                     PROT_READ|PROT_WRITE,
6056                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6057         if (env->ldt.base == -1)
6058             return -TARGET_ENOMEM;
6059         memset(g2h_untagged(env->ldt.base), 0,
6060                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6061         env->ldt.limit = 0xffff;
6062         ldt_table = g2h_untagged(env->ldt.base);
6063     }
6064 
6065     /* NOTE: same code as Linux kernel */
6066     /* Allow LDTs to be cleared by the user. */
6067     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6068         if (oldmode ||
6069             (contents == 0             &&
6070              read_exec_only == 1       &&
6071              seg_32bit == 0            &&
6072              limit_in_pages == 0       &&
6073              seg_not_present == 1      &&
6074              useable == 0 )) {
6075             entry_1 = 0;
6076             entry_2 = 0;
6077             goto install;
6078         }
6079     }
6080 
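    /*
     * Pack the request into the two 32-bit words of an x86 segment
     * descriptor: entry_1 carries base[15:0] and limit[15:0], entry_2
     * carries base[31:24], base[23:16], limit[19:16], the access bits
     * derived from the flags, plus 0x7000 for DPL 3 and a non-system
     * (code/data) descriptor type.
     */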
6081     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6082         (ldt_info.limit & 0x0ffff);
6083     entry_2 = (ldt_info.base_addr & 0xff000000) |
6084         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6085         (ldt_info.limit & 0xf0000) |
6086         ((read_exec_only ^ 1) << 9) |
6087         (contents << 10) |
6088         ((seg_not_present ^ 1) << 15) |
6089         (seg_32bit << 22) |
6090         (limit_in_pages << 23) |
6091         (lm << 21) |
6092         0x7000;
6093     if (!oldmode)
6094         entry_2 |= (useable << 20);
6095 
6096     /* Install the new entry ...  */
6097 install:
6098     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6099     lp[0] = tswap32(entry_1);
6100     lp[1] = tswap32(entry_2);
6101     return 0;
6102 }
6103 
6104 /* specific and weird i386 syscalls */
6105 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6106                               unsigned long bytecount)
6107 {
6108     abi_long ret;
6109 
6110     switch (func) {
6111     case 0:
6112         ret = read_ldt(ptr, bytecount);
6113         break;
6114     case 1:
6115         ret = write_ldt(env, ptr, bytecount, 1);
6116         break;
6117     case 0x11:
6118         ret = write_ldt(env, ptr, bytecount, 0);
6119         break;
6120     default:
6121         ret = -TARGET_ENOSYS;
6122         break;
6123     }
6124     return ret;
6125 }
6126 
6127 #if defined(TARGET_ABI32)
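/* Emulate set_thread_area(): if entry_number is -1, pick a free GDT TLS
 * slot and report it back to the guest, then encode the descriptor the
 * same way write_ldt() above does.
 */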
6128 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6129 {
6130     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6131     struct target_modify_ldt_ldt_s ldt_info;
6132     struct target_modify_ldt_ldt_s *target_ldt_info;
6133     int seg_32bit, contents, read_exec_only, limit_in_pages;
6134     int seg_not_present, useable, lm;
6135     uint32_t *lp, entry_1, entry_2;
6136     int i;
6137 
6138     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6139     if (!target_ldt_info)
6140         return -TARGET_EFAULT;
6141     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6142     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6143     ldt_info.limit = tswap32(target_ldt_info->limit);
6144     ldt_info.flags = tswap32(target_ldt_info->flags);
6145     if (ldt_info.entry_number == -1) {
6146         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6147             if (gdt_table[i] == 0) {
6148                 ldt_info.entry_number = i;
6149                 target_ldt_info->entry_number = tswap32(i);
6150                 break;
6151             }
6152         }
6153     }
6154     unlock_user_struct(target_ldt_info, ptr, 1);
6155 
6156     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6157         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6158            return -TARGET_EINVAL;
6159     seg_32bit = ldt_info.flags & 1;
6160     contents = (ldt_info.flags >> 1) & 3;
6161     read_exec_only = (ldt_info.flags >> 3) & 1;
6162     limit_in_pages = (ldt_info.flags >> 4) & 1;
6163     seg_not_present = (ldt_info.flags >> 5) & 1;
6164     useable = (ldt_info.flags >> 6) & 1;
6165 #ifdef TARGET_ABI32
6166     lm = 0;
6167 #else
6168     lm = (ldt_info.flags >> 7) & 1;
6169 #endif
6170 
6171     if (contents == 3) {
6172         if (seg_not_present == 0)
6173             return -TARGET_EINVAL;
6174     }
6175 
6176     /* NOTE: same code as Linux kernel */
6177     /* Allow LDTs to be cleared by the user. */
6178     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6179         if ((contents == 0             &&
6180              read_exec_only == 1       &&
6181              seg_32bit == 0            &&
6182              limit_in_pages == 0       &&
6183              seg_not_present == 1      &&
6184              useable == 0 )) {
6185             entry_1 = 0;
6186             entry_2 = 0;
6187             goto install;
6188         }
6189     }
6190 
6191     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6192         (ldt_info.limit & 0x0ffff);
6193     entry_2 = (ldt_info.base_addr & 0xff000000) |
6194         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6195         (ldt_info.limit & 0xf0000) |
6196         ((read_exec_only ^ 1) << 9) |
6197         (contents << 10) |
6198         ((seg_not_present ^ 1) << 15) |
6199         (seg_32bit << 22) |
6200         (limit_in_pages << 23) |
6201         (useable << 20) |
6202         (lm << 21) |
6203         0x7000;
6204 
6205     /* Install the new entry ...  */
6206 install:
6207     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6208     lp[0] = tswap32(entry_1);
6209     lp[1] = tswap32(entry_2);
6210     return 0;
6211 }
6212 
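/* Emulate get_thread_area(): decode the selected GDT TLS descriptor back
 * into the guest's user_desc-style structure.
 */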
6213 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6214 {
6215     struct target_modify_ldt_ldt_s *target_ldt_info;
6216     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6217     uint32_t base_addr, limit, flags;
6218     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6219     int seg_not_present, useable, lm;
6220     uint32_t *lp, entry_1, entry_2;
6221 
6222     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6223     if (!target_ldt_info)
6224         return -TARGET_EFAULT;
6225     idx = tswap32(target_ldt_info->entry_number);
6226     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6227         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6228         unlock_user_struct(target_ldt_info, ptr, 1);
6229         return -TARGET_EINVAL;
6230     }
6231     lp = (uint32_t *)(gdt_table + idx);
6232     entry_1 = tswap32(lp[0]);
6233     entry_2 = tswap32(lp[1]);
6234 
6235     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6236     contents = (entry_2 >> 10) & 3;
6237     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6238     seg_32bit = (entry_2 >> 22) & 1;
6239     limit_in_pages = (entry_2 >> 23) & 1;
6240     useable = (entry_2 >> 20) & 1;
6241 #ifdef TARGET_ABI32
6242     lm = 0;
6243 #else
6244     lm = (entry_2 >> 21) & 1;
6245 #endif
6246     flags = (seg_32bit << 0) | (contents << 1) |
6247         (read_exec_only << 3) | (limit_in_pages << 4) |
6248         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6249     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6250     base_addr = (entry_1 >> 16) |
6251         (entry_2 & 0xff000000) |
6252         ((entry_2 & 0xff) << 16);
6253     target_ldt_info->base_addr = tswapal(base_addr);
6254     target_ldt_info->limit = tswap32(limit);
6255     target_ldt_info->flags = tswap32(flags);
6256     unlock_user_struct(target_ldt_info, ptr, 1);
6257     return 0;
6258 }
6259 
6260 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6261 {
6262     return -TARGET_ENOSYS;
6263 }
6264 #else
6265 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6266 {
6267     abi_long ret = 0;
6268     abi_ulong val;
6269     int idx;
6270 
6271     switch(code) {
6272     case TARGET_ARCH_SET_GS:
6273     case TARGET_ARCH_SET_FS:
6274         if (code == TARGET_ARCH_SET_GS)
6275             idx = R_GS;
6276         else
6277             idx = R_FS;
6278         cpu_x86_load_seg(env, idx, 0);
6279         env->segs[idx].base = addr;
6280         break;
6281     case TARGET_ARCH_GET_GS:
6282     case TARGET_ARCH_GET_FS:
6283         if (code == TARGET_ARCH_GET_GS)
6284             idx = R_GS;
6285         else
6286             idx = R_FS;
6287         val = env->segs[idx].base;
6288         if (put_user(val, addr, abi_ulong))
6289             ret = -TARGET_EFAULT;
6290         break;
6291     default:
6292         ret = -TARGET_EINVAL;
6293         break;
6294     }
6295     return ret;
6296 }
6297 #endif /* defined(TARGET_ABI32) */
6298 #endif /* defined(TARGET_I386) */
6299 
6300 /*
6301  * These constants are generic.  Supply any that are missing from the host.
6302  */
6303 #ifndef PR_SET_NAME
6304 # define PR_SET_NAME    15
6305 # define PR_GET_NAME    16
6306 #endif
6307 #ifndef PR_SET_FP_MODE
6308 # define PR_SET_FP_MODE 45
6309 # define PR_GET_FP_MODE 46
6310 # define PR_FP_MODE_FR   (1 << 0)
6311 # define PR_FP_MODE_FRE  (1 << 1)
6312 #endif
6313 #ifndef PR_SVE_SET_VL
6314 # define PR_SVE_SET_VL  50
6315 # define PR_SVE_GET_VL  51
6316 # define PR_SVE_VL_LEN_MASK  0xffff
6317 # define PR_SVE_VL_INHERIT   (1 << 17)
6318 #endif
6319 #ifndef PR_PAC_RESET_KEYS
6320 # define PR_PAC_RESET_KEYS  54
6321 # define PR_PAC_APIAKEY   (1 << 0)
6322 # define PR_PAC_APIBKEY   (1 << 1)
6323 # define PR_PAC_APDAKEY   (1 << 2)
6324 # define PR_PAC_APDBKEY   (1 << 3)
6325 # define PR_PAC_APGAKEY   (1 << 4)
6326 #endif
6327 #ifndef PR_SET_TAGGED_ADDR_CTRL
6328 # define PR_SET_TAGGED_ADDR_CTRL 55
6329 # define PR_GET_TAGGED_ADDR_CTRL 56
6330 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6331 #endif
6332 #ifndef PR_SET_IO_FLUSHER
6333 # define PR_SET_IO_FLUSHER 57
6334 # define PR_GET_IO_FLUSHER 58
6335 #endif
6336 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6337 # define PR_SET_SYSCALL_USER_DISPATCH 59
6338 #endif
6339 #ifndef PR_SME_SET_VL
6340 # define PR_SME_SET_VL  63
6341 # define PR_SME_GET_VL  64
6342 # define PR_SME_VL_LEN_MASK  0xffff
6343 # define PR_SME_VL_INHERIT   (1 << 17)
6344 #endif
6345 
6346 #include "target_prctl.h"
6347 
6348 static abi_long do_prctl_inval0(CPUArchState *env)
6349 {
6350     return -TARGET_EINVAL;
6351 }
6352 
6353 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6354 {
6355     return -TARGET_EINVAL;
6356 }
6357 
6358 #ifndef do_prctl_get_fp_mode
6359 #define do_prctl_get_fp_mode do_prctl_inval0
6360 #endif
6361 #ifndef do_prctl_set_fp_mode
6362 #define do_prctl_set_fp_mode do_prctl_inval1
6363 #endif
6364 #ifndef do_prctl_sve_get_vl
6365 #define do_prctl_sve_get_vl do_prctl_inval0
6366 #endif
6367 #ifndef do_prctl_sve_set_vl
6368 #define do_prctl_sve_set_vl do_prctl_inval1
6369 #endif
6370 #ifndef do_prctl_reset_keys
6371 #define do_prctl_reset_keys do_prctl_inval1
6372 #endif
6373 #ifndef do_prctl_set_tagged_addr_ctrl
6374 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6375 #endif
6376 #ifndef do_prctl_get_tagged_addr_ctrl
6377 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6378 #endif
6379 #ifndef do_prctl_get_unalign
6380 #define do_prctl_get_unalign do_prctl_inval1
6381 #endif
6382 #ifndef do_prctl_set_unalign
6383 #define do_prctl_set_unalign do_prctl_inval1
6384 #endif
6385 #ifndef do_prctl_sme_get_vl
6386 #define do_prctl_sme_get_vl do_prctl_inval0
6387 #endif
6388 #ifndef do_prctl_sme_set_vl
6389 #define do_prctl_sme_set_vl do_prctl_inval1
6390 #endif
6391 
6392 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6393                          abi_long arg3, abi_long arg4, abi_long arg5)
6394 {
6395     abi_long ret;
6396 
6397     switch (option) {
6398     case PR_GET_PDEATHSIG:
6399         {
6400             int deathsig;
6401             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6402                                   arg3, arg4, arg5));
6403             if (!is_error(ret) &&
6404                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6405                 return -TARGET_EFAULT;
6406             }
6407             return ret;
6408         }
6409     case PR_SET_PDEATHSIG:
6410         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6411                                arg3, arg4, arg5));
6412     case PR_GET_NAME:
6413         {
6414             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6415             if (!name) {
6416                 return -TARGET_EFAULT;
6417             }
6418             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6419                                   arg3, arg4, arg5));
6420             unlock_user(name, arg2, 16);
6421             return ret;
6422         }
6423     case PR_SET_NAME:
6424         {
6425             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6426             if (!name) {
6427                 return -TARGET_EFAULT;
6428             }
6429             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6430                                   arg3, arg4, arg5));
6431             unlock_user(name, arg2, 0);
6432             return ret;
6433         }
6434     case PR_GET_FP_MODE:
6435         return do_prctl_get_fp_mode(env);
6436     case PR_SET_FP_MODE:
6437         return do_prctl_set_fp_mode(env, arg2);
6438     case PR_SVE_GET_VL:
6439         return do_prctl_sve_get_vl(env);
6440     case PR_SVE_SET_VL:
6441         return do_prctl_sve_set_vl(env, arg2);
6442     case PR_SME_GET_VL:
6443         return do_prctl_sme_get_vl(env);
6444     case PR_SME_SET_VL:
6445         return do_prctl_sme_set_vl(env, arg2);
6446     case PR_PAC_RESET_KEYS:
6447         if (arg3 || arg4 || arg5) {
6448             return -TARGET_EINVAL;
6449         }
6450         return do_prctl_reset_keys(env, arg2);
6451     case PR_SET_TAGGED_ADDR_CTRL:
6452         if (arg3 || arg4 || arg5) {
6453             return -TARGET_EINVAL;
6454         }
6455         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6456     case PR_GET_TAGGED_ADDR_CTRL:
6457         if (arg2 || arg3 || arg4 || arg5) {
6458             return -TARGET_EINVAL;
6459         }
6460         return do_prctl_get_tagged_addr_ctrl(env);
6461 
6462     case PR_GET_UNALIGN:
6463         return do_prctl_get_unalign(env, arg2);
6464     case PR_SET_UNALIGN:
6465         return do_prctl_set_unalign(env, arg2);
6466 
6467     case PR_CAP_AMBIENT:
6468     case PR_CAPBSET_READ:
6469     case PR_CAPBSET_DROP:
6470     case PR_GET_DUMPABLE:
6471     case PR_SET_DUMPABLE:
6472     case PR_GET_KEEPCAPS:
6473     case PR_SET_KEEPCAPS:
6474     case PR_GET_SECUREBITS:
6475     case PR_SET_SECUREBITS:
6476     case PR_GET_TIMING:
6477     case PR_SET_TIMING:
6478     case PR_GET_TIMERSLACK:
6479     case PR_SET_TIMERSLACK:
6480     case PR_MCE_KILL:
6481     case PR_MCE_KILL_GET:
6482     case PR_GET_NO_NEW_PRIVS:
6483     case PR_SET_NO_NEW_PRIVS:
6484     case PR_GET_IO_FLUSHER:
6485     case PR_SET_IO_FLUSHER:
6486     case PR_SET_CHILD_SUBREAPER:
6487     case PR_GET_SPECULATION_CTRL:
6488     case PR_SET_SPECULATION_CTRL:
6489         /* These prctl options take no pointer arguments, so we can pass them on to the host directly. */
6490         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6491 
6492     case PR_GET_CHILD_SUBREAPER:
6493         {
6494             int val;
6495             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6496                                   arg3, arg4, arg5));
6497             if (!is_error(ret) && put_user_s32(val, arg2)) {
6498                 return -TARGET_EFAULT;
6499             }
6500             return ret;
6501         }
6502 
6503     case PR_GET_TID_ADDRESS:
6504         {
6505             TaskState *ts = get_task_state(env_cpu(env));
6506             return put_user_ual(ts->child_tidptr, arg2);
6507         }
6508 
6509     case PR_GET_FPEXC:
6510     case PR_SET_FPEXC:
6511         /* Was used for SPE on PowerPC. */
6512         return -TARGET_EINVAL;
6513 
6514     case PR_GET_ENDIAN:
6515     case PR_SET_ENDIAN:
6516     case PR_GET_FPEMU:
6517     case PR_SET_FPEMU:
6518     case PR_SET_MM:
6519     case PR_GET_SECCOMP:
6520     case PR_SET_SECCOMP:
6521     case PR_SET_SYSCALL_USER_DISPATCH:
6522     case PR_GET_THP_DISABLE:
6523     case PR_SET_THP_DISABLE:
6524     case PR_GET_TSC:
6525     case PR_SET_TSC:
6526         /* Refuse these so the target cannot disable things QEMU itself relies on. */
6527         return -TARGET_EINVAL;
6528 
6529     default:
6530         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6531                       option);
6532         return -TARGET_EINVAL;
6533     }
6534 }
6535 
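/* Host stack size for threads backing guest clone(CLONE_VM). */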
6536 #define NEW_STACK_SIZE 0x40000
6537 
6538 
6539 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
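/* Arguments passed from do_fork() to the new thread's clone_func(). */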
6540 typedef struct {
6541     CPUArchState *env;
6542     pthread_mutex_t mutex;
6543     pthread_cond_t cond;
6544     pthread_t thread;
6545     uint32_t tid;
6546     abi_ulong child_tidptr;
6547     abi_ulong parent_tidptr;
6548     sigset_t sigmask;
6549 } new_thread_info;
6550 
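/* Start routine for a new guest thread: register with RCU/TCG, publish the
 * TID, restore the signal mask, signal the parent and enter the CPU loop.
 */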
6551 static void *clone_func(void *arg)
6552 {
6553     new_thread_info *info = arg;
6554     CPUArchState *env;
6555     CPUState *cpu;
6556     TaskState *ts;
6557 
6558     rcu_register_thread();
6559     tcg_register_thread();
6560     env = info->env;
6561     cpu = env_cpu(env);
6562     thread_cpu = cpu;
6563     ts = get_task_state(cpu);
6564     info->tid = sys_gettid();
6565     task_settid(ts);
6566     if (info->child_tidptr)
6567         put_user_u32(info->tid, info->child_tidptr);
6568     if (info->parent_tidptr)
6569         put_user_u32(info->tid, info->parent_tidptr);
6570     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6571     /* Enable signals.  */
6572     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6573     /* Signal to the parent that we're ready.  */
6574     pthread_mutex_lock(&info->mutex);
6575     pthread_cond_broadcast(&info->cond);
6576     pthread_mutex_unlock(&info->mutex);
6577     /* Wait until the parent has finished initializing the tls state.  */
6578     pthread_mutex_lock(&clone_lock);
6579     pthread_mutex_unlock(&clone_lock);
6580     cpu_loop(env);
6581     /* never exits */
6582     return NULL;
6583 }
6584 
6585 /* do_fork() must return host values and target errnos (unlike most
6586    do_*() functions). */
6587 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6588                    abi_ulong parent_tidptr, target_ulong newtls,
6589                    abi_ulong child_tidptr)
6590 {
6591     CPUState *cpu = env_cpu(env);
6592     int ret;
6593     TaskState *ts;
6594     CPUState *new_cpu;
6595     CPUArchState *new_env;
6596     sigset_t sigmask;
6597 
6598     flags &= ~CLONE_IGNORED_FLAGS;
6599 
6600     /* Emulate vfork() with fork() */
6601     if (flags & CLONE_VFORK)
6602         flags &= ~(CLONE_VFORK | CLONE_VM);
6603 
6604     if (flags & CLONE_VM) {
6605         TaskState *parent_ts = get_task_state(cpu);
6606         new_thread_info info;
6607         pthread_attr_t attr;
6608 
6609         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6610             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6611             return -TARGET_EINVAL;
6612         }
6613 
6614         ts = g_new0(TaskState, 1);
6615         init_task_state(ts);
6616 
6617         /* Grab a mutex so that thread setup appears atomic.  */
6618         pthread_mutex_lock(&clone_lock);
6619 
6620         /*
6621          * If this is our first additional thread, we need to ensure we
6622          * generate code for parallel execution and flush old translations.
6623          * Do this now so that the copy gets CF_PARALLEL too.
6624          */
6625         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6626             tcg_cflags_set(cpu, CF_PARALLEL);
6627             tb_flush(cpu);
6628         }
6629 
6630         /* we create a new CPU instance. */
6631         new_env = cpu_copy(env);
6632         /* Init regs that differ from the parent.  */
6633         cpu_clone_regs_child(new_env, newsp, flags);
6634         cpu_clone_regs_parent(env, flags);
6635         new_cpu = env_cpu(new_env);
6636         new_cpu->opaque = ts;
6637         ts->bprm = parent_ts->bprm;
6638         ts->info = parent_ts->info;
6639         ts->signal_mask = parent_ts->signal_mask;
6640 
6641         if (flags & CLONE_CHILD_CLEARTID) {
6642             ts->child_tidptr = child_tidptr;
6643         }
6644 
6645         if (flags & CLONE_SETTLS) {
6646             cpu_set_tls (new_env, newtls);
6647         }
6648 
6649         memset(&info, 0, sizeof(info));
6650         pthread_mutex_init(&info.mutex, NULL);
6651         pthread_mutex_lock(&info.mutex);
6652         pthread_cond_init(&info.cond, NULL);
6653         info.env = new_env;
6654         if (flags & CLONE_CHILD_SETTID) {
6655             info.child_tidptr = child_tidptr;
6656         }
6657         if (flags & CLONE_PARENT_SETTID) {
6658             info.parent_tidptr = parent_tidptr;
6659         }
6660 
6661         ret = pthread_attr_init(&attr);
6662         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6663         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6664         /* It is not safe to deliver signals until the child has finished
6665            initializing, so temporarily block all signals.  */
6666         sigfillset(&sigmask);
6667         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6668         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6669 
6670         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6671         /* TODO: Free new CPU state if thread creation failed.  */
6672 
6673         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6674         pthread_attr_destroy(&attr);
6675         if (ret == 0) {
6676             /* Wait for the child to initialize.  */
6677             pthread_cond_wait(&info.cond, &info.mutex);
6678             ret = info.tid;
6679         } else {
6680             ret = -1;
6681         }
6682         pthread_mutex_unlock(&info.mutex);
6683         pthread_cond_destroy(&info.cond);
6684         pthread_mutex_destroy(&info.mutex);
6685         pthread_mutex_unlock(&clone_lock);
6686     } else {
6687         /* Without CLONE_VM, we treat the request as a plain fork */
6688         if (flags & CLONE_INVALID_FORK_FLAGS) {
6689             return -TARGET_EINVAL;
6690         }
6691 
6692         /* We can't support custom termination signals */
6693         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6694             return -TARGET_EINVAL;
6695         }
6696 
6697 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6698         if (flags & CLONE_PIDFD) {
6699             return -TARGET_EINVAL;
6700         }
6701 #endif
6702 
6703         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6704         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6705             return -TARGET_EINVAL;
6706         }
6707 
6708         if (block_signals()) {
6709             return -QEMU_ERESTARTSYS;
6710         }
6711 
6712         fork_start();
6713         ret = fork();
6714         if (ret == 0) {
6715             /* Child Process.  */
6716             cpu_clone_regs_child(env, newsp, flags);
6717             fork_end(ret);
6718             /* There is a race condition here.  The parent process could
6719                theoretically read the TID in the child process before the
6720                child tid is set.  This would require using either ptrace
6721                (not implemented) or having *_tidptr point at a shared memory
6722                mapping.  We can't repeat the spinlock hack used above because
6723                the child process gets its own copy of the lock.  */
6724             if (flags & CLONE_CHILD_SETTID)
6725                 put_user_u32(sys_gettid(), child_tidptr);
6726             if (flags & CLONE_PARENT_SETTID)
6727                 put_user_u32(sys_gettid(), parent_tidptr);
6728             ts = get_task_state(cpu);
6729             if (flags & CLONE_SETTLS)
6730                 cpu_set_tls (env, newtls);
6731             if (flags & CLONE_CHILD_CLEARTID)
6732                 ts->child_tidptr = child_tidptr;
6733         } else {
6734             cpu_clone_regs_parent(env, flags);
6735             if (flags & CLONE_PIDFD) {
6736                 int pid_fd = 0;
6737 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6738                 int pid_child = ret;
6739                 pid_fd = pidfd_open(pid_child, 0);
6740                 if (pid_fd >= 0) {
6741                     fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6742                                            | FD_CLOEXEC);
6743                 } else {
6744                     pid_fd = 0;
6745                 }
6746 #endif
6747                 put_user_u32(pid_fd, parent_tidptr);
6748             }
6749             fork_end(ret);
6750         }
6751         g_assert(!cpu_in_exclusive_context(cpu));
6752     }
6753     return ret;
6754 }
6755 
6756 /* warning: doesn't handle Linux-specific flags... */
6757 static int target_to_host_fcntl_cmd(int cmd)
6758 {
6759     int ret;
6760 
6761     switch(cmd) {
6762     case TARGET_F_DUPFD:
6763     case TARGET_F_GETFD:
6764     case TARGET_F_SETFD:
6765     case TARGET_F_GETFL:
6766     case TARGET_F_SETFL:
6767     case TARGET_F_OFD_GETLK:
6768     case TARGET_F_OFD_SETLK:
6769     case TARGET_F_OFD_SETLKW:
6770         ret = cmd;
6771         break;
6772     case TARGET_F_GETLK:
6773         ret = F_GETLK;
6774         break;
6775     case TARGET_F_SETLK:
6776         ret = F_SETLK;
6777         break;
6778     case TARGET_F_SETLKW:
6779         ret = F_SETLKW;
6780         break;
6781     case TARGET_F_GETOWN:
6782         ret = F_GETOWN;
6783         break;
6784     case TARGET_F_SETOWN:
6785         ret = F_SETOWN;
6786         break;
6787     case TARGET_F_GETSIG:
6788         ret = F_GETSIG;
6789         break;
6790     case TARGET_F_SETSIG:
6791         ret = F_SETSIG;
6792         break;
6793 #if TARGET_ABI_BITS == 32
6794     case TARGET_F_GETLK64:
6795         ret = F_GETLK;
6796         break;
6797     case TARGET_F_SETLK64:
6798         ret = F_SETLK;
6799         break;
6800     case TARGET_F_SETLKW64:
6801         ret = F_SETLKW;
6802         break;
6803 #endif
6804     case TARGET_F_SETLEASE:
6805         ret = F_SETLEASE;
6806         break;
6807     case TARGET_F_GETLEASE:
6808         ret = F_GETLEASE;
6809         break;
6810 #ifdef F_DUPFD_CLOEXEC
6811     case TARGET_F_DUPFD_CLOEXEC:
6812         ret = F_DUPFD_CLOEXEC;
6813         break;
6814 #endif
6815     case TARGET_F_NOTIFY:
6816         ret = F_NOTIFY;
6817         break;
6818 #ifdef F_GETOWN_EX
6819     case TARGET_F_GETOWN_EX:
6820         ret = F_GETOWN_EX;
6821         break;
6822 #endif
6823 #ifdef F_SETOWN_EX
6824     case TARGET_F_SETOWN_EX:
6825         ret = F_SETOWN_EX;
6826         break;
6827 #endif
6828 #ifdef F_SETPIPE_SZ
6829     case TARGET_F_SETPIPE_SZ:
6830         ret = F_SETPIPE_SZ;
6831         break;
6832     case TARGET_F_GETPIPE_SZ:
6833         ret = F_GETPIPE_SZ;
6834         break;
6835 #endif
6836 #ifdef F_ADD_SEALS
6837     case TARGET_F_ADD_SEALS:
6838         ret = F_ADD_SEALS;
6839         break;
6840     case TARGET_F_GET_SEALS:
6841         ret = F_GET_SEALS;
6842         break;
6843 #endif
6844     default:
6845         ret = -TARGET_EINVAL;
6846         break;
6847     }
6848 
6849 #if defined(__powerpc64__)
6850     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6851      * that the kernel does not support. The glibc fcntl wrapper adjusts
6852      * them to 5, 6 and 7 before making the syscall(). Since we make the
6853      * syscall directly, adjust to what the kernel supports.
6854      */
6855     if (ret >= F_GETLK && ret <= F_SETLKW) {
6856         ret -= F_GETLK - 5;
6857     }
6858 #endif
6859 
6860     return ret;
6861 }
6862 
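/* FLOCK_TRANSTBL expands to a switch over the lock type; the direction of
 * the conversion is chosen by defining TRANSTBL_CONVERT before expansion.
 */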
6863 #define FLOCK_TRANSTBL \
6864     switch (type) { \
6865     TRANSTBL_CONVERT(F_RDLCK); \
6866     TRANSTBL_CONVERT(F_WRLCK); \
6867     TRANSTBL_CONVERT(F_UNLCK); \
6868     }
6869 
6870 static int target_to_host_flock(int type)
6871 {
6872 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6873     FLOCK_TRANSTBL
6874 #undef  TRANSTBL_CONVERT
6875     return -TARGET_EINVAL;
6876 }
6877 
6878 static int host_to_target_flock(int type)
6879 {
6880 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6881     FLOCK_TRANSTBL
6882 #undef  TRANSTBL_CONVERT
6883     /* If we don't know how to convert the value coming
6884      * from the host, copy it to the target field as-is.
6885      */
6886     return type;
6887 }
6888 
6889 static inline abi_long copy_from_user_flock(struct flock *fl,
6890                                             abi_ulong target_flock_addr)
6891 {
6892     struct target_flock *target_fl;
6893     int l_type;
6894 
6895     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6896         return -TARGET_EFAULT;
6897     }
6898 
6899     __get_user(l_type, &target_fl->l_type);
6900     l_type = target_to_host_flock(l_type);
6901     if (l_type < 0) {
6902         return l_type;
6903     }
6904     fl->l_type = l_type;
6905     __get_user(fl->l_whence, &target_fl->l_whence);
6906     __get_user(fl->l_start, &target_fl->l_start);
6907     __get_user(fl->l_len, &target_fl->l_len);
6908     __get_user(fl->l_pid, &target_fl->l_pid);
6909     unlock_user_struct(target_fl, target_flock_addr, 0);
6910     return 0;
6911 }
6912 
6913 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6914                                           const struct flock *fl)
6915 {
6916     struct target_flock *target_fl;
6917     short l_type;
6918 
6919     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6920         return -TARGET_EFAULT;
6921     }
6922 
6923     l_type = host_to_target_flock(fl->l_type);
6924     __put_user(l_type, &target_fl->l_type);
6925     __put_user(fl->l_whence, &target_fl->l_whence);
6926     __put_user(fl->l_start, &target_fl->l_start);
6927     __put_user(fl->l_len, &target_fl->l_len);
6928     __put_user(fl->l_pid, &target_fl->l_pid);
6929     unlock_user_struct(target_fl, target_flock_addr, 1);
6930     return 0;
6931 }
6932 
6933 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6934 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6935 
6936 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6937 struct target_oabi_flock64 {
6938     abi_short l_type;
6939     abi_short l_whence;
6940     abi_llong l_start;
6941     abi_llong l_len;
6942     abi_int   l_pid;
6943 } QEMU_PACKED;
6944 
6945 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6946                                                    abi_ulong target_flock_addr)
6947 {
6948     struct target_oabi_flock64 *target_fl;
6949     int l_type;
6950 
6951     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6952         return -TARGET_EFAULT;
6953     }
6954 
6955     __get_user(l_type, &target_fl->l_type);
6956     l_type = target_to_host_flock(l_type);
6957     if (l_type < 0) {
6958         return l_type;
6959     }
6960     fl->l_type = l_type;
6961     __get_user(fl->l_whence, &target_fl->l_whence);
6962     __get_user(fl->l_start, &target_fl->l_start);
6963     __get_user(fl->l_len, &target_fl->l_len);
6964     __get_user(fl->l_pid, &target_fl->l_pid);
6965     unlock_user_struct(target_fl, target_flock_addr, 0);
6966     return 0;
6967 }
6968 
6969 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6970                                                  const struct flock *fl)
6971 {
6972     struct target_oabi_flock64 *target_fl;
6973     short l_type;
6974 
6975     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6976         return -TARGET_EFAULT;
6977     }
6978 
6979     l_type = host_to_target_flock(fl->l_type);
6980     __put_user(l_type, &target_fl->l_type);
6981     __put_user(fl->l_whence, &target_fl->l_whence);
6982     __put_user(fl->l_start, &target_fl->l_start);
6983     __put_user(fl->l_len, &target_fl->l_len);
6984     __put_user(fl->l_pid, &target_fl->l_pid);
6985     unlock_user_struct(target_fl, target_flock_addr, 1);
6986     return 0;
6987 }
6988 #endif
6989 
6990 static inline abi_long copy_from_user_flock64(struct flock *fl,
6991                                               abi_ulong target_flock_addr)
6992 {
6993     struct target_flock64 *target_fl;
6994     int l_type;
6995 
6996     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6997         return -TARGET_EFAULT;
6998     }
6999 
7000     __get_user(l_type, &target_fl->l_type);
7001     l_type = target_to_host_flock(l_type);
7002     if (l_type < 0) {
7003         return l_type;
7004     }
7005     fl->l_type = l_type;
7006     __get_user(fl->l_whence, &target_fl->l_whence);
7007     __get_user(fl->l_start, &target_fl->l_start);
7008     __get_user(fl->l_len, &target_fl->l_len);
7009     __get_user(fl->l_pid, &target_fl->l_pid);
7010     unlock_user_struct(target_fl, target_flock_addr, 0);
7011     return 0;
7012 }
7013 
7014 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7015                                             const struct flock *fl)
7016 {
7017     struct target_flock64 *target_fl;
7018     short l_type;
7019 
7020     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7021         return -TARGET_EFAULT;
7022     }
7023 
7024     l_type = host_to_target_flock(fl->l_type);
7025     __put_user(l_type, &target_fl->l_type);
7026     __put_user(fl->l_whence, &target_fl->l_whence);
7027     __put_user(fl->l_start, &target_fl->l_start);
7028     __put_user(fl->l_len, &target_fl->l_len);
7029     __put_user(fl->l_pid, &target_fl->l_pid);
7030     unlock_user_struct(target_fl, target_flock_addr, 1);
7031     return 0;
7032 }
7033 
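/* Common fcntl()/fcntl64() handling: struct flock and f_owner_ex arguments
 * are converted between guest and host layouts, simple integer commands are
 * mostly passed through.
 */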
7034 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7035 {
7036     struct flock fl;
7037 #ifdef F_GETOWN_EX
7038     struct f_owner_ex fox;
7039     struct target_f_owner_ex *target_fox;
7040 #endif
7041     abi_long ret;
7042     int host_cmd = target_to_host_fcntl_cmd(cmd);
7043 
7044     if (host_cmd == -TARGET_EINVAL)
7045         return host_cmd;
7046 
7047     switch(cmd) {
7048     case TARGET_F_GETLK:
7049         ret = copy_from_user_flock(&fl, arg);
7050         if (ret) {
7051             return ret;
7052         }
7053         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7054         if (ret == 0) {
7055             ret = copy_to_user_flock(arg, &fl);
7056         }
7057         break;
7058 
7059     case TARGET_F_SETLK:
7060     case TARGET_F_SETLKW:
7061         ret = copy_from_user_flock(&fl, arg);
7062         if (ret) {
7063             return ret;
7064         }
7065         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7066         break;
7067 
7068     case TARGET_F_GETLK64:
7069     case TARGET_F_OFD_GETLK:
7070         ret = copy_from_user_flock64(&fl, arg);
7071         if (ret) {
7072             return ret;
7073         }
7074         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7075         if (ret == 0) {
7076             ret = copy_to_user_flock64(arg, &fl);
7077         }
7078         break;
7079     case TARGET_F_SETLK64:
7080     case TARGET_F_SETLKW64:
7081     case TARGET_F_OFD_SETLK:
7082     case TARGET_F_OFD_SETLKW:
7083         ret = copy_from_user_flock64(&fl, arg);
7084         if (ret) {
7085             return ret;
7086         }
7087         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7088         break;
7089 
7090     case TARGET_F_GETFL:
7091         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7092         if (ret >= 0) {
7093             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7094             /* tell 32-bit guests that the file uses O_LARGEFILE on 64-bit hosts: */
7095             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7096                 ret |= TARGET_O_LARGEFILE;
7097             }
7098         }
7099         break;
7100 
7101     case TARGET_F_SETFL:
7102         ret = get_errno(safe_fcntl(fd, host_cmd,
7103                                    target_to_host_bitmask(arg,
7104                                                           fcntl_flags_tbl)));
7105         break;
7106 
7107 #ifdef F_GETOWN_EX
7108     case TARGET_F_GETOWN_EX:
7109         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7110         if (ret >= 0) {
7111             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7112                 return -TARGET_EFAULT;
7113             target_fox->type = tswap32(fox.type);
7114             target_fox->pid = tswap32(fox.pid);
7115             unlock_user_struct(target_fox, arg, 1);
7116         }
7117         break;
7118 #endif
7119 
7120 #ifdef F_SETOWN_EX
7121     case TARGET_F_SETOWN_EX:
7122         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7123             return -TARGET_EFAULT;
7124         fox.type = tswap32(target_fox->type);
7125         fox.pid = tswap32(target_fox->pid);
7126         unlock_user_struct(target_fox, arg, 0);
7127         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7128         break;
7129 #endif
7130 
7131     case TARGET_F_SETSIG:
7132         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7133         break;
7134 
7135     case TARGET_F_GETSIG:
7136         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7137         break;
7138 
7139     case TARGET_F_SETOWN:
7140     case TARGET_F_GETOWN:
7141     case TARGET_F_SETLEASE:
7142     case TARGET_F_GETLEASE:
7143     case TARGET_F_SETPIPE_SZ:
7144     case TARGET_F_GETPIPE_SZ:
7145     case TARGET_F_ADD_SEALS:
7146     case TARGET_F_GET_SEALS:
7147         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7148         break;
7149 
7150     default:
7151         ret = get_errno(safe_fcntl(fd, cmd, arg));
7152         break;
7153     }
7154     return ret;
7155 }
7156 
7157 #ifdef USE_UID16
7158 
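/* For 16-bit UID/GID ABIs: IDs that do not fit in 16 bits are reported as
 * 65534, and a 16-bit -1 is sign-extended back to -1.
 */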
7159 static inline int high2lowuid(int uid)
7160 {
7161     if (uid > 65535)
7162         return 65534;
7163     else
7164         return uid;
7165 }
7166 
7167 static inline int high2lowgid(int gid)
7168 {
7169     if (gid > 65535)
7170         return 65534;
7171     else
7172         return gid;
7173 }
7174 
7175 static inline int low2highuid(int uid)
7176 {
7177     if ((int16_t)uid == -1)
7178         return -1;
7179     else
7180         return uid;
7181 }
7182 
7183 static inline int low2highgid(int gid)
7184 {
7185     if ((int16_t)gid == -1)
7186         return -1;
7187     else
7188         return gid;
7189 }
7190 static inline int tswapid(int id)
7191 {
7192     return tswap16(id);
7193 }
7194 
7195 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7196 
7197 #else /* !USE_UID16 */
7198 static inline int high2lowuid(int uid)
7199 {
7200     return uid;
7201 }
7202 static inline int high2lowgid(int gid)
7203 {
7204     return gid;
7205 }
7206 static inline int low2highuid(int uid)
7207 {
7208     return uid;
7209 }
7210 static inline int low2highgid(int gid)
7211 {
7212     return gid;
7213 }
7214 static inline int tswapid(int id)
7215 {
7216     return tswap32(id);
7217 }
7218 
7219 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7220 
7221 #endif /* USE_UID16 */
7222 
7223 /* We must do direct syscalls for setting UID/GID, because we want to
7224  * implement the Linux system call semantics of "change only for this thread",
7225  * not the libc/POSIX semantics of "change for all threads in process".
7226  * (See http://ewontfix.com/17/ for more details.)
7227  * We use the 32-bit version of the syscalls if present; if it is not
7228  * then either the host architecture supports 32-bit UIDs natively with
7229  * the standard syscall, or the 16-bit UID is the best we can do.
7230  */
7231 #ifdef __NR_setuid32
7232 #define __NR_sys_setuid __NR_setuid32
7233 #else
7234 #define __NR_sys_setuid __NR_setuid
7235 #endif
7236 #ifdef __NR_setgid32
7237 #define __NR_sys_setgid __NR_setgid32
7238 #else
7239 #define __NR_sys_setgid __NR_setgid
7240 #endif
7241 #ifdef __NR_setresuid32
7242 #define __NR_sys_setresuid __NR_setresuid32
7243 #else
7244 #define __NR_sys_setresuid __NR_setresuid
7245 #endif
7246 #ifdef __NR_setresgid32
7247 #define __NR_sys_setresgid __NR_setresgid32
7248 #else
7249 #define __NR_sys_setresgid __NR_setresgid
7250 #endif
7251 #ifdef __NR_setgroups32
7252 #define __NR_sys_setgroups __NR_setgroups32
7253 #else
7254 #define __NR_sys_setgroups __NR_setgroups
7255 #endif
7256 #ifdef __NR_setreuid32
7257 #define __NR_sys_setreuid __NR_setreuid32
7258 #else
7259 #define __NR_sys_setreuid __NR_setreuid
7260 #endif
7261 #ifdef __NR_setregid32
7262 #define __NR_sys_setregid __NR_setregid32
7263 #else
7264 #define __NR_sys_setregid __NR_setregid
7265 #endif
7266 
7267 _syscall1(int, sys_setuid, uid_t, uid)
7268 _syscall1(int, sys_setgid, gid_t, gid)
7269 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7270 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7271 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7272 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7273 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7274 
7275 void syscall_init(void)
7276 {
7277     IOCTLEntry *ie;
7278     const argtype *arg_type;
7279     int size;
7280 
7281     thunk_init(STRUCT_MAX);
7282 
7283 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7284 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7285 #include "syscall_types.h"
7286 #undef STRUCT
7287 #undef STRUCT_SPECIAL
7288 
7289     /* We patch the ioctl size if necessary. We rely on the fact that
7290        no ioctl has all bits set to '1' in the size field */
7291     ie = ioctl_entries;
7292     while (ie->target_cmd != 0) {
7293         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7294             TARGET_IOC_SIZEMASK) {
7295             arg_type = ie->arg_type;
7296             if (arg_type[0] != TYPE_PTR) {
7297                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7298                         ie->target_cmd);
7299                 exit(1);
7300             }
7301             arg_type++;
7302             size = thunk_type_size(arg_type, 0);
7303             ie->target_cmd = (ie->target_cmd &
7304                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7305                 (size << TARGET_IOC_SIZESHIFT);
7306         }
7307 
7308         /* automatic consistency check if same arch */
7309 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7310     (defined(__x86_64__) && defined(TARGET_X86_64))
7311         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7312             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7313                     ie->name, ie->target_cmd, ie->host_cmd);
7314         }
7315 #endif
7316         ie++;
7317     }
7318 }
7319 
7320 #ifdef TARGET_NR_truncate64
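/* truncate64/ftruncate64 receive the 64-bit length in a register pair; on
 * ABIs that align such pairs the arguments are shifted up by one register.
 */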
7321 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7322                                          abi_long arg2,
7323                                          abi_long arg3,
7324                                          abi_long arg4)
7325 {
7326     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7327         arg2 = arg3;
7328         arg3 = arg4;
7329     }
7330     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7331 }
7332 #endif
7333 
7334 #ifdef TARGET_NR_ftruncate64
7335 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7336                                           abi_long arg2,
7337                                           abi_long arg3,
7338                                           abi_long arg4)
7339 {
7340     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7341         arg2 = arg3;
7342         arg3 = arg4;
7343     }
7344     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7345 }
7346 #endif
7347 
7348 #if defined(TARGET_NR_timer_settime) || \
7349     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7350 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7351                                                  abi_ulong target_addr)
7352 {
7353     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7354                                 offsetof(struct target_itimerspec,
7355                                          it_interval)) ||
7356         target_to_host_timespec(&host_its->it_value, target_addr +
7357                                 offsetof(struct target_itimerspec,
7358                                          it_value))) {
7359         return -TARGET_EFAULT;
7360     }
7361 
7362     return 0;
7363 }
7364 #endif
7365 
7366 #if defined(TARGET_NR_timer_settime64) || \
7367     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7368 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7369                                                    abi_ulong target_addr)
7370 {
7371     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7372                                   offsetof(struct target__kernel_itimerspec,
7373                                            it_interval)) ||
7374         target_to_host_timespec64(&host_its->it_value, target_addr +
7375                                   offsetof(struct target__kernel_itimerspec,
7376                                            it_value))) {
7377         return -TARGET_EFAULT;
7378     }
7379 
7380     return 0;
7381 }
7382 #endif
7383 
7384 #if ((defined(TARGET_NR_timerfd_gettime) || \
7385       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7386       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7387 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7388                                                  struct itimerspec *host_its)
7389 {
7390     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7391                                                        it_interval),
7392                                 &host_its->it_interval) ||
7393         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7394                                                        it_value),
7395                                 &host_its->it_value)) {
7396         return -TARGET_EFAULT;
7397     }
7398     return 0;
7399 }
7400 #endif
7401 
7402 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7403       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7404       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7405 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7406                                                    struct itimerspec *host_its)
7407 {
7408     if (host_to_target_timespec64(target_addr +
7409                                   offsetof(struct target__kernel_itimerspec,
7410                                            it_interval),
7411                                   &host_its->it_interval) ||
7412         host_to_target_timespec64(target_addr +
7413                                   offsetof(struct target__kernel_itimerspec,
7414                                            it_value),
7415                                   &host_its->it_value)) {
7416         return -TARGET_EFAULT;
7417     }
7418     return 0;
7419 }
7420 #endif
7421 
7422 #if defined(TARGET_NR_adjtimex) || \
7423     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7424 static inline abi_long target_to_host_timex(struct timex *host_tx,
7425                                             abi_long target_addr)
7426 {
7427     struct target_timex *target_tx;
7428 
7429     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7430         return -TARGET_EFAULT;
7431     }
7432 
7433     __get_user(host_tx->modes, &target_tx->modes);
7434     __get_user(host_tx->offset, &target_tx->offset);
7435     __get_user(host_tx->freq, &target_tx->freq);
7436     __get_user(host_tx->maxerror, &target_tx->maxerror);
7437     __get_user(host_tx->esterror, &target_tx->esterror);
7438     __get_user(host_tx->status, &target_tx->status);
7439     __get_user(host_tx->constant, &target_tx->constant);
7440     __get_user(host_tx->precision, &target_tx->precision);
7441     __get_user(host_tx->tolerance, &target_tx->tolerance);
7442     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7443     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7444     __get_user(host_tx->tick, &target_tx->tick);
7445     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7446     __get_user(host_tx->jitter, &target_tx->jitter);
7447     __get_user(host_tx->shift, &target_tx->shift);
7448     __get_user(host_tx->stabil, &target_tx->stabil);
7449     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7450     __get_user(host_tx->calcnt, &target_tx->calcnt);
7451     __get_user(host_tx->errcnt, &target_tx->errcnt);
7452     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7453     __get_user(host_tx->tai, &target_tx->tai);
7454 
7455     unlock_user_struct(target_tx, target_addr, 0);
7456     return 0;
7457 }
7458 
7459 static inline abi_long host_to_target_timex(abi_long target_addr,
7460                                             struct timex *host_tx)
7461 {
7462     struct target_timex *target_tx;
7463 
7464     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7465         return -TARGET_EFAULT;
7466     }
7467 
7468     __put_user(host_tx->modes, &target_tx->modes);
7469     __put_user(host_tx->offset, &target_tx->offset);
7470     __put_user(host_tx->freq, &target_tx->freq);
7471     __put_user(host_tx->maxerror, &target_tx->maxerror);
7472     __put_user(host_tx->esterror, &target_tx->esterror);
7473     __put_user(host_tx->status, &target_tx->status);
7474     __put_user(host_tx->constant, &target_tx->constant);
7475     __put_user(host_tx->precision, &target_tx->precision);
7476     __put_user(host_tx->tolerance, &target_tx->tolerance);
7477     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7478     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7479     __put_user(host_tx->tick, &target_tx->tick);
7480     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7481     __put_user(host_tx->jitter, &target_tx->jitter);
7482     __put_user(host_tx->shift, &target_tx->shift);
7483     __put_user(host_tx->stabil, &target_tx->stabil);
7484     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7485     __put_user(host_tx->calcnt, &target_tx->calcnt);
7486     __put_user(host_tx->errcnt, &target_tx->errcnt);
7487     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7488     __put_user(host_tx->tai, &target_tx->tai);
7489 
7490     unlock_user_struct(target_tx, target_addr, 1);
7491     return 0;
7492 }
7493 #endif
7494 
7495 
7496 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7497 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7498                                               abi_long target_addr)
7499 {
7500     struct target__kernel_timex *target_tx;
7501 
7502     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7503                                  offsetof(struct target__kernel_timex,
7504                                           time))) {
7505         return -TARGET_EFAULT;
7506     }
7507 
7508     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7509         return -TARGET_EFAULT;
7510     }
7511 
7512     __get_user(host_tx->modes, &target_tx->modes);
7513     __get_user(host_tx->offset, &target_tx->offset);
7514     __get_user(host_tx->freq, &target_tx->freq);
7515     __get_user(host_tx->maxerror, &target_tx->maxerror);
7516     __get_user(host_tx->esterror, &target_tx->esterror);
7517     __get_user(host_tx->status, &target_tx->status);
7518     __get_user(host_tx->constant, &target_tx->constant);
7519     __get_user(host_tx->precision, &target_tx->precision);
7520     __get_user(host_tx->tolerance, &target_tx->tolerance);
7521     __get_user(host_tx->tick, &target_tx->tick);
7522     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7523     __get_user(host_tx->jitter, &target_tx->jitter);
7524     __get_user(host_tx->shift, &target_tx->shift);
7525     __get_user(host_tx->stabil, &target_tx->stabil);
7526     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7527     __get_user(host_tx->calcnt, &target_tx->calcnt);
7528     __get_user(host_tx->errcnt, &target_tx->errcnt);
7529     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7530     __get_user(host_tx->tai, &target_tx->tai);
7531 
7532     unlock_user_struct(target_tx, target_addr, 0);
7533     return 0;
7534 }
7535 
7536 static inline abi_long host_to_target_timex64(abi_long target_addr,
7537                                               struct timex *host_tx)
7538 {
7539     struct target__kernel_timex *target_tx;
7540 
7541     if (copy_to_user_timeval64(target_addr +
7542                                offsetof(struct target__kernel_timex, time),
7543                                &host_tx->time)) {
7544         return -TARGET_EFAULT;
7545     }
7546 
7547     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7548         return -TARGET_EFAULT;
7549     }
7550 
7551     __put_user(host_tx->modes, &target_tx->modes);
7552     __put_user(host_tx->offset, &target_tx->offset);
7553     __put_user(host_tx->freq, &target_tx->freq);
7554     __put_user(host_tx->maxerror, &target_tx->maxerror);
7555     __put_user(host_tx->esterror, &target_tx->esterror);
7556     __put_user(host_tx->status, &target_tx->status);
7557     __put_user(host_tx->constant, &target_tx->constant);
7558     __put_user(host_tx->precision, &target_tx->precision);
7559     __put_user(host_tx->tolerance, &target_tx->tolerance);
7560     __put_user(host_tx->tick, &target_tx->tick);
7561     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7562     __put_user(host_tx->jitter, &target_tx->jitter);
7563     __put_user(host_tx->shift, &target_tx->shift);
7564     __put_user(host_tx->stabil, &target_tx->stabil);
7565     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7566     __put_user(host_tx->calcnt, &target_tx->calcnt);
7567     __put_user(host_tx->errcnt, &target_tx->errcnt);
7568     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7569     __put_user(host_tx->tai, &target_tx->tai);
7570 
7571     unlock_user_struct(target_tx, target_addr, 1);
7572     return 0;
7573 }
7574 #endif
7575 
7576 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7577 #define sigev_notify_thread_id _sigev_un._tid
7578 #endif
7579 
7580 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7581                                                abi_ulong target_addr)
7582 {
7583     struct target_sigevent *target_sevp;
7584 
7585     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7586         return -TARGET_EFAULT;
7587     }
7588 
7589     /* This union is awkward on 64 bit systems because it has a 32 bit
7590      * integer and a pointer in it; we follow the conversion approach
7591      * used for handling sigval types in signal.c so the guest should get
7592      * the correct value back even if we did a 64 bit byteswap and it's
7593      * using the 32 bit integer.
7594      */
7595     host_sevp->sigev_value.sival_ptr =
7596         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7597     host_sevp->sigev_signo =
7598         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7599     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7600     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7601 
7602     unlock_user_struct(target_sevp, target_addr, 1);
7603     return 0;
7604 }
7605 
7606 #if defined(TARGET_NR_mlockall)
7607 static inline int target_to_host_mlockall_arg(int arg)
7608 {
7609     int result = 0;
7610 
7611     if (arg & TARGET_MCL_CURRENT) {
7612         result |= MCL_CURRENT;
7613     }
7614     if (arg & TARGET_MCL_FUTURE) {
7615         result |= MCL_FUTURE;
7616     }
7617 #ifdef MCL_ONFAULT
7618     if (arg & TARGET_MCL_ONFAULT) {
7619         result |= MCL_ONFAULT;
7620     }
7621 #endif
7622 
7623     return result;
7624 }
7625 #endif
7626 
7627 static inline int target_to_host_msync_arg(abi_long arg)
7628 {
7629     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7630            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7631            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7632            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7633 }
7634 
7635 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7636      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7637      defined(TARGET_NR_newfstatat))
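/* Convert a host struct stat to the guest's stat64 layout; 32-bit Arm EABI
 * uses its own target_eabi_stat64 variant.
 */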
7638 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7639                                              abi_ulong target_addr,
7640                                              struct stat *host_st)
7641 {
7642 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7643     if (cpu_env->eabi) {
7644         struct target_eabi_stat64 *target_st;
7645 
7646         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7647             return -TARGET_EFAULT;
7648         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7649         __put_user(host_st->st_dev, &target_st->st_dev);
7650         __put_user(host_st->st_ino, &target_st->st_ino);
7651 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7652         __put_user(host_st->st_ino, &target_st->__st_ino);
7653 #endif
7654         __put_user(host_st->st_mode, &target_st->st_mode);
7655         __put_user(host_st->st_nlink, &target_st->st_nlink);
7656         __put_user(host_st->st_uid, &target_st->st_uid);
7657         __put_user(host_st->st_gid, &target_st->st_gid);
7658         __put_user(host_st->st_rdev, &target_st->st_rdev);
7659         __put_user(host_st->st_size, &target_st->st_size);
7660         __put_user(host_st->st_blksize, &target_st->st_blksize);
7661         __put_user(host_st->st_blocks, &target_st->st_blocks);
7662         __put_user(host_st->st_atime, &target_st->target_st_atime);
7663         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7664         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7665 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7666         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7667         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7668         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7669 #endif
7670         unlock_user_struct(target_st, target_addr, 1);
7671     } else
7672 #endif
7673     {
7674 #if defined(TARGET_HAS_STRUCT_STAT64)
7675         struct target_stat64 *target_st;
7676 #else
7677         struct target_stat *target_st;
7678 #endif
7679 
7680         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7681             return -TARGET_EFAULT;
7682         memset(target_st, 0, sizeof(*target_st));
7683         __put_user(host_st->st_dev, &target_st->st_dev);
7684         __put_user(host_st->st_ino, &target_st->st_ino);
7685 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7686         __put_user(host_st->st_ino, &target_st->__st_ino);
7687 #endif
7688         __put_user(host_st->st_mode, &target_st->st_mode);
7689         __put_user(host_st->st_nlink, &target_st->st_nlink);
7690         __put_user(host_st->st_uid, &target_st->st_uid);
7691         __put_user(host_st->st_gid, &target_st->st_gid);
7692         __put_user(host_st->st_rdev, &target_st->st_rdev);
7693         /* XXX: better use of kernel struct */
7694         __put_user(host_st->st_size, &target_st->st_size);
7695         __put_user(host_st->st_blksize, &target_st->st_blksize);
7696         __put_user(host_st->st_blocks, &target_st->st_blocks);
7697         __put_user(host_st->st_atime, &target_st->target_st_atime);
7698         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7699         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7700 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7701         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7702         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7703         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7704 #endif
7705         unlock_user_struct(target_st, target_addr, 1);
7706     }
7707 
7708     return 0;
7709 }
7710 #endif
7711 
7712 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7713 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7714                                             abi_ulong target_addr)
7715 {
7716     struct target_statx *target_stx;
7717 
7718     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7719         return -TARGET_EFAULT;
7720     }
7721     memset(target_stx, 0, sizeof(*target_stx));
7722 
7723     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7724     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7725     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7726     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7727     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7728     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7729     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7730     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7731     __put_user(host_stx->stx_size, &target_stx->stx_size);
7732     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7733     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7734     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7735     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7736     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7737     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7738     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7739     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7740     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7741     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7742     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7743     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7744     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7745     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7746 
7747     unlock_user_struct(target_stx, target_addr, 1);
7748 
7749     return 0;
7750 }
7751 #endif
7752 
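/*
 * Issue a raw futex syscall on behalf of the guest, choosing between
 * __NR_futex and __NR_futex_time64 according to the width of the host's
 * struct timespec.
 */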
7753 static int do_sys_futex(int *uaddr, int op, int val,
7754                          const struct timespec *timeout, int *uaddr2,
7755                          int val3)
7756 {
7757 #if HOST_LONG_BITS == 64
7758 #if defined(__NR_futex)
    /* The host time_t is always 64-bit; no _time64 variant is defined. */
7760     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7761 
7762 #endif
7763 #else /* HOST_LONG_BITS == 64 */
7764 #if defined(__NR_futex_time64)
7765     if (sizeof(timeout->tv_sec) == 8) {
        /* Use the _time64 syscall variant on a 32-bit host. */
7767         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7768     }
7769 #endif
7770 #if defined(__NR_futex)
7771     /* old function on 32bit arch */
7772     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7773 #endif
7774 #endif /* HOST_LONG_BITS == 64 */
7775     g_assert_not_reached();
7776 }
7777 
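/*
 * Like do_sys_futex(), but using the safe_syscall wrappers so that guest
 * signals arriving around the blocking call are handled correctly; the
 * result is converted with get_errno().
 */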
7778 static int do_safe_futex(int *uaddr, int op, int val,
7779                          const struct timespec *timeout, int *uaddr2,
7780                          int val3)
7781 {
7782 #if HOST_LONG_BITS == 64
7783 #if defined(__NR_futex)
7784     /* always a 64-bit time_t, it doesn't define _time64 version  */
7785     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7786 #endif
7787 #else /* HOST_LONG_BITS == 64 */
7788 #if defined(__NR_futex_time64)
7789     if (sizeof(timeout->tv_sec) == 8) {
7790         /* _time64 function on 32bit arch */
7791         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7792                                            val3));
7793     }
7794 #endif
7795 #if defined(__NR_futex)
7796     /* old function on 32bit arch */
7797     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7798 #endif
7799 #endif /* HOST_LONG_BITS == 64 */
7800     return -TARGET_ENOSYS;
7801 }
7802 
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  That said, futexes are probably useless in that situation
   anyway, because guest atomic operations won't work either.  */
7808 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7809 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7810                     int op, int val, target_ulong timeout,
7811                     target_ulong uaddr2, int val3)
7812 {
7813     struct timespec ts, *pts = NULL;
7814     void *haddr2 = NULL;
7815     int base_op;
7816 
7817     /* We assume FUTEX_* constants are the same on both host and target. */
7818 #ifdef FUTEX_CMD_MASK
7819     base_op = op & FUTEX_CMD_MASK;
7820 #else
7821     base_op = op;
7822 #endif
7823     switch (base_op) {
7824     case FUTEX_WAIT:
7825     case FUTEX_WAIT_BITSET:
7826         val = tswap32(val);
7827         break;
7828     case FUTEX_WAIT_REQUEUE_PI:
7829         val = tswap32(val);
7830         haddr2 = g2h(cpu, uaddr2);
7831         break;
7832     case FUTEX_LOCK_PI:
7833     case FUTEX_LOCK_PI2:
7834         break;
7835     case FUTEX_WAKE:
7836     case FUTEX_WAKE_BITSET:
7837     case FUTEX_TRYLOCK_PI:
7838     case FUTEX_UNLOCK_PI:
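        /*
         * These operations take no timeout; clear it so that the
         * timespec conversion below does not try to read a guest
         * timespec from whatever value is in that argument slot.
         */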
7839         timeout = 0;
7840         break;
7841     case FUTEX_FD:
7842         val = target_to_host_signal(val);
7843         timeout = 0;
7844         break;
7845     case FUTEX_CMP_REQUEUE:
7846     case FUTEX_CMP_REQUEUE_PI:
7847         val3 = tswap32(val3);
7848         /* fall through */
7849     case FUTEX_REQUEUE:
7850     case FUTEX_WAKE_OP:
7851         /*
7852          * For these, the 4th argument is not TIMEOUT, but VAL2.
7853          * But the prototype of do_safe_futex takes a pointer, so
7854          * insert casts to satisfy the compiler.  We do not need
7855          * to tswap VAL2 since it's not compared to guest memory.
         */
7857         pts = (struct timespec *)(uintptr_t)timeout;
7858         timeout = 0;
7859         haddr2 = g2h(cpu, uaddr2);
7860         break;
7861     default:
7862         return -TARGET_ENOSYS;
7863     }
7864     if (timeout) {
7865         pts = &ts;
7866         if (time64
7867             ? target_to_host_timespec64(pts, timeout)
7868             : target_to_host_timespec(pts, timeout)) {
7869             return -TARGET_EFAULT;
7870         }
7871     }
7872     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7873 }
7874 #endif
7875 
7876 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
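/*
 * Emulate name_to_handle_at(): the handle is filled in by the host and
 * copied back to the guest mostly opaquely, with only the header fields
 * byte-swapped.
 */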
7877 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7878                                      abi_long handle, abi_long mount_id,
7879                                      abi_long flags)
7880 {
7881     struct file_handle *target_fh;
7882     struct file_handle *fh;
7883     int mid = 0;
7884     abi_long ret;
7885     char *name;
7886     unsigned int size, total_size;
7887 
7888     if (get_user_s32(size, handle)) {
7889         return -TARGET_EFAULT;
7890     }
7891 
7892     name = lock_user_string(pathname);
7893     if (!name) {
7894         return -TARGET_EFAULT;
7895     }
7896 
7897     total_size = sizeof(struct file_handle) + size;
7898     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7899     if (!target_fh) {
7900         unlock_user(name, pathname, 0);
7901         return -TARGET_EFAULT;
7902     }
7903 
7904     fh = g_malloc0(total_size);
7905     fh->handle_bytes = size;
7906 
7907     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7908     unlock_user(name, pathname, 0);
7909 
7910     /* man name_to_handle_at(2):
7911      * Other than the use of the handle_bytes field, the caller should treat
7912      * the file_handle structure as an opaque data type
7913      */
7914 
7915     memcpy(target_fh, fh, total_size);
7916     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7917     target_fh->handle_type = tswap32(fh->handle_type);
7918     g_free(fh);
7919     unlock_user(target_fh, handle, total_size);
7920 
7921     if (put_user_s32(mid, mount_id)) {
7922         return -TARGET_EFAULT;
7923     }
7924 
7925     return ret;
7926 
7927 }
7928 #endif
7929 
7930 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
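/*
 * Emulate open_by_handle_at(): copy the guest's file_handle, fix up the
 * header fields for the host, and open it with translated flags.
 */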
7931 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7932                                      abi_long flags)
7933 {
7934     struct file_handle *target_fh;
7935     struct file_handle *fh;
7936     unsigned int size, total_size;
7937     abi_long ret;
7938 
7939     if (get_user_s32(size, handle)) {
7940         return -TARGET_EFAULT;
7941     }
7942 
7943     total_size = sizeof(struct file_handle) + size;
7944     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7945     if (!target_fh) {
7946         return -TARGET_EFAULT;
7947     }
7948 
7949     fh = g_memdup(target_fh, total_size);
7950     fh->handle_bytes = size;
7951     fh->handle_type = tswap32(target_fh->handle_type);
7952 
7953     ret = get_errno(open_by_handle_at(mount_fd, fh,
7954                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7955 
7956     g_free(fh);
7957 
7958     unlock_user(target_fh, handle, total_size);
7959 
7960     return ret;
7961 }
7962 #endif
7963 
7964 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7965 
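/*
 * Common helper for signalfd and signalfd4: convert the guest signal
 * mask and flags, and register a translator so that the signalfd data
 * read back from the descriptor can be converted for the guest.
 */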
7966 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7967 {
7968     int host_flags;
7969     target_sigset_t *target_mask;
7970     sigset_t host_mask;
7971     abi_long ret;
7972 
7973     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7974         return -TARGET_EINVAL;
7975     }
7976     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7977         return -TARGET_EFAULT;
7978     }
7979 
7980     target_to_host_sigset(&host_mask, target_mask);
7981 
7982     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7983 
7984     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7985     if (ret >= 0) {
7986         fd_trans_register(ret, &target_signalfd_trans);
7987     }
7988 
7989     unlock_user_struct(target_mask, mask, 0);
7990 
7991     return ret;
7992 }
7993 #endif
7994 
7995 /* Map host to target signal numbers for the wait family of syscalls.
7996    Assume all other status bits are the same.  */
7997 int host_to_target_waitstatus(int status)
7998 {
7999     if (WIFSIGNALED(status)) {
8000         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8001     }
8002     if (WIFSTOPPED(status)) {
8003         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8004                | (status & 0xff);
8005     }
8006     return status;
8007 }
8008 
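/*
 * Synthesize /proc/self/cmdline from the argv saved when the guest was
 * loaded; each argument is written with its terminating NUL, as the
 * kernel does.
 */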
8009 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8010 {
8011     CPUState *cpu = env_cpu(cpu_env);
8012     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8013     int i;
8014 
8015     for (i = 0; i < bprm->argc; i++) {
8016         size_t len = strlen(bprm->argv[i]) + 1;
8017 
8018         if (write(fd, bprm->argv[i], len) != len) {
8019             return -1;
8020         }
8021     }
8022 
8023     return 0;
8024 }
8025 
8026 struct open_self_maps_data {
8027     TaskState *ts;
8028     IntervalTreeRoot *host_maps;
8029     int fd;
8030     bool smaps;
8031 };
8032 
8033 /*
8034  * Subroutine to output one line of /proc/self/maps,
8035  * or one region of /proc/self/smaps.
8036  */
8037 
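/*
 * On HPPA the guest stack grows upwards, so the stack mapping is
 * identified by comparing its end address against the stack limit
 * rather than its start address.
 */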
8038 #ifdef TARGET_HPPA
8039 # define test_stack(S, E, L)  (E == L)
8040 #else
8041 # define test_stack(S, E, L)  (S == L)
8042 #endif
8043 
8044 static void open_self_maps_4(const struct open_self_maps_data *d,
8045                              const MapInfo *mi, abi_ptr start,
8046                              abi_ptr end, unsigned flags)
8047 {
8048     const struct image_info *info = d->ts->info;
8049     const char *path = mi->path;
8050     uint64_t offset;
8051     int fd = d->fd;
8052     int count;
8053 
8054     if (test_stack(start, end, info->stack_limit)) {
8055         path = "[stack]";
8056     } else if (start == info->brk) {
8057         path = "[heap]";
8058     } else if (start == info->vdso) {
8059         path = "[vdso]";
8060 #ifdef TARGET_X86_64
8061     } else if (start == TARGET_VSYSCALL_PAGE) {
8062         path = "[vsyscall]";
8063 #endif
8064     }
8065 
    /* Except for the null device (MAP_ANON), adjust the offset for this fragment. */
8067     offset = mi->offset;
8068     if (mi->dev) {
8069         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8070         offset += hstart - mi->itree.start;
8071     }
8072 
8073     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8074                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8075                     start, end,
8076                     (flags & PAGE_READ) ? 'r' : '-',
8077                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8078                     (flags & PAGE_EXEC) ? 'x' : '-',
8079                     mi->is_priv ? 'p' : 's',
8080                     offset, major(mi->dev), minor(mi->dev),
8081                     (uint64_t)mi->inode);
8082     if (path) {
8083         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8084     } else {
8085         dprintf(fd, "\n");
8086     }
8087 
8088     if (d->smaps) {
8089         unsigned long size = end - start;
8090         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8091         unsigned long size_kb = size >> 10;
8092 
8093         dprintf(fd, "Size:                  %lu kB\n"
8094                 "KernelPageSize:        %lu kB\n"
8095                 "MMUPageSize:           %lu kB\n"
8096                 "Rss:                   0 kB\n"
8097                 "Pss:                   0 kB\n"
8098                 "Pss_Dirty:             0 kB\n"
8099                 "Shared_Clean:          0 kB\n"
8100                 "Shared_Dirty:          0 kB\n"
8101                 "Private_Clean:         0 kB\n"
8102                 "Private_Dirty:         0 kB\n"
8103                 "Referenced:            0 kB\n"
8104                 "Anonymous:             %lu kB\n"
8105                 "LazyFree:              0 kB\n"
8106                 "AnonHugePages:         0 kB\n"
8107                 "ShmemPmdMapped:        0 kB\n"
8108                 "FilePmdMapped:         0 kB\n"
8109                 "Shared_Hugetlb:        0 kB\n"
8110                 "Private_Hugetlb:       0 kB\n"
8111                 "Swap:                  0 kB\n"
8112                 "SwapPss:               0 kB\n"
8113                 "Locked:                0 kB\n"
8114                 "THPeligible:    0\n"
8115                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8116                 size_kb, page_size_kb, page_size_kb,
8117                 (flags & PAGE_ANON ? size_kb : 0),
8118                 (flags & PAGE_READ) ? " rd" : "",
8119                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8120                 (flags & PAGE_EXEC) ? " ex" : "",
8121                 mi->is_priv ? "" : " sh",
8122                 (flags & PAGE_READ) ? " mr" : "",
8123                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8124                 (flags & PAGE_EXEC) ? " me" : "",
8125                 mi->is_priv ? "" : " ms");
8126     }
8127 }
8128 
8129 /*
8130  * Callback for walk_memory_regions, when read_self_maps() fails.
8131  * Proceed without the benefit of host /proc/self/maps cross-check.
8132  */
8133 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8134                             target_ulong guest_end, unsigned long flags)
8135 {
8136     static const MapInfo mi = { .is_priv = true };
8137 
8138     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8139     return 0;
8140 }
8141 
8142 /*
8143  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8144  */
8145 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8146                             target_ulong guest_end, unsigned long flags)
8147 {
8148     const struct open_self_maps_data *d = opaque;
8149     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8150     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8151 
8152 #ifdef TARGET_X86_64
8153     /*
8154      * Because of the extremely high position of the page within the guest
8155      * virtual address space, this is not backed by host memory at all.
8156      * Therefore the loop below would fail.  This is the only instance
8157      * of not having host backing memory.
8158      */
8159     if (guest_start == TARGET_VSYSCALL_PAGE) {
8160         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8161     }
8162 #endif
8163 
8164     while (1) {
8165         IntervalTreeNode *n =
8166             interval_tree_iter_first(d->host_maps, host_start, host_start);
8167         MapInfo *mi = container_of(n, MapInfo, itree);
8168         uintptr_t this_hlast = MIN(host_last, n->last);
8169         target_ulong this_gend = h2g(this_hlast) + 1;
8170 
8171         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8172 
8173         if (this_hlast == host_last) {
8174             return 0;
8175         }
8176         host_start = this_hlast + 1;
8177         guest_start = h2g(host_start);
8178     }
8179 }
8180 
8181 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8182 {
8183     struct open_self_maps_data d = {
8184         .ts = get_task_state(env_cpu(env)),
8185         .fd = fd,
8186         .smaps = smaps
8187     };
8188 
8189     mmap_lock();
8190     d.host_maps = read_self_maps();
8191     if (d.host_maps) {
8192         walk_memory_regions(&d, open_self_maps_2);
8193         free_self_maps(d.host_maps);
8194     } else {
8195         walk_memory_regions(&d, open_self_maps_3);
8196     }
8197     mmap_unlock();
8198     return 0;
8199 }
8200 
8201 static int open_self_maps(CPUArchState *cpu_env, int fd)
8202 {
8203     return open_self_maps_1(cpu_env, fd, false);
8204 }
8205 
8206 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8207 {
8208     return open_self_maps_1(cpu_env, fd, true);
8209 }
8210 
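/*
 * Synthesize /proc/self/stat.  Only a handful of fields (pid, comm,
 * state, ppid, num_threads, starttime, start of stack) carry real
 * values; every other field reads as zero.
 */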
8211 static int open_self_stat(CPUArchState *cpu_env, int fd)
8212 {
8213     CPUState *cpu = env_cpu(cpu_env);
8214     TaskState *ts = get_task_state(cpu);
8215     g_autoptr(GString) buf = g_string_new(NULL);
8216     int i;
8217 
8218     for (i = 0; i < 44; i++) {
8219         if (i == 0) {
8220             /* pid */
8221             g_string_printf(buf, FMT_pid " ", getpid());
8222         } else if (i == 1) {
8223             /* app name */
8224             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8225             bin = bin ? bin + 1 : ts->bprm->argv[0];
8226             g_string_printf(buf, "(%.15s) ", bin);
8227         } else if (i == 2) {
8228             /* task state */
8229             g_string_assign(buf, "R "); /* we are running right now */
8230         } else if (i == 3) {
8231             /* ppid */
8232             g_string_printf(buf, FMT_pid " ", getppid());
8233         } else if (i == 19) {
8234             /* num_threads */
8235             int cpus = 0;
8236             WITH_RCU_READ_LOCK_GUARD() {
8237                 CPUState *cpu_iter;
8238                 CPU_FOREACH(cpu_iter) {
8239                     cpus++;
8240                 }
8241             }
8242             g_string_printf(buf, "%d ", cpus);
8243         } else if (i == 21) {
8244             /* starttime */
8245             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8246         } else if (i == 27) {
8247             /* stack bottom */
8248             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8249         } else {
            /* every other field reads as zero */
8251             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8252         }
8253 
8254         if (write(fd, buf->str, buf->len) != buf->len) {
8255             return -1;
8256         }
8257     }
8258 
8259     return 0;
8260 }
8261 
8262 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8263 {
8264     CPUState *cpu = env_cpu(cpu_env);
8265     TaskState *ts = get_task_state(cpu);
8266     abi_ulong auxv = ts->info->saved_auxv;
8267     abi_ulong len = ts->info->auxv_len;
8268     char *ptr;
8269 
    /*
     * The auxiliary vector is stored on the target process's stack.
     * Read the whole auxv vector and copy it out to the file.
     */
8274     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8275     if (ptr != NULL) {
8276         while (len > 0) {
8277             ssize_t r;
8278             r = write(fd, ptr, len);
8279             if (r <= 0) {
8280                 break;
8281             }
8282             len -= r;
8283             ptr += r;
8284         }
8285         lseek(fd, 0, SEEK_SET);
8286         unlock_user(ptr, auxv, len);
8287     }
8288 
8289     return 0;
8290 }
8291 
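/*
 * Return nonzero if filename names the given procfs entry of this
 * process, either as /proc/self/<entry> or /proc/<our-pid>/<entry>.
 */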
8292 static int is_proc_myself(const char *filename, const char *entry)
8293 {
8294     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8295         filename += strlen("/proc/");
8296         if (!strncmp(filename, "self/", strlen("self/"))) {
8297             filename += strlen("self/");
8298         } else if (*filename >= '1' && *filename <= '9') {
8299             char myself[80];
8300             snprintf(myself, sizeof(myself), "%d/", getpid());
8301             if (!strncmp(filename, myself, strlen(myself))) {
8302                 filename += strlen(myself);
8303             } else {
8304                 return 0;
8305             }
8306         } else {
8307             return 0;
8308         }
8309         if (!strcmp(filename, entry)) {
8310             return 1;
8311         }
8312     }
8313     return 0;
8314 }
8315 
8316 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8317                       const char *fmt, int code)
8318 {
8319     if (logfile) {
8320         CPUState *cs = env_cpu(env);
8321 
8322         fprintf(logfile, fmt, code);
8323         fprintf(logfile, "Failing executable: %s\n", exec_path);
8324         cpu_dump_state(cs, logfile, 0);
8325         open_self_maps(env, fileno(logfile));
8326     }
8327 }
8328 
8329 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8330 {
8331     /* dump to console */
8332     excp_dump_file(stderr, env, fmt, code);
8333 
8334     /* dump to log file */
8335     if (qemu_log_separate()) {
8336         FILE *logfile = qemu_log_trylock();
8337 
8338         excp_dump_file(logfile, env, fmt, code);
8339         qemu_log_unlock(logfile);
8340     }
8341 }
8342 
8343 #include "target_proc.h"
8344 
8345 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8346     defined(HAVE_ARCH_PROC_CPUINFO) || \
8347     defined(HAVE_ARCH_PROC_HARDWARE)
8348 static int is_proc(const char *filename, const char *entry)
8349 {
8350     return strcmp(filename, entry) == 0;
8351 }
8352 #endif
8353 
8354 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
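/*
 * /proc/net/route presents addresses in host byte order; this fake-open
 * handler is only built when host and guest endianness differ, and
 * rewrites each route line with the addresses byte-swapped.
 */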
8355 static int open_net_route(CPUArchState *cpu_env, int fd)
8356 {
8357     FILE *fp;
8358     char *line = NULL;
8359     size_t len = 0;
8360     ssize_t read;
8361 
8362     fp = fopen("/proc/net/route", "r");
8363     if (fp == NULL) {
8364         return -1;
8365     }
8366 
8367     /* read header */
8368 
    read = getline(&line, &len, fp);
    if (read == -1) {
        /* Could not read the header line; produce an empty file. */
        free(line);
        fclose(fp);
        return 0;
    }
    dprintf(fd, "%s", line);
8371 
8372     /* read routes */
8373 
8374     while ((read = getline(&line, &len, fp)) != -1) {
8375         char iface[16];
8376         uint32_t dest, gw, mask;
8377         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8378         int fields;
8379 
8380         fields = sscanf(line,
8381                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8382                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8383                         &mask, &mtu, &window, &irtt);
8384         if (fields != 11) {
8385             continue;
8386         }
8387         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8388                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8389                 metric, tswap32(mask), mtu, window, irtt);
8390     }
8391 
8392     free(line);
8393     fclose(fp);
8394 
8395     return 0;
8396 }
8397 #endif
8398 
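/*
 * Intercept opens of paths that QEMU must emulate (selected /proc
 * entries and /proc/self/exe).  Returns an open fd on success, -1 with
 * errno set on failure, or -2 if the path is not handled here and the
 * caller should perform a normal open.
 */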
8399 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8400                               const char *fname, int flags, mode_t mode,
8401                               int openat2_resolve, bool safe)
8402 {
8403     g_autofree char *proc_name = NULL;
8404     const char *pathname;
8405     struct fake_open {
8406         const char *filename;
8407         int (*fill)(CPUArchState *cpu_env, int fd);
8408         int (*cmp)(const char *s1, const char *s2);
8409     };
8410     const struct fake_open *fake_open;
8411     static const struct fake_open fakes[] = {
8412         { "maps", open_self_maps, is_proc_myself },
8413         { "smaps", open_self_smaps, is_proc_myself },
8414         { "stat", open_self_stat, is_proc_myself },
8415         { "auxv", open_self_auxv, is_proc_myself },
8416         { "cmdline", open_self_cmdline, is_proc_myself },
8417 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8418         { "/proc/net/route", open_net_route, is_proc },
8419 #endif
8420 #if defined(HAVE_ARCH_PROC_CPUINFO)
8421         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8422 #endif
8423 #if defined(HAVE_ARCH_PROC_HARDWARE)
8424         { "/proc/hardware", open_hardware, is_proc },
8425 #endif
8426         { NULL, NULL, NULL }
8427     };
8428 
    /* If this is a file from the /proc/ filesystem, expand it to its full name. */
8430     proc_name = realpath(fname, NULL);
8431     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8432         pathname = proc_name;
8433     } else {
8434         pathname = fname;
8435     }
8436 
8437     if (is_proc_myself(pathname, "exe")) {
8438         /* Honor openat2 resolve flags */
8439         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8440             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8441             errno = ELOOP;
8442             return -1;
8443         }
8444         if (safe) {
8445             return safe_openat(dirfd, exec_path, flags, mode);
8446         } else {
8447             return openat(dirfd, exec_path, flags, mode);
8448         }
8449     }
8450 
8451     for (fake_open = fakes; fake_open->filename; fake_open++) {
8452         if (fake_open->cmp(pathname, fake_open->filename)) {
8453             break;
8454         }
8455     }
8456 
8457     if (fake_open->filename) {
8458         const char *tmpdir;
8459         char filename[PATH_MAX];
8460         int fd, r;
8461 
8462         fd = memfd_create("qemu-open", 0);
8463         if (fd < 0) {
8464             if (errno != ENOSYS) {
8465                 return fd;
8466             }
            /* Fall back to a temporary file to hold the synthesized contents. */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir) {
                tmpdir = "/tmp";
            }
8471             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8472             fd = mkstemp(filename);
8473             if (fd < 0) {
8474                 return fd;
8475             }
8476             unlink(filename);
8477         }
8478 
8479         if ((r = fake_open->fill(cpu_env, fd))) {
8480             int e = errno;
8481             close(fd);
8482             errno = e;
8483             return r;
8484         }
8485         lseek(fd, 0, SEEK_SET);
8486 
8487         return fd;
8488     }
8489 
8490     return -2;
8491 }
8492 
8493 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8494                     int flags, mode_t mode, bool safe)
8495 {
8496     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8497     if (fd > -2) {
8498         return fd;
8499     }
8500 
8501     if (safe) {
8502         return safe_openat(dirfd, path(pathname), flags, mode);
8503     } else {
8504         return openat(dirfd, path(pathname), flags, mode);
8505     }
8506 }
8507 
8508 
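/*
 * Emulate openat2(2): copy in the guest's struct open_how, byte-swap its
 * fields, honour the fake-open paths, and otherwise forward to the host
 * syscall.
 */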
8509 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8510                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8511                       abi_ulong guest_size)
8512 {
8513     struct open_how_ver0 how = {0};
8514     char *pathname;
8515     int ret;
8516 
8517     if (guest_size < sizeof(struct target_open_how_ver0)) {
8518         return -TARGET_EINVAL;
8519     }
8520     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8521     if (ret) {
8522         if (ret == -TARGET_E2BIG) {
8523             qemu_log_mask(LOG_UNIMP,
8524                           "Unimplemented openat2 open_how size: "
8525                           TARGET_ABI_FMT_lu "\n", guest_size);
8526         }
8527         return ret;
8528     }
8529     pathname = lock_user_string(guest_pathname);
8530     if (!pathname) {
8531         return -TARGET_EFAULT;
8532     }
8533 
8534     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8535     how.mode = tswap64(how.mode);
8536     how.resolve = tswap64(how.resolve);
8537     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8538                                 how.resolve, true);
8539     if (fd > -2) {
8540         ret = get_errno(fd);
8541     } else {
8542         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8543                                      sizeof(struct open_how_ver0)));
8544     }
8545 
8546     fd_trans_unregister(ret);
8547     unlock_user(pathname, guest_pathname, 0);
8548     return ret;
8549 }
8550 
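/*
 * readlink()/readlinkat() helper: the magic /proc/self/exe entry reports
 * the guest executable's real path instead of the QEMU binary.
 */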
8551 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8552 {
8553     ssize_t ret;
8554 
8555     if (!pathname || !buf) {
8556         errno = EFAULT;
8557         return -1;
8558     }
8559 
8560     if (!bufsiz) {
8561         /* Short circuit this for the magic exe check. */
8562         errno = EINVAL;
8563         return -1;
8564     }
8565 
8566     if (is_proc_myself((const char *)pathname, "exe")) {
8567         /*
8568          * Don't worry about sign mismatch as earlier mapping
8569          * logic would have thrown a bad address error.
8570          */
8571         ret = MIN(strlen(exec_path), bufsiz);
8572         /* We cannot NUL terminate the string. */
8573         memcpy(buf, exec_path, ret);
8574     } else {
8575         ret = readlink(path(pathname), buf, bufsiz);
8576     }
8577 
8578     return ret;
8579 }
8580 
8581 static int do_execv(CPUArchState *cpu_env, int dirfd,
8582                     abi_long pathname, abi_long guest_argp,
8583                     abi_long guest_envp, int flags, bool is_execveat)
8584 {
8585     int ret;
8586     char **argp, **envp;
8587     int argc, envc;
8588     abi_ulong gp;
8589     abi_ulong addr;
8590     char **q;
8591     void *p;
8592 
8593     argc = 0;
8594 
8595     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8596         if (get_user_ual(addr, gp)) {
8597             return -TARGET_EFAULT;
8598         }
8599         if (!addr) {
8600             break;
8601         }
8602         argc++;
8603     }
8604     envc = 0;
8605     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8606         if (get_user_ual(addr, gp)) {
8607             return -TARGET_EFAULT;
8608         }
8609         if (!addr) {
8610             break;
8611         }
8612         envc++;
8613     }
8614 
8615     argp = g_new0(char *, argc + 1);
8616     envp = g_new0(char *, envc + 1);
8617 
8618     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8619         if (get_user_ual(addr, gp)) {
8620             goto execve_efault;
8621         }
8622         if (!addr) {
8623             break;
8624         }
8625         *q = lock_user_string(addr);
8626         if (!*q) {
8627             goto execve_efault;
8628         }
8629     }
8630     *q = NULL;
8631 
8632     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8633         if (get_user_ual(addr, gp)) {
8634             goto execve_efault;
8635         }
8636         if (!addr) {
8637             break;
8638         }
8639         *q = lock_user_string(addr);
8640         if (!*q) {
8641             goto execve_efault;
8642         }
8643     }
8644     *q = NULL;
8645 
8646     /*
8647      * Although execve() is not an interruptible syscall it is
8648      * a special case where we must use the safe_syscall wrapper:
8649      * if we allow a signal to happen before we make the host
8650      * syscall then we will 'lose' it, because at the point of
8651      * execve the process leaves QEMU's control. So we use the
8652      * safe syscall wrapper to ensure that we either take the
8653      * signal as a guest signal, or else it does not happen
8654      * before the execve completes and makes it the other
8655      * program's problem.
8656      */
8657     p = lock_user_string(pathname);
8658     if (!p) {
8659         goto execve_efault;
8660     }
8661 
8662     const char *exe = p;
8663     if (is_proc_myself(p, "exe")) {
8664         exe = exec_path;
8665     }
8666     ret = is_execveat
8667         ? safe_execveat(dirfd, exe, argp, envp, flags)
8668         : safe_execve(exe, argp, envp);
8669     ret = get_errno(ret);
8670 
8671     unlock_user(p, pathname, 0);
8672 
8673     goto execve_end;
8674 
8675 execve_efault:
8676     ret = -TARGET_EFAULT;
8677 
8678 execve_end:
8679     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8680         if (get_user_ual(addr, gp) || !addr) {
8681             break;
8682         }
8683         unlock_user(*q, addr, 0);
8684     }
8685     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8686         if (get_user_ual(addr, gp) || !addr) {
8687             break;
8688         }
8689         unlock_user(*q, addr, 0);
8690     }
8691 
8692     g_free(argp);
8693     g_free(envp);
8694     return ret;
8695 }
8696 
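/*
 * Timer IDs handed out to the guest are the index into g_posix_timers
 * tagged with TIMER_MAGIC in the upper bits, which lets stale or forged
 * IDs be rejected in get_timer_id().
 */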
8697 #define TIMER_MAGIC 0x0caf0000
8698 #define TIMER_MAGIC_MASK 0xffff0000
8699 
/* Convert a QEMU-provided timer ID back to the internal 16-bit index format. */
8701 static target_timer_t get_timer_id(abi_long arg)
8702 {
8703     target_timer_t timerid = arg;
8704 
8705     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8706         return -TARGET_EINVAL;
8707     }
8708 
8709     timerid &= 0xffff;
8710 
8711     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8712         return -TARGET_EINVAL;
8713     }
8714 
8715     return timerid;
8716 }
8717 
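/*
 * Copy a guest CPU affinity mask into a host bitmap, translating between
 * the abi_ulong and host unsigned long word sizes and byte orders.
 */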
8718 static int target_to_host_cpu_mask(unsigned long *host_mask,
8719                                    size_t host_size,
8720                                    abi_ulong target_addr,
8721                                    size_t target_size)
8722 {
8723     unsigned target_bits = sizeof(abi_ulong) * 8;
8724     unsigned host_bits = sizeof(*host_mask) * 8;
8725     abi_ulong *target_mask;
8726     unsigned i, j;
8727 
8728     assert(host_size >= target_size);
8729 
8730     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8731     if (!target_mask) {
8732         return -TARGET_EFAULT;
8733     }
8734     memset(host_mask, 0, host_size);
8735 
8736     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8737         unsigned bit = i * target_bits;
8738         abi_ulong val;
8739 
8740         __get_user(val, &target_mask[i]);
8741         for (j = 0; j < target_bits; j++, bit++) {
8742             if (val & (1UL << j)) {
8743                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8744             }
8745         }
8746     }
8747 
8748     unlock_user(target_mask, target_addr, 0);
8749     return 0;
8750 }
8751 
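/* The inverse of target_to_host_cpu_mask(): copy a host CPU bitmap back to the guest. */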
8752 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8753                                    size_t host_size,
8754                                    abi_ulong target_addr,
8755                                    size_t target_size)
8756 {
8757     unsigned target_bits = sizeof(abi_ulong) * 8;
8758     unsigned host_bits = sizeof(*host_mask) * 8;
8759     abi_ulong *target_mask;
8760     unsigned i, j;
8761 
8762     assert(host_size >= target_size);
8763 
8764     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8765     if (!target_mask) {
8766         return -TARGET_EFAULT;
8767     }
8768 
8769     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8770         unsigned bit = i * target_bits;
8771         abi_ulong val = 0;
8772 
8773         for (j = 0; j < target_bits; j++, bit++) {
8774             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8775                 val |= 1UL << j;
8776             }
8777         }
8778         __put_user(val, &target_mask[i]);
8779     }
8780 
8781     unlock_user(target_mask, target_addr, target_size);
8782     return 0;
8783 }
8784 
8785 #ifdef TARGET_NR_getdents
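/*
 * Emulate getdents() by reading host dirents into a bounce buffer and
 * repacking them into the guest's layout, which may differ in field size
 * and alignment.
 */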
8786 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8787 {
8788     g_autofree void *hdirp = NULL;
8789     void *tdirp;
8790     int hlen, hoff, toff;
8791     int hreclen, treclen;
8792     off_t prev_diroff = 0;
8793 
8794     hdirp = g_try_malloc(count);
8795     if (!hdirp) {
8796         return -TARGET_ENOMEM;
8797     }
8798 
8799 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8800     hlen = sys_getdents(dirfd, hdirp, count);
8801 #else
8802     hlen = sys_getdents64(dirfd, hdirp, count);
8803 #endif
8804 
8805     hlen = get_errno(hlen);
8806     if (is_error(hlen)) {
8807         return hlen;
8808     }
8809 
8810     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8811     if (!tdirp) {
8812         return -TARGET_EFAULT;
8813     }
8814 
8815     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8816 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8817         struct linux_dirent *hde = hdirp + hoff;
8818 #else
8819         struct linux_dirent64 *hde = hdirp + hoff;
8820 #endif
8821         struct target_dirent *tde = tdirp + toff;
8822         int namelen;
8823         uint8_t type;
8824 
8825         namelen = strlen(hde->d_name);
8826         hreclen = hde->d_reclen;
8827         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8828         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8829 
8830         if (toff + treclen > count) {
8831             /*
8832              * If the host struct is smaller than the target struct, or
8833              * requires less alignment and thus packs into less space,
8834              * then the host can return more entries than we can pass
8835              * on to the guest.
8836              */
8837             if (toff == 0) {
8838                 toff = -TARGET_EINVAL; /* result buffer is too small */
8839                 break;
8840             }
8841             /*
8842              * Return what we have, resetting the file pointer to the
8843              * location of the first record not returned.
8844              */
8845             lseek(dirfd, prev_diroff, SEEK_SET);
8846             break;
8847         }
8848 
8849         prev_diroff = hde->d_off;
8850         tde->d_ino = tswapal(hde->d_ino);
8851         tde->d_off = tswapal(hde->d_off);
8852         tde->d_reclen = tswap16(treclen);
8853         memcpy(tde->d_name, hde->d_name, namelen + 1);
8854 
8855         /*
8856          * The getdents type is in what was formerly a padding byte at the
8857          * end of the structure.
8858          */
8859 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8860         type = *((uint8_t *)hde + hreclen - 1);
8861 #else
8862         type = hde->d_type;
8863 #endif
8864         *((uint8_t *)tde + treclen - 1) = type;
8865     }
8866 
8867     unlock_user(tdirp, arg2, toff);
8868     return toff;
8869 }
8870 #endif /* TARGET_NR_getdents */
8871 
8872 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
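/* As do_getdents(), but producing the guest's 64-bit dirent layout. */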
8873 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8874 {
8875     g_autofree void *hdirp = NULL;
8876     void *tdirp;
8877     int hlen, hoff, toff;
8878     int hreclen, treclen;
8879     off_t prev_diroff = 0;
8880 
8881     hdirp = g_try_malloc(count);
8882     if (!hdirp) {
8883         return -TARGET_ENOMEM;
8884     }
8885 
8886     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8887     if (is_error(hlen)) {
8888         return hlen;
8889     }
8890 
8891     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8892     if (!tdirp) {
8893         return -TARGET_EFAULT;
8894     }
8895 
8896     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8897         struct linux_dirent64 *hde = hdirp + hoff;
8898         struct target_dirent64 *tde = tdirp + toff;
8899         int namelen;
8900 
8901         namelen = strlen(hde->d_name) + 1;
8902         hreclen = hde->d_reclen;
8903         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8904         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8905 
8906         if (toff + treclen > count) {
8907             /*
8908              * If the host struct is smaller than the target struct, or
8909              * requires less alignment and thus packs into less space,
8910              * then the host can return more entries than we can pass
8911              * on to the guest.
8912              */
8913             if (toff == 0) {
8914                 toff = -TARGET_EINVAL; /* result buffer is too small */
8915                 break;
8916             }
8917             /*
8918              * Return what we have, resetting the file pointer to the
8919              * location of the first record not returned.
8920              */
8921             lseek(dirfd, prev_diroff, SEEK_SET);
8922             break;
8923         }
8924 
8925         prev_diroff = hde->d_off;
8926         tde->d_ino = tswap64(hde->d_ino);
8927         tde->d_off = tswap64(hde->d_off);
8928         tde->d_reclen = tswap16(treclen);
8929         tde->d_type = hde->d_type;
8930         memcpy(tde->d_name, hde->d_name, namelen);
8931     }
8932 
8933     unlock_user(tdirp, arg2, toff);
8934     return toff;
8935 }
8936 #endif /* TARGET_NR_getdents64 */
8937 
8938 #if defined(TARGET_NR_riscv_hwprobe)
8939 
8940 #define RISCV_HWPROBE_KEY_MVENDORID     0
8941 #define RISCV_HWPROBE_KEY_MARCHID       1
8942 #define RISCV_HWPROBE_KEY_MIMPID        2
8943 
8944 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8945 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8946 
8947 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8948 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8949 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8950 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8951 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8952 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8953 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8954 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8955 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8956 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8957 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8958 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8959 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8960 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8961 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8962 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8963 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8964 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8965 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8966 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8967 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8968 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8969 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8970 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8971 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8972 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8973 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8974 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8975 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8976 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8977 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8978 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8979 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8980 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8981 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8982 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8983 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8984 
8985 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8986 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8987 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8988 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8989 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8990 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8991 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8992 
8993 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8994 
8995 struct riscv_hwprobe {
8996     abi_llong  key;
8997     abi_ullong value;
8998 };
8999 
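/*
 * Fill in each riscv_hwprobe key/value pair from the emulated CPU's
 * configuration.  Unrecognised keys have their key field set to -1,
 * matching the kernel's behaviour.
 */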
9000 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9001                                     struct riscv_hwprobe *pair,
9002                                     size_t pair_count)
9003 {
9004     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9005 
9006     for (; pair_count > 0; pair_count--, pair++) {
9007         abi_llong key;
9008         abi_ullong value;
9009         __put_user(0, &pair->value);
9010         __get_user(key, &pair->key);
9011         switch (key) {
9012         case RISCV_HWPROBE_KEY_MVENDORID:
9013             __put_user(cfg->mvendorid, &pair->value);
9014             break;
9015         case RISCV_HWPROBE_KEY_MARCHID:
9016             __put_user(cfg->marchid, &pair->value);
9017             break;
9018         case RISCV_HWPROBE_KEY_MIMPID:
9019             __put_user(cfg->mimpid, &pair->value);
9020             break;
9021         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9022             value = riscv_has_ext(env, RVI) &&
9023                     riscv_has_ext(env, RVM) &&
9024                     riscv_has_ext(env, RVA) ?
9025                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9026             __put_user(value, &pair->value);
9027             break;
9028         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9029             value = riscv_has_ext(env, RVF) &&
9030                     riscv_has_ext(env, RVD) ?
9031                     RISCV_HWPROBE_IMA_FD : 0;
9032             value |= riscv_has_ext(env, RVC) ?
9033                      RISCV_HWPROBE_IMA_C : 0;
9034             value |= riscv_has_ext(env, RVV) ?
9035                      RISCV_HWPROBE_IMA_V : 0;
9036             value |= cfg->ext_zba ?
9037                      RISCV_HWPROBE_EXT_ZBA : 0;
9038             value |= cfg->ext_zbb ?
9039                      RISCV_HWPROBE_EXT_ZBB : 0;
9040             value |= cfg->ext_zbs ?
9041                      RISCV_HWPROBE_EXT_ZBS : 0;
9042             value |= cfg->ext_zicboz ?
9043                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9044             value |= cfg->ext_zbc ?
9045                      RISCV_HWPROBE_EXT_ZBC : 0;
9046             value |= cfg->ext_zbkb ?
9047                      RISCV_HWPROBE_EXT_ZBKB : 0;
9048             value |= cfg->ext_zbkc ?
9049                      RISCV_HWPROBE_EXT_ZBKC : 0;
9050             value |= cfg->ext_zbkx ?
9051                      RISCV_HWPROBE_EXT_ZBKX : 0;
9052             value |= cfg->ext_zknd ?
9053                      RISCV_HWPROBE_EXT_ZKND : 0;
9054             value |= cfg->ext_zkne ?
9055                      RISCV_HWPROBE_EXT_ZKNE : 0;
9056             value |= cfg->ext_zknh ?
9057                      RISCV_HWPROBE_EXT_ZKNH : 0;
9058             value |= cfg->ext_zksed ?
9059                      RISCV_HWPROBE_EXT_ZKSED : 0;
9060             value |= cfg->ext_zksh ?
9061                      RISCV_HWPROBE_EXT_ZKSH : 0;
9062             value |= cfg->ext_zkt ?
9063                      RISCV_HWPROBE_EXT_ZKT : 0;
9064             value |= cfg->ext_zvbb ?
9065                      RISCV_HWPROBE_EXT_ZVBB : 0;
9066             value |= cfg->ext_zvbc ?
9067                      RISCV_HWPROBE_EXT_ZVBC : 0;
9068             value |= cfg->ext_zvkb ?
9069                      RISCV_HWPROBE_EXT_ZVKB : 0;
9070             value |= cfg->ext_zvkg ?
9071                      RISCV_HWPROBE_EXT_ZVKG : 0;
9072             value |= cfg->ext_zvkned ?
9073                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9074             value |= cfg->ext_zvknha ?
9075                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9076             value |= cfg->ext_zvknhb ?
9077                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9078             value |= cfg->ext_zvksed ?
9079                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9080             value |= cfg->ext_zvksh ?
9081                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9082             value |= cfg->ext_zvkt ?
9083                      RISCV_HWPROBE_EXT_ZVKT : 0;
9084             value |= cfg->ext_zfh ?
9085                      RISCV_HWPROBE_EXT_ZFH : 0;
9086             value |= cfg->ext_zfhmin ?
9087                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9088             value |= cfg->ext_zihintntl ?
9089                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9090             value |= cfg->ext_zvfh ?
9091                      RISCV_HWPROBE_EXT_ZVFH : 0;
9092             value |= cfg->ext_zvfhmin ?
9093                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9094             value |= cfg->ext_zfa ?
9095                      RISCV_HWPROBE_EXT_ZFA : 0;
9096             value |= cfg->ext_ztso ?
9097                      RISCV_HWPROBE_EXT_ZTSO : 0;
9098             value |= cfg->ext_zacas ?
9099                      RISCV_HWPROBE_EXT_ZACAS : 0;
9100             value |= cfg->ext_zicond ?
9101                      RISCV_HWPROBE_EXT_ZICOND : 0;
9102             __put_user(value, &pair->value);
9103             break;
9104         case RISCV_HWPROBE_KEY_CPUPERF_0:
9105             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9106             break;
9107         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9108             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9109             __put_user(value, &pair->value);
9110             break;
9111         default:
9112             __put_user(-1, &pair->key);
9113             break;
9114         }
9115     }
9116 }
9117 
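/*
 * For riscv_hwprobe: validate the guest cpumask argument and check that
 * it selects at least one CPU.
 */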
9118 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9119 {
9120     int ret, i, tmp;
9121     size_t host_mask_size, target_mask_size;
9122     unsigned long *host_mask;
9123 
    /*
     * cpu_set_t represents CPU masks as arrays of unsigned long bit masks.
     * arg3 contains the CPU count.
     */
9128     tmp = (8 * sizeof(abi_ulong));
9129     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9130     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9131                      ~(sizeof(*host_mask) - 1);
9132 
9133     host_mask = alloca(host_mask_size);
9134 
9135     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9136                                   arg4, target_mask_size);
9137     if (ret != 0) {
9138         return ret;
9139     }
9140 
9141     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9142         if (host_mask[i] != 0) {
9143             return 0;
9144         }
9145     }
9146     return -TARGET_EINVAL;
9147 }
9148 
9149 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9150                                  abi_long arg2, abi_long arg3,
9151                                  abi_long arg4, abi_long arg5)
9152 {
9153     int ret;
9154     struct riscv_hwprobe *host_pairs;
9155 
9156     /* flags must be 0 */
9157     if (arg5 != 0) {
9158         return -TARGET_EINVAL;
9159     }
9160 
9161     /* check cpu_set */
9162     if (arg3 != 0) {
9163         ret = cpu_set_valid(arg3, arg4);
9164         if (ret != 0) {
9165             return ret;
9166         }
9167     } else if (arg4 != 0) {
9168         return -TARGET_EINVAL;
9169     }
9170 
9171     /* no pairs */
9172     if (arg2 == 0) {
9173         return 0;
9174     }
9175 
9176     host_pairs = lock_user(VERIFY_WRITE, arg1,
9177                            sizeof(*host_pairs) * (size_t)arg2, 0);
9178     if (host_pairs == NULL) {
9179         return -TARGET_EFAULT;
9180     }
9181     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9182     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9183     return 0;
9184 }
9185 #endif /* TARGET_NR_riscv_hwprobe */
9186 
9187 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9188 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9189 #endif
9190 
9191 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9192 #define __NR_sys_open_tree __NR_open_tree
9193 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9194           unsigned int, __flags)
9195 #endif
9196 
9197 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9198 #define __NR_sys_move_mount __NR_move_mount
9199 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9200            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9201 #endif
9202 
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, which in turn allows actions such as
 * logging of syscall results to be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
9208 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9209                             abi_long arg2, abi_long arg3, abi_long arg4,
9210                             abi_long arg5, abi_long arg6, abi_long arg7,
9211                             abi_long arg8)
9212 {
9213     CPUState *cpu = env_cpu(cpu_env);
9214     abi_long ret;
9215 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9216     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9217     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9218     || defined(TARGET_NR_statx)
9219     struct stat st;
9220 #endif
9221 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9222     || defined(TARGET_NR_fstatfs)
9223     struct statfs stfs;
9224 #endif
9225     void *p;
9226 
9227     switch(num) {
9228     case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However, in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
9233 
9234         if (block_signals()) {
9235             return -QEMU_ERESTARTSYS;
9236         }
9237 
9238         pthread_mutex_lock(&clone_lock);
9239 
9240         if (CPU_NEXT(first_cpu)) {
9241             TaskState *ts = get_task_state(cpu);
9242 
9243             if (ts->child_tidptr) {
9244                 put_user_u32(0, ts->child_tidptr);
9245                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9246                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9247             }
9248 
9249             object_unparent(OBJECT(cpu));
9250             object_unref(OBJECT(cpu));
9251             /*
9252              * At this point the CPU should be unrealized and removed
9253              * from cpu lists. We can clean-up the rest of the thread
9254              * data without the lock held.
9255              */
9256 
9257             pthread_mutex_unlock(&clone_lock);
9258 
9259             thread_cpu = NULL;
9260             g_free(ts);
9261             rcu_unregister_thread();
9262             pthread_exit(NULL);
9263         }
9264 
9265         pthread_mutex_unlock(&clone_lock);
9266         preexit_cleanup(cpu_env, arg1);
9267         _exit(arg1);
9268         return 0; /* avoid warning */
9269     case TARGET_NR_read:
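             /*
              * A zero-length read with a NULL buffer is valid and must not
              * fault, but lock_user() would fail on the NULL pointer, so
              * pass it straight through to the host.
              */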
9270         if (arg2 == 0 && arg3 == 0) {
9271             return get_errno(safe_read(arg1, 0, 0));
9272         } else {
9273             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9274                 return -TARGET_EFAULT;
9275             ret = get_errno(safe_read(arg1, p, arg3));
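                 /*
                  * If this fd has a registered data translator (used e.g.
                  * for netlink sockets), convert what the host returned
                  * into the guest's expected format.
                  */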
9276             if (ret >= 0 &&
9277                 fd_trans_host_to_target_data(arg1)) {
9278                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9279             }
9280             unlock_user(p, arg2, ret);
9281         }
9282         return ret;
9283     case TARGET_NR_write:
9284         if (arg2 == 0 && arg3 == 0) {
9285             return get_errno(safe_write(arg1, 0, 0));
9286         }
9287         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9288             return -TARGET_EFAULT;
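             /*
              * A registered translator may rewrite the data, so work on a
              * scratch copy rather than modifying the guest's buffer.
              */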
9289         if (fd_trans_target_to_host_data(arg1)) {
9290             void *copy = g_malloc(arg3);
9291             memcpy(copy, p, arg3);
9292             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9293             if (ret >= 0) {
9294                 ret = get_errno(safe_write(arg1, copy, ret));
9295             }
9296             g_free(copy);
9297         } else {
9298             ret = get_errno(safe_write(arg1, p, arg3));
9299         }
9300         unlock_user(p, arg2, 0);
9301         return ret;
9302 
9303 #ifdef TARGET_NR_open
9304     case TARGET_NR_open:
9305         if (!(p = lock_user_string(arg1)))
9306             return -TARGET_EFAULT;
9307         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9308                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9309                                   arg3, true));
9310         fd_trans_unregister(ret);
9311         unlock_user(p, arg1, 0);
9312         return ret;
9313 #endif
9314     case TARGET_NR_openat:
9315         if (!(p = lock_user_string(arg2)))
9316             return -TARGET_EFAULT;
9317         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9318                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9319                                   arg4, true));
9320         fd_trans_unregister(ret);
9321         unlock_user(p, arg2, 0);
9322         return ret;
9323     case TARGET_NR_openat2:
9324         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9325         return ret;
9326 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9327     case TARGET_NR_name_to_handle_at:
9328         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9329         return ret;
9330 #endif
9331 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9332     case TARGET_NR_open_by_handle_at:
9333         ret = do_open_by_handle_at(arg1, arg2, arg3);
9334         fd_trans_unregister(ret);
9335         return ret;
9336 #endif
9337 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9338     case TARGET_NR_pidfd_open:
9339         return get_errno(pidfd_open(arg1, arg2));
9340 #endif
9341 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9342     case TARGET_NR_pidfd_send_signal:
9343         {
9344             siginfo_t uinfo, *puinfo;
9345 
9346             if (arg3) {
9347                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9348                 if (!p) {
9349                     return -TARGET_EFAULT;
9350                 }
9351                 target_to_host_siginfo(&uinfo, p);
9352                 unlock_user(p, arg3, 0);
9353                 puinfo = &uinfo;
9354             } else {
9355                 puinfo = NULL;
9356             }
9357             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9358                                               puinfo, arg4));
9359         }
9360         return ret;
9361 #endif
9362 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9363     case TARGET_NR_pidfd_getfd:
9364         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9365 #endif
9366     case TARGET_NR_close:
9367         fd_trans_unregister(arg1);
9368         return get_errno(close(arg1));
9369 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9370     case TARGET_NR_close_range:
9371         ret = get_errno(sys_close_range(arg1, arg2, arg3));
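             /*
              * On success, drop the fd translators for the descriptors that
              * were really closed; CLOSE_RANGE_CLOEXEC only marks them
              * close-on-exec and leaves them open.
              */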
9372         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9373             abi_long fd, maxfd;
9374             maxfd = MIN(arg2, target_fd_max);
9375             for (fd = arg1; fd < maxfd; fd++) {
9376                 fd_trans_unregister(fd);
9377             }
9378         }
9379         return ret;
9380 #endif
9381 
9382     case TARGET_NR_brk:
9383         return do_brk(arg1);
9384 #ifdef TARGET_NR_fork
9385     case TARGET_NR_fork:
9386         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9387 #endif
9388 #ifdef TARGET_NR_waitpid
9389     case TARGET_NR_waitpid:
9390         {
9391             int status;
9392             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
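                 /*
                  * Only copy the status back when a child was actually
                  * reaped (ret != 0) and the caller gave a status pointer.
                  */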
9393             if (!is_error(ret) && arg2 && ret
9394                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9395                 return -TARGET_EFAULT;
9396         }
9397         return ret;
9398 #endif
9399 #ifdef TARGET_NR_waitid
9400     case TARGET_NR_waitid:
9401         {
9402             struct rusage ru;
9403             siginfo_t info;
9404 
9405             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9406                                         arg4, (arg5 ? &ru : NULL)));
9407             if (!is_error(ret)) {
9408                 if (arg3) {
9409                     p = lock_user(VERIFY_WRITE, arg3,
9410                                   sizeof(target_siginfo_t), 0);
9411                     if (!p) {
9412                         return -TARGET_EFAULT;
9413                     }
9414                     host_to_target_siginfo(p, &info);
9415                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9416                 }
9417                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9418                     return -TARGET_EFAULT;
9419                 }
9420             }
9421         }
9422         return ret;
9423 #endif
9424 #ifdef TARGET_NR_creat /* not on alpha */
9425     case TARGET_NR_creat:
9426         if (!(p = lock_user_string(arg1)))
9427             return -TARGET_EFAULT;
9428         ret = get_errno(creat(p, arg2));
9429         fd_trans_unregister(ret);
9430         unlock_user(p, arg1, 0);
9431         return ret;
9432 #endif
9433 #ifdef TARGET_NR_link
9434     case TARGET_NR_link:
9435         {
9436             void * p2;
9437             p = lock_user_string(arg1);
9438             p2 = lock_user_string(arg2);
9439             if (!p || !p2)
9440                 ret = -TARGET_EFAULT;
9441             else
9442                 ret = get_errno(link(p, p2));
9443             unlock_user(p2, arg2, 0);
9444             unlock_user(p, arg1, 0);
9445         }
9446         return ret;
9447 #endif
9448 #if defined(TARGET_NR_linkat)
9449     case TARGET_NR_linkat:
9450         {
9451             void * p2 = NULL;
9452             if (!arg2 || !arg4)
9453                 return -TARGET_EFAULT;
9454             p  = lock_user_string(arg2);
9455             p2 = lock_user_string(arg4);
9456             if (!p || !p2)
9457                 ret = -TARGET_EFAULT;
9458             else
9459                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9460             unlock_user(p, arg2, 0);
9461             unlock_user(p2, arg4, 0);
9462         }
9463         return ret;
9464 #endif
9465 #ifdef TARGET_NR_unlink
9466     case TARGET_NR_unlink:
9467         if (!(p = lock_user_string(arg1)))
9468             return -TARGET_EFAULT;
9469         ret = get_errno(unlink(p));
9470         unlock_user(p, arg1, 0);
9471         return ret;
9472 #endif
9473 #if defined(TARGET_NR_unlinkat)
9474     case TARGET_NR_unlinkat:
9475         if (!(p = lock_user_string(arg2)))
9476             return -TARGET_EFAULT;
9477         ret = get_errno(unlinkat(arg1, p, arg3));
9478         unlock_user(p, arg2, 0);
9479         return ret;
9480 #endif
9481     case TARGET_NR_execveat:
9482         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9483     case TARGET_NR_execve:
9484         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9485     case TARGET_NR_chdir:
9486         if (!(p = lock_user_string(arg1)))
9487             return -TARGET_EFAULT;
9488         ret = get_errno(chdir(p));
9489         unlock_user(p, arg1, 0);
9490         return ret;
9491 #ifdef TARGET_NR_time
9492     case TARGET_NR_time:
9493         {
9494             time_t host_time;
9495             ret = get_errno(time(&host_time));
9496             if (!is_error(ret)
9497                 && arg1
9498                 && put_user_sal(host_time, arg1))
9499                 return -TARGET_EFAULT;
9500         }
9501         return ret;
9502 #endif
9503 #ifdef TARGET_NR_mknod
9504     case TARGET_NR_mknod:
9505         if (!(p = lock_user_string(arg1)))
9506             return -TARGET_EFAULT;
9507         ret = get_errno(mknod(p, arg2, arg3));
9508         unlock_user(p, arg1, 0);
9509         return ret;
9510 #endif
9511 #if defined(TARGET_NR_mknodat)
9512     case TARGET_NR_mknodat:
9513         if (!(p = lock_user_string(arg2)))
9514             return -TARGET_EFAULT;
9515         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9516         unlock_user(p, arg2, 0);
9517         return ret;
9518 #endif
9519 #ifdef TARGET_NR_chmod
9520     case TARGET_NR_chmod:
9521         if (!(p = lock_user_string(arg1)))
9522             return -TARGET_EFAULT;
9523         ret = get_errno(chmod(p, arg2));
9524         unlock_user(p, arg1, 0);
9525         return ret;
9526 #endif
9527 #ifdef TARGET_NR_lseek
9528     case TARGET_NR_lseek:
9529         return get_errno(lseek(arg1, arg2, arg3));
9530 #endif
9531 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9532     /* Alpha specific: getxpid returns the pid in v0 and the ppid in a4. */
9533     case TARGET_NR_getxpid:
9534         cpu_env->ir[IR_A4] = getppid();
9535         return get_errno(getpid());
9536 #endif
9537 #ifdef TARGET_NR_getpid
9538     case TARGET_NR_getpid:
9539         return get_errno(getpid());
9540 #endif
9541     case TARGET_NR_mount:
9542         {
9543             /* need to look at the data field */
9544             void *p2, *p3;
9545 
9546             if (arg1) {
9547                 p = lock_user_string(arg1);
9548                 if (!p) {
9549                     return -TARGET_EFAULT;
9550                 }
9551             } else {
9552                 p = NULL;
9553             }
9554 
9555             p2 = lock_user_string(arg2);
9556             if (!p2) {
9557                 if (arg1) {
9558                     unlock_user(p, arg1, 0);
9559                 }
9560                 return -TARGET_EFAULT;
9561             }
9562 
9563             if (arg3) {
9564                 p3 = lock_user_string(arg3);
9565                 if (!p3) {
9566                     if (arg1) {
9567                         unlock_user(p, arg1, 0);
9568                     }
9569                     unlock_user(p2, arg2, 0);
9570                     return -TARGET_EFAULT;
9571                 }
9572             } else {
9573                 p3 = NULL;
9574             }
9575 
9576             /* FIXME - arg5 should be locked, but it isn't clear how to
9577              * do that since it's not guaranteed to be a NULL-terminated
9578              * string.
9579              */
9580             if (!arg5) {
9581                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9582             } else {
9583                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9584             }
9585             ret = get_errno(ret);
9586 
9587             if (arg1) {
9588                 unlock_user(p, arg1, 0);
9589             }
9590             unlock_user(p2, arg2, 0);
9591             if (arg3) {
9592                 unlock_user(p3, arg3, 0);
9593             }
9594         }
9595         return ret;
9596 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9597 #if defined(TARGET_NR_umount)
9598     case TARGET_NR_umount:
9599 #endif
9600 #if defined(TARGET_NR_oldumount)
9601     case TARGET_NR_oldumount:
9602 #endif
9603         if (!(p = lock_user_string(arg1)))
9604             return -TARGET_EFAULT;
9605         ret = get_errno(umount(p));
9606         unlock_user(p, arg1, 0);
9607         return ret;
9608 #endif
9609 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9610     case TARGET_NR_move_mount:
9611         {
9612             void *p2, *p4;
9613 
9614             if (!arg2 || !arg4) {
9615                 return -TARGET_EFAULT;
9616             }
9617 
9618             p2 = lock_user_string(arg2);
9619             if (!p2) {
9620                 return -TARGET_EFAULT;
9621             }
9622 
9623             p4 = lock_user_string(arg4);
9624             if (!p4) {
9625                 unlock_user(p2, arg2, 0);
9626                 return -TARGET_EFAULT;
9627             }
9628             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9629 
9630             unlock_user(p2, arg2, 0);
9631             unlock_user(p4, arg4, 0);
9632 
9633             return ret;
9634         }
9635 #endif
9636 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9637     case TARGET_NR_open_tree:
9638         {
9639             void *p2;
9640             int host_flags;
9641 
9642             if (!arg2) {
9643                 return -TARGET_EFAULT;
9644             }
9645 
9646             p2 = lock_user_string(arg2);
9647             if (!p2) {
9648                 return -TARGET_EFAULT;
9649             }
9650 
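                 /*
                  * Only O_CLOEXEC needs translating to the host value here;
                  * the OPEN_TREE_* and AT_* flag bits are assumed to be
                  * identical on target and host.
                  */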
9651             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9652             if (arg3 & TARGET_O_CLOEXEC) {
9653                 host_flags |= O_CLOEXEC;
9654             }
9655 
9656             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9657 
9658             unlock_user(p2, arg2, 0);
9659 
9660             return ret;
9661         }
9662 #endif
9663 #ifdef TARGET_NR_stime /* not on alpha */
9664     case TARGET_NR_stime:
9665         {
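                 /*
                  * stime() is obsolete in modern host libcs, so set the
                  * clock with clock_settime(CLOCK_REALTIME) instead.
                  */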
9666             struct timespec ts;
9667             ts.tv_nsec = 0;
9668             if (get_user_sal(ts.tv_sec, arg1)) {
9669                 return -TARGET_EFAULT;
9670             }
9671             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9672         }
9673 #endif
9674 #ifdef TARGET_NR_alarm /* not on alpha */
9675     case TARGET_NR_alarm:
9676         return alarm(arg1);
9677 #endif
9678 #ifdef TARGET_NR_pause /* not on alpha */
9679     case TARGET_NR_pause:
9680         if (!block_signals()) {
9681             sigsuspend(&get_task_state(cpu)->signal_mask);
9682         }
9683         return -TARGET_EINTR;
9684 #endif
9685 #ifdef TARGET_NR_utime
9686     case TARGET_NR_utime:
9687         {
9688             struct utimbuf tbuf, *host_tbuf;
9689             struct target_utimbuf *target_tbuf;
9690             if (arg2) {
9691                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9692                     return -TARGET_EFAULT;
9693                 tbuf.actime = tswapal(target_tbuf->actime);
9694                 tbuf.modtime = tswapal(target_tbuf->modtime);
9695                 unlock_user_struct(target_tbuf, arg2, 0);
9696                 host_tbuf = &tbuf;
9697             } else {
9698                 host_tbuf = NULL;
9699             }
9700             if (!(p = lock_user_string(arg1)))
9701                 return -TARGET_EFAULT;
9702             ret = get_errno(utime(p, host_tbuf));
9703             unlock_user(p, arg1, 0);
9704         }
9705         return ret;
9706 #endif
9707 #ifdef TARGET_NR_utimes
9708     case TARGET_NR_utimes:
9709         {
9710             struct timeval *tvp, tv[2];
9711             if (arg2) {
9712                 if (copy_from_user_timeval(&tv[0], arg2)
9713                     || copy_from_user_timeval(&tv[1],
9714                                               arg2 + sizeof(struct target_timeval)))
9715                     return -TARGET_EFAULT;
9716                 tvp = tv;
9717             } else {
9718                 tvp = NULL;
9719             }
9720             if (!(p = lock_user_string(arg1)))
9721                 return -TARGET_EFAULT;
9722             ret = get_errno(utimes(p, tvp));
9723             unlock_user(p, arg1, 0);
9724         }
9725         return ret;
9726 #endif
9727 #if defined(TARGET_NR_futimesat)
9728     case TARGET_NR_futimesat:
9729         {
9730             struct timeval *tvp, tv[2];
9731             if (arg3) {
9732                 if (copy_from_user_timeval(&tv[0], arg3)
9733                     || copy_from_user_timeval(&tv[1],
9734                                               arg3 + sizeof(struct target_timeval)))
9735                     return -TARGET_EFAULT;
9736                 tvp = tv;
9737             } else {
9738                 tvp = NULL;
9739             }
9740             if (!(p = lock_user_string(arg2))) {
9741                 return -TARGET_EFAULT;
9742             }
9743             ret = get_errno(futimesat(arg1, path(p), tvp));
9744             unlock_user(p, arg2, 0);
9745         }
9746         return ret;
9747 #endif
9748 #ifdef TARGET_NR_access
9749     case TARGET_NR_access:
9750         if (!(p = lock_user_string(arg1))) {
9751             return -TARGET_EFAULT;
9752         }
9753         ret = get_errno(access(path(p), arg2));
9754         unlock_user(p, arg1, 0);
9755         return ret;
9756 #endif
9757 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9758     case TARGET_NR_faccessat:
9759         if (!(p = lock_user_string(arg2))) {
9760             return -TARGET_EFAULT;
9761         }
9762         ret = get_errno(faccessat(arg1, p, arg3, 0));
9763         unlock_user(p, arg2, 0);
9764         return ret;
9765 #endif
9766 #if defined(TARGET_NR_faccessat2)
9767     case TARGET_NR_faccessat2:
9768         if (!(p = lock_user_string(arg2))) {
9769             return -TARGET_EFAULT;
9770         }
9771         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9772         unlock_user(p, arg2, 0);
9773         return ret;
9774 #endif
9775 #ifdef TARGET_NR_nice /* not on alpha */
9776     case TARGET_NR_nice:
9777         return get_errno(nice(arg1));
9778 #endif
9779     case TARGET_NR_sync:
9780         sync();
9781         return 0;
9782 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9783     case TARGET_NR_syncfs:
9784         return get_errno(syncfs(arg1));
9785 #endif
9786     case TARGET_NR_kill:
9787         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9788 #ifdef TARGET_NR_rename
9789     case TARGET_NR_rename:
9790         {
9791             void *p2;
9792             p = lock_user_string(arg1);
9793             p2 = lock_user_string(arg2);
9794             if (!p || !p2)
9795                 ret = -TARGET_EFAULT;
9796             else
9797                 ret = get_errno(rename(p, p2));
9798             unlock_user(p2, arg2, 0);
9799             unlock_user(p, arg1, 0);
9800         }
9801         return ret;
9802 #endif
9803 #if defined(TARGET_NR_renameat)
9804     case TARGET_NR_renameat:
9805         {
9806             void *p2;
9807             p  = lock_user_string(arg2);
9808             p2 = lock_user_string(arg4);
9809             if (!p || !p2)
9810                 ret = -TARGET_EFAULT;
9811             else
9812                 ret = get_errno(renameat(arg1, p, arg3, p2));
9813             unlock_user(p2, arg4, 0);
9814             unlock_user(p, arg2, 0);
9815         }
9816         return ret;
9817 #endif
9818 #if defined(TARGET_NR_renameat2)
9819     case TARGET_NR_renameat2:
9820         {
9821             void *p2;
9822             p  = lock_user_string(arg2);
9823             p2 = lock_user_string(arg4);
9824             if (!p || !p2) {
9825                 ret = -TARGET_EFAULT;
9826             } else {
9827                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9828             }
9829             unlock_user(p2, arg4, 0);
9830             unlock_user(p, arg2, 0);
9831         }
9832         return ret;
9833 #endif
9834 #ifdef TARGET_NR_mkdir
9835     case TARGET_NR_mkdir:
9836         if (!(p = lock_user_string(arg1)))
9837             return -TARGET_EFAULT;
9838         ret = get_errno(mkdir(p, arg2));
9839         unlock_user(p, arg1, 0);
9840         return ret;
9841 #endif
9842 #if defined(TARGET_NR_mkdirat)
9843     case TARGET_NR_mkdirat:
9844         if (!(p = lock_user_string(arg2)))
9845             return -TARGET_EFAULT;
9846         ret = get_errno(mkdirat(arg1, p, arg3));
9847         unlock_user(p, arg2, 0);
9848         return ret;
9849 #endif
9850 #ifdef TARGET_NR_rmdir
9851     case TARGET_NR_rmdir:
9852         if (!(p = lock_user_string(arg1)))
9853             return -TARGET_EFAULT;
9854         ret = get_errno(rmdir(p));
9855         unlock_user(p, arg1, 0);
9856         return ret;
9857 #endif
9858     case TARGET_NR_dup:
9859         ret = get_errno(dup(arg1));
9860         if (ret >= 0) {
9861             fd_trans_dup(arg1, ret);
9862         }
9863         return ret;
9864 #ifdef TARGET_NR_pipe
9865     case TARGET_NR_pipe:
9866         return do_pipe(cpu_env, arg1, 0, 0);
9867 #endif
9868 #ifdef TARGET_NR_pipe2
9869     case TARGET_NR_pipe2:
9870         return do_pipe(cpu_env, arg1,
9871                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9872 #endif
9873     case TARGET_NR_times:
9874         {
9875             struct target_tms *tmsp;
9876             struct tms tms;
9877             ret = get_errno(times(&tms));
9878             if (arg1) {
9879                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9880                 if (!tmsp)
9881                     return -TARGET_EFAULT;
9882                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9883                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9884                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9885                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9886             }
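                 /*
                  * The return value of times() is itself measured in clock
                  * ticks and must be converted for the guest as well.
                  */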
9887             if (!is_error(ret))
9888                 ret = host_to_target_clock_t(ret);
9889         }
9890         return ret;
9891     case TARGET_NR_acct:
9892         if (arg1 == 0) {
9893             ret = get_errno(acct(NULL));
9894         } else {
9895             if (!(p = lock_user_string(arg1))) {
9896                 return -TARGET_EFAULT;
9897             }
9898             ret = get_errno(acct(path(p)));
9899             unlock_user(p, arg1, 0);
9900         }
9901         return ret;
9902 #ifdef TARGET_NR_umount2
9903     case TARGET_NR_umount2:
9904         if (!(p = lock_user_string(arg1)))
9905             return -TARGET_EFAULT;
9906         ret = get_errno(umount2(p, arg2));
9907         unlock_user(p, arg1, 0);
9908         return ret;
9909 #endif
9910     case TARGET_NR_ioctl:
9911         return do_ioctl(arg1, arg2, arg3);
9912 #ifdef TARGET_NR_fcntl
9913     case TARGET_NR_fcntl:
9914         return do_fcntl(arg1, arg2, arg3);
9915 #endif
9916     case TARGET_NR_setpgid:
9917         return get_errno(setpgid(arg1, arg2));
9918     case TARGET_NR_umask:
9919         return get_errno(umask(arg1));
9920     case TARGET_NR_chroot:
9921         if (!(p = lock_user_string(arg1)))
9922             return -TARGET_EFAULT;
9923         ret = get_errno(chroot(p));
9924         unlock_user(p, arg1, 0);
9925         return ret;
9926 #ifdef TARGET_NR_dup2
9927     case TARGET_NR_dup2:
9928         ret = get_errno(dup2(arg1, arg2));
9929         if (ret >= 0) {
9930             fd_trans_dup(arg1, arg2);
9931         }
9932         return ret;
9933 #endif
9934 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9935     case TARGET_NR_dup3:
9936     {
9937         int host_flags;
9938 
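             /* O_CLOEXEC is the only flag that dup3() accepts. */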
9939         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9940             return -TARGET_EINVAL;
9941         }
9942         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9943         ret = get_errno(dup3(arg1, arg2, host_flags));
9944         if (ret >= 0) {
9945             fd_trans_dup(arg1, arg2);
9946         }
9947         return ret;
9948     }
9949 #endif
9950 #ifdef TARGET_NR_getppid /* not on alpha */
9951     case TARGET_NR_getppid:
9952         return get_errno(getppid());
9953 #endif
9954 #ifdef TARGET_NR_getpgrp
9955     case TARGET_NR_getpgrp:
9956         return get_errno(getpgrp());
9957 #endif
9958     case TARGET_NR_setsid:
9959         return get_errno(setsid());
9960 #ifdef TARGET_NR_sigaction
9961     case TARGET_NR_sigaction:
9962         {
9963 #if defined(TARGET_MIPS)
9964             struct target_sigaction act, oact, *pact, *old_act;
9965 
9966             if (arg2) {
9967                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9968                     return -TARGET_EFAULT;
9969                 act._sa_handler = old_act->_sa_handler;
9970                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9971                 act.sa_flags = old_act->sa_flags;
9972                 unlock_user_struct(old_act, arg2, 0);
9973                 pact = &act;
9974             } else {
9975                 pact = NULL;
9976             }
9977 
9978             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9979 
9980             if (!is_error(ret) && arg3) {
9981                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9982                     return -TARGET_EFAULT;
9983                 old_act->_sa_handler = oact._sa_handler;
9984                 old_act->sa_flags = oact.sa_flags;
9985                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9986                 old_act->sa_mask.sig[1] = 0;
9987                 old_act->sa_mask.sig[2] = 0;
9988                 old_act->sa_mask.sig[3] = 0;
9989                 unlock_user_struct(old_act, arg3, 1);
9990             }
9991 #else
9992             struct target_old_sigaction *old_act;
9993             struct target_sigaction act, oact, *pact;
9994             if (arg2) {
9995                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9996                     return -TARGET_EFAULT;
9997                 act._sa_handler = old_act->_sa_handler;
9998                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9999                 act.sa_flags = old_act->sa_flags;
10000 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10001                 act.sa_restorer = old_act->sa_restorer;
10002 #endif
10003                 unlock_user_struct(old_act, arg2, 0);
10004                 pact = &act;
10005             } else {
10006                 pact = NULL;
10007             }
10008             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10009             if (!is_error(ret) && arg3) {
10010                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10011                     return -TARGET_EFAULT;
10012                 old_act->_sa_handler = oact._sa_handler;
10013                 old_act->sa_mask = oact.sa_mask.sig[0];
10014                 old_act->sa_flags = oact.sa_flags;
10015 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10016                 old_act->sa_restorer = oact.sa_restorer;
10017 #endif
10018                 unlock_user_struct(old_act, arg3, 1);
10019             }
10020 #endif
10021         }
10022         return ret;
10023 #endif
10024     case TARGET_NR_rt_sigaction:
10025         {
10026             /*
10027              * For Alpha and SPARC this is a 5 argument syscall, with
10028              * a 'restorer' parameter which must be copied into the
10029              * sa_restorer field of the sigaction struct.
10030              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10031              * and arg5 is the sigsetsize.
10032              */
10033 #if defined(TARGET_ALPHA)
10034             target_ulong sigsetsize = arg4;
10035             target_ulong restorer = arg5;
10036 #elif defined(TARGET_SPARC)
10037             target_ulong restorer = arg4;
10038             target_ulong sigsetsize = arg5;
10039 #else
10040             target_ulong sigsetsize = arg4;
10041             target_ulong restorer = 0;
10042 #endif
10043             struct target_sigaction *act = NULL;
10044             struct target_sigaction *oact = NULL;
10045 
10046             if (sigsetsize != sizeof(target_sigset_t)) {
10047                 return -TARGET_EINVAL;
10048             }
10049             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10050                 return -TARGET_EFAULT;
10051             }
10052             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10053                 ret = -TARGET_EFAULT;
10054             } else {
10055                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10056                 if (oact) {
10057                     unlock_user_struct(oact, arg3, 1);
10058                 }
10059             }
10060             if (act) {
10061                 unlock_user_struct(act, arg2, 0);
10062             }
10063         }
10064         return ret;
10065 #ifdef TARGET_NR_sgetmask /* not on alpha */
10066     case TARGET_NR_sgetmask:
10067         {
10068             sigset_t cur_set;
10069             abi_ulong target_set;
10070             ret = do_sigprocmask(0, NULL, &cur_set);
10071             if (!ret) {
10072                 host_to_target_old_sigset(&target_set, &cur_set);
10073                 ret = target_set;
10074             }
10075         }
10076         return ret;
10077 #endif
10078 #ifdef TARGET_NR_ssetmask /* not on alpha */
10079     case TARGET_NR_ssetmask:
10080         {
10081             sigset_t set, oset;
10082             abi_ulong target_set = arg1;
10083             target_to_host_old_sigset(&set, &target_set);
10084             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10085             if (!ret) {
10086                 host_to_target_old_sigset(&target_set, &oset);
10087                 ret = target_set;
10088             }
10089         }
10090         return ret;
10091 #endif
10092 #ifdef TARGET_NR_sigprocmask
10093     case TARGET_NR_sigprocmask:
10094         {
10095 #if defined(TARGET_ALPHA)
10096             sigset_t set, oldset;
10097             abi_ulong mask;
10098             int how;
10099 
10100             switch (arg1) {
10101             case TARGET_SIG_BLOCK:
10102                 how = SIG_BLOCK;
10103                 break;
10104             case TARGET_SIG_UNBLOCK:
10105                 how = SIG_UNBLOCK;
10106                 break;
10107             case TARGET_SIG_SETMASK:
10108                 how = SIG_SETMASK;
10109                 break;
10110             default:
10111                 return -TARGET_EINVAL;
10112             }
10113             mask = arg2;
10114             target_to_host_old_sigset(&set, &mask);
10115 
10116             ret = do_sigprocmask(how, &set, &oldset);
10117             if (!is_error(ret)) {
10118                 host_to_target_old_sigset(&mask, &oldset);
10119                 ret = mask;
10120                 cpu_env->ir[IR_V0] = 0; /* force no error */
10121             }
10122 #else
10123             sigset_t set, oldset, *set_ptr;
10124             int how;
10125 
10126             if (arg2) {
10127                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10128                 if (!p) {
10129                     return -TARGET_EFAULT;
10130                 }
10131                 target_to_host_old_sigset(&set, p);
10132                 unlock_user(p, arg2, 0);
10133                 set_ptr = &set;
10134                 switch (arg1) {
10135                 case TARGET_SIG_BLOCK:
10136                     how = SIG_BLOCK;
10137                     break;
10138                 case TARGET_SIG_UNBLOCK:
10139                     how = SIG_UNBLOCK;
10140                     break;
10141                 case TARGET_SIG_SETMASK:
10142                     how = SIG_SETMASK;
10143                     break;
10144                 default:
10145                     return -TARGET_EINVAL;
10146                 }
10147             } else {
10148                 how = 0;
10149                 set_ptr = NULL;
10150             }
10151             ret = do_sigprocmask(how, set_ptr, &oldset);
10152             if (!is_error(ret) && arg3) {
10153                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10154                     return -TARGET_EFAULT;
10155                 host_to_target_old_sigset(p, &oldset);
10156                 unlock_user(p, arg3, sizeof(target_sigset_t));
10157             }
10158 #endif
10159         }
10160         return ret;
10161 #endif
10162     case TARGET_NR_rt_sigprocmask:
10163         {
10164             int how = arg1;
10165             sigset_t set, oldset, *set_ptr;
10166 
10167             if (arg4 != sizeof(target_sigset_t)) {
10168                 return -TARGET_EINVAL;
10169             }
10170 
10171             if (arg2) {
10172                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10173                 if (!p) {
10174                     return -TARGET_EFAULT;
10175                 }
10176                 target_to_host_sigset(&set, p);
10177                 unlock_user(p, arg2, 0);
10178                 set_ptr = &set;
10179                 switch(how) {
10180                 case TARGET_SIG_BLOCK:
10181                     how = SIG_BLOCK;
10182                     break;
10183                 case TARGET_SIG_UNBLOCK:
10184                     how = SIG_UNBLOCK;
10185                     break;
10186                 case TARGET_SIG_SETMASK:
10187                     how = SIG_SETMASK;
10188                     break;
10189                 default:
10190                     return -TARGET_EINVAL;
10191                 }
10192             } else {
10193                 how = 0;
10194                 set_ptr = NULL;
10195             }
10196             ret = do_sigprocmask(how, set_ptr, &oldset);
10197             if (!is_error(ret) && arg3) {
10198                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10199                     return -TARGET_EFAULT;
10200                 host_to_target_sigset(p, &oldset);
10201                 unlock_user(p, arg3, sizeof(target_sigset_t));
10202             }
10203         }
10204         return ret;
10205 #ifdef TARGET_NR_sigpending
10206     case TARGET_NR_sigpending:
10207         {
10208             sigset_t set;
10209             ret = get_errno(sigpending(&set));
10210             if (!is_error(ret)) {
10211                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10212                     return -TARGET_EFAULT;
10213                 host_to_target_old_sigset(p, &set);
10214                 unlock_user(p, arg1, sizeof(target_sigset_t));
10215             }
10216         }
10217         return ret;
10218 #endif
10219     case TARGET_NR_rt_sigpending:
10220         {
10221             sigset_t set;
10222 
10223             /* Yes, this check is >, not != like most. We follow the kernel's
10224              * logic here: the kernel implements NR_sigpending through the
10225              * same code path, and in that case the old_sigset_t is smaller
10226              * in size.
10227              */
10228             if (arg2 > sizeof(target_sigset_t)) {
10229                 return -TARGET_EINVAL;
10230             }
10231 
10232             ret = get_errno(sigpending(&set));
10233             if (!is_error(ret)) {
10234                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10235                     return -TARGET_EFAULT;
10236                 host_to_target_sigset(p, &set);
10237                 unlock_user(p, arg1, sizeof(target_sigset_t));
10238             }
10239         }
10240         return ret;
10241 #ifdef TARGET_NR_sigsuspend
10242     case TARGET_NR_sigsuspend:
10243         {
10244             sigset_t *set;
10245 
10246 #if defined(TARGET_ALPHA)
10247             TaskState *ts = get_task_state(cpu);
10248             /* target_to_host_old_sigset will bswap back */
10249             abi_ulong mask = tswapal(arg1);
10250             set = &ts->sigsuspend_mask;
10251             target_to_host_old_sigset(set, &mask);
10252 #else
10253             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10254             if (ret != 0) {
10255                 return ret;
10256             }
10257 #endif
10258             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10259             finish_sigsuspend_mask(ret);
10260         }
10261         return ret;
10262 #endif
10263     case TARGET_NR_rt_sigsuspend:
10264         {
10265             sigset_t *set;
10266 
10267             ret = process_sigsuspend_mask(&set, arg1, arg2);
10268             if (ret != 0) {
10269                 return ret;
10270             }
10271             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10272             finish_sigsuspend_mask(ret);
10273         }
10274         return ret;
10275 #ifdef TARGET_NR_rt_sigtimedwait
10276     case TARGET_NR_rt_sigtimedwait:
10277         {
10278             sigset_t set;
10279             struct timespec uts, *puts;
10280             siginfo_t uinfo;
10281 
10282             if (arg4 != sizeof(target_sigset_t)) {
10283                 return -TARGET_EINVAL;
10284             }
10285 
10286             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10287                 return -TARGET_EFAULT;
10288             target_to_host_sigset(&set, p);
10289             unlock_user(p, arg1, 0);
10290             if (arg3) {
10291                 puts = &uts;
10292                 if (target_to_host_timespec(puts, arg3)) {
10293                     return -TARGET_EFAULT;
10294                 }
10295             } else {
10296                 puts = NULL;
10297             }
10298             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10299                                                  SIGSET_T_SIZE));
10300             if (!is_error(ret)) {
10301                 if (arg2) {
10302                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10303                                   0);
10304                     if (!p) {
10305                         return -TARGET_EFAULT;
10306                     }
10307                     host_to_target_siginfo(p, &uinfo);
10308                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10309                 }
10310                 ret = host_to_target_signal(ret);
10311             }
10312         }
10313         return ret;
10314 #endif
10315 #ifdef TARGET_NR_rt_sigtimedwait_time64
10316     case TARGET_NR_rt_sigtimedwait_time64:
10317         {
10318             sigset_t set;
10319             struct timespec uts, *puts;
10320             siginfo_t uinfo;
10321 
10322             if (arg4 != sizeof(target_sigset_t)) {
10323                 return -TARGET_EINVAL;
10324             }
10325 
10326             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10327             if (!p) {
10328                 return -TARGET_EFAULT;
10329             }
10330             target_to_host_sigset(&set, p);
10331             unlock_user(p, arg1, 0);
10332             if (arg3) {
10333                 puts = &uts;
10334                 if (target_to_host_timespec64(puts, arg3)) {
10335                     return -TARGET_EFAULT;
10336                 }
10337             } else {
10338                 puts = NULL;
10339             }
10340             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10341                                                  SIGSET_T_SIZE));
10342             if (!is_error(ret)) {
10343                 if (arg2) {
10344                     p = lock_user(VERIFY_WRITE, arg2,
10345                                   sizeof(target_siginfo_t), 0);
10346                     if (!p) {
10347                         return -TARGET_EFAULT;
10348                     }
10349                     host_to_target_siginfo(p, &uinfo);
10350                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10351                 }
10352                 ret = host_to_target_signal(ret);
10353             }
10354         }
10355         return ret;
10356 #endif
10357     case TARGET_NR_rt_sigqueueinfo:
10358         {
10359             siginfo_t uinfo;
10360 
10361             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10362             if (!p) {
10363                 return -TARGET_EFAULT;
10364             }
10365             target_to_host_siginfo(&uinfo, p);
10366             unlock_user(p, arg3, 0);
10367             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10368         }
10369         return ret;
10370     case TARGET_NR_rt_tgsigqueueinfo:
10371         {
10372             siginfo_t uinfo;
10373 
10374             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10375             if (!p) {
10376                 return -TARGET_EFAULT;
10377             }
10378             target_to_host_siginfo(&uinfo, p);
10379             unlock_user(p, arg4, 0);
10380             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10381         }
10382         return ret;
10383 #ifdef TARGET_NR_sigreturn
10384     case TARGET_NR_sigreturn:
10385         if (block_signals()) {
10386             return -QEMU_ERESTARTSYS;
10387         }
10388         return do_sigreturn(cpu_env);
10389 #endif
10390     case TARGET_NR_rt_sigreturn:
10391         if (block_signals()) {
10392             return -QEMU_ERESTARTSYS;
10393         }
10394         return do_rt_sigreturn(cpu_env);
10395     case TARGET_NR_sethostname:
10396         if (!(p = lock_user_string(arg1)))
10397             return -TARGET_EFAULT;
10398         ret = get_errno(sethostname(p, arg2));
10399         unlock_user(p, arg1, 0);
10400         return ret;
10401 #ifdef TARGET_NR_setrlimit
10402     case TARGET_NR_setrlimit:
10403         {
10404             int resource = target_to_host_resource(arg1);
10405             struct target_rlimit *target_rlim;
10406             struct rlimit rlim;
10407             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10408                 return -TARGET_EFAULT;
10409             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10410             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10411             unlock_user_struct(target_rlim, arg2, 0);
10412             /*
10413              * If we just passed through resource limit settings for memory then
10414              * they would also apply to QEMU's own allocations, and QEMU will
10415              * crash or hang or die if its allocations fail. Ideally we would
10416              * track the guest allocations in QEMU and apply the limits ourselves.
10417              * For now, just tell the guest the call succeeded but don't actually
10418              * limit anything.
10419              */
10420             if (resource != RLIMIT_AS &&
10421                 resource != RLIMIT_DATA &&
10422                 resource != RLIMIT_STACK) {
10423                 return get_errno(setrlimit(resource, &rlim));
10424             } else {
10425                 return 0;
10426             }
10427         }
10428 #endif
10429 #ifdef TARGET_NR_getrlimit
10430     case TARGET_NR_getrlimit:
10431         {
10432             int resource = target_to_host_resource(arg1);
10433             struct target_rlimit *target_rlim;
10434             struct rlimit rlim;
10435 
10436             ret = get_errno(getrlimit(resource, &rlim));
10437             if (!is_error(ret)) {
10438                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10439                     return -TARGET_EFAULT;
10440                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10441                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10442                 unlock_user_struct(target_rlim, arg2, 1);
10443             }
10444         }
10445         return ret;
10446 #endif
10447     case TARGET_NR_getrusage:
10448         {
10449             struct rusage rusage;
10450             ret = get_errno(getrusage(arg1, &rusage));
10451             if (!is_error(ret)) {
10452                 ret = host_to_target_rusage(arg2, &rusage);
10453             }
10454         }
10455         return ret;
10456 #if defined(TARGET_NR_gettimeofday)
10457     case TARGET_NR_gettimeofday:
10458         {
10459             struct timeval tv;
10460             struct timezone tz;
10461 
10462             ret = get_errno(gettimeofday(&tv, &tz));
10463             if (!is_error(ret)) {
10464                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10465                     return -TARGET_EFAULT;
10466                 }
10467                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10468                     return -TARGET_EFAULT;
10469                 }
10470             }
10471         }
10472         return ret;
10473 #endif
10474 #if defined(TARGET_NR_settimeofday)
10475     case TARGET_NR_settimeofday:
10476         {
10477             struct timeval tv, *ptv = NULL;
10478             struct timezone tz, *ptz = NULL;
10479 
10480             if (arg1) {
10481                 if (copy_from_user_timeval(&tv, arg1)) {
10482                     return -TARGET_EFAULT;
10483                 }
10484                 ptv = &tv;
10485             }
10486 
10487             if (arg2) {
10488                 if (copy_from_user_timezone(&tz, arg2)) {
10489                     return -TARGET_EFAULT;
10490                 }
10491                 ptz = &tz;
10492             }
10493 
10494             return get_errno(settimeofday(ptv, ptz));
10495         }
10496 #endif
10497 #if defined(TARGET_NR_select)
10498     case TARGET_NR_select:
10499 #if defined(TARGET_WANT_NI_OLD_SELECT)
10500         /* some architectures used to have old_select here
10501          * but now return ENOSYS for it.
10502          */
10503         ret = -TARGET_ENOSYS;
10504 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10505         ret = do_old_select(arg1);
10506 #else
10507         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10508 #endif
10509         return ret;
10510 #endif
10511 #ifdef TARGET_NR_pselect6
10512     case TARGET_NR_pselect6:
10513         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10514 #endif
10515 #ifdef TARGET_NR_pselect6_time64
10516     case TARGET_NR_pselect6_time64:
10517         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10518 #endif
10519 #ifdef TARGET_NR_symlink
10520     case TARGET_NR_symlink:
10521         {
10522             void *p2;
10523             p = lock_user_string(arg1);
10524             p2 = lock_user_string(arg2);
10525             if (!p || !p2)
10526                 ret = -TARGET_EFAULT;
10527             else
10528                 ret = get_errno(symlink(p, p2));
10529             unlock_user(p2, arg2, 0);
10530             unlock_user(p, arg1, 0);
10531         }
10532         return ret;
10533 #endif
10534 #if defined(TARGET_NR_symlinkat)
10535     case TARGET_NR_symlinkat:
10536         {
10537             void *p2;
10538             p  = lock_user_string(arg1);
10539             p2 = lock_user_string(arg3);
10540             if (!p || !p2)
10541                 ret = -TARGET_EFAULT;
10542             else
10543                 ret = get_errno(symlinkat(p, arg2, p2));
10544             unlock_user(p2, arg3, 0);
10545             unlock_user(p, arg1, 0);
10546         }
10547         return ret;
10548 #endif
10549 #ifdef TARGET_NR_readlink
10550     case TARGET_NR_readlink:
10551         {
10552             void *p2;
10553             p = lock_user_string(arg1);
10554             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10555             ret = get_errno(do_guest_readlink(p, p2, arg3));
10556             unlock_user(p2, arg2, ret);
10557             unlock_user(p, arg1, 0);
10558         }
10559         return ret;
10560 #endif
10561 #if defined(TARGET_NR_readlinkat)
10562     case TARGET_NR_readlinkat:
10563         {
10564             void *p2;
10565             p  = lock_user_string(arg2);
10566             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10567             if (!p || !p2) {
10568                 ret = -TARGET_EFAULT;
10569             } else if (!arg4) {
10570                 /* A zero-length buffer is invalid; handle it before the magic exe check. */
10571                 ret = -TARGET_EINVAL;
10572             } else if (is_proc_myself((const char *)p, "exe")) {
10573                 /*
10574                  * Don't worry about sign mismatch as earlier mapping
10575                  * logic would have thrown a bad address error.
10576                  */
10577                 ret = MIN(strlen(exec_path), arg4);
10578                 /* We cannot NUL terminate the string. */
10579                 memcpy(p2, exec_path, ret);
10580             } else {
10581                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10582             }
10583             unlock_user(p2, arg3, ret);
10584             unlock_user(p, arg2, 0);
10585         }
10586         return ret;
10587 #endif
10588 #ifdef TARGET_NR_swapon
10589     case TARGET_NR_swapon:
10590         if (!(p = lock_user_string(arg1)))
10591             return -TARGET_EFAULT;
10592         ret = get_errno(swapon(p, arg2));
10593         unlock_user(p, arg1, 0);
10594         return ret;
10595 #endif
10596     case TARGET_NR_reboot:
10597         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10598            /* arg4 must be ignored in all other cases */
10599            p = lock_user_string(arg4);
10600            if (!p) {
10601                return -TARGET_EFAULT;
10602            }
10603            ret = get_errno(reboot(arg1, arg2, arg3, p));
10604            unlock_user(p, arg4, 0);
10605         } else {
10606            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10607         }
10608         return ret;
10609 #ifdef TARGET_NR_mmap
10610     case TARGET_NR_mmap:
10611 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10612         {
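                  /*
                   * The old mmap ABI passes a single guest pointer to a
                   * block of six arguments rather than passing them in
                   * registers.
                   */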
10613             abi_ulong *v;
10614             abi_ulong v1, v2, v3, v4, v5, v6;
10615             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10616                 return -TARGET_EFAULT;
10617             v1 = tswapal(v[0]);
10618             v2 = tswapal(v[1]);
10619             v3 = tswapal(v[2]);
10620             v4 = tswapal(v[3]);
10621             v5 = tswapal(v[4]);
10622             v6 = tswapal(v[5]);
10623             unlock_user(v, arg1, 0);
10624             return do_mmap(v1, v2, v3, v4, v5, v6);
10625         }
10626 #else
10627         /* mmap pointers are always untagged */
10628         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10629 #endif
10630 #endif
10631 #ifdef TARGET_NR_mmap2
10632     case TARGET_NR_mmap2:
10633 #ifndef MMAP_SHIFT
10634 #define MMAP_SHIFT 12
10635 #endif
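              /*
               * The mmap2 offset argument is in units of (1 << MMAP_SHIFT)
               * bytes (4096 by default); scale it to a byte offset here.
               */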
10636         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10637                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10638 #endif
10639     case TARGET_NR_munmap:
10640         arg1 = cpu_untagged_addr(cpu, arg1);
10641         return get_errno(target_munmap(arg1, arg2));
10642     case TARGET_NR_mprotect:
10643         arg1 = cpu_untagged_addr(cpu, arg1);
10644         {
10645             TaskState *ts = get_task_state(cpu);
10646             /* Special hack to detect libc making the stack executable.  */
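                  /*
                   * PROT_GROWSDOWN means "apply the protection down to the
                   * start of the growsdown mapping", so widen the range to
                   * the stack limit and drop the flag.
                   */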
10647             if ((arg3 & PROT_GROWSDOWN)
10648                 && arg1 >= ts->info->stack_limit
10649                 && arg1 <= ts->info->start_stack) {
10650                 arg3 &= ~PROT_GROWSDOWN;
10651                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10652                 arg1 = ts->info->stack_limit;
10653             }
10654         }
10655         return get_errno(target_mprotect(arg1, arg2, arg3));
10656 #ifdef TARGET_NR_mremap
10657     case TARGET_NR_mremap:
10658         arg1 = cpu_untagged_addr(cpu, arg1);
10659         /* mremap new_addr (arg5) is always untagged */
10660         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10661 #endif
10662         /* ??? msync/mlock/munlock are broken for softmmu.  */
10663 #ifdef TARGET_NR_msync
10664     case TARGET_NR_msync:
10665         return get_errno(msync(g2h(cpu, arg1), arg2,
10666                                target_to_host_msync_arg(arg3)));
10667 #endif
10668 #ifdef TARGET_NR_mlock
10669     case TARGET_NR_mlock:
10670         return get_errno(mlock(g2h(cpu, arg1), arg2));
10671 #endif
10672 #ifdef TARGET_NR_munlock
10673     case TARGET_NR_munlock:
10674         return get_errno(munlock(g2h(cpu, arg1), arg2));
10675 #endif
10676 #ifdef TARGET_NR_mlockall
10677     case TARGET_NR_mlockall:
10678         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10679 #endif
10680 #ifdef TARGET_NR_munlockall
10681     case TARGET_NR_munlockall:
10682         return get_errno(munlockall());
10683 #endif
10684 #ifdef TARGET_NR_truncate
10685     case TARGET_NR_truncate:
10686         if (!(p = lock_user_string(arg1)))
10687             return -TARGET_EFAULT;
10688         ret = get_errno(truncate(p, arg2));
10689         unlock_user(p, arg1, 0);
10690         return ret;
10691 #endif
10692 #ifdef TARGET_NR_ftruncate
10693     case TARGET_NR_ftruncate:
10694         return get_errno(ftruncate(arg1, arg2));
10695 #endif
10696     case TARGET_NR_fchmod:
10697         return get_errno(fchmod(arg1, arg2));
10698 #if defined(TARGET_NR_fchmodat)
10699     case TARGET_NR_fchmodat:
10700         if (!(p = lock_user_string(arg2)))
10701             return -TARGET_EFAULT;
10702         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10703         unlock_user(p, arg2, 0);
10704         return ret;
10705 #endif
10706     case TARGET_NR_getpriority:
10707         /* Note that negative values are valid for getpriority, so we must
10708            differentiate based on errno settings.  */
10709         errno = 0;
10710         ret = getpriority(arg1, arg2);
10711         if (ret == -1 && errno != 0) {
10712             return -host_to_target_errno(errno);
10713         }
10714 #ifdef TARGET_ALPHA
10715         /* Return value is the unbiased priority.  Signal no error.  */
10716         cpu_env->ir[IR_V0] = 0;
10717 #else
10718         /* Return value is a biased priority to avoid negative numbers.  */
10719         ret = 20 - ret;
10720 #endif
10721         return ret;
10722     case TARGET_NR_setpriority:
10723         return get_errno(setpriority(arg1, arg2, arg3));
10724 #ifdef TARGET_NR_statfs
10725     case TARGET_NR_statfs:
10726         if (!(p = lock_user_string(arg1))) {
10727             return -TARGET_EFAULT;
10728         }
10729         ret = get_errno(statfs(path(p), &stfs));
10730         unlock_user(p, arg1, 0);
10731     convert_statfs:
10732         if (!is_error(ret)) {
10733             struct target_statfs *target_stfs;
10734 
10735             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10736                 return -TARGET_EFAULT;
10737             __put_user(stfs.f_type, &target_stfs->f_type);
10738             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10739             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10740             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10741             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10742             __put_user(stfs.f_files, &target_stfs->f_files);
10743             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10744             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10745             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10746             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10747             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10748 #ifdef _STATFS_F_FLAGS
10749             __put_user(stfs.f_flags, &target_stfs->f_flags);
10750 #else
10751             __put_user(0, &target_stfs->f_flags);
10752 #endif
10753             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10754             unlock_user_struct(target_stfs, arg2, 1);
10755         }
10756         return ret;
10757 #endif
10758 #ifdef TARGET_NR_fstatfs
10759     case TARGET_NR_fstatfs:
10760         ret = get_errno(fstatfs(arg1, &stfs));
10761         goto convert_statfs;
10762 #endif
10763 #ifdef TARGET_NR_statfs64
10764     case TARGET_NR_statfs64:
10765         if (!(p = lock_user_string(arg1))) {
10766             return -TARGET_EFAULT;
10767         }
10768         ret = get_errno(statfs(path(p), &stfs));
10769         unlock_user(p, arg1, 0);
10770     convert_statfs64:
10771         if (!is_error(ret)) {
10772             struct target_statfs64 *target_stfs;
10773 
10774             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10775                 return -TARGET_EFAULT;
10776             __put_user(stfs.f_type, &target_stfs->f_type);
10777             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10778             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10779             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10780             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10781             __put_user(stfs.f_files, &target_stfs->f_files);
10782             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10783             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10784             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10785             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10786             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10787 #ifdef _STATFS_F_FLAGS
10788             __put_user(stfs.f_flags, &target_stfs->f_flags);
10789 #else
10790             __put_user(0, &target_stfs->f_flags);
10791 #endif
10792             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10793             unlock_user_struct(target_stfs, arg3, 1);
10794         }
10795         return ret;
10796     case TARGET_NR_fstatfs64:
10797         ret = get_errno(fstatfs(arg1, &stfs));
10798         goto convert_statfs64;
10799 #endif
10800 #ifdef TARGET_NR_socketcall
10801     case TARGET_NR_socketcall:
10802         return do_socketcall(arg1, arg2);
10803 #endif
10804 #ifdef TARGET_NR_accept
10805     case TARGET_NR_accept:
10806         return do_accept4(arg1, arg2, arg3, 0);
10807 #endif
10808 #ifdef TARGET_NR_accept4
10809     case TARGET_NR_accept4:
10810         return do_accept4(arg1, arg2, arg3, arg4);
10811 #endif
10812 #ifdef TARGET_NR_bind
10813     case TARGET_NR_bind:
10814         return do_bind(arg1, arg2, arg3);
10815 #endif
10816 #ifdef TARGET_NR_connect
10817     case TARGET_NR_connect:
10818         return do_connect(arg1, arg2, arg3);
10819 #endif
10820 #ifdef TARGET_NR_getpeername
10821     case TARGET_NR_getpeername:
10822         return do_getpeername(arg1, arg2, arg3);
10823 #endif
10824 #ifdef TARGET_NR_getsockname
10825     case TARGET_NR_getsockname:
10826         return do_getsockname(arg1, arg2, arg3);
10827 #endif
10828 #ifdef TARGET_NR_getsockopt
10829     case TARGET_NR_getsockopt:
10830         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10831 #endif
10832 #ifdef TARGET_NR_listen
10833     case TARGET_NR_listen:
10834         return get_errno(listen(arg1, arg2));
10835 #endif
10836 #ifdef TARGET_NR_recv
10837     case TARGET_NR_recv:
10838         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10839 #endif
10840 #ifdef TARGET_NR_recvfrom
10841     case TARGET_NR_recvfrom:
10842         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10843 #endif
10844 #ifdef TARGET_NR_recvmsg
10845     case TARGET_NR_recvmsg:
10846         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10847 #endif
10848 #ifdef TARGET_NR_send
10849     case TARGET_NR_send:
10850         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10851 #endif
10852 #ifdef TARGET_NR_sendmsg
10853     case TARGET_NR_sendmsg:
10854         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10855 #endif
10856 #ifdef TARGET_NR_sendmmsg
10857     case TARGET_NR_sendmmsg:
10858         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10859 #endif
10860 #ifdef TARGET_NR_recvmmsg
10861     case TARGET_NR_recvmmsg:
10862         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10863 #endif
10864 #ifdef TARGET_NR_sendto
10865     case TARGET_NR_sendto:
10866         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10867 #endif
10868 #ifdef TARGET_NR_shutdown
10869     case TARGET_NR_shutdown:
10870         return get_errno(shutdown(arg1, arg2));
10871 #endif
10872 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10873     case TARGET_NR_getrandom:
10874         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10875         if (!p) {
10876             return -TARGET_EFAULT;
10877         }
10878         ret = get_errno(getrandom(p, arg2, arg3));
10879         unlock_user(p, arg1, ret);
10880         return ret;
10881 #endif
10882 #ifdef TARGET_NR_socket
10883     case TARGET_NR_socket:
10884         return do_socket(arg1, arg2, arg3);
10885 #endif
10886 #ifdef TARGET_NR_socketpair
10887     case TARGET_NR_socketpair:
10888         return do_socketpair(arg1, arg2, arg3, arg4);
10889 #endif
10890 #ifdef TARGET_NR_setsockopt
10891     case TARGET_NR_setsockopt:
10892         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10893 #endif
10894 #if defined(TARGET_NR_syslog)
10895     case TARGET_NR_syslog:
10896         {
10897             int len = arg3;
10898 
10899             switch (arg1) {
10900             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10901             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10902             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10903             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10904             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10905             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10906             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10907             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10908                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10909             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10910             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10911             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10912                 {
10913                     if (len < 0) {
10914                         return -TARGET_EINVAL;
10915                     }
10916                     if (len == 0) {
10917                         return 0;
10918                     }
10919                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10920                     if (!p) {
10921                         return -TARGET_EFAULT;
10922                     }
10923                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10924                     unlock_user(p, arg2, arg3);
10925                 }
10926                 return ret;
10927             default:
10928                 return -TARGET_EINVAL;
10929             }
10930         }
10931         break;
10932 #endif
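    /*
     * The guest's struct itimerval is handled as two consecutive
     * target_timevals (it_interval then it_value), which is why the
     * copies below step by sizeof(struct target_timeval).
     */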
10933     case TARGET_NR_setitimer:
10934         {
10935             struct itimerval value, ovalue, *pvalue;
10936 
10937             if (arg2) {
10938                 pvalue = &value;
10939                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10940                     || copy_from_user_timeval(&pvalue->it_value,
10941                                               arg2 + sizeof(struct target_timeval)))
10942                     return -TARGET_EFAULT;
10943             } else {
10944                 pvalue = NULL;
10945             }
10946             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10947             if (!is_error(ret) && arg3) {
10948                 if (copy_to_user_timeval(arg3,
10949                                          &ovalue.it_interval)
10950                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10951                                             &ovalue.it_value))
10952                     return -TARGET_EFAULT;
10953             }
10954         }
10955         return ret;
10956     case TARGET_NR_getitimer:
10957         {
10958             struct itimerval value;
10959 
10960             ret = get_errno(getitimer(arg1, &value));
10961             if (!is_error(ret) && arg2) {
10962                 if (copy_to_user_timeval(arg2,
10963                                          &value.it_interval)
10964                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10965                                             &value.it_value))
10966                     return -TARGET_EFAULT;
10967             }
10968         }
10969         return ret;
10970 #ifdef TARGET_NR_stat
10971     case TARGET_NR_stat:
10972         if (!(p = lock_user_string(arg1))) {
10973             return -TARGET_EFAULT;
10974         }
10975         ret = get_errno(stat(path(p), &st));
10976         unlock_user(p, arg1, 0);
10977         goto do_stat;
10978 #endif
10979 #ifdef TARGET_NR_lstat
10980     case TARGET_NR_lstat:
10981         if (!(p = lock_user_string(arg1))) {
10982             return -TARGET_EFAULT;
10983         }
10984         ret = get_errno(lstat(path(p), &st));
10985         unlock_user(p, arg1, 0);
10986         goto do_stat;
10987 #endif
10988 #ifdef TARGET_NR_fstat
10989     case TARGET_NR_fstat:
10990         {
10991             ret = get_errno(fstat(arg1, &st));
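            /*
             * stat and lstat above jump here so that all three calls share
             * the conversion into the guest's struct target_stat.
             */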
10992 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10993         do_stat:
10994 #endif
10995             if (!is_error(ret)) {
10996                 struct target_stat *target_st;
10997 
10998                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10999                     return -TARGET_EFAULT;
11000                 memset(target_st, 0, sizeof(*target_st));
11001                 __put_user(st.st_dev, &target_st->st_dev);
11002                 __put_user(st.st_ino, &target_st->st_ino);
11003                 __put_user(st.st_mode, &target_st->st_mode);
11004                 __put_user(st.st_uid, &target_st->st_uid);
11005                 __put_user(st.st_gid, &target_st->st_gid);
11006                 __put_user(st.st_nlink, &target_st->st_nlink);
11007                 __put_user(st.st_rdev, &target_st->st_rdev);
11008                 __put_user(st.st_size, &target_st->st_size);
11009                 __put_user(st.st_blksize, &target_st->st_blksize);
11010                 __put_user(st.st_blocks, &target_st->st_blocks);
11011                 __put_user(st.st_atime, &target_st->target_st_atime);
11012                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11013                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11014 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11015                 __put_user(st.st_atim.tv_nsec,
11016                            &target_st->target_st_atime_nsec);
11017                 __put_user(st.st_mtim.tv_nsec,
11018                            &target_st->target_st_mtime_nsec);
11019                 __put_user(st.st_ctim.tv_nsec,
11020                            &target_st->target_st_ctime_nsec);
11021 #endif
11022                 unlock_user_struct(target_st, arg2, 1);
11023             }
11024         }
11025         return ret;
11026 #endif
11027     case TARGET_NR_vhangup:
11028         return get_errno(vhangup());
11029 #ifdef TARGET_NR_syscall
11030     case TARGET_NR_syscall:
11031         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11032                           arg6, arg7, arg8, 0);
11033 #endif
11034 #if defined(TARGET_NR_wait4)
11035     case TARGET_NR_wait4:
11036         {
11037             int status;
11038             abi_long status_ptr = arg2;
11039             struct rusage rusage, *rusage_ptr;
11040             abi_ulong target_rusage = arg4;
11041             abi_long rusage_err;
11042             if (target_rusage)
11043                 rusage_ptr = &rusage;
11044             else
11045                 rusage_ptr = NULL;
11046             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11047             if (!is_error(ret)) {
11048                 if (status_ptr && ret) {
11049                     status = host_to_target_waitstatus(status);
11050                     if (put_user_s32(status, status_ptr))
11051                         return -TARGET_EFAULT;
11052                 }
11053                 if (target_rusage) {
11054                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11055                     if (rusage_err) {
11056                         ret = rusage_err;
11057                     }
11058                 }
11059             }
11060         }
11061         return ret;
11062 #endif
11063 #ifdef TARGET_NR_swapoff
11064     case TARGET_NR_swapoff:
11065         if (!(p = lock_user_string(arg1)))
11066             return -TARGET_EFAULT;
11067         ret = get_errno(swapoff(p));
11068         unlock_user(p, arg1, 0);
11069         return ret;
11070 #endif
11071     case TARGET_NR_sysinfo:
11072         {
11073             struct target_sysinfo *target_value;
11074             struct sysinfo value;
11075             ret = get_errno(sysinfo(&value));
11076             if (!is_error(ret) && arg1)
11077             {
11078                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11079                     return -TARGET_EFAULT;
11080                 __put_user(value.uptime, &target_value->uptime);
11081                 __put_user(value.loads[0], &target_value->loads[0]);
11082                 __put_user(value.loads[1], &target_value->loads[1]);
11083                 __put_user(value.loads[2], &target_value->loads[2]);
11084                 __put_user(value.totalram, &target_value->totalram);
11085                 __put_user(value.freeram, &target_value->freeram);
11086                 __put_user(value.sharedram, &target_value->sharedram);
11087                 __put_user(value.bufferram, &target_value->bufferram);
11088                 __put_user(value.totalswap, &target_value->totalswap);
11089                 __put_user(value.freeswap, &target_value->freeswap);
11090                 __put_user(value.procs, &target_value->procs);
11091                 __put_user(value.totalhigh, &target_value->totalhigh);
11092                 __put_user(value.freehigh, &target_value->freehigh);
11093                 __put_user(value.mem_unit, &target_value->mem_unit);
11094                 unlock_user_struct(target_value, arg1, 1);
11095             }
11096         }
11097         return ret;
11098 #ifdef TARGET_NR_ipc
11099     case TARGET_NR_ipc:
11100         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11101 #endif
11102 #ifdef TARGET_NR_semget
11103     case TARGET_NR_semget:
11104         return get_errno(semget(arg1, arg2, arg3));
11105 #endif
11106 #ifdef TARGET_NR_semop
11107     case TARGET_NR_semop:
11108         return do_semtimedop(arg1, arg2, arg3, 0, false);
11109 #endif
11110 #ifdef TARGET_NR_semtimedop
11111     case TARGET_NR_semtimedop:
11112         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11113 #endif
11114 #ifdef TARGET_NR_semtimedop_time64
11115     case TARGET_NR_semtimedop_time64:
11116         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11117 #endif
11118 #ifdef TARGET_NR_semctl
11119     case TARGET_NR_semctl:
11120         return do_semctl(arg1, arg2, arg3, arg4);
11121 #endif
11122 #ifdef TARGET_NR_msgctl
11123     case TARGET_NR_msgctl:
11124         return do_msgctl(arg1, arg2, arg3);
11125 #endif
11126 #ifdef TARGET_NR_msgget
11127     case TARGET_NR_msgget:
11128         return get_errno(msgget(arg1, arg2));
11129 #endif
11130 #ifdef TARGET_NR_msgrcv
11131     case TARGET_NR_msgrcv:
11132         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11133 #endif
11134 #ifdef TARGET_NR_msgsnd
11135     case TARGET_NR_msgsnd:
11136         return do_msgsnd(arg1, arg2, arg3, arg4);
11137 #endif
11138 #ifdef TARGET_NR_shmget
11139     case TARGET_NR_shmget:
11140         return get_errno(shmget(arg1, arg2, arg3));
11141 #endif
11142 #ifdef TARGET_NR_shmctl
11143     case TARGET_NR_shmctl:
11144         return do_shmctl(arg1, arg2, arg3);
11145 #endif
11146 #ifdef TARGET_NR_shmat
11147     case TARGET_NR_shmat:
11148         return target_shmat(cpu_env, arg1, arg2, arg3);
11149 #endif
11150 #ifdef TARGET_NR_shmdt
11151     case TARGET_NR_shmdt:
11152         return target_shmdt(arg1);
11153 #endif
11154     case TARGET_NR_fsync:
11155         return get_errno(fsync(arg1));
11156     case TARGET_NR_clone:
11157         /* Linux manages to have three different orderings for its
11158          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11159          * match the kernel's CONFIG_CLONE_* settings.
11160          * Microblaze is further special in that it uses a sixth
11161          * implicit argument to clone for the TLS pointer.
11162          */
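        /*
         * do_fork() takes flags, child sp, parent_tidptr, tls and
         * child_tidptr (in that order) after cpu_env; the variants below
         * only reshuffle which guest argument each of those comes from,
         * e.g. CLONE_BACKWARDS2 swaps the first two so the flags arrive
         * in arg2.
         */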
11163 #if defined(TARGET_MICROBLAZE)
11164         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11165 #elif defined(TARGET_CLONE_BACKWARDS)
11166         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11167 #elif defined(TARGET_CLONE_BACKWARDS2)
11168         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11169 #else
11170         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11171 #endif
11172         return ret;
11173 #ifdef __NR_exit_group
11174         /* new thread calls */
11175     case TARGET_NR_exit_group:
11176         preexit_cleanup(cpu_env, arg1);
11177         return get_errno(exit_group(arg1));
11178 #endif
11179     case TARGET_NR_setdomainname:
11180         if (!(p = lock_user_string(arg1)))
11181             return -TARGET_EFAULT;
11182         ret = get_errno(setdomainname(p, arg2));
11183         unlock_user(p, arg1, 0);
11184         return ret;
11185     case TARGET_NR_uname:
11186         /* no need to transcode because we use the linux syscall */
11187         {
11188             struct new_utsname * buf;
11189 
11190             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11191                 return -TARGET_EFAULT;
11192             ret = get_errno(sys_uname(buf));
11193             if (!is_error(ret)) {
11194                 /* Overwrite the native machine name with whatever is being
11195                    emulated. */
11196                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11197                           sizeof(buf->machine));
11198                 /* Allow the user to override the reported release.  */
11199                 if (qemu_uname_release && *qemu_uname_release) {
11200                     g_strlcpy(buf->release, qemu_uname_release,
11201                               sizeof(buf->release));
11202                 }
11203             }
11204             unlock_user_struct(buf, arg1, 1);
11205         }
11206         return ret;
11207 #ifdef TARGET_I386
11208     case TARGET_NR_modify_ldt:
11209         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11210 #if !defined(TARGET_X86_64)
11211     case TARGET_NR_vm86:
11212         return do_vm86(cpu_env, arg1, arg2);
11213 #endif
11214 #endif
11215 #if defined(TARGET_NR_adjtimex)
11216     case TARGET_NR_adjtimex:
11217         {
11218             struct timex host_buf;
11219 
11220             if (target_to_host_timex(&host_buf, arg1) != 0) {
11221                 return -TARGET_EFAULT;
11222             }
11223             ret = get_errno(adjtimex(&host_buf));
11224             if (!is_error(ret)) {
11225                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11226                     return -TARGET_EFAULT;
11227                 }
11228             }
11229         }
11230         return ret;
11231 #endif
11232 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11233     case TARGET_NR_clock_adjtime:
11234         {
11235             struct timex htx;
11236 
11237             if (target_to_host_timex(&htx, arg2) != 0) {
11238                 return -TARGET_EFAULT;
11239             }
11240             ret = get_errno(clock_adjtime(arg1, &htx));
11241             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11242                 return -TARGET_EFAULT;
11243             }
11244         }
11245         return ret;
11246 #endif
11247 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11248     case TARGET_NR_clock_adjtime64:
11249         {
11250             struct timex htx;
11251 
11252             if (target_to_host_timex64(&htx, arg2) != 0) {
11253                 return -TARGET_EFAULT;
11254             }
11255             ret = get_errno(clock_adjtime(arg1, &htx));
11256             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11257                 return -TARGET_EFAULT;
11258             }
11259         }
11260         return ret;
11261 #endif
11262     case TARGET_NR_getpgid:
11263         return get_errno(getpgid(arg1));
11264     case TARGET_NR_fchdir:
11265         return get_errno(fchdir(arg1));
11266     case TARGET_NR_personality:
11267         return get_errno(personality(arg1));
11268 #ifdef TARGET_NR__llseek /* Not on alpha */
11269     case TARGET_NR__llseek:
11270         {
11271             int64_t res;
11272 #if !defined(__NR_llseek)
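            /*
             * 64-bit hosts have no llseek syscall, so reassemble the
             * offset from its two 32-bit halves and use plain lseek().
             */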
11273             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11274             if (res == -1) {
11275                 ret = get_errno(res);
11276             } else {
11277                 ret = 0;
11278             }
11279 #else
11280             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11281 #endif
11282             if ((ret == 0) && put_user_s64(res, arg4)) {
11283                 return -TARGET_EFAULT;
11284             }
11285         }
11286         return ret;
11287 #endif
11288 #ifdef TARGET_NR_getdents
11289     case TARGET_NR_getdents:
11290         return do_getdents(arg1, arg2, arg3);
11291 #endif /* TARGET_NR_getdents */
11292 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11293     case TARGET_NR_getdents64:
11294         return do_getdents64(arg1, arg2, arg3);
11295 #endif /* TARGET_NR_getdents64 */
11296 #if defined(TARGET_NR__newselect)
11297     case TARGET_NR__newselect:
11298         return do_select(arg1, arg2, arg3, arg4, arg5);
11299 #endif
11300 #ifdef TARGET_NR_poll
11301     case TARGET_NR_poll:
11302         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11303 #endif
11304 #ifdef TARGET_NR_ppoll
11305     case TARGET_NR_ppoll:
11306         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11307 #endif
11308 #ifdef TARGET_NR_ppoll_time64
11309     case TARGET_NR_ppoll_time64:
11310         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11311 #endif
11312     case TARGET_NR_flock:
11313         /* NOTE: the flock constant seems to be the same for every
11314            Linux platform */
11315         return get_errno(safe_flock(arg1, arg2));
11316     case TARGET_NR_readv:
11317         {
11318             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11319             if (vec != NULL) {
11320                 ret = get_errno(safe_readv(arg1, vec, arg3));
11321                 unlock_iovec(vec, arg2, arg3, 1);
11322             } else {
11323                 ret = -host_to_target_errno(errno);
11324             }
11325         }
11326         return ret;
11327     case TARGET_NR_writev:
11328         {
11329             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11330             if (vec != NULL) {
11331                 ret = get_errno(safe_writev(arg1, vec, arg3));
11332                 unlock_iovec(vec, arg2, arg3, 0);
11333             } else {
11334                 ret = -host_to_target_errno(errno);
11335             }
11336         }
11337         return ret;
11338 #if defined(TARGET_NR_preadv)
11339     case TARGET_NR_preadv:
11340         {
11341             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11342             if (vec != NULL) {
11343                 unsigned long low, high;
11344 
11345                 target_to_host_low_high(arg4, arg5, &low, &high);
11346                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11347                 unlock_iovec(vec, arg2, arg3, 1);
11348             } else {
11349                 ret = -host_to_target_errno(errno);
11350             }
11351         }
11352         return ret;
11353 #endif
11354 #if defined(TARGET_NR_pwritev)
11355     case TARGET_NR_pwritev:
11356         {
11357             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11358             if (vec != NULL) {
11359                 unsigned long low, high;
11360 
11361                 target_to_host_low_high(arg4, arg5, &low, &high);
11362                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11363                 unlock_iovec(vec, arg2, arg3, 0);
11364             } else {
11365                 ret = -host_to_target_errno(errno);
11366             }
11367         }
11368         return ret;
11369 #endif
11370     case TARGET_NR_getsid:
11371         return get_errno(getsid(arg1));
11372 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11373     case TARGET_NR_fdatasync:
11374         return get_errno(fdatasync(arg1));
11375 #endif
11376     case TARGET_NR_sched_getaffinity:
11377         {
11378             unsigned int mask_size;
11379             unsigned long *mask;
11380 
11381             /*
11382              * sched_getaffinity needs multiples of ulong, so we need to take
11383              * care of mismatches between target ulong and host ulong sizes.
11384              */
11385             if (arg2 & (sizeof(abi_ulong) - 1)) {
11386                 return -TARGET_EINVAL;
11387             }
11388             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11389 
11390             mask = alloca(mask_size);
11391             memset(mask, 0, mask_size);
11392             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11393 
11394             if (!is_error(ret)) {
11395                 if (ret > arg2) {
11396                     /* More data returned than will fit in the caller's buffer.
11397                      * This only happens if sizeof(abi_long) < sizeof(long)
11398                      * and the caller passed us a buffer holding an odd number
11399                      * of abi_longs. If the host kernel is actually using the
11400                      * extra 4 bytes then fail EINVAL; otherwise we can just
11401                      * ignore them and only copy the interesting part.
11402                      */
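                    /*
                     * e.g. a 32-bit guest passing arg2 == 12 on a 64-bit
                     * host gets mask_size == 16; if the kernel then reports
                     * 16 bytes we only care whether CPUs 96 and above
                     * actually exist.
                     */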
11403                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11404                     if (numcpus > arg2 * 8) {
11405                         return -TARGET_EINVAL;
11406                     }
11407                     ret = arg2;
11408                 }
11409 
11410                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11411                     return -TARGET_EFAULT;
11412                 }
11413             }
11414         }
11415         return ret;
11416     case TARGET_NR_sched_setaffinity:
11417         {
11418             unsigned int mask_size;
11419             unsigned long *mask;
11420 
11421             /*
11422              * sched_setaffinity needs multiples of ulong, so we need to take
11423              * care of mismatches between target ulong and host ulong sizes.
11424              */
11425             if (arg2 & (sizeof(abi_ulong) - 1)) {
11426                 return -TARGET_EINVAL;
11427             }
11428             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11429             mask = alloca(mask_size);
11430 
11431             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11432             if (ret) {
11433                 return ret;
11434             }
11435 
11436             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11437         }
11438     case TARGET_NR_getcpu:
11439         {
11440             unsigned cpuid, node;
11441             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11442                                        arg2 ? &node : NULL,
11443                                        NULL));
11444             if (is_error(ret)) {
11445                 return ret;
11446             }
11447             if (arg1 && put_user_u32(cpuid, arg1)) {
11448                 return -TARGET_EFAULT;
11449             }
11450             if (arg2 && put_user_u32(node, arg2)) {
11451                 return -TARGET_EFAULT;
11452             }
11453         }
11454         return ret;
11455     case TARGET_NR_sched_setparam:
11456         {
11457             struct target_sched_param *target_schp;
11458             struct sched_param schp;
11459 
11460             if (arg2 == 0) {
11461                 return -TARGET_EINVAL;
11462             }
11463             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11464                 return -TARGET_EFAULT;
11465             }
11466             schp.sched_priority = tswap32(target_schp->sched_priority);
11467             unlock_user_struct(target_schp, arg2, 0);
11468             return get_errno(sys_sched_setparam(arg1, &schp));
11469         }
11470     case TARGET_NR_sched_getparam:
11471         {
11472             struct target_sched_param *target_schp;
11473             struct sched_param schp;
11474 
11475             if (arg2 == 0) {
11476                 return -TARGET_EINVAL;
11477             }
11478             ret = get_errno(sys_sched_getparam(arg1, &schp));
11479             if (!is_error(ret)) {
11480                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11481                     return -TARGET_EFAULT;
11482                 }
11483                 target_schp->sched_priority = tswap32(schp.sched_priority);
11484                 unlock_user_struct(target_schp, arg2, 1);
11485             }
11486         }
11487         return ret;
11488     case TARGET_NR_sched_setscheduler:
11489         {
11490             struct target_sched_param *target_schp;
11491             struct sched_param schp;
11492             if (arg3 == 0) {
11493                 return -TARGET_EINVAL;
11494             }
11495             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11496                 return -TARGET_EFAULT;
11497             }
11498             schp.sched_priority = tswap32(target_schp->sched_priority);
11499             unlock_user_struct(target_schp, arg3, 0);
11500             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11501         }
11502     case TARGET_NR_sched_getscheduler:
11503         return get_errno(sys_sched_getscheduler(arg1));
11504     case TARGET_NR_sched_getattr:
11505         {
11506             struct target_sched_attr *target_scha;
11507             struct sched_attr scha;
11508             if (arg2 == 0) {
11509                 return -TARGET_EINVAL;
11510             }
11511             if (arg3 > sizeof(scha)) {
11512                 arg3 = sizeof(scha);
11513             }
11514             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11515             if (!is_error(ret)) {
11516                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11517                 if (!target_scha) {
11518                     return -TARGET_EFAULT;
11519                 }
11520                 target_scha->size = tswap32(scha.size);
11521                 target_scha->sched_policy = tswap32(scha.sched_policy);
11522                 target_scha->sched_flags = tswap64(scha.sched_flags);
11523                 target_scha->sched_nice = tswap32(scha.sched_nice);
11524                 target_scha->sched_priority = tswap32(scha.sched_priority);
11525                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11526                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11527                 target_scha->sched_period = tswap64(scha.sched_period);
11528                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11529                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11530                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11531                 }
11532                 unlock_user(target_scha, arg2, arg3);
11533             }
11534             return ret;
11535         }
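        /*
         * sched_setattr takes an extensible struct: the guest tells us its
         * idea of the size, any part of it beyond what we support must be
         * zero (checked via check_zeroed_user), and a size that is too
         * small is answered with E2BIG plus a write-back of the size we do
         * support.
         */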
11536     case TARGET_NR_sched_setattr:
11537         {
11538             struct target_sched_attr *target_scha;
11539             struct sched_attr scha;
11540             uint32_t size;
11541             int zeroed;
11542             if (arg2 == 0) {
11543                 return -TARGET_EINVAL;
11544             }
11545             if (get_user_u32(size, arg2)) {
11546                 return -TARGET_EFAULT;
11547             }
11548             if (!size) {
11549                 size = offsetof(struct target_sched_attr, sched_util_min);
11550             }
11551             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11552                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11553                     return -TARGET_EFAULT;
11554                 }
11555                 return -TARGET_E2BIG;
11556             }
11557 
11558             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11559             if (zeroed < 0) {
11560                 return zeroed;
11561             } else if (zeroed == 0) {
11562                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11563                     return -TARGET_EFAULT;
11564                 }
11565                 return -TARGET_E2BIG;
11566             }
11567             if (size > sizeof(struct target_sched_attr)) {
11568                 size = sizeof(struct target_sched_attr);
11569             }
11570 
11571             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11572             if (!target_scha) {
11573                 return -TARGET_EFAULT;
11574             }
11575             scha.size = size;
11576             scha.sched_policy = tswap32(target_scha->sched_policy);
11577             scha.sched_flags = tswap64(target_scha->sched_flags);
11578             scha.sched_nice = tswap32(target_scha->sched_nice);
11579             scha.sched_priority = tswap32(target_scha->sched_priority);
11580             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11581             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11582             scha.sched_period = tswap64(target_scha->sched_period);
11583             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11584                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11585                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11586             }
11587             unlock_user(target_scha, arg2, 0);
11588             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11589         }
11590     case TARGET_NR_sched_yield:
11591         return get_errno(sched_yield());
11592     case TARGET_NR_sched_get_priority_max:
11593         return get_errno(sched_get_priority_max(arg1));
11594     case TARGET_NR_sched_get_priority_min:
11595         return get_errno(sched_get_priority_min(arg1));
11596 #ifdef TARGET_NR_sched_rr_get_interval
11597     case TARGET_NR_sched_rr_get_interval:
11598         {
11599             struct timespec ts;
11600             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11601             if (!is_error(ret)) {
11602                 ret = host_to_target_timespec(arg2, &ts);
11603             }
11604         }
11605         return ret;
11606 #endif
11607 #ifdef TARGET_NR_sched_rr_get_interval_time64
11608     case TARGET_NR_sched_rr_get_interval_time64:
11609         {
11610             struct timespec ts;
11611             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11612             if (!is_error(ret)) {
11613                 ret = host_to_target_timespec64(arg2, &ts);
11614             }
11615         }
11616         return ret;
11617 #endif
11618 #if defined(TARGET_NR_nanosleep)
11619     case TARGET_NR_nanosleep:
11620         {
11621             struct timespec req, rem;
11622             target_to_host_timespec(&req, arg1);
11623             ret = get_errno(safe_nanosleep(&req, &rem));
11624             if (is_error(ret) && arg2) {
11625                 host_to_target_timespec(arg2, &rem);
11626             }
11627         }
11628         return ret;
11629 #endif
11630     case TARGET_NR_prctl:
11631         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11633 #ifdef TARGET_NR_arch_prctl
11634     case TARGET_NR_arch_prctl:
11635         return do_arch_prctl(cpu_env, arg1, arg2);
11636 #endif
11637 #ifdef TARGET_NR_pread64
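    /*
     * On ABIs where a 64-bit value must start in an even register pair
     * (regpairs_aligned), a padding slot precedes the offset, so the two
     * halves of the offset arrive in arg5/arg6 rather than arg4/arg5.
     */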
11638     case TARGET_NR_pread64:
11639         if (regpairs_aligned(cpu_env, num)) {
11640             arg4 = arg5;
11641             arg5 = arg6;
11642         }
11643         if (arg2 == 0 && arg3 == 0) {
11644             /* Special-case NULL buffer and zero length, which should succeed */
11645             p = 0;
11646         } else {
11647             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11648             if (!p) {
11649                 return -TARGET_EFAULT;
11650             }
11651         }
11652         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11653         unlock_user(p, arg2, ret);
11654         return ret;
11655     case TARGET_NR_pwrite64:
11656         if (regpairs_aligned(cpu_env, num)) {
11657             arg4 = arg5;
11658             arg5 = arg6;
11659         }
11660         if (arg2 == 0 && arg3 == 0) {
11661             /* Special-case NULL buffer and zero length, which should succeed */
11662             p = 0;
11663         } else {
11664             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11665             if (!p) {
11666                 return -TARGET_EFAULT;
11667             }
11668         }
11669         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11670         unlock_user(p, arg2, 0);
11671         return ret;
11672 #endif
11673     case TARGET_NR_getcwd:
11674         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11675             return -TARGET_EFAULT;
11676         ret = get_errno(sys_getcwd1(p, arg2));
11677         unlock_user(p, arg1, ret);
11678         return ret;
11679     case TARGET_NR_capget:
11680     case TARGET_NR_capset:
11681     {
11682         struct target_user_cap_header *target_header;
11683         struct target_user_cap_data *target_data = NULL;
11684         struct __user_cap_header_struct header;
11685         struct __user_cap_data_struct data[2];
11686         struct __user_cap_data_struct *dataptr = NULL;
11687         int i, target_datalen;
11688         int data_items = 1;
11689 
11690         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11691             return -TARGET_EFAULT;
11692         }
11693         header.version = tswap32(target_header->version);
11694         header.pid = tswap32(target_header->pid);
11695 
11696         if (header.version != _LINUX_CAPABILITY_VERSION) {
11697             /* Versions 2 and up take a pointer to two user_data structs */
11698             data_items = 2;
11699         }
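        /*
         * i.e. one 32-bit capability set for the original v1 ABI, two for
         * v2/v3, which widened the capability space to 64 bits.
         */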
11700 
11701         target_datalen = sizeof(*target_data) * data_items;
11702 
11703         if (arg2) {
11704             if (num == TARGET_NR_capget) {
11705                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11706             } else {
11707                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11708             }
11709             if (!target_data) {
11710                 unlock_user_struct(target_header, arg1, 0);
11711                 return -TARGET_EFAULT;
11712             }
11713 
11714             if (num == TARGET_NR_capset) {
11715                 for (i = 0; i < data_items; i++) {
11716                     data[i].effective = tswap32(target_data[i].effective);
11717                     data[i].permitted = tswap32(target_data[i].permitted);
11718                     data[i].inheritable = tswap32(target_data[i].inheritable);
11719                 }
11720             }
11721 
11722             dataptr = data;
11723         }
11724 
11725         if (num == TARGET_NR_capget) {
11726             ret = get_errno(capget(&header, dataptr));
11727         } else {
11728             ret = get_errno(capset(&header, dataptr));
11729         }
11730 
11731         /* The kernel always updates version for both capget and capset */
11732         target_header->version = tswap32(header.version);
11733         unlock_user_struct(target_header, arg1, 1);
11734 
11735         if (arg2) {
11736             if (num == TARGET_NR_capget) {
11737                 for (i = 0; i < data_items; i++) {
11738                     target_data[i].effective = tswap32(data[i].effective);
11739                     target_data[i].permitted = tswap32(data[i].permitted);
11740                     target_data[i].inheritable = tswap32(data[i].inheritable);
11741                 }
11742                 unlock_user(target_data, arg2, target_datalen);
11743             } else {
11744                 unlock_user(target_data, arg2, 0);
11745             }
11746         }
11747         return ret;
11748     }
11749     case TARGET_NR_sigaltstack:
11750         return do_sigaltstack(arg1, arg2, cpu_env);
11751 
11752 #ifdef CONFIG_SENDFILE
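/*
 * sendfile passes the file offset as a target long, sendfile64 always as
 * 64 bits; both variants copy the updated offset back to the guest on
 * success.
 */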
11753 #ifdef TARGET_NR_sendfile
11754     case TARGET_NR_sendfile:
11755     {
11756         off_t *offp = NULL;
11757         off_t off;
11758         if (arg3) {
11759             ret = get_user_sal(off, arg3);
11760             if (is_error(ret)) {
11761                 return ret;
11762             }
11763             offp = &off;
11764         }
11765         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11766         if (!is_error(ret) && arg3) {
11767             abi_long ret2 = put_user_sal(off, arg3);
11768             if (is_error(ret2)) {
11769                 ret = ret2;
11770             }
11771         }
11772         return ret;
11773     }
11774 #endif
11775 #ifdef TARGET_NR_sendfile64
11776     case TARGET_NR_sendfile64:
11777     {
11778         off_t *offp = NULL;
11779         off_t off;
11780         if (arg3) {
11781             ret = get_user_s64(off, arg3);
11782             if (is_error(ret)) {
11783                 return ret;
11784             }
11785             offp = &off;
11786         }
11787         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11788         if (!is_error(ret) && arg3) {
11789             abi_long ret2 = put_user_s64(off, arg3);
11790             if (is_error(ret2)) {
11791                 ret = ret2;
11792             }
11793         }
11794         return ret;
11795     }
11796 #endif
11797 #endif
11798 #ifdef TARGET_NR_vfork
11799     case TARGET_NR_vfork:
11800         return get_errno(do_fork(cpu_env,
11801                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11802                          0, 0, 0, 0));
11803 #endif
11804 #ifdef TARGET_NR_ugetrlimit
11805     case TARGET_NR_ugetrlimit:
11806     {
11807         struct rlimit rlim;
11808         int resource = target_to_host_resource(arg1);
11809         ret = get_errno(getrlimit(resource, &rlim));
11810         if (!is_error(ret)) {
11811             struct target_rlimit *target_rlim;
11812             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11813                 return -TARGET_EFAULT;
11814             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11815             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11816             unlock_user_struct(target_rlim, arg2, 1);
11817         }
11818         return ret;
11819     }
11820 #endif
11821 #ifdef TARGET_NR_truncate64
11822     case TARGET_NR_truncate64:
11823         if (!(p = lock_user_string(arg1)))
11824             return -TARGET_EFAULT;
11825         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11826         unlock_user(p, arg1, 0);
11827         return ret;
11828 #endif
11829 #ifdef TARGET_NR_ftruncate64
11830     case TARGET_NR_ftruncate64:
11831         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11832 #endif
11833 #ifdef TARGET_NR_stat64
11834     case TARGET_NR_stat64:
11835         if (!(p = lock_user_string(arg1))) {
11836             return -TARGET_EFAULT;
11837         }
11838         ret = get_errno(stat(path(p), &st));
11839         unlock_user(p, arg1, 0);
11840         if (!is_error(ret))
11841             ret = host_to_target_stat64(cpu_env, arg2, &st);
11842         return ret;
11843 #endif
11844 #ifdef TARGET_NR_lstat64
11845     case TARGET_NR_lstat64:
11846         if (!(p = lock_user_string(arg1))) {
11847             return -TARGET_EFAULT;
11848         }
11849         ret = get_errno(lstat(path(p), &st));
11850         unlock_user(p, arg1, 0);
11851         if (!is_error(ret))
11852             ret = host_to_target_stat64(cpu_env, arg2, &st);
11853         return ret;
11854 #endif
11855 #ifdef TARGET_NR_fstat64
11856     case TARGET_NR_fstat64:
11857         ret = get_errno(fstat(arg1, &st));
11858         if (!is_error(ret))
11859             ret = host_to_target_stat64(cpu_env, arg2, &st);
11860         return ret;
11861 #endif
11862 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11863 #ifdef TARGET_NR_fstatat64
11864     case TARGET_NR_fstatat64:
11865 #endif
11866 #ifdef TARGET_NR_newfstatat
11867     case TARGET_NR_newfstatat:
11868 #endif
11869         if (!(p = lock_user_string(arg2))) {
11870             return -TARGET_EFAULT;
11871         }
11872         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11873         unlock_user(p, arg2, 0);
11874         if (!is_error(ret))
11875             ret = host_to_target_stat64(cpu_env, arg3, &st);
11876         return ret;
11877 #endif
11878 #if defined(TARGET_NR_statx)
11879     case TARGET_NR_statx:
11880         {
11881             struct target_statx *target_stx;
11882             int dirfd = arg1;
11883             int flags = arg3;
11884 
11885             p = lock_user_string(arg2);
11886             if (p == NULL) {
11887                 return -TARGET_EFAULT;
11888             }
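            /*
             * Preferably hand the request to the host's own statx(2); if
             * that is not compiled in, or it reports ENOSYS, fall back to
             * fstatat() and fill in the statx fields we can recover from
             * an ordinary struct stat.
             */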
11889 #if defined(__NR_statx)
11890             {
11891                 /*
11892                  * It is assumed that struct statx is architecture independent.
11893                  */
11894                 struct target_statx host_stx;
11895                 int mask = arg4;
11896 
11897                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11898                 if (!is_error(ret)) {
11899                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11900                         unlock_user(p, arg2, 0);
11901                         return -TARGET_EFAULT;
11902                     }
11903                 }
11904 
11905                 if (ret != -TARGET_ENOSYS) {
11906                     unlock_user(p, arg2, 0);
11907                     return ret;
11908                 }
11909             }
11910 #endif
11911             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11912             unlock_user(p, arg2, 0);
11913 
11914             if (!is_error(ret)) {
11915                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11916                     return -TARGET_EFAULT;
11917                 }
11918                 memset(target_stx, 0, sizeof(*target_stx));
11919                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11920                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11921                 __put_user(st.st_ino, &target_stx->stx_ino);
11922                 __put_user(st.st_mode, &target_stx->stx_mode);
11923                 __put_user(st.st_uid, &target_stx->stx_uid);
11924                 __put_user(st.st_gid, &target_stx->stx_gid);
11925                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11926                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11927                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11928                 __put_user(st.st_size, &target_stx->stx_size);
11929                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11930                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11931                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11932                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11933                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11934                 unlock_user_struct(target_stx, arg5, 1);
11935             }
11936         }
11937         return ret;
11938 #endif
11939 #ifdef TARGET_NR_lchown
11940     case TARGET_NR_lchown:
11941         if (!(p = lock_user_string(arg1)))
11942             return -TARGET_EFAULT;
11943         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11944         unlock_user(p, arg1, 0);
11945         return ret;
11946 #endif
11947 #ifdef TARGET_NR_getuid
11948     case TARGET_NR_getuid:
11949         return get_errno(high2lowuid(getuid()));
11950 #endif
11951 #ifdef TARGET_NR_getgid
11952     case TARGET_NR_getgid:
11953         return get_errno(high2lowgid(getgid()));
11954 #endif
11955 #ifdef TARGET_NR_geteuid
11956     case TARGET_NR_geteuid:
11957         return get_errno(high2lowuid(geteuid()));
11958 #endif
11959 #ifdef TARGET_NR_getegid
11960     case TARGET_NR_getegid:
11961         return get_errno(high2lowgid(getegid()));
11962 #endif
11963     case TARGET_NR_setreuid:
11964         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11965     case TARGET_NR_setregid:
11966         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11967     case TARGET_NR_getgroups:
11968         { /* the same code as for TARGET_NR_getgroups32 */
11969             int gidsetsize = arg1;
11970             target_id *target_grouplist;
11971             g_autofree gid_t *grouplist = NULL;
11972             int i;
11973 
11974             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11975                 return -TARGET_EINVAL;
11976             }
11977             if (gidsetsize > 0) {
11978                 grouplist = g_try_new(gid_t, gidsetsize);
11979                 if (!grouplist) {
11980                     return -TARGET_ENOMEM;
11981                 }
11982             }
11983             ret = get_errno(getgroups(gidsetsize, grouplist));
11984             if (!is_error(ret) && gidsetsize > 0) {
11985                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11986                                              gidsetsize * sizeof(target_id), 0);
11987                 if (!target_grouplist) {
11988                     return -TARGET_EFAULT;
11989                 }
11990                 for (i = 0; i < ret; i++) {
11991                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11992                 }
11993                 unlock_user(target_grouplist, arg2,
11994                             gidsetsize * sizeof(target_id));
11995             }
11996             return ret;
11997         }
11998     case TARGET_NR_setgroups:
11999         { /* the same code as for TARGET_NR_setgroups32 */
12000             int gidsetsize = arg1;
12001             target_id *target_grouplist;
12002             g_autofree gid_t *grouplist = NULL;
12003             int i;
12004 
12005             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12006                 return -TARGET_EINVAL;
12007             }
12008             if (gidsetsize > 0) {
12009                 grouplist = g_try_new(gid_t, gidsetsize);
12010                 if (!grouplist) {
12011                     return -TARGET_ENOMEM;
12012                 }
12013                 target_grouplist = lock_user(VERIFY_READ, arg2,
12014                                              gidsetsize * sizeof(target_id), 1);
12015                 if (!target_grouplist) {
12016                     return -TARGET_EFAULT;
12017                 }
12018                 for (i = 0; i < gidsetsize; i++) {
12019                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12020                 }
12021                 unlock_user(target_grouplist, arg2,
12022                             gidsetsize * sizeof(target_id));
12023             }
12024             return get_errno(sys_setgroups(gidsetsize, grouplist));
12025         }
12026     case TARGET_NR_fchown:
12027         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12028 #if defined(TARGET_NR_fchownat)
12029     case TARGET_NR_fchownat:
12030         if (!(p = lock_user_string(arg2)))
12031             return -TARGET_EFAULT;
12032         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12033                                  low2highgid(arg4), arg5));
12034         unlock_user(p, arg2, 0);
12035         return ret;
12036 #endif
12037 #ifdef TARGET_NR_setresuid
12038     case TARGET_NR_setresuid:
12039         return get_errno(sys_setresuid(low2highuid(arg1),
12040                                        low2highuid(arg2),
12041                                        low2highuid(arg3)));
12042 #endif
12043 #ifdef TARGET_NR_getresuid
12044     case TARGET_NR_getresuid:
12045         {
12046             uid_t ruid, euid, suid;
12047             ret = get_errno(getresuid(&ruid, &euid, &suid));
12048             if (!is_error(ret)) {
12049                 if (put_user_id(high2lowuid(ruid), arg1)
12050                     || put_user_id(high2lowuid(euid), arg2)
12051                     || put_user_id(high2lowuid(suid), arg3))
12052                     return -TARGET_EFAULT;
12053             }
12054         }
12055         return ret;
12056 #endif
12057 #ifdef TARGET_NR_setresgid
12058     case TARGET_NR_setresgid:
12059         return get_errno(sys_setresgid(low2highgid(arg1),
12060                                        low2highgid(arg2),
12061                                        low2highgid(arg3)));
12062 #endif
12063 #ifdef TARGET_NR_getresgid
12064     case TARGET_NR_getresgid:
12065         {
12066             gid_t rgid, egid, sgid;
12067             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12068             if (!is_error(ret)) {
12069                 if (put_user_id(high2lowgid(rgid), arg1)
12070                     || put_user_id(high2lowgid(egid), arg2)
12071                     || put_user_id(high2lowgid(sgid), arg3))
12072                     return -TARGET_EFAULT;
12073             }
12074         }
12075         return ret;
12076 #endif
12077 #ifdef TARGET_NR_chown
12078     case TARGET_NR_chown:
12079         if (!(p = lock_user_string(arg1)))
12080             return -TARGET_EFAULT;
12081         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12082         unlock_user(p, arg1, 0);
12083         return ret;
12084 #endif
12085     case TARGET_NR_setuid:
12086         return get_errno(sys_setuid(low2highuid(arg1)));
12087     case TARGET_NR_setgid:
12088         return get_errno(sys_setgid(low2highgid(arg1)));
12089     case TARGET_NR_setfsuid:
12090         return get_errno(setfsuid(arg1));
12091     case TARGET_NR_setfsgid:
12092         return get_errno(setfsgid(arg1));
12093 
12094 #ifdef TARGET_NR_lchown32
12095     case TARGET_NR_lchown32:
12096         if (!(p = lock_user_string(arg1)))
12097             return -TARGET_EFAULT;
12098         ret = get_errno(lchown(p, arg2, arg3));
12099         unlock_user(p, arg1, 0);
12100         return ret;
12101 #endif
12102 #ifdef TARGET_NR_getuid32
12103     case TARGET_NR_getuid32:
12104         return get_errno(getuid());
12105 #endif
12106 
12107 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12108    /* Alpha specific */
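          /*
           * Alpha returns two values from this syscall: the real uid in
           * the normal result register and the effective uid in a4, the
           * kernel's second return register; mirror that by storing the
           * effective id in IR_A4.
           */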
12109     case TARGET_NR_getxuid:
12110          {
12111             uid_t euid;
12112             euid=geteuid();
12113             cpu_env->ir[IR_A4]=euid;
12114          }
12115         return get_errno(getuid());
12116 #endif
12117 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12118    /* Alpha specific */
12119     case TARGET_NR_getxgid:
12120          {
12121             uid_t egid;
12122             egid=getegid();
12123             cpu_env->ir[IR_A4]=egid;
12124          }
12125         return get_errno(getgid());
12126 #endif
12127 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12128     /* Alpha specific */
12129     case TARGET_NR_osf_getsysinfo:
12130         ret = -TARGET_EOPNOTSUPP;
12131         switch (arg1) {
12132           case TARGET_GSI_IEEE_FP_CONTROL:
12133             {
12134                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12135                 uint64_t swcr = cpu_env->swcr;
12136 
12137                 swcr &= ~SWCR_STATUS_MASK;
12138                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12139 
12140                 if (put_user_u64(swcr, arg2))
12141                     return -TARGET_EFAULT;
12142                 ret = 0;
12143             }
12144             break;
12145 
12146           /* case GSI_IEEE_STATE_AT_SIGNAL:
12147              -- Not implemented in linux kernel.
12148              case GSI_UACPROC:
12149              -- Retrieves current unaligned access state; not much used.
12150              case GSI_PROC_TYPE:
12151              -- Retrieves implver information; surely not used.
12152              case GSI_GET_HWRPB:
12153              -- Grabs a copy of the HWRPB; surely not used.
12154           */
12155         }
12156         return ret;
12157 #endif
12158 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12159     /* Alpha specific */
12160     case TARGET_NR_osf_setsysinfo:
12161         ret = -TARGET_EOPNOTSUPP;
12162         switch (arg1) {
12163           case TARGET_SSI_IEEE_FP_CONTROL:
12164             {
12165                 uint64_t swcr, fpcr;
12166 
12167                 if (get_user_u64(swcr, arg2)) {
12168                     return -TARGET_EFAULT;
12169                 }
12170 
12171                 /*
12172                  * The kernel calls swcr_update_status to update the
12173                  * status bits from the fpcr at every point that it
12174                  * could be queried.  Therefore, we store the status
12175                  * bits only in FPCR.
12176                  */
12177                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12178 
12179                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12180                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12181                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12182                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12183                 ret = 0;
12184             }
12185             break;
12186 
12187           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12188             {
12189                 uint64_t exc, fpcr, fex;
12190 
12191                 if (get_user_u64(exc, arg2)) {
12192                     return -TARGET_EFAULT;
12193                 }
12194                 exc &= SWCR_STATUS_MASK;
12195                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12196 
12197                 /* Old exceptions are not signaled.  */
12198                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12199                 fex = exc & ~fex;
12200                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12201                 fex &= (cpu_env)->swcr;
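                      /*
                       * fex now holds the newly raised status bits whose
                       * corresponding trap-enable bits are set in swcr,
                       * i.e. the exceptions that must be reported via
                       * SIGFPE below.
                       */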
12202 
12203                 /* Update the hardware fpcr.  */
12204                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12205                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12206 
12207                 if (fex) {
12208                     int si_code = TARGET_FPE_FLTUNK;
12209                     target_siginfo_t info;
12210 
12211                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12212                         si_code = TARGET_FPE_FLTUND;
12213                     }
12214                     if (fex & SWCR_TRAP_ENABLE_INE) {
12215                         si_code = TARGET_FPE_FLTRES;
12216                     }
12217                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12218                         si_code = TARGET_FPE_FLTUND;
12219                     }
12220                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12221                         si_code = TARGET_FPE_FLTOVF;
12222                     }
12223                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12224                         si_code = TARGET_FPE_FLTDIV;
12225                     }
12226                     if (fex & SWCR_TRAP_ENABLE_INV) {
12227                         si_code = TARGET_FPE_FLTINV;
12228                     }
12229 
12230                     info.si_signo = SIGFPE;
12231                     info.si_errno = 0;
12232                     info.si_code = si_code;
12233                     info._sifields._sigfault._addr = (cpu_env)->pc;
12234                     queue_signal(cpu_env, info.si_signo,
12235                                  QEMU_SI_FAULT, &info);
12236                 }
12237                 ret = 0;
12238             }
12239             break;
12240 
12241           /* case SSI_NVPAIRS:
12242              -- Used with SSIN_UACPROC to enable unaligned accesses.
12243              case SSI_IEEE_STATE_AT_SIGNAL:
12244              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12245              -- Not implemented in linux kernel
12246           */
12247         }
12248         return ret;
12249 #endif
12250 #ifdef TARGET_NR_osf_sigprocmask
12251     /* Alpha specific.  */
12252     case TARGET_NR_osf_sigprocmask:
12253         {
12254             abi_ulong mask;
12255             int how;
12256             sigset_t set, oldset;
12257 
12258             switch (arg1) {
12259             case TARGET_SIG_BLOCK:
12260                 how = SIG_BLOCK;
12261                 break;
12262             case TARGET_SIG_UNBLOCK:
12263                 how = SIG_UNBLOCK;
12264                 break;
12265             case TARGET_SIG_SETMASK:
12266                 how = SIG_SETMASK;
12267                 break;
12268             default:
12269                 return -TARGET_EINVAL;
12270             }
12271             mask = arg2;
12272             target_to_host_old_sigset(&set, &mask);
12273             ret = do_sigprocmask(how, &set, &oldset);
12274             if (!ret) {
12275                 host_to_target_old_sigset(&mask, &oldset);
12276                 ret = mask;
12277             }
12278         }
12279         return ret;
12280 #endif
12281 
12282 #ifdef TARGET_NR_getgid32
12283     case TARGET_NR_getgid32:
12284         return get_errno(getgid());
12285 #endif
12286 #ifdef TARGET_NR_geteuid32
12287     case TARGET_NR_geteuid32:
12288         return get_errno(geteuid());
12289 #endif
12290 #ifdef TARGET_NR_getegid32
12291     case TARGET_NR_getegid32:
12292         return get_errno(getegid());
12293 #endif
12294 #ifdef TARGET_NR_setreuid32
12295     case TARGET_NR_setreuid32:
12296         return get_errno(sys_setreuid(arg1, arg2));
12297 #endif
12298 #ifdef TARGET_NR_setregid32
12299     case TARGET_NR_setregid32:
12300         return get_errno(sys_setregid(arg1, arg2));
12301 #endif
12302 #ifdef TARGET_NR_getgroups32
12303     case TARGET_NR_getgroups32:
12304         { /* the same code as for TARGET_NR_getgroups */
12305             int gidsetsize = arg1;
12306             uint32_t *target_grouplist;
12307             g_autofree gid_t *grouplist = NULL;
12308             int i;
12309 
12310             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12311                 return -TARGET_EINVAL;
12312             }
12313             if (gidsetsize > 0) {
12314                 grouplist = g_try_new(gid_t, gidsetsize);
12315                 if (!grouplist) {
12316                     return -TARGET_ENOMEM;
12317                 }
12318             }
12319             ret = get_errno(getgroups(gidsetsize, grouplist));
12320             if (!is_error(ret) && gidsetsize > 0) {
12321                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12322                                              gidsetsize * 4, 0);
12323                 if (!target_grouplist) {
12324                     return -TARGET_EFAULT;
12325                 }
12326                 for (i = 0; i < ret; i++) {
12327                     target_grouplist[i] = tswap32(grouplist[i]);
12328                 }
12329                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12330             }
12331             return ret;
12332         }
12333 #endif
12334 #ifdef TARGET_NR_setgroups32
12335     case TARGET_NR_setgroups32:
12336         { /* the same code as for TARGET_NR_setgroups */
12337             int gidsetsize = arg1;
12338             uint32_t *target_grouplist;
12339             g_autofree gid_t *grouplist = NULL;
12340             int i;
12341 
12342             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12343                 return -TARGET_EINVAL;
12344             }
12345             if (gidsetsize > 0) {
12346                 grouplist = g_try_new(gid_t, gidsetsize);
12347                 if (!grouplist) {
12348                     return -TARGET_ENOMEM;
12349                 }
12350                 target_grouplist = lock_user(VERIFY_READ, arg2,
12351                                              gidsetsize * 4, 1);
12352                 if (!target_grouplist) {
12353                     return -TARGET_EFAULT;
12354                 }
12355                 for (i = 0; i < gidsetsize; i++) {
12356                     grouplist[i] = tswap32(target_grouplist[i]);
12357                 }
12358                 unlock_user(target_grouplist, arg2, 0);
12359             }
12360             return get_errno(sys_setgroups(gidsetsize, grouplist));
12361         }
12362 #endif
12363 #ifdef TARGET_NR_fchown32
12364     case TARGET_NR_fchown32:
12365         return get_errno(fchown(arg1, arg2, arg3));
12366 #endif
12367 #ifdef TARGET_NR_setresuid32
12368     case TARGET_NR_setresuid32:
12369         return get_errno(sys_setresuid(arg1, arg2, arg3));
12370 #endif
12371 #ifdef TARGET_NR_getresuid32
12372     case TARGET_NR_getresuid32:
12373         {
12374             uid_t ruid, euid, suid;
12375             ret = get_errno(getresuid(&ruid, &euid, &suid));
12376             if (!is_error(ret)) {
12377                 if (put_user_u32(ruid, arg1)
12378                     || put_user_u32(euid, arg2)
12379                     || put_user_u32(suid, arg3))
12380                     return -TARGET_EFAULT;
12381             }
12382         }
12383         return ret;
12384 #endif
12385 #ifdef TARGET_NR_setresgid32
12386     case TARGET_NR_setresgid32:
12387         return get_errno(sys_setresgid(arg1, arg2, arg3));
12388 #endif
12389 #ifdef TARGET_NR_getresgid32
12390     case TARGET_NR_getresgid32:
12391         {
12392             gid_t rgid, egid, sgid;
12393             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12394             if (!is_error(ret)) {
12395                 if (put_user_u32(rgid, arg1)
12396                     || put_user_u32(egid, arg2)
12397                     || put_user_u32(sgid, arg3))
12398                     return -TARGET_EFAULT;
12399             }
12400         }
12401         return ret;
12402 #endif
12403 #ifdef TARGET_NR_chown32
12404     case TARGET_NR_chown32:
12405         if (!(p = lock_user_string(arg1)))
12406             return -TARGET_EFAULT;
12407         ret = get_errno(chown(p, arg2, arg3));
12408         unlock_user(p, arg1, 0);
12409         return ret;
12410 #endif
12411 #ifdef TARGET_NR_setuid32
12412     case TARGET_NR_setuid32:
12413         return get_errno(sys_setuid(arg1));
12414 #endif
12415 #ifdef TARGET_NR_setgid32
12416     case TARGET_NR_setgid32:
12417         return get_errno(sys_setgid(arg1));
12418 #endif
12419 #ifdef TARGET_NR_setfsuid32
12420     case TARGET_NR_setfsuid32:
12421         return get_errno(setfsuid(arg1));
12422 #endif
12423 #ifdef TARGET_NR_setfsgid32
12424     case TARGET_NR_setfsgid32:
12425         return get_errno(setfsgid(arg1));
12426 #endif
12427 #ifdef TARGET_NR_mincore
12428     case TARGET_NR_mincore:
12429         {
12430             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12431             if (!a) {
12432                 return -TARGET_ENOMEM;
12433             }
12434             p = lock_user_string(arg3);
12435             if (!p) {
12436                 ret = -TARGET_EFAULT;
12437             } else {
12438                 ret = get_errno(mincore(a, arg2, p));
12439                 unlock_user(p, arg3, ret);
12440             }
12441             unlock_user(a, arg1, 0);
12442         }
12443         return ret;
12444 #endif
12445 #ifdef TARGET_NR_arm_fadvise64_64
12446     case TARGET_NR_arm_fadvise64_64:
12447         /* arm_fadvise64_64 looks like fadvise64_64 but
12448          * with different argument order: fd, advice, offset, len
12449          * rather than the usual fd, offset, len, advice.
12450          * Note that offset and len are both 64-bit so appear as
12451          * pairs of 32-bit registers.
12452          */
12453         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12454                             target_offset64(arg5, arg6), arg2);
12455         return -host_to_target_errno(ret);
12456 #endif
12457 
12458 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12459 
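      /*
       * On 32-bit ABIs, 64-bit offset and length arguments are passed as
       * pairs of 32-bit registers.  regpairs_aligned() reports whether the
       * target ABI requires such a pair to start on an even-numbered
       * register, which shifts the remaining arguments by one slot.
       */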
12460 #ifdef TARGET_NR_fadvise64_64
12461     case TARGET_NR_fadvise64_64:
12462 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12463         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12464         ret = arg2;
12465         arg2 = arg3;
12466         arg3 = arg4;
12467         arg4 = arg5;
12468         arg5 = arg6;
12469         arg6 = ret;
12470 #else
12471         /* 6 args: fd, offset (high, low), len (high, low), advice */
12472         if (regpairs_aligned(cpu_env, num)) {
12473             /* offset is in (3,4), len in (5,6) and advice in 7 */
12474             arg2 = arg3;
12475             arg3 = arg4;
12476             arg4 = arg5;
12477             arg5 = arg6;
12478             arg6 = arg7;
12479         }
12480 #endif
12481         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12482                             target_offset64(arg4, arg5), arg6);
12483         return -host_to_target_errno(ret);
12484 #endif
12485 
12486 #ifdef TARGET_NR_fadvise64
12487     case TARGET_NR_fadvise64:
12488         /* 5 args: fd, offset (high, low), len, advice */
12489         if (regpairs_aligned(cpu_env, num)) {
12490             /* offset is in (3,4), len in 5 and advice in 6 */
12491             arg2 = arg3;
12492             arg3 = arg4;
12493             arg4 = arg5;
12494             arg5 = arg6;
12495         }
12496         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12497         return -host_to_target_errno(ret);
12498 #endif
12499 
12500 #else /* not a 32-bit ABI */
12501 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12502 #ifdef TARGET_NR_fadvise64_64
12503     case TARGET_NR_fadvise64_64:
12504 #endif
12505 #ifdef TARGET_NR_fadvise64
12506     case TARGET_NR_fadvise64:
12507 #endif
12508 #ifdef TARGET_S390X
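              /*
               * The s390 ABI numbers POSIX_FADV_DONTNEED and
               * POSIX_FADV_NOREUSE as 6 and 7 rather than the generic
               * 4 and 5, so translate the guest's advice value to the
               * host's numbering.
               */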
12509         switch (arg4) {
12510         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12511         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12512         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12513         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12514         default: break;
12515         }
12516 #endif
12517         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12518 #endif
12519 #endif /* end of 64-bit ABI fadvise handling */
12520 
12521 #ifdef TARGET_NR_madvise
12522     case TARGET_NR_madvise:
12523         return target_madvise(arg1, arg2, arg3);
12524 #endif
12525 #ifdef TARGET_NR_fcntl64
12526     case TARGET_NR_fcntl64:
12527     {
12528         int cmd;
12529         struct flock fl;
12530         from_flock64_fn *copyfrom = copy_from_user_flock64;
12531         to_flock64_fn *copyto = copy_to_user_flock64;
12532 
12533 #ifdef TARGET_ARM
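              /*
               * ARM OABI lays out struct flock64 differently from EABI
               * (64-bit members are only 4-byte aligned there), so old-ABI
               * guests need the dedicated OABI copy helpers.
               */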
12534         if (!cpu_env->eabi) {
12535             copyfrom = copy_from_user_oabi_flock64;
12536             copyto = copy_to_user_oabi_flock64;
12537         }
12538 #endif
12539 
12540         cmd = target_to_host_fcntl_cmd(arg2);
12541         if (cmd == -TARGET_EINVAL) {
12542             return cmd;
12543         }
12544 
12545         switch (arg2) {
12546         case TARGET_F_GETLK64:
12547             ret = copyfrom(&fl, arg3);
12548             if (ret) {
12549                 break;
12550             }
12551             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12552             if (ret == 0) {
12553                 ret = copyto(arg3, &fl);
12554             }
12555             break;
12556 
12557         case TARGET_F_SETLK64:
12558         case TARGET_F_SETLKW64:
12559             ret = copyfrom(&fl, arg3);
12560             if (ret) {
12561                 break;
12562             }
12563             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12564             break;
12565         default:
12566             ret = do_fcntl(arg1, arg2, arg3);
12567             break;
12568         }
12569         return ret;
12570     }
12571 #endif
12572 #ifdef TARGET_NR_cacheflush
12573     case TARGET_NR_cacheflush:
12574         /* self-modifying code is handled automatically, so nothing needed */
12575         return 0;
12576 #endif
12577 #ifdef TARGET_NR_getpagesize
12578     case TARGET_NR_getpagesize:
12579         return TARGET_PAGE_SIZE;
12580 #endif
12581     case TARGET_NR_gettid:
12582         return get_errno(sys_gettid());
12583 #ifdef TARGET_NR_readahead
12584     case TARGET_NR_readahead:
12585 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12586         if (regpairs_aligned(cpu_env, num)) {
12587             arg2 = arg3;
12588             arg3 = arg4;
12589             arg4 = arg5;
12590         }
12591         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12592 #else
12593         ret = get_errno(readahead(arg1, arg2, arg3));
12594 #endif
12595         return ret;
12596 #endif
12597 #ifdef CONFIG_ATTR
12598 #ifdef TARGET_NR_setxattr
12599     case TARGET_NR_listxattr:
12600     case TARGET_NR_llistxattr:
12601     {
12602         void *b = 0;
12603         if (arg2) {
12604             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12605             if (!b) {
12606                 return -TARGET_EFAULT;
12607             }
12608         }
12609         p = lock_user_string(arg1);
12610         if (p) {
12611             if (num == TARGET_NR_listxattr) {
12612                 ret = get_errno(listxattr(p, b, arg3));
12613             } else {
12614                 ret = get_errno(llistxattr(p, b, arg3));
12615             }
12616         } else {
12617             ret = -TARGET_EFAULT;
12618         }
12619         unlock_user(p, arg1, 0);
12620         unlock_user(b, arg2, arg3);
12621         return ret;
12622     }
12623     case TARGET_NR_flistxattr:
12624     {
12625         void *b = 0;
12626         if (arg2) {
12627             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12628             if (!b) {
12629                 return -TARGET_EFAULT;
12630             }
12631         }
12632         ret = get_errno(flistxattr(arg1, b, arg3));
12633         unlock_user(b, arg2, arg3);
12634         return ret;
12635     }
12636     case TARGET_NR_setxattr:
12637     case TARGET_NR_lsetxattr:
12638         {
12639             void *n, *v = 0;
12640             if (arg3) {
12641                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12642                 if (!v) {
12643                     return -TARGET_EFAULT;
12644                 }
12645             }
12646             p = lock_user_string(arg1);
12647             n = lock_user_string(arg2);
12648             if (p && n) {
12649                 if (num == TARGET_NR_setxattr) {
12650                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12651                 } else {
12652                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12653                 }
12654             } else {
12655                 ret = -TARGET_EFAULT;
12656             }
12657             unlock_user(p, arg1, 0);
12658             unlock_user(n, arg2, 0);
12659             unlock_user(v, arg3, 0);
12660         }
12661         return ret;
12662     case TARGET_NR_fsetxattr:
12663         {
12664             void *n, *v = 0;
12665             if (arg3) {
12666                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12667                 if (!v) {
12668                     return -TARGET_EFAULT;
12669                 }
12670             }
12671             n = lock_user_string(arg2);
12672             if (n) {
12673                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12674             } else {
12675                 ret = -TARGET_EFAULT;
12676             }
12677             unlock_user(n, arg2, 0);
12678             unlock_user(v, arg3, 0);
12679         }
12680         return ret;
12681     case TARGET_NR_getxattr:
12682     case TARGET_NR_lgetxattr:
12683         {
12684             void *n, *v = 0;
12685             if (arg3) {
12686                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12687                 if (!v) {
12688                     return -TARGET_EFAULT;
12689                 }
12690             }
12691             p = lock_user_string(arg1);
12692             n = lock_user_string(arg2);
12693             if (p && n) {
12694                 if (num == TARGET_NR_getxattr) {
12695                     ret = get_errno(getxattr(p, n, v, arg4));
12696                 } else {
12697                     ret = get_errno(lgetxattr(p, n, v, arg4));
12698                 }
12699             } else {
12700                 ret = -TARGET_EFAULT;
12701             }
12702             unlock_user(p, arg1, 0);
12703             unlock_user(n, arg2, 0);
12704             unlock_user(v, arg3, arg4);
12705         }
12706         return ret;
12707     case TARGET_NR_fgetxattr:
12708         {
12709             void *n, *v = 0;
12710             if (arg3) {
12711                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12712                 if (!v) {
12713                     return -TARGET_EFAULT;
12714                 }
12715             }
12716             n = lock_user_string(arg2);
12717             if (n) {
12718                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12719             } else {
12720                 ret = -TARGET_EFAULT;
12721             }
12722             unlock_user(n, arg2, 0);
12723             unlock_user(v, arg3, arg4);
12724         }
12725         return ret;
12726     case TARGET_NR_removexattr:
12727     case TARGET_NR_lremovexattr:
12728         {
12729             void *n;
12730             p = lock_user_string(arg1);
12731             n = lock_user_string(arg2);
12732             if (p && n) {
12733                 if (num == TARGET_NR_removexattr) {
12734                     ret = get_errno(removexattr(p, n));
12735                 } else {
12736                     ret = get_errno(lremovexattr(p, n));
12737                 }
12738             } else {
12739                 ret = -TARGET_EFAULT;
12740             }
12741             unlock_user(p, arg1, 0);
12742             unlock_user(n, arg2, 0);
12743         }
12744         return ret;
12745     case TARGET_NR_fremovexattr:
12746         {
12747             void *n;
12748             n = lock_user_string(arg2);
12749             if (n) {
12750                 ret = get_errno(fremovexattr(arg1, n));
12751             } else {
12752                 ret = -TARGET_EFAULT;
12753             }
12754             unlock_user(n, arg2, 0);
12755         }
12756         return ret;
12757 #endif
12758 #endif /* CONFIG_ATTR */
12759 #ifdef TARGET_NR_set_thread_area
12760     case TARGET_NR_set_thread_area:
12761 #if defined(TARGET_MIPS)
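            /*
             * MIPS keeps the TLS pointer in the CP0 UserLocal register,
             * which guest code reads back via the rdhwr instruction.
             */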
12762       cpu_env->active_tc.CP0_UserLocal = arg1;
12763       return 0;
12764 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12765       return do_set_thread_area(cpu_env, arg1);
12766 #elif defined(TARGET_M68K)
12767       {
12768           TaskState *ts = get_task_state(cpu);
12769           ts->tp_value = arg1;
12770           return 0;
12771       }
12772 #else
12773       return -TARGET_ENOSYS;
12774 #endif
12775 #endif
12776 #ifdef TARGET_NR_get_thread_area
12777     case TARGET_NR_get_thread_area:
12778 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12779         return do_get_thread_area(cpu_env, arg1);
12780 #elif defined(TARGET_M68K)
12781         {
12782             TaskState *ts = get_task_state(cpu);
12783             return ts->tp_value;
12784         }
12785 #else
12786         return -TARGET_ENOSYS;
12787 #endif
12788 #endif
12789 #ifdef TARGET_NR_getdomainname
12790     case TARGET_NR_getdomainname:
12791         return -TARGET_ENOSYS;
12792 #endif
12793 
12794 #ifdef TARGET_NR_clock_settime
12795     case TARGET_NR_clock_settime:
12796     {
12797         struct timespec ts;
12798 
12799         ret = target_to_host_timespec(&ts, arg2);
12800         if (!is_error(ret)) {
12801             ret = get_errno(clock_settime(arg1, &ts));
12802         }
12803         return ret;
12804     }
12805 #endif
12806 #ifdef TARGET_NR_clock_settime64
12807     case TARGET_NR_clock_settime64:
12808     {
12809         struct timespec ts;
12810 
12811         ret = target_to_host_timespec64(&ts, arg2);
12812         if (!is_error(ret)) {
12813             ret = get_errno(clock_settime(arg1, &ts));
12814         }
12815         return ret;
12816     }
12817 #endif
12818 #ifdef TARGET_NR_clock_gettime
12819     case TARGET_NR_clock_gettime:
12820     {
12821         struct timespec ts;
12822         ret = get_errno(clock_gettime(arg1, &ts));
12823         if (!is_error(ret)) {
12824             ret = host_to_target_timespec(arg2, &ts);
12825         }
12826         return ret;
12827     }
12828 #endif
12829 #ifdef TARGET_NR_clock_gettime64
12830     case TARGET_NR_clock_gettime64:
12831     {
12832         struct timespec ts;
12833         ret = get_errno(clock_gettime(arg1, &ts));
12834         if (!is_error(ret)) {
12835             ret = host_to_target_timespec64(arg2, &ts);
12836         }
12837         return ret;
12838     }
12839 #endif
12840 #ifdef TARGET_NR_clock_getres
12841     case TARGET_NR_clock_getres:
12842     {
12843         struct timespec ts;
12844         ret = get_errno(clock_getres(arg1, &ts));
12845         if (!is_error(ret)) {
12846             host_to_target_timespec(arg2, &ts);
12847         }
12848         return ret;
12849     }
12850 #endif
12851 #ifdef TARGET_NR_clock_getres_time64
12852     case TARGET_NR_clock_getres_time64:
12853     {
12854         struct timespec ts;
12855         ret = get_errno(clock_getres(arg1, &ts));
12856         if (!is_error(ret)) {
12857             host_to_target_timespec64(arg2, &ts);
12858         }
12859         return ret;
12860     }
12861 #endif
12862 #ifdef TARGET_NR_clock_nanosleep
12863     case TARGET_NR_clock_nanosleep:
12864     {
12865         struct timespec ts;
12866         if (target_to_host_timespec(&ts, arg3)) {
12867             return -TARGET_EFAULT;
12868         }
12869         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12870                                              &ts, arg4 ? &ts : NULL));
12871         /*
12872          * If the call is interrupted by a signal handler, it fails with
12873          * the error -TARGET_EINTR and, if arg4 is not NULL and arg2 is not
12874          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12875          */
12876         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12877             host_to_target_timespec(arg4, &ts)) {
12878             return -TARGET_EFAULT;
12879         }
12880 
12881         return ret;
12882     }
12883 #endif
12884 #ifdef TARGET_NR_clock_nanosleep_time64
12885     case TARGET_NR_clock_nanosleep_time64:
12886     {
12887         struct timespec ts;
12888 
12889         if (target_to_host_timespec64(&ts, arg3)) {
12890             return -TARGET_EFAULT;
12891         }
12892 
12893         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12894                                              &ts, arg4 ? &ts : NULL));
12895 
12896         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12897             host_to_target_timespec64(arg4, &ts)) {
12898             return -TARGET_EFAULT;
12899         }
12900         return ret;
12901     }
12902 #endif
12903 
12904 #if defined(TARGET_NR_set_tid_address)
12905     case TARGET_NR_set_tid_address:
12906     {
12907         TaskState *ts = get_task_state(cpu);
12908         ts->child_tidptr = arg1;
12909         /* do not call host set_tid_address() syscall, instead return tid() */
12910         return get_errno(sys_gettid());
12911     }
12912 #endif
12913 
12914     case TARGET_NR_tkill:
12915         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12916 
12917     case TARGET_NR_tgkill:
12918         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12919                          target_to_host_signal(arg3)));
12920 
12921 #ifdef TARGET_NR_set_robust_list
12922     case TARGET_NR_set_robust_list:
12923     case TARGET_NR_get_robust_list:
12924         /* The ABI for supporting robust futexes has userspace pass
12925          * the kernel a pointer to a linked list which is updated by
12926          * userspace after the syscall; the list is walked by the kernel
12927          * when the thread exits. Since the linked list in QEMU guest
12928          * memory isn't a valid linked list for the host and we have
12929          * no way to reliably intercept the thread-death event, we can't
12930          * support these. Silently return ENOSYS so that guest userspace
12931          * falls back to a non-robust futex implementation (which should
12932          * be OK except in the corner case of the guest crashing while
12933          * holding a mutex that is shared with another process via
12934          * shared memory).
12935          */
12936         return -TARGET_ENOSYS;
12937 #endif
12938 
12939 #if defined(TARGET_NR_utimensat)
12940     case TARGET_NR_utimensat:
12941         {
12942             struct timespec *tsp, ts[2];
12943             if (!arg3) {
12944                 tsp = NULL;
12945             } else {
12946                 if (target_to_host_timespec(ts, arg3)) {
12947                     return -TARGET_EFAULT;
12948                 }
12949                 if (target_to_host_timespec(ts + 1, arg3 +
12950                                             sizeof(struct target_timespec))) {
12951                     return -TARGET_EFAULT;
12952                 }
12953                 tsp = ts;
12954             }
12955             if (!arg2)
12956                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12957             else {
12958                 if (!(p = lock_user_string(arg2))) {
12959                     return -TARGET_EFAULT;
12960                 }
12961                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12962                 unlock_user(p, arg2, 0);
12963             }
12964         }
12965         return ret;
12966 #endif
12967 #ifdef TARGET_NR_utimensat_time64
12968     case TARGET_NR_utimensat_time64:
12969         {
12970             struct timespec *tsp, ts[2];
12971             if (!arg3) {
12972                 tsp = NULL;
12973             } else {
12974                 if (target_to_host_timespec64(ts, arg3)) {
12975                     return -TARGET_EFAULT;
12976                 }
12977                 if (target_to_host_timespec64(ts + 1, arg3 +
12978                                      sizeof(struct target__kernel_timespec))) {
12979                     return -TARGET_EFAULT;
12980                 }
12981                 tsp = ts;
12982             }
12983             if (!arg2)
12984                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12985             else {
12986                 p = lock_user_string(arg2);
12987                 if (!p) {
12988                     return -TARGET_EFAULT;
12989                 }
12990                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12991                 unlock_user(p, arg2, 0);
12992             }
12993         }
12994         return ret;
12995 #endif
12996 #ifdef TARGET_NR_futex
12997     case TARGET_NR_futex:
12998         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12999 #endif
13000 #ifdef TARGET_NR_futex_time64
13001     case TARGET_NR_futex_time64:
13002         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13003 #endif
13004 #ifdef CONFIG_INOTIFY
13005 #if defined(TARGET_NR_inotify_init)
13006     case TARGET_NR_inotify_init:
13007         ret = get_errno(inotify_init());
13008         if (ret >= 0) {
13009             fd_trans_register(ret, &target_inotify_trans);
13010         }
13011         return ret;
13012 #endif
13013 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13014     case TARGET_NR_inotify_init1:
13015         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13016                                           fcntl_flags_tbl)));
13017         if (ret >= 0) {
13018             fd_trans_register(ret, &target_inotify_trans);
13019         }
13020         return ret;
13021 #endif
13022 #if defined(TARGET_NR_inotify_add_watch)
13023     case TARGET_NR_inotify_add_watch:
13024         p = lock_user_string(arg2);
13025         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13026         unlock_user(p, arg2, 0);
13027         return ret;
13028 #endif
13029 #if defined(TARGET_NR_inotify_rm_watch)
13030     case TARGET_NR_inotify_rm_watch:
13031         return get_errno(inotify_rm_watch(arg1, arg2));
13032 #endif
13033 #endif
13034 
13035 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13036     case TARGET_NR_mq_open:
13037         {
13038             struct mq_attr posix_mq_attr;
13039             struct mq_attr *pposix_mq_attr;
13040             int host_flags;
13041 
13042             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13043             pposix_mq_attr = NULL;
13044             if (arg4) {
13045                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13046                     return -TARGET_EFAULT;
13047                 }
13048                 pposix_mq_attr = &posix_mq_attr;
13049             }
13050             p = lock_user_string(arg1 - 1);
13051             if (!p) {
13052                 return -TARGET_EFAULT;
13053             }
13054             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13055             unlock_user(p, arg1, 0);
13056         }
13057         return ret;
13058 
13059     case TARGET_NR_mq_unlink:
13060         p = lock_user_string(arg1 - 1);
13061         if (!p) {
13062             return -TARGET_EFAULT;
13063         }
13064         ret = get_errno(mq_unlink(p));
13065         unlock_user(p, arg1, 0);
13066         return ret;
13067 
13068 #ifdef TARGET_NR_mq_timedsend
13069     case TARGET_NR_mq_timedsend:
13070         {
13071             struct timespec ts;
13072 
13073             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13074             if (arg5 != 0) {
13075                 if (target_to_host_timespec(&ts, arg5)) {
13076                     return -TARGET_EFAULT;
13077                 }
13078                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13079                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13080                     return -TARGET_EFAULT;
13081                 }
13082             } else {
13083                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13084             }
13085             unlock_user(p, arg2, arg3);
13086         }
13087         return ret;
13088 #endif
13089 #ifdef TARGET_NR_mq_timedsend_time64
13090     case TARGET_NR_mq_timedsend_time64:
13091         {
13092             struct timespec ts;
13093 
13094             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13095             if (arg5 != 0) {
13096                 if (target_to_host_timespec64(&ts, arg5)) {
13097                     return -TARGET_EFAULT;
13098                 }
13099                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13100                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13101                     return -TARGET_EFAULT;
13102                 }
13103             } else {
13104                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13105             }
13106             unlock_user(p, arg2, arg3);
13107         }
13108         return ret;
13109 #endif
13110 
13111 #ifdef TARGET_NR_mq_timedreceive
13112     case TARGET_NR_mq_timedreceive:
13113         {
13114             struct timespec ts;
13115             unsigned int prio;
13116 
13117             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13118             if (arg5 != 0) {
13119                 if (target_to_host_timespec(&ts, arg5)) {
13120                     return -TARGET_EFAULT;
13121                 }
13122                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13123                                                      &prio, &ts));
13124                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13125                     return -TARGET_EFAULT;
13126                 }
13127             } else {
13128                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13129                                                      &prio, NULL));
13130             }
13131             unlock_user(p, arg2, arg3);
13132             if (arg4 != 0)
13133                 put_user_u32(prio, arg4);
13134         }
13135         return ret;
13136 #endif
13137 #ifdef TARGET_NR_mq_timedreceive_time64
13138     case TARGET_NR_mq_timedreceive_time64:
13139         {
13140             struct timespec ts;
13141             unsigned int prio;
13142 
13143             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13144             if (arg5 != 0) {
13145                 if (target_to_host_timespec64(&ts, arg5)) {
13146                     return -TARGET_EFAULT;
13147                 }
13148                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13149                                                      &prio, &ts));
13150                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13151                     return -TARGET_EFAULT;
13152                 }
13153             } else {
13154                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13155                                                      &prio, NULL));
13156             }
13157             unlock_user(p, arg2, arg3);
13158             if (arg4 != 0) {
13159                 put_user_u32(prio, arg4);
13160             }
13161         }
13162         return ret;
13163 #endif
13164 
13165     /* Not implemented for now... */
13166 /*     case TARGET_NR_mq_notify: */
13167 /*         break; */
13168 
13169     case TARGET_NR_mq_getsetattr:
13170         {
13171             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13172             ret = 0;
13173             if (arg2 != 0) {
13174                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13175                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13176                                            &posix_mq_attr_out));
13177             } else if (arg3 != 0) {
13178                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13179             }
13180             if (ret == 0 && arg3 != 0) {
13181                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13182             }
13183         }
13184         return ret;
13185 #endif
13186 
13187 #ifdef CONFIG_SPLICE
13188 #ifdef TARGET_NR_tee
13189     case TARGET_NR_tee:
13190         {
13191             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13192         }
13193         return ret;
13194 #endif
13195 #ifdef TARGET_NR_splice
13196     case TARGET_NR_splice:
13197         {
13198             loff_t loff_in, loff_out;
13199             loff_t *ploff_in = NULL, *ploff_out = NULL;
13200             if (arg2) {
13201                 if (get_user_u64(loff_in, arg2)) {
13202                     return -TARGET_EFAULT;
13203                 }
13204                 ploff_in = &loff_in;
13205             }
13206             if (arg4) {
13207                 if (get_user_u64(loff_out, arg4)) {
13208                     return -TARGET_EFAULT;
13209                 }
13210                 ploff_out = &loff_out;
13211             }
13212             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13213             if (arg2) {
13214                 if (put_user_u64(loff_in, arg2)) {
13215                     return -TARGET_EFAULT;
13216                 }
13217             }
13218             if (arg4) {
13219                 if (put_user_u64(loff_out, arg4)) {
13220                     return -TARGET_EFAULT;
13221                 }
13222             }
13223         }
13224         return ret;
13225 #endif
13226 #ifdef TARGET_NR_vmsplice
13227     case TARGET_NR_vmsplice:
13228         {
13229             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13230             if (vec != NULL) {
13231                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13232                 unlock_iovec(vec, arg2, arg3, 0);
13233             } else {
13234                 ret = -host_to_target_errno(errno);
13235             }
13236         }
13237         return ret;
13238 #endif
13239 #endif /* CONFIG_SPLICE */
13240 #ifdef CONFIG_EVENTFD
13241 #if defined(TARGET_NR_eventfd)
13242     case TARGET_NR_eventfd:
13243         ret = get_errno(eventfd(arg1, 0));
13244         if (ret >= 0) {
13245             fd_trans_register(ret, &target_eventfd_trans);
13246         }
13247         return ret;
13248 #endif
13249 #if defined(TARGET_NR_eventfd2)
13250     case TARGET_NR_eventfd2:
13251     {
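              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share their values with
               * O_NONBLOCK and O_CLOEXEC, so only those two bits need
               * translating from the target's O_* encoding to the host's.
               */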
13252         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13253         if (arg2 & TARGET_O_NONBLOCK) {
13254             host_flags |= O_NONBLOCK;
13255         }
13256         if (arg2 & TARGET_O_CLOEXEC) {
13257             host_flags |= O_CLOEXEC;
13258         }
13259         ret = get_errno(eventfd(arg1, host_flags));
13260         if (ret >= 0) {
13261             fd_trans_register(ret, &target_eventfd_trans);
13262         }
13263         return ret;
13264     }
13265 #endif
13266 #endif /* CONFIG_EVENTFD  */
13267 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13268     case TARGET_NR_fallocate:
13269 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13270         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13271                                   target_offset64(arg5, arg6)));
13272 #else
13273         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13274 #endif
13275         return ret;
13276 #endif
13277 #if defined(CONFIG_SYNC_FILE_RANGE)
13278 #if defined(TARGET_NR_sync_file_range)
13279     case TARGET_NR_sync_file_range:
13280 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13281 #if defined(TARGET_MIPS)
13282         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13283                                         target_offset64(arg5, arg6), arg7));
13284 #else
13285         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13286                                         target_offset64(arg4, arg5), arg6));
13287 #endif /* !TARGET_MIPS */
13288 #else
13289         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13290 #endif
13291         return ret;
13292 #endif
13293 #if defined(TARGET_NR_sync_file_range2) || \
13294     defined(TARGET_NR_arm_sync_file_range)
13295 #if defined(TARGET_NR_sync_file_range2)
13296     case TARGET_NR_sync_file_range2:
13297 #endif
13298 #if defined(TARGET_NR_arm_sync_file_range)
13299     case TARGET_NR_arm_sync_file_range:
13300 #endif
13301         /* This is like sync_file_range but the arguments are reordered */
13302 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13303         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13304                                         target_offset64(arg5, arg6), arg2));
13305 #else
13306         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13307 #endif
13308         return ret;
13309 #endif
13310 #endif
13311 #if defined(TARGET_NR_signalfd4)
13312     case TARGET_NR_signalfd4:
13313         return do_signalfd4(arg1, arg2, arg4);
13314 #endif
13315 #if defined(TARGET_NR_signalfd)
13316     case TARGET_NR_signalfd:
13317         return do_signalfd4(arg1, arg2, 0);
13318 #endif
13319 #if defined(CONFIG_EPOLL)
13320 #if defined(TARGET_NR_epoll_create)
13321     case TARGET_NR_epoll_create:
13322         return get_errno(epoll_create(arg1));
13323 #endif
13324 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13325     case TARGET_NR_epoll_create1:
13326         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13327 #endif
13328 #if defined(TARGET_NR_epoll_ctl)
13329     case TARGET_NR_epoll_ctl:
13330     {
13331         struct epoll_event ep;
13332         struct epoll_event *epp = 0;
13333         if (arg4) {
13334             if (arg2 != EPOLL_CTL_DEL) {
13335                 struct target_epoll_event *target_ep;
13336                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13337                     return -TARGET_EFAULT;
13338                 }
13339                 ep.events = tswap32(target_ep->events);
13340                 /*
13341                  * The epoll_data_t union is just opaque data to the kernel,
13342                  * so we transfer all 64 bits across and need not worry what
13343                  * actual data type it is.
13344                  */
13345                 ep.data.u64 = tswap64(target_ep->data.u64);
13346                 unlock_user_struct(target_ep, arg4, 0);
13347             }
13348             /*
13349              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
13350              * a non-null pointer even though the argument is ignored,
13351              * so keep pointing epp at ep in that case as well.
13352              */
13353             epp = &ep;
13354         }
13355         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13356     }
13357 #endif
13358 
13359 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13360 #if defined(TARGET_NR_epoll_wait)
13361     case TARGET_NR_epoll_wait:
13362 #endif
13363 #if defined(TARGET_NR_epoll_pwait)
13364     case TARGET_NR_epoll_pwait:
13365 #endif
13366     {
13367         struct target_epoll_event *target_ep;
13368         struct epoll_event *ep;
13369         int epfd = arg1;
13370         int maxevents = arg3;
13371         int timeout = arg4;
13372 
13373         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13374             return -TARGET_EINVAL;
13375         }
13376 
13377         target_ep = lock_user(VERIFY_WRITE, arg2,
13378                               maxevents * sizeof(struct target_epoll_event), 1);
13379         if (!target_ep) {
13380             return -TARGET_EFAULT;
13381         }
13382 
13383         ep = g_try_new(struct epoll_event, maxevents);
13384         if (!ep) {
13385             unlock_user(target_ep, arg2, 0);
13386             return -TARGET_ENOMEM;
13387         }
13388 
13389         switch (num) {
13390 #if defined(TARGET_NR_epoll_pwait)
13391         case TARGET_NR_epoll_pwait:
13392         {
13393             sigset_t *set = NULL;
13394 
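                  /*
                   * arg5 points at the guest signal mask and arg6 is its
                   * size; process_sigsuspend_mask() converts it to a host
                   * sigset_t to be applied for the duration of the wait,
                   * and finish_sigsuspend_mask() completes the mask
                   * handling afterwards.
                   */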
13395             if (arg5) {
13396                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13397                 if (ret != 0) {
13398                     break;
13399                 }
13400             }
13401 
13402             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13403                                              set, SIGSET_T_SIZE));
13404 
13405             if (set) {
13406                 finish_sigsuspend_mask(ret);
13407             }
13408             break;
13409         }
13410 #endif
13411 #if defined(TARGET_NR_epoll_wait)
13412         case TARGET_NR_epoll_wait:
13413             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13414                                              NULL, 0));
13415             break;
13416 #endif
13417         default:
13418             ret = -TARGET_ENOSYS;
13419         }
13420         if (!is_error(ret)) {
13421             int i;
13422             for (i = 0; i < ret; i++) {
13423                 target_ep[i].events = tswap32(ep[i].events);
13424                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13425             }
13426             unlock_user(target_ep, arg2,
13427                         ret * sizeof(struct target_epoll_event));
13428         } else {
13429             unlock_user(target_ep, arg2, 0);
13430         }
13431         g_free(ep);
13432         return ret;
13433     }
13434 #endif
13435 #endif
13436 #ifdef TARGET_NR_prlimit64
13437     case TARGET_NR_prlimit64:
13438     {
13439         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13440         struct target_rlimit64 *target_rnew, *target_rold;
13441         struct host_rlimit64 rnew, rold, *rnewp = 0;
13442         int resource = target_to_host_resource(arg2);
13443 
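              /*
               * Don't forward the memory-related limits (RLIMIT_AS, DATA,
               * STACK) to the host: they would constrain the QEMU process
               * itself rather than just the guest program.
               */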
13444         if (arg3 && (resource != RLIMIT_AS &&
13445                      resource != RLIMIT_DATA &&
13446                      resource != RLIMIT_STACK)) {
13447             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13448                 return -TARGET_EFAULT;
13449             }
13450             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13451             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13452             unlock_user_struct(target_rnew, arg3, 0);
13453             rnewp = &rnew;
13454         }
13455 
13456         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13457         if (!is_error(ret) && arg4) {
13458             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13459                 return -TARGET_EFAULT;
13460             }
13461             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13462             __put_user(rold.rlim_max, &target_rold->rlim_max);
13463             unlock_user_struct(target_rold, arg4, 1);
13464         }
13465         return ret;
13466     }
13467 #endif
13468 #ifdef TARGET_NR_gethostname
13469     case TARGET_NR_gethostname:
13470     {
13471         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13472         if (name) {
13473             ret = get_errno(gethostname(name, arg2));
13474             unlock_user(name, arg1, arg2);
13475         } else {
13476             ret = -TARGET_EFAULT;
13477         }
13478         return ret;
13479     }
13480 #endif
13481 #ifdef TARGET_NR_atomic_cmpxchg_32
13482     case TARGET_NR_atomic_cmpxchg_32:
13483     {
13484         /* should use start_exclusive from main.c */
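              /*
               * Emulates the guest's atomic_cmpxchg_32 syscall: compare
               * the word at arg6 with arg2 and, if equal, store arg1
               * there; the previous memory value is returned either way.
               */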
13485         abi_ulong mem_value;
13486         if (get_user_u32(mem_value, arg6)) {
13487             target_siginfo_t info;
13488             info.si_signo = SIGSEGV;
13489             info.si_errno = 0;
13490             info.si_code = TARGET_SEGV_MAPERR;
13491             info._sifields._sigfault._addr = arg6;
13492             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13493             ret = 0xdeadbeef;
13494 
13495         }
13496         if (mem_value == arg2)
13497             put_user_u32(arg1, arg6);
13498         return mem_value;
13499     }
13500 #endif
13501 #ifdef TARGET_NR_atomic_barrier
13502     case TARGET_NR_atomic_barrier:
13503         /* Like the kernel implementation and the
13504            qemu arm barrier, no-op this? */
13505         return 0;
13506 #endif
13507 
13508 #ifdef TARGET_NR_timer_create
13509     case TARGET_NR_timer_create:
13510     {
13511         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13512 
13513         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13514 
13515         int clkid = arg1;
13516         int timer_index = next_free_host_timer();
13517 
13518         if (timer_index < 0) {
13519             ret = -TARGET_EAGAIN;
13520         } else {
13521             timer_t *phtimer = g_posix_timers + timer_index;
13522 
13523             if (arg2) {
13524                 phost_sevp = &host_sevp;
13525                 ret = target_to_host_sigevent(phost_sevp, arg2);
13526                 if (ret != 0) {
13527                     free_host_timer_slot(timer_index);
13528                     return ret;
13529                 }
13530             }
13531 
13532             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13533             if (ret) {
13534                 free_host_timer_slot(timer_index);
13535             } else {
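                      /*
                       * The guest-visible timer id encodes the slot index
                       * together with TIMER_MAGIC so that get_timer_id()
                       * can validate it on later timer_* calls.
                       */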
13536                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13537                     timer_delete(*phtimer);
13538                     free_host_timer_slot(timer_index);
13539                     return -TARGET_EFAULT;
13540                 }
13541             }
13542         }
13543         return ret;
13544     }
13545 #endif
13546 
13547 #ifdef TARGET_NR_timer_settime
13548     case TARGET_NR_timer_settime:
13549     {
13550         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13551          * struct itimerspec * old_value */
13552         target_timer_t timerid = get_timer_id(arg1);
13553 
13554         if (timerid < 0) {
13555             ret = timerid;
13556         } else if (arg3 == 0) {
13557             ret = -TARGET_EINVAL;
13558         } else {
13559             timer_t htimer = g_posix_timers[timerid];
13560             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13561 
13562             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13563                 return -TARGET_EFAULT;
13564             }
13565             ret = get_errno(
13566                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13567             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13568                 return -TARGET_EFAULT;
13569             }
13570         }
13571         return ret;
13572     }
13573 #endif
13574 
13575 #ifdef TARGET_NR_timer_settime64
13576     case TARGET_NR_timer_settime64:
13577     {
13578         target_timer_t timerid = get_timer_id(arg1);
13579 
13580         if (timerid < 0) {
13581             ret = timerid;
13582         } else if (arg3 == 0) {
13583             ret = -TARGET_EINVAL;
13584         } else {
13585             timer_t htimer = g_posix_timers[timerid];
13586             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13587 
13588             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13589                 return -TARGET_EFAULT;
13590             }
13591             ret = get_errno(
13592                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13593             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13594                 return -TARGET_EFAULT;
13595             }
13596         }
13597         return ret;
13598     }
13599 #endif
13600 
13601 #ifdef TARGET_NR_timer_gettime
13602     case TARGET_NR_timer_gettime:
13603     {
13604         /* args: timer_t timerid, struct itimerspec *curr_value */
13605         target_timer_t timerid = get_timer_id(arg1);
13606 
13607         if (timerid < 0) {
13608             ret = timerid;
13609         } else if (!arg2) {
13610             ret = -TARGET_EFAULT;
13611         } else {
13612             timer_t htimer = g_posix_timers[timerid];
13613             struct itimerspec hspec;
13614             ret = get_errno(timer_gettime(htimer, &hspec));
13615 
13616             if (host_to_target_itimerspec(arg2, &hspec)) {
13617                 ret = -TARGET_EFAULT;
13618             }
13619         }
13620         return ret;
13621     }
13622 #endif
13623 
13624 #ifdef TARGET_NR_timer_gettime64
13625     case TARGET_NR_timer_gettime64:
13626     {
13627         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13628         target_timer_t timerid = get_timer_id(arg1);
13629 
13630         if (timerid < 0) {
13631             ret = timerid;
13632         } else if (!arg2) {
13633             ret = -TARGET_EFAULT;
13634         } else {
13635             timer_t htimer = g_posix_timers[timerid];
13636             struct itimerspec hspec;
13637             ret = get_errno(timer_gettime(htimer, &hspec));
13638 
13639             if (host_to_target_itimerspec64(arg2, &hspec)) {
13640                 ret = -TARGET_EFAULT;
13641             }
13642         }
13643         return ret;
13644     }
13645 #endif
13646 
13647 #ifdef TARGET_NR_timer_getoverrun
13648     case TARGET_NR_timer_getoverrun:
13649     {
13650         /* args: timer_t timerid */
13651         target_timer_t timerid = get_timer_id(arg1);
13652 
13653         if (timerid < 0) {
13654             ret = timerid;
13655         } else {
13656             timer_t htimer = g_posix_timers[timerid];
13657             ret = get_errno(timer_getoverrun(htimer));
13658         }
13659         return ret;
13660     }
13661 #endif
13662 
13663 #ifdef TARGET_NR_timer_delete
13664     case TARGET_NR_timer_delete:
13665     {
13666         /* args: timer_t timerid */
13667         target_timer_t timerid = get_timer_id(arg1);
13668 
13669         if (timerid < 0) {
13670             ret = timerid;
13671         } else {
13672             timer_t htimer = g_posix_timers[timerid];
13673             ret = get_errno(timer_delete(htimer));
13674             free_host_timer_slot(timerid);
13675         }
13676         return ret;
13677     }
13678 #endif
13679 
13680 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13681     case TARGET_NR_timerfd_create:
13682         ret = get_errno(timerfd_create(arg1,
13683                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13684         if (ret >= 0) {
13685             fd_trans_register(ret, &target_timerfd_trans);
13686         }
13687         return ret;
13688 #endif
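    /*
     * Editorial note: fd_trans_register() attaches target_timerfd_trans to
     * the new descriptor so that the data later read from it (the 8-byte
     * expiration counter) can be adjusted for the guest; the translator
     * itself is defined elsewhere in this file.  Guest-side sketch of the
     * timerfd flow handled by this and the following cases (standard Linux
     * API, for illustration only):
     *
     *     #include <stdint.h>
     *     #include <sys/timerfd.h>
     *     #include <unistd.h>
     *
     *     int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
     *     struct itimerspec its = {
     *         .it_value    = { .tv_sec = 2 },
     *         .it_interval = { .tv_sec = 2 },
     *     };
     *     timerfd_settime(tfd, 0, &its, NULL);
     *
     *     uint64_t expirations;
     *     read(tfd, &expirations, sizeof(expirations));
     *
     * The read() blocks until the timer fires and returns the number of
     * expirations since the previous read.
     */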
13689 
13690 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13691     case TARGET_NR_timerfd_gettime:
13692         {
13693             struct itimerspec its_curr;
13694 
13695             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13696 
13697             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13698                 return -TARGET_EFAULT;
13699             }
13700         }
13701         return ret;
13702 #endif
13703 
13704 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13705     case TARGET_NR_timerfd_gettime64:
13706         {
13707             struct itimerspec its_curr;
13708 
13709             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13710 
13711             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13712                 return -TARGET_EFAULT;
13713             }
13714         }
13715         return ret;
13716 #endif
13717 
13718 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13719     case TARGET_NR_timerfd_settime:
13720         {
13721             struct itimerspec its_new, its_old, *p_new;
13722 
13723             if (arg3) {
13724                 if (target_to_host_itimerspec(&its_new, arg3)) {
13725                     return -TARGET_EFAULT;
13726                 }
13727                 p_new = &its_new;
13728             } else {
13729                 p_new = NULL;
13730             }
13731 
13732             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13733 
13734             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13735                 return -TARGET_EFAULT;
13736             }
13737         }
13738         return ret;
13739 #endif
13740 
13741 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13742     case TARGET_NR_timerfd_settime64:
13743         {
13744             struct itimerspec its_new, its_old, *p_new;
13745 
13746             if (arg3) {
13747                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13748                     return -TARGET_EFAULT;
13749                 }
13750                 p_new = &its_new;
13751             } else {
13752                 p_new = NULL;
13753             }
13754 
13755             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13756 
13757             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13758                 return -TARGET_EFAULT;
13759             }
13760         }
13761         return ret;
13762 #endif
13763 
13764 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13765     case TARGET_NR_ioprio_get:
13766         return get_errno(ioprio_get(arg1, arg2));
13767 #endif
13768 
13769 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13770     case TARGET_NR_ioprio_set:
13771         return get_errno(ioprio_set(arg1, arg2, arg3));
13772 #endif
13773 
13774 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13775     case TARGET_NR_setns:
13776         return get_errno(setns(arg1, arg2));
13777 #endif
13778 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13779     case TARGET_NR_unshare:
13780         return get_errno(unshare(arg1));
13781 #endif
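    /*
     * Editorial note: both setns and unshare are passed straight through to
     * the host.  Guest-side sketch (glibc wrapper from <sched.h>, for
     * illustration only): move the calling thread into a private mount
     * namespace.
     *
     *     #define _GNU_SOURCE
     *     #include <sched.h>
     *
     *     if (unshare(CLONE_NEWNS) != 0) {
     *         ... handle errno ...
     *     }
     */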
13782 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13783     case TARGET_NR_kcmp:
13784         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13785 #endif
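    /*
     * Editorial note: kcmp has no glibc wrapper, so a guest normally issues
     * it via syscall(), e.g. to check whether two descriptors in two
     * processes refer to the same open file description (sketch for
     * illustration only; pid1, pid2, fd1 and fd2 are assumed valid):
     *
     *     #include <linux/kcmp.h>
     *     #include <sys/syscall.h>
     *     #include <unistd.h>
     *
     *     int same = syscall(SYS_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2) == 0;
     */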
13786 #ifdef TARGET_NR_swapcontext
13787     case TARGET_NR_swapcontext:
13788         /* PowerPC specific.  */
13789         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13790 #endif
13791 #ifdef TARGET_NR_memfd_create
13792     case TARGET_NR_memfd_create:
13793         p = lock_user_string(arg1);
13794         if (!p) {
13795             return -TARGET_EFAULT;
13796         }
13797         ret = get_errno(memfd_create(p, arg2));
13798         fd_trans_unregister(ret);
13799         unlock_user(p, arg1, 0);
13800         return ret;
13801 #endif
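    /*
     * Illustrative guest-side usage of memfd_create (editorial sketch,
     * standard Linux API, not QEMU-specific): create an anonymous file,
     * give it a size and map it.
     *
     *     #define _GNU_SOURCE
     *     #include <sys/mman.h>
     *     #include <unistd.h>
     *
     *     int fd = memfd_create("scratch", MFD_CLOEXEC);
     *     ftruncate(fd, 4096);
     *     void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
     *                      MAP_SHARED, fd, 0);
     *
     * The name ("scratch" is arbitrary) is the string lock_user_string()
     * copies in from the guest above; it only appears in the /proc/<pid>/fd
     * symlinks.
     */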
13802 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13803     case TARGET_NR_membarrier:
13804         return get_errno(membarrier(arg1, arg2));
13805 #endif
13806 
13807 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13808     case TARGET_NR_copy_file_range:
13809         {
13810             loff_t inoff, outoff;
13811             loff_t *pinoff = NULL, *poutoff = NULL;
13812 
13813             if (arg2) {
13814                 if (get_user_u64(inoff, arg2)) {
13815                     return -TARGET_EFAULT;
13816                 }
13817                 pinoff = &inoff;
13818             }
13819             if (arg4) {
13820                 if (get_user_u64(outoff, arg4)) {
13821                     return -TARGET_EFAULT;
13822                 }
13823                 poutoff = &outoff;
13824             }
13825             /* Do not sign-extend the count parameter. */
13826             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13827                                                  (abi_ulong)arg5, arg6));
13828             if (!is_error(ret) && ret > 0) {
13829                 if (arg2) {
13830                     if (put_user_u64(inoff, arg2)) {
13831                         return -TARGET_EFAULT;
13832                     }
13833                 }
13834                 if (arg4) {
13835                     if (put_user_u64(outoff, arg4)) {
13836                         return -TARGET_EFAULT;
13837                     }
13838                 }
13839             }
13840         }
13841         return ret;
13842 #endif
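    /*
     * Editorial note: the offsets are written back to the guest only when
     * the call actually moved data (ret > 0), mirroring how the kernel
     * advances *off_in / *off_out while leaving the file offsets of the two
     * descriptors untouched.  Guest-side sketch using the glibc wrapper
     * (available since glibc 2.27; fd_in and fd_out are assumed to be open
     * descriptors; for illustration only):
     *
     *     #define _GNU_SOURCE
     *     #include <unistd.h>
     *
     *     off64_t in_off = 0, out_off = 0;
     *     ssize_t n = copy_file_range(fd_in, &in_off, fd_out, &out_off,
     *                                 65536, 0);
     *
     * On success both offsets have been advanced by n.
     */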
13843 
13844 #if defined(TARGET_NR_pivot_root)
13845     case TARGET_NR_pivot_root:
13846         {
13847             void *p2;
13848             p = lock_user_string(arg1); /* new_root */
13849             p2 = lock_user_string(arg2); /* put_old */
13850             if (!p || !p2) {
13851                 ret = -TARGET_EFAULT;
13852             } else {
13853                 ret = get_errno(pivot_root(p, p2));
13854             }
13855             unlock_user(p2, arg2, 0);
13856             unlock_user(p, arg1, 0);
13857         }
13858         return ret;
13859 #endif
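    /*
     * Editorial note: pivot_root has no glibc wrapper, so a guest normally
     * issues it through syscall(); new_root must itself be a mount point,
     * put_old must be at or underneath it, and the caller needs
     * CAP_SYS_ADMIN in its mount namespace.  Sketch for illustration only:
     *
     *     #include <sys/syscall.h>
     *     #include <unistd.h>
     *
     *     if (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot") != 0) {
     *         ... handle errno ...
     *     }
     */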
13860 
13861 #if defined(TARGET_NR_riscv_hwprobe)
13862     case TARGET_NR_riscv_hwprobe:
13863         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13864 #endif
13865 
13866     default:
13867         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13868         return -TARGET_ENOSYS;
13869     }
13870     return ret;
13871 }
13872 
13873 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13874                     abi_long arg2, abi_long arg3, abi_long arg4,
13875                     abi_long arg5, abi_long arg6, abi_long arg7,
13876                     abi_long arg8)
13877 {
13878     CPUState *cpu = env_cpu(cpu_env);
13879     abi_long ret;
13880 
13881 #ifdef DEBUG_ERESTARTSYS
13882     /* Debug-only code for exercising the syscall-restart code paths
13883      * in the per-architecture cpu main loops: restart every syscall
13884      * the guest makes once before letting it through.
13885      */
13886     {
13887         static bool flag;
13888         flag = !flag;
13889         if (flag) {
13890             return -QEMU_ERESTARTSYS;
13891         }
13892     }
13893 #endif
13894 
13895     record_syscall_start(cpu, num, arg1,
13896                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13897 
13898     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13899         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13900     }
13901 
13902     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13903                       arg5, arg6, arg7, arg8);
13904 
13905     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13906         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13907                           arg3, arg4, arg5, arg6);
13908     }
13909 
13910     record_syscall_return(cpu, num, ret);
13911     return ret;
13912 }
13913