xref: /qemu/linux-user/syscall.c (revision 7cef6d686309e2792186504ae17cf4f3eb57ef68)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/mmap-lock.h"
30 #include "exec/tb-flush.h"
31 #include "exec/translation-block.h"
32 #include <elf.h>
33 #include <endian.h>
34 #include <grp.h>
35 #include <sys/ipc.h>
36 #include <sys/msg.h>
37 #include <sys/wait.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/swap.h>
45 #include <linux/capability.h>
46 #include <sched.h>
47 #include <sys/timex.h>
48 #include <sys/socket.h>
49 #include <linux/sockios.h>
50 #include <sys/un.h>
51 #include <sys/uio.h>
52 #include <poll.h>
53 #include <sys/times.h>
54 #include <sys/shm.h>
55 #include <sys/sem.h>
56 #include <sys/statfs.h>
57 #include <utime.h>
58 #include <sys/sysinfo.h>
59 #include <sys/signalfd.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63 #include <netinet/udp.h>
64 #include <linux/wireless.h>
65 #include <linux/icmp.h>
66 #include <linux/icmpv6.h>
67 #include <linux/if_tun.h>
68 #include <linux/in6.h>
69 #include <linux/errqueue.h>
70 #include <linux/random.h>
71 #ifdef CONFIG_TIMERFD
72 #include <sys/timerfd.h>
73 #endif
74 #ifdef CONFIG_EVENTFD
75 #include <sys/eventfd.h>
76 #endif
77 #ifdef CONFIG_EPOLL
78 #include <sys/epoll.h>
79 #endif
80 #ifdef CONFIG_ATTR
81 #include "qemu/xattr.h"
82 #endif
83 #ifdef CONFIG_SENDFILE
84 #include <sys/sendfile.h>
85 #endif
86 #ifdef HAVE_SYS_KCOV_H
87 #include <sys/kcov.h>
88 #endif
89 
90 #define termios host_termios
91 #define winsize host_winsize
92 #define termio host_termio
93 #define sgttyb host_sgttyb /* same as target */
94 #define tchars host_tchars /* same as target */
95 #define ltchars host_ltchars /* same as target */
96 
97 #include <linux/termios.h>
98 #include <linux/unistd.h>
99 #include <linux/cdrom.h>
100 #include <linux/hdreg.h>
101 #include <linux/soundcard.h>
102 #include <linux/kd.h>
103 #include <linux/mtio.h>
104 #include <linux/fs.h>
105 #include <linux/fd.h>
106 #if defined(CONFIG_FIEMAP)
107 #include <linux/fiemap.h>
108 #endif
109 #include <linux/fb.h>
110 #if defined(CONFIG_USBFS)
111 #include <linux/usbdevice_fs.h>
112 #include <linux/usb/ch9.h>
113 #endif
114 #include <linux/vt.h>
115 #include <linux/dm-ioctl.h>
116 #include <linux/reboot.h>
117 #include <linux/route.h>
118 #include <linux/filter.h>
119 #include <linux/blkpg.h>
120 #include <netpacket/packet.h>
121 #include <linux/netlink.h>
122 #include <linux/if_alg.h>
123 #include <linux/rtc.h>
124 #include <sound/asound.h>
125 #ifdef HAVE_BTRFS_H
126 #include <linux/btrfs.h>
127 #endif
128 #ifdef HAVE_DRM_H
129 #include <libdrm/drm.h>
130 #include <libdrm/i915_drm.h>
131 #endif
132 #include "linux_loop.h"
133 #include "uname.h"
134 
135 #include "qemu.h"
136 #include "user-internals.h"
137 #include "strace.h"
138 #include "signal-common.h"
139 #include "loader.h"
140 #include "user-mmap.h"
141 #include "user/page-protection.h"
142 #include "user/safe-syscall.h"
143 #include "user/signal.h"
144 #include "qemu/guest-random.h"
145 #include "qemu/selfmap.h"
146 #include "user/syscall-trace.h"
147 #include "special-errno.h"
148 #include "qapi/error.h"
149 #include "fd-trans.h"
150 #include "user/cpu_loop.h"
151 
152 #ifndef CLONE_IO
153 #define CLONE_IO                0x80000000      /* Clone io context */
154 #endif
155 
156 /* We can't directly call the host clone syscall, because this will
157  * badly confuse libc (breaking mutexes, for example). So we must
158  * divide clone flags into:
159  *  * flag combinations that look like pthread_create()
160  *  * flag combinations that look like fork()
161  *  * flags we can implement within QEMU itself
162  *  * flags we can't support and will return an error for
163  */
164 /* For thread creation, all these flags must be present; for
165  * fork, none must be present.
166  */
167 #define CLONE_THREAD_FLAGS                              \
168     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
169      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
170 
171 /* These flags are ignored:
172  * CLONE_DETACHED is now ignored by the kernel;
173  * CLONE_IO is just an optimisation hint to the I/O scheduler
174  */
175 #define CLONE_IGNORED_FLAGS                     \
176     (CLONE_DETACHED | CLONE_IO)
177 
178 #ifndef CLONE_PIDFD
179 # define CLONE_PIDFD 0x00001000
180 #endif
181 
182 /* Flags for fork which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_FORK_FLAGS               \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
186 
187 /* Flags for thread creation which we can implement within QEMU itself */
188 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
189     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
190      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
191 
192 #define CLONE_INVALID_FORK_FLAGS                                        \
193     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
194 
195 #define CLONE_INVALID_THREAD_FLAGS                                      \
196     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
197        CLONE_IGNORED_FLAGS))
198 
199 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
200  * have almost all been allocated. We cannot support any of
201  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
202  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
203  * The checks against the invalid thread masks above will catch these.
204  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
205  */
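/*
 * Rough sketch of how these masks are intended to be used (the precise
 * checks live in do_fork() later in this file): a request whose flags
 * include all of CLONE_THREAD_FLAGS is emulated as a new thread and is
 * rejected if any bit in CLONE_INVALID_THREAD_FLAGS is also set;
 * otherwise it is emulated as a fork() and rejected if any bit in
 * CLONE_INVALID_FORK_FLAGS is set, e.g.
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *         ... create a host thread ...
 *     } else if (!(flags & CLONE_INVALID_FORK_FLAGS)) {
 *         ... fork() ...
 *     } else {
 *         return -TARGET_EINVAL;
 *     }
 */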
206 
207 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
208  * once. This exercises the codepaths for restart.
209  */
210 //#define DEBUG_ERESTARTSYS
211 
212 //#include <linux/msdos_fs.h>
213 #define VFAT_IOCTL_READDIR_BOTH \
214     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
215 #define VFAT_IOCTL_READDIR_SHORT \
216     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
217 
218 #undef _syscall0
219 #undef _syscall1
220 #undef _syscall2
221 #undef _syscall3
222 #undef _syscall4
223 #undef _syscall5
224 #undef _syscall6
225 
226 #define _syscall0(type,name)		\
227 static type name (void)			\
228 {					\
229 	return syscall(__NR_##name);	\
230 }
231 
232 #define _syscall1(type,name,type1,arg1)		\
233 static type name (type1 arg1)			\
234 {						\
235 	return syscall(__NR_##name, arg1);	\
236 }
237 
238 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
239 static type name (type1 arg1,type2 arg2)		\
240 {							\
241 	return syscall(__NR_##name, arg1, arg2);	\
242 }
243 
244 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
245 static type name (type1 arg1,type2 arg2,type3 arg3)		\
246 {								\
247 	return syscall(__NR_##name, arg1, arg2, arg3);		\
248 }
249 
250 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
252 {										\
253 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
254 }
255 
256 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
257 		  type5,arg5)							\
258 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
259 {										\
260 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
261 }
262 
263 
264 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
265 		  type5,arg5,type6,arg6)					\
266 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
267                   type6 arg6)							\
268 {										\
269 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
270 }
271 
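/*
 * For illustration, an invocation such as
 *
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data);
 *
 * expands to a small static wrapper
 *
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 *
 * i.e. a direct syscall(2) invocation that bypasses whatever wrapper
 * (if any) the host libc provides.
 */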
272 
273 #define __NR_sys_uname __NR_uname
274 #define __NR_sys_getcwd1 __NR_getcwd
275 #define __NR_sys_getdents __NR_getdents
276 #define __NR_sys_getdents64 __NR_getdents64
277 #define __NR_sys_getpriority __NR_getpriority
278 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
279 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
280 #define __NR_sys_syslog __NR_syslog
281 #if defined(__NR_futex)
282 # define __NR_sys_futex __NR_futex
283 #endif
284 #if defined(__NR_futex_time64)
285 # define __NR_sys_futex_time64 __NR_futex_time64
286 #endif
287 #define __NR_sys_statx __NR_statx
288 
289 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
290 #define __NR__llseek __NR_lseek
291 #endif
292 
293 /* Newer kernel ports have llseek() instead of _llseek() */
294 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
295 #define TARGET_NR__llseek TARGET_NR_llseek
296 #endif
297 
298 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
299 #ifndef TARGET_O_NONBLOCK_MASK
300 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
301 #endif
302 
303 #define __NR_sys_gettid __NR_gettid
304 _syscall0(int, sys_gettid)
305 
306 /* For the 64-bit guest on 32-bit host case we must emulate
307  * getdents using getdents64, because otherwise the host
308  * might hand us back more dirent records than we can fit
309  * into the guest buffer after structure format conversion.
310  * Otherwise we emulate getdents using the host getdents if it is available.
311  */
312 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
313 #define EMULATE_GETDENTS_WITH_GETDENTS
314 #endif
315 
316 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
317 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
318 #endif
319 #if (defined(TARGET_NR_getdents) && \
320       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
321     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
322 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
323 #endif
324 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
325 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
326           loff_t *, res, unsigned int, wh);
327 #endif
328 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
329 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
330           siginfo_t *, uinfo)
331 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
332 #ifdef __NR_exit_group
333 _syscall1(int,exit_group,int,error_code)
334 #endif
335 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
336 #define __NR_sys_close_range __NR_close_range
337 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
338 #ifndef CLOSE_RANGE_CLOEXEC
339 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
340 #endif
341 #endif
342 #if defined(__NR_futex)
343 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_futex_time64)
347 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
348           const struct timespec *,timeout,int *,uaddr2,int,val3)
349 #endif
350 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
351 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
354 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
355                              unsigned int, flags);
356 #endif
357 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
358 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
359 #endif
360 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
361 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
362           unsigned long *, user_mask_ptr);
363 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
364 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
365           unsigned long *, user_mask_ptr);
366 /* sched_attr is not defined in glibc < 2.41 */
367 #ifndef SCHED_ATTR_SIZE_VER0
368 struct sched_attr {
369     uint32_t size;
370     uint32_t sched_policy;
371     uint64_t sched_flags;
372     int32_t sched_nice;
373     uint32_t sched_priority;
374     uint64_t sched_runtime;
375     uint64_t sched_deadline;
376     uint64_t sched_period;
377     uint32_t sched_util_min;
378     uint32_t sched_util_max;
379 };
380 #endif
381 #define __NR_sys_sched_getattr __NR_sched_getattr
382 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
383           unsigned int, size, unsigned int, flags);
384 #define __NR_sys_sched_setattr __NR_sched_setattr
385 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
386           unsigned int, flags);
387 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
388 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
389 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
390 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
391           const struct sched_param *, param);
392 #define __NR_sys_sched_getparam __NR_sched_getparam
393 _syscall2(int, sys_sched_getparam, pid_t, pid,
394           struct sched_param *, param);
395 #define __NR_sys_sched_setparam __NR_sched_setparam
396 _syscall2(int, sys_sched_setparam, pid_t, pid,
397           const struct sched_param *, param);
398 #define __NR_sys_getcpu __NR_getcpu
399 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
400 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
401           void *, arg);
402 _syscall2(int, capget, struct __user_cap_header_struct *, header,
403           struct __user_cap_data_struct *, data);
404 _syscall2(int, capset, struct __user_cap_header_struct *, header,
405           struct __user_cap_data_struct *, data);
406 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
407 _syscall2(int, ioprio_get, int, which, int, who)
408 #endif
409 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
410 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
411 #endif
412 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
413 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
414 #endif
415 
416 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
417 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
418           unsigned long, idx1, unsigned long, idx2)
419 #endif
420 
421 /*
422  * It is assumed that struct statx is architecture independent.
423  */
424 #if defined(TARGET_NR_statx) && defined(__NR_statx)
425 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
426           unsigned int, mask, struct target_statx *, statxbuf)
427 #endif
428 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
429 _syscall2(int, membarrier, int, cmd, int, flags)
430 #endif
431 
432 static const bitmask_transtbl fcntl_flags_tbl[] = {
433   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
434   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
435   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
436   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
437   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
438   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
439   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
440   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
441   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
442   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
443   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
444   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
445   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
446 #if defined(O_DIRECT)
447   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
448 #endif
449 #if defined(O_NOATIME)
450   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
451 #endif
452 #if defined(O_CLOEXEC)
453   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
454 #endif
455 #if defined(O_PATH)
456   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
457 #endif
458 #if defined(O_TMPFILE)
459   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
460 #endif
461   /* Don't terminate the list prematurely on 64-bit host+guest.  */
462 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
463   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
464 #endif
465 };
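/*
 * The table above is meant to be fed to the generic bitmask translation
 * helpers, e.g. (illustrative)
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * which rewrites each guest O_* / FASYNC bit into the corresponding host
 * bit; host_to_target_bitmask() performs the reverse translation.
 */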
466 
467 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
468 
469 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
470 #if defined(__NR_utimensat)
471 #define __NR_sys_utimensat __NR_utimensat
472 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
473           const struct timespec *,tsp,int,flags)
474 #else
475 static int sys_utimensat(int dirfd, const char *pathname,
476                          const struct timespec times[2], int flags)
477 {
478     errno = ENOSYS;
479     return -1;
480 }
481 #endif
482 #endif /* TARGET_NR_utimensat */
483 
484 #ifdef TARGET_NR_renameat2
485 #if defined(__NR_renameat2)
486 #define __NR_sys_renameat2 __NR_renameat2
487 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
488           const char *, new, unsigned int, flags)
489 #else
490 static int sys_renameat2(int oldfd, const char *old,
491                          int newfd, const char *new, int flags)
492 {
493     if (flags == 0) {
494         return renameat(oldfd, old, newfd, new);
495     }
496     errno = ENOSYS;
497     return -1;
498 }
499 #endif
500 #endif /* TARGET_NR_renameat2 */
501 
502 #ifdef CONFIG_INOTIFY
503 #include <sys/inotify.h>
504 #else
505 /* Userspace can usually survive runtime without inotify */
506 #undef TARGET_NR_inotify_init
507 #undef TARGET_NR_inotify_init1
508 #undef TARGET_NR_inotify_add_watch
509 #undef TARGET_NR_inotify_rm_watch
510 #endif /* CONFIG_INOTIFY  */
511 
512 #if defined(TARGET_NR_prlimit64)
513 #ifndef __NR_prlimit64
514 # define __NR_prlimit64 -1
515 #endif
516 #define __NR_sys_prlimit64 __NR_prlimit64
517 /* The glibc rlimit structure may not match the one used by the underlying syscall */
518 struct host_rlimit64 {
519     uint64_t rlim_cur;
520     uint64_t rlim_max;
521 };
522 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
523           const struct host_rlimit64 *, new_limit,
524           struct host_rlimit64 *, old_limit)
525 #endif
526 
527 
528 #if defined(TARGET_NR_timer_create)
529 /* Maximum of 32 active POSIX timers allowed at any one time. */
530 #define GUEST_TIMER_MAX 32
531 static timer_t g_posix_timers[GUEST_TIMER_MAX];
532 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
533 
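/*
 * Host timer slots are claimed lock-free: next_free_host_timer() performs
 * an atomic 0 -> 1 exchange on g_posix_timer_allocated[k], so two guest
 * threads can never be handed the same slot; a return of -1 means all
 * GUEST_TIMER_MAX slots are currently in use.
 */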
534 static inline int next_free_host_timer(void)
535 {
536     int k;
537     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
538         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
539             return k;
540         }
541     }
542     return -1;
543 }
544 
545 static inline void free_host_timer_slot(int id)
546 {
547     qatomic_store_release(g_posix_timer_allocated + id, 0);
548 }
549 #endif
550 
551 static inline int host_to_target_errno(int host_errno)
552 {
553     switch (host_errno) {
554 #define E(X)  case X: return TARGET_##X;
555 #include "errnos.c.inc"
556 #undef E
557     default:
558         return host_errno;
559     }
560 }
561 
562 static inline int target_to_host_errno(int target_errno)
563 {
564     switch (target_errno) {
565 #define E(X)  case TARGET_##X: return X;
566 #include "errnos.c.inc"
567 #undef E
568     default:
569         return target_errno;
570     }
571 }
572 
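/*
 * Fold the host's -1/errno failure convention into the guest ABI's
 * negative-errno return convention.  For example, a host openat() that
 * fails with ENOENT comes back from get_errno() as -TARGET_ENOENT.
 */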
573 abi_long get_errno(abi_long ret)
574 {
575     if (ret == -1)
576         return -host_to_target_errno(errno);
577     else
578         return ret;
579 }
580 
581 const char *target_strerror(int err)
582 {
583     if (err == QEMU_ERESTARTSYS) {
584         return "To be restarted";
585     }
586     if (err == QEMU_ESIGRETURN) {
587         return "Successful exit from sigreturn";
588     }
589 
590     return strerror(target_to_host_errno(err));
591 }
592 
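/*
 * Check that the guest bytes in [addr + ksize, addr + usize) are all zero.
 * Returns 1 if they are (or if usize <= ksize), 0 if a non-zero byte is
 * found, and -TARGET_EFAULT if the guest memory cannot be read.
 */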
593 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
594 {
595     int i;
596     uint8_t b;
597     if (usize <= ksize) {
598         return 1;
599     }
600     for (i = ksize; i < usize; i++) {
601         if (get_user_u8(b, addr + i)) {
602             return -TARGET_EFAULT;
603         }
604         if (b != 0) {
605             return 0;
606         }
607     }
608     return 1;
609 }
610 
611 /*
612  * Copies a target struct to a host struct, in a way that guarantees
613  * backwards-compatibility for struct syscall arguments.
614  *
615  * Similar to the kernel's uaccess.h:copy_struct_from_user()
616  */
617 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
618 {
619     size_t size = MIN(ksize, usize);
620     size_t rest = MAX(ksize, usize) - size;
621 
622     /* Deal with trailing bytes. */
623     if (usize < ksize) {
624         memset(dst + size, 0, rest);
625     } else if (usize > ksize) {
626         int ret = check_zeroed_user(src, ksize, usize);
627         if (ret <= 0) {
628             return ret ?: -TARGET_E2BIG;
629         }
630     }
631     /* Copy the interoperable parts of the struct. */
632     if (copy_from_user(dst, src, size)) {
633         return -TARGET_EFAULT;
634     }
635     return 0;
636 }
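/*
 * Illustrative use (names are hypothetical): for an extensible struct
 * argument the caller would do
 *
 *     struct host_foo foo;
 *     ret = copy_struct_from_user(&foo, sizeof(foo), guest_addr, guest_size);
 *
 * If the guest struct is larger than the one QEMU knows about, the call
 * only succeeds when the extra guest bytes are zero; otherwise it fails
 * with -TARGET_E2BIG, matching the kernel's forward-compatibility rule.
 */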
637 
638 #define safe_syscall0(type, name) \
639 static type safe_##name(void) \
640 { \
641     return safe_syscall(__NR_##name); \
642 }
643 
644 #define safe_syscall1(type, name, type1, arg1) \
645 static type safe_##name(type1 arg1) \
646 { \
647     return safe_syscall(__NR_##name, arg1); \
648 }
649 
650 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
651 static type safe_##name(type1 arg1, type2 arg2) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2); \
654 }
655 
656 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
657 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
658 { \
659     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
660 }
661 
662 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
663     type4, arg4) \
664 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
665 { \
666     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
667 }
668 
669 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
670     type4, arg4, type5, arg5) \
671 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
672     type5 arg5) \
673 { \
674     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
675 }
676 
677 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
678     type4, arg4, type5, arg5, type6, arg6) \
679 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680     type5 arg5, type6 arg6) \
681 { \
682     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
683 }
684 
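/*
 * For illustration, safe_syscall3(ssize_t, read, ...) just below expands to
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * The point of safe_syscall() (see user/safe-syscall.h) is that a guest
 * signal arriving before the host syscall actually starts makes the call
 * fail with QEMU_ERESTARTSYS instead of being lost, so the guest syscall
 * can be transparently restarted after the signal is handled.
 */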
685 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
686 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
687 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
688               int, flags, mode_t, mode)
689 
690 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
691               const struct open_how_ver0 *, how, size_t, size)
692 
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
701               char **, argv, char **, envp, int, flags)
702 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
703     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
704 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
705               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
706 #endif
707 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
708 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
709               struct timespec *, tsp, const sigset_t *, sigmask,
710               size_t, sigsetsize)
711 #endif
712 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
713               int, maxevents, int, timeout, const sigset_t *, sigmask,
714               size_t, sigsetsize)
715 #if defined(__NR_futex)
716 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
717               const struct timespec *,timeout,int *,uaddr2,int,val3)
718 #endif
719 #if defined(__NR_futex_time64)
720 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
721               const struct timespec *,timeout,int *,uaddr2,int,val3)
722 #endif
723 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
724 safe_syscall2(int, kill, pid_t, pid, int, sig)
725 safe_syscall2(int, tkill, int, tid, int, sig)
726 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
727 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
729 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
732               unsigned long, pos_l, unsigned long, pos_h)
733 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
734               socklen_t, addrlen)
735 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
736               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
737 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
738               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
739 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
740 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
741 safe_syscall2(int, flock, int, fd, int, operation)
742 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
743 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
744               const struct timespec *, uts, size_t, sigsetsize)
745 #endif
746 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
747               int, flags)
748 #if defined(TARGET_NR_nanosleep)
749 safe_syscall2(int, nanosleep, const struct timespec *, req,
750               struct timespec *, rem)
751 #endif
752 #if defined(TARGET_NR_clock_nanosleep) || \
753     defined(TARGET_NR_clock_nanosleep_time64)
754 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
755               const struct timespec *, req, struct timespec *, rem)
756 #endif
757 #ifdef __NR_ipc
758 #ifdef __s390x__
759 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
760               void *, ptr)
761 #else
762 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
763               void *, ptr, long, fifth)
764 #endif
765 #endif
766 #ifdef __NR_msgsnd
767 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
768               int, flags)
769 #endif
770 #ifdef __NR_msgrcv
771 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
772               long, msgtype, int, flags)
773 #endif
774 #ifdef __NR_semtimedop
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776               unsigned, nsops, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedsend) || \
779     defined(TARGET_NR_mq_timedsend_time64)
780 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
781               size_t, len, unsigned, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_mq_timedreceive) || \
784     defined(TARGET_NR_mq_timedreceive_time64)
785 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
786               size_t, len, unsigned *, prio, const struct timespec *, timeout)
787 #endif
788 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
789 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
790               int, outfd, loff_t *, poutoff, size_t, length,
791               unsigned int, flags)
792 #endif
793 
794 /* We do ioctl like this rather than via safe_syscall3 to preserve the
795  * "third argument might be integer or pointer or not present" behaviour of
796  * the libc function.
797  */
798 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
799 /* Similarly for fcntl. Since we always build with LFS enabled,
800  * we should be using the 64-bit structures automatically.
801  */
802 #ifdef __NR_fcntl64
803 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
804 #else
805 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
806 #endif
807 
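/*
 * Translate a host socket type word (base type plus the SOCK_CLOEXEC /
 * SOCK_NONBLOCK modifier bits) into the guest encoding; for example
 * SOCK_STREAM | SOCK_NONBLOCK becomes
 * TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK.
 */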
808 static inline int host_to_target_sock_type(int host_type)
809 {
810     int target_type;
811 
812     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
813     case SOCK_DGRAM:
814         target_type = TARGET_SOCK_DGRAM;
815         break;
816     case SOCK_STREAM:
817         target_type = TARGET_SOCK_STREAM;
818         break;
819     default:
820         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
821         break;
822     }
823 
824 #if defined(SOCK_CLOEXEC)
825     if (host_type & SOCK_CLOEXEC) {
826         target_type |= TARGET_SOCK_CLOEXEC;
827     }
828 #endif
829 
830 #if defined(SOCK_NONBLOCK)
831     if (host_type & SOCK_NONBLOCK) {
832         target_type |= TARGET_SOCK_NONBLOCK;
833     }
834 #endif
835 
836     return target_type;
837 }
838 
839 static abi_ulong target_brk, initial_target_brk;
840 
841 void target_set_brk(abi_ulong new_brk)
842 {
843     target_brk = TARGET_PAGE_ALIGN(new_brk);
844     initial_target_brk = target_brk;
845 }
846 
847 /* do_brk() must return target values and target errnos. */
848 abi_long do_brk(abi_ulong brk_val)
849 {
850     abi_long mapped_addr;
851     abi_ulong new_brk;
852     abi_ulong old_brk;
853 
854     /* brk pointers are always untagged */
855 
856     /* do not allow shrinking below the initial brk value */
857     if (brk_val < initial_target_brk) {
858         return target_brk;
859     }
860 
861     new_brk = TARGET_PAGE_ALIGN(brk_val);
862     old_brk = TARGET_PAGE_ALIGN(target_brk);
863 
864     /* new and old target_brk might be on the same page */
865     if (new_brk == old_brk) {
866         target_brk = brk_val;
867         return target_brk;
868     }
869 
870     /* Release heap if necessary */
871     if (new_brk < old_brk) {
872         target_munmap(new_brk, old_brk - new_brk);
873 
874         target_brk = brk_val;
875         return target_brk;
876     }
877 
878     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
879                               PROT_READ | PROT_WRITE,
880                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
881                               -1, 0);
882 
883     if (mapped_addr == old_brk) {
884         target_brk = brk_val;
885         return target_brk;
886     }
887 
888 #if defined(TARGET_ALPHA)
889     /* We (partially) emulate OSF/1 on Alpha, which requires we
890        return a proper errno, not an unchanged brk value.  */
891     return -TARGET_ENOMEM;
892 #endif
893     /* For everything else, return the previous break. */
894     return target_brk;
895 }
896 
897 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
898     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
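/*
 * fd_set conversion helpers: the guest bitmap is an array of abi_ulong
 * words, with bit j of word i standing for descriptor i * TARGET_ABI_BITS
 * + j, so the helpers below repack bits one by one rather than copying
 * the buffer, which keeps them correct when host and guest word sizes or
 * endianness differ.
 */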
899 static inline abi_long copy_from_user_fdset(fd_set *fds,
900                                             abi_ulong target_fds_addr,
901                                             int n)
902 {
903     int i, nw, j, k;
904     abi_ulong b, *target_fds;
905 
906     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
907     if (!(target_fds = lock_user(VERIFY_READ,
908                                  target_fds_addr,
909                                  sizeof(abi_ulong) * nw,
910                                  1)))
911         return -TARGET_EFAULT;
912 
913     FD_ZERO(fds);
914     k = 0;
915     for (i = 0; i < nw; i++) {
916         /* grab the abi_ulong */
917         __get_user(b, &target_fds[i]);
918         for (j = 0; j < TARGET_ABI_BITS; j++) {
919             /* check the bit inside the abi_ulong */
920             if ((b >> j) & 1)
921                 FD_SET(k, fds);
922             k++;
923         }
924     }
925 
926     unlock_user(target_fds, target_fds_addr, 0);
927 
928     return 0;
929 }
930 
931 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
932                                                  abi_ulong target_fds_addr,
933                                                  int n)
934 {
935     if (target_fds_addr) {
936         if (copy_from_user_fdset(fds, target_fds_addr, n))
937             return -TARGET_EFAULT;
938         *fds_ptr = fds;
939     } else {
940         *fds_ptr = NULL;
941     }
942     return 0;
943 }
944 
945 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
946                                           const fd_set *fds,
947                                           int n)
948 {
949     int i, nw, j, k;
950     abi_long v;
951     abi_ulong *target_fds;
952 
953     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
954     if (!(target_fds = lock_user(VERIFY_WRITE,
955                                  target_fds_addr,
956                                  sizeof(abi_ulong) * nw,
957                                  0)))
958         return -TARGET_EFAULT;
959 
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         v = 0;
963         for (j = 0; j < TARGET_ABI_BITS; j++) {
964             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
965             k++;
966         }
967         __put_user(v, &target_fds[i]);
968     }
969 
970     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
971 
972     return 0;
973 }
974 #endif
975 
976 #if defined(__alpha__)
977 #define HOST_HZ 1024
978 #else
979 #define HOST_HZ 100
980 #endif
981 
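/*
 * Rescale a clock_t tick count from the host's HZ to the guest's; for
 * example 50 host ticks at HOST_HZ == 100 become 125 guest ticks on a
 * target with TARGET_HZ == 250.
 */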
982 static inline abi_long host_to_target_clock_t(long ticks)
983 {
984 #if HOST_HZ == TARGET_HZ
985     return ticks;
986 #else
987     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
988 #endif
989 }
990 
991 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
992                                              const struct rusage *rusage)
993 {
994     struct target_rusage *target_rusage;
995 
996     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
997         return -TARGET_EFAULT;
998     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
999     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1000     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1001     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1002     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1003     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1004     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1005     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1006     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1007     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1008     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1009     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1010     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1011     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1012     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1013     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1014     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1015     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1016     unlock_user_struct(target_rusage, target_addr, 1);
1017 
1018     return 0;
1019 }
1020 
1021 #ifdef TARGET_NR_setrlimit
1022 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1023 {
1024     abi_ulong target_rlim_swap;
1025     rlim_t result;
1026 
1027     target_rlim_swap = tswapal(target_rlim);
1028     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1029         return RLIM_INFINITY;
1030 
1031     result = target_rlim_swap;
1032     if (target_rlim_swap != (rlim_t)result)
1033         return RLIM_INFINITY;
1034 
1035     return result;
1036 }
1037 #endif
1038 
1039 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1040 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1041 {
1042     abi_ulong target_rlim_swap;
1043     abi_ulong result;
1044 
1045     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1046         target_rlim_swap = TARGET_RLIM_INFINITY;
1047     else
1048         target_rlim_swap = rlim;
1049     result = tswapal(target_rlim_swap);
1050 
1051     return result;
1052 }
1053 #endif
1054 
1055 static inline int target_to_host_resource(int code)
1056 {
1057     switch (code) {
1058     case TARGET_RLIMIT_AS:
1059         return RLIMIT_AS;
1060     case TARGET_RLIMIT_CORE:
1061         return RLIMIT_CORE;
1062     case TARGET_RLIMIT_CPU:
1063         return RLIMIT_CPU;
1064     case TARGET_RLIMIT_DATA:
1065         return RLIMIT_DATA;
1066     case TARGET_RLIMIT_FSIZE:
1067         return RLIMIT_FSIZE;
1068     case TARGET_RLIMIT_LOCKS:
1069         return RLIMIT_LOCKS;
1070     case TARGET_RLIMIT_MEMLOCK:
1071         return RLIMIT_MEMLOCK;
1072     case TARGET_RLIMIT_MSGQUEUE:
1073         return RLIMIT_MSGQUEUE;
1074     case TARGET_RLIMIT_NICE:
1075         return RLIMIT_NICE;
1076     case TARGET_RLIMIT_NOFILE:
1077         return RLIMIT_NOFILE;
1078     case TARGET_RLIMIT_NPROC:
1079         return RLIMIT_NPROC;
1080     case TARGET_RLIMIT_RSS:
1081         return RLIMIT_RSS;
1082     case TARGET_RLIMIT_RTPRIO:
1083         return RLIMIT_RTPRIO;
1084 #ifdef RLIMIT_RTTIME
1085     case TARGET_RLIMIT_RTTIME:
1086         return RLIMIT_RTTIME;
1087 #endif
1088     case TARGET_RLIMIT_SIGPENDING:
1089         return RLIMIT_SIGPENDING;
1090     case TARGET_RLIMIT_STACK:
1091         return RLIMIT_STACK;
1092     default:
1093         return code;
1094     }
1095 }
1096 
1097 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1098                                               abi_ulong target_tv_addr)
1099 {
1100     struct target_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __get_user(tv->tv_sec, &target_tv->tv_sec);
1107     __get_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 0);
1110 
1111     return 0;
1112 }
1113 
1114 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1115                                             const struct timeval *tv)
1116 {
1117     struct target_timeval *target_tv;
1118 
1119     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1120         return -TARGET_EFAULT;
1121     }
1122 
1123     __put_user(tv->tv_sec, &target_tv->tv_sec);
1124     __put_user(tv->tv_usec, &target_tv->tv_usec);
1125 
1126     unlock_user_struct(target_tv, target_tv_addr, 1);
1127 
1128     return 0;
1129 }
1130 
1131 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1132 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1133                                                 abi_ulong target_tv_addr)
1134 {
1135     struct target__kernel_sock_timeval *target_tv;
1136 
1137     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1138         return -TARGET_EFAULT;
1139     }
1140 
1141     __get_user(tv->tv_sec, &target_tv->tv_sec);
1142     __get_user(tv->tv_usec, &target_tv->tv_usec);
1143 
1144     unlock_user_struct(target_tv, target_tv_addr, 0);
1145 
1146     return 0;
1147 }
1148 #endif
1149 
1150 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1151                                               const struct timeval *tv)
1152 {
1153     struct target__kernel_sock_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1156         return -TARGET_EFAULT;
1157     }
1158 
1159     __put_user(tv->tv_sec, &target_tv->tv_sec);
1160     __put_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 1);
1163 
1164     return 0;
1165 }
1166 
1167 #if defined(TARGET_NR_futex) || \
1168     defined(TARGET_NR_rt_sigtimedwait) || \
1169     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1170     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1171     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1172     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1173     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1174     defined(TARGET_NR_timer_settime) || \
1175     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1176 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1177                                                abi_ulong target_addr)
1178 {
1179     struct target_timespec *target_ts;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1185     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1186     unlock_user_struct(target_ts, target_addr, 0);
1187     return 0;
1188 }
1189 #endif
1190 
1191 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1192     defined(TARGET_NR_timer_settime64) || \
1193     defined(TARGET_NR_mq_timedsend_time64) || \
1194     defined(TARGET_NR_mq_timedreceive_time64) || \
1195     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1196     defined(TARGET_NR_clock_nanosleep_time64) || \
1197     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1198     defined(TARGET_NR_utimensat) || \
1199     defined(TARGET_NR_utimensat_time64) || \
1200     defined(TARGET_NR_semtimedop_time64) || \
1201     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1202 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1203                                                  abi_ulong target_addr)
1204 {
1205     struct target__kernel_timespec *target_ts;
1206 
1207     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1208         return -TARGET_EFAULT;
1209     }
1210     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1211     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1212     /* in 32bit mode, this drops the padding */
1213     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1214     unlock_user_struct(target_ts, target_addr, 0);
1215     return 0;
1216 }
1217 #endif
1218 
1219 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1220                                                struct timespec *host_ts)
1221 {
1222     struct target_timespec *target_ts;
1223 
1224     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1225         return -TARGET_EFAULT;
1226     }
1227     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1228     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1229     unlock_user_struct(target_ts, target_addr, 1);
1230     return 0;
1231 }
1232 
1233 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1234                                                  struct timespec *host_ts)
1235 {
1236     struct target__kernel_timespec *target_ts;
1237 
1238     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1239         return -TARGET_EFAULT;
1240     }
1241     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1242     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1243     unlock_user_struct(target_ts, target_addr, 1);
1244     return 0;
1245 }
1246 
1247 #if defined(TARGET_NR_gettimeofday)
1248 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1249                                              struct timezone *tz)
1250 {
1251     struct target_timezone *target_tz;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1254         return -TARGET_EFAULT;
1255     }
1256 
1257     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1258     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1259 
1260     unlock_user_struct(target_tz, target_tz_addr, 1);
1261 
1262     return 0;
1263 }
1264 #endif
1265 
1266 #if defined(TARGET_NR_settimeofday)
1267 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1268                                                abi_ulong target_tz_addr)
1269 {
1270     struct target_timezone *target_tz;
1271 
1272     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1273         return -TARGET_EFAULT;
1274     }
1275 
1276     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1277     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1278 
1279     unlock_user_struct(target_tz, target_tz_addr, 0);
1280 
1281     return 0;
1282 }
1283 #endif
1284 
1285 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1286 #include <mqueue.h>
1287 
1288 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1289                                               abi_ulong target_mq_attr_addr)
1290 {
1291     struct target_mq_attr *target_mq_attr;
1292 
1293     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1294                           target_mq_attr_addr, 1))
1295         return -TARGET_EFAULT;
1296 
1297     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1298     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1299     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1300     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1301 
1302     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1303 
1304     return 0;
1305 }
1306 
1307 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1308                                             const struct mq_attr *attr)
1309 {
1310     struct target_mq_attr *target_mq_attr;
1311 
1312     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1313                           target_mq_attr_addr, 0))
1314         return -TARGET_EFAULT;
1315 
1316     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1317     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1318     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1319     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1320 
1321     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1322 
1323     return 0;
1324 }
1325 #endif
1326 
1327 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1328 /* do_select() must return target values and target errnos. */
1329 static abi_long do_select(int n,
1330                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1331                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1332 {
1333     fd_set rfds, wfds, efds;
1334     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1335     struct timeval tv;
1336     struct timespec ts, *ts_ptr;
1337     abi_long ret;
1338 
1339     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1340     if (ret) {
1341         return ret;
1342     }
1343     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1344     if (ret) {
1345         return ret;
1346     }
1347     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1348     if (ret) {
1349         return ret;
1350     }
1351 
1352     if (target_tv_addr) {
1353         if (copy_from_user_timeval(&tv, target_tv_addr))
1354             return -TARGET_EFAULT;
1355         ts.tv_sec = tv.tv_sec;
1356         ts.tv_nsec = tv.tv_usec * 1000;
1357         ts_ptr = &ts;
1358     } else {
1359         ts_ptr = NULL;
1360     }
1361 
1362     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1363                                   ts_ptr, NULL));
1364 
1365     if (!is_error(ret)) {
1366         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1367             return -TARGET_EFAULT;
1368         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1369             return -TARGET_EFAULT;
1370         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1371             return -TARGET_EFAULT;
1372 
1373         if (target_tv_addr) {
1374             tv.tv_sec = ts.tv_sec;
1375             tv.tv_usec = ts.tv_nsec / 1000;
1376             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1377                 return -TARGET_EFAULT;
1378             }
1379         }
1380     }
1381 
1382     return ret;
1383 }
1384 
1385 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1386 static abi_long do_old_select(abi_ulong arg1)
1387 {
1388     struct target_sel_arg_struct *sel;
1389     abi_ulong inp, outp, exp, tvp;
1390     long nsel;
1391 
1392     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1393         return -TARGET_EFAULT;
1394     }
1395 
1396     nsel = tswapal(sel->n);
1397     inp = tswapal(sel->inp);
1398     outp = tswapal(sel->outp);
1399     exp = tswapal(sel->exp);
1400     tvp = tswapal(sel->tvp);
1401 
1402     unlock_user_struct(sel, arg1, 0);
1403 
1404     return do_select(nsel, inp, outp, exp, tvp);
1405 }
1406 #endif
1407 #endif
1408 
1409 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1410 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1411                             abi_long arg4, abi_long arg5, abi_long arg6,
1412                             bool time64)
1413 {
1414     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1415     fd_set rfds, wfds, efds;
1416     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1417     struct timespec ts, *ts_ptr;
1418     abi_long ret;
1419 
1420     /*
1421      * The 6th arg is actually two args smashed together,
1422      * so we cannot use the C library.
1423      */
1424     struct {
1425         sigset_t *set;
1426         size_t size;
1427     } sig, *sig_ptr;
1428 
1429     abi_ulong arg_sigset, arg_sigsize, *arg7;
1430 
1431     n = arg1;
1432     rfd_addr = arg2;
1433     wfd_addr = arg3;
1434     efd_addr = arg4;
1435     ts_addr = arg5;
1436 
1437     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1438     if (ret) {
1439         return ret;
1440     }
1441     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1446     if (ret) {
1447         return ret;
1448     }
1449 
1450     /*
1451      * This takes a timespec, and not a timeval, so we cannot
1452      * use the do_select() helper ...
1453      */
1454     if (ts_addr) {
1455         if (time64) {
1456             if (target_to_host_timespec64(&ts, ts_addr)) {
1457                 return -TARGET_EFAULT;
1458             }
1459         } else {
1460             if (target_to_host_timespec(&ts, ts_addr)) {
1461                 return -TARGET_EFAULT;
1462             }
1463         }
1464         ts_ptr = &ts;
1465     } else {
1466         ts_ptr = NULL;
1467     }
1468 
1469     /* Extract the two packed args for the sigset */
1470     sig_ptr = NULL;
1471     if (arg6) {
1472         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1473         if (!arg7) {
1474             return -TARGET_EFAULT;
1475         }
1476         arg_sigset = tswapal(arg7[0]);
1477         arg_sigsize = tswapal(arg7[1]);
1478         unlock_user(arg7, arg6, 0);
1479 
1480         if (arg_sigset) {
1481             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1482             if (ret != 0) {
1483                 return ret;
1484             }
1485             sig_ptr = &sig;
1486             sig.size = SIGSET_T_SIZE;
1487         }
1488     }
1489 
1490     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1491                                   ts_ptr, sig_ptr));
1492 
1493     if (sig_ptr) {
1494         finish_sigsuspend_mask(ret);
1495     }
1496 
1497     if (!is_error(ret)) {
1498         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1499             return -TARGET_EFAULT;
1500         }
1501         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1502             return -TARGET_EFAULT;
1503         }
1504         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1505             return -TARGET_EFAULT;
1506         }
1507         if (time64) {
1508             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1509                 return -TARGET_EFAULT;
1510             }
1511         } else {
1512             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1513                 return -TARGET_EFAULT;
1514             }
1515         }
1516     }
1517     return ret;
1518 }
1519 #endif
1520 
1521 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1522     defined(TARGET_NR_ppoll_time64)
1523 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1524                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1525 {
1526     struct target_pollfd *target_pfd;
1527     unsigned int nfds = arg2;
1528     struct pollfd *pfd;
1529     unsigned int i;
1530     abi_long ret;
1531 
1532     pfd = NULL;
1533     target_pfd = NULL;
1534     if (nfds) {
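        /*
         * Reject nfds values for which the pollfd array size would
         * overflow an int; this bounds the byte counts passed to
         * lock_user() and alloca() below.
         */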
1535         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1536             return -TARGET_EINVAL;
1537         }
1538         target_pfd = lock_user(VERIFY_WRITE, arg1,
1539                                sizeof(struct target_pollfd) * nfds, 1);
1540         if (!target_pfd) {
1541             return -TARGET_EFAULT;
1542         }
1543 
1544         pfd = alloca(sizeof(struct pollfd) * nfds);
1545         for (i = 0; i < nfds; i++) {
1546             pfd[i].fd = tswap32(target_pfd[i].fd);
1547             pfd[i].events = tswap16(target_pfd[i].events);
1548         }
1549     }
1550     if (ppoll) {
1551         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1552         sigset_t *set = NULL;
1553 
1554         if (arg3) {
1555             if (time64) {
1556                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1557                     unlock_user(target_pfd, arg1, 0);
1558                     return -TARGET_EFAULT;
1559                 }
1560             } else {
1561                 if (target_to_host_timespec(timeout_ts, arg3)) {
1562                     unlock_user(target_pfd, arg1, 0);
1563                     return -TARGET_EFAULT;
1564                 }
1565             }
1566         } else {
1567             timeout_ts = NULL;
1568         }
1569 
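        /*
         * As in do_pselect6 above, a guest-supplied temporary signal mask
         * is installed via process_sigsuspend_mask() and only undone by
         * finish_sigsuspend_mask() once the result of the syscall is known.
         */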
1570         if (arg4) {
1571             ret = process_sigsuspend_mask(&set, arg4, arg5);
1572             if (ret != 0) {
1573                 unlock_user(target_pfd, arg1, 0);
1574                 return ret;
1575             }
1576         }
1577 
1578         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1579                                    set, SIGSET_T_SIZE));
1580 
1581         if (set) {
1582             finish_sigsuspend_mask(ret);
1583         }
1584         if (!is_error(ret) && arg3) {
1585             if (time64) {
1586                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1587                     return -TARGET_EFAULT;
1588                 }
1589             } else {
1590                 if (host_to_target_timespec(arg3, timeout_ts)) {
1591                     return -TARGET_EFAULT;
1592                 }
1593             }
1594         }
1595     } else {
1596         struct timespec ts, *pts;
1597 
1598         if (arg3 >= 0) {
1599             /* Convert ms to secs, ns */
1600             ts.tv_sec = arg3 / 1000;
1601             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1602             pts = &ts;
1603         } else {
1604             /* a negative poll() timeout means "infinite" */
1605             pts = NULL;
1606         }
1607         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1608     }
1609 
1610     if (!is_error(ret)) {
1611         for (i = 0; i < nfds; i++) {
1612             target_pfd[i].revents = tswap16(pfd[i].revents);
1613         }
1614     }
1615     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1616     return ret;
1617 }
1618 #endif
1619 
1620 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1621                         int flags, int is_pipe2)
1622 {
1623     int host_pipe[2];
1624     abi_long ret;
1625     ret = pipe2(host_pipe, flags);
1626 
1627     if (is_error(ret))
1628         return get_errno(ret);
1629 
1630     /* Several targets have special calling conventions for the original
1631        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1632     if (!is_pipe2) {
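        /*
         * On these targets the original pipe() returns the read end as the
         * syscall result and hands back the write end in a second register,
         * rather than storing both descriptors through the pointer argument.
         */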
1633 #if defined(TARGET_ALPHA)
1634         cpu_env->ir[IR_A4] = host_pipe[1];
1635         return host_pipe[0];
1636 #elif defined(TARGET_MIPS)
1637         cpu_env->active_tc.gpr[3] = host_pipe[1];
1638         return host_pipe[0];
1639 #elif defined(TARGET_SH4)
1640         cpu_env->gregs[1] = host_pipe[1];
1641         return host_pipe[0];
1642 #elif defined(TARGET_SPARC)
1643         cpu_env->regwptr[1] = host_pipe[1];
1644         return host_pipe[0];
1645 #endif
1646     }
1647 
1648     if (put_user_s32(host_pipe[0], pipedes)
1649         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1650         return -TARGET_EFAULT;
1651     return get_errno(ret);
1652 }
1653 
1654 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1655                                                abi_ulong target_addr,
1656                                                socklen_t len)
1657 {
1658     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1659     sa_family_t sa_family;
1660     struct target_sockaddr *target_saddr;
1661 
1662     if (fd_trans_target_to_host_addr(fd)) {
1663         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1664     }
1665 
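    /*
     * No per-fd translator is registered, so do the generic conversion:
     * copy the raw bytes, then byte-swap the fields that are not plain
     * octet strings (the family itself, netlink pid/groups, packet
     * ifindex/hatype, and the IPv6 scope id).
     */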
1666     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1667     if (!target_saddr)
1668         return -TARGET_EFAULT;
1669 
1670     sa_family = tswap16(target_saddr->sa_family);
1671 
1672     /* Oops. The caller might send an incomplete sun_path; sun_path
1673      * must be terminated by \0 (see the manual page), but
1674      * unfortunately it is quite common to specify the sockaddr_un
1675      * length as "strlen(x->sun_path)" when it should be
1676      * "strlen(...) + 1". We fix that up here if needed.
1677      * The Linux kernel applies the same fix-up.
1678      */
1679 
1680     if (sa_family == AF_UNIX) {
1681         if (len < unix_maxlen && len > 0) {
1682             char *cp = (char *)target_saddr;
1683 
1684             if (cp[len - 1] && !cp[len])
1685                 len++;
1686         }
1687         if (len > unix_maxlen)
1688             len = unix_maxlen;
1689     }
1690 
1691     memcpy(addr, target_saddr, len);
1692     addr->sa_family = sa_family;
1693     if (sa_family == AF_NETLINK) {
1694         struct sockaddr_nl *nladdr;
1695 
1696         nladdr = (struct sockaddr_nl *)addr;
1697         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1698         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1699     } else if (sa_family == AF_PACKET) {
1700         struct target_sockaddr_ll *lladdr;
1701 
1702         lladdr = (struct target_sockaddr_ll *)addr;
1703         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1704         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1705     } else if (sa_family == AF_INET6) {
1706         struct sockaddr_in6 *in6addr;
1707 
1708         in6addr = (struct sockaddr_in6 *)addr;
1709         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1710     }
1711     unlock_user(target_saddr, target_addr, 0);
1712 
1713     return 0;
1714 }
1715 
1716 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1717                                                struct sockaddr *addr,
1718                                                socklen_t len)
1719 {
1720     struct target_sockaddr *target_saddr;
1721 
1722     if (len == 0) {
1723         return 0;
1724     }
1725     assert(addr);
1726 
1727     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1728     if (!target_saddr)
1729         return -TARGET_EFAULT;
1730     memcpy(target_saddr, addr, len);
1731     if (len >= offsetof(struct target_sockaddr, sa_family) +
1732         sizeof(target_saddr->sa_family)) {
1733         target_saddr->sa_family = tswap16(addr->sa_family);
1734     }
1735     if (addr->sa_family == AF_NETLINK &&
1736         len >= sizeof(struct target_sockaddr_nl)) {
1737         struct target_sockaddr_nl *target_nl =
1738                (struct target_sockaddr_nl *)target_saddr;
1739         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1740         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1741     } else if (addr->sa_family == AF_PACKET) {
1742         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1743         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1744         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1745     } else if (addr->sa_family == AF_INET6 &&
1746                len >= sizeof(struct target_sockaddr_in6)) {
1747         struct target_sockaddr_in6 *target_in6 =
1748                (struct target_sockaddr_in6 *)target_saddr;
1749         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1750     }
1751     unlock_user(target_saddr, target_addr, len);
1752 
1753     return 0;
1754 }
1755 
1756 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1757                                            struct target_msghdr *target_msgh)
1758 {
1759     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1760     abi_long msg_controllen;
1761     abi_ulong target_cmsg_addr;
1762     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1763     socklen_t space = 0;
1764 
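    /*
     * Walk the target control-message chain and rebuild it in the host
     * msghdr: each header is byte-swapped, and payloads with structured
     * contents (SCM_RIGHTS fds, SCM_CREDENTIALS, SOL_ALG) are converted
     * field by field; anything else is copied verbatim with a warning.
     */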
1765     msg_controllen = tswapal(target_msgh->msg_controllen);
1766     if (msg_controllen < sizeof (struct target_cmsghdr))
1767         goto the_end;
1768     target_cmsg_addr = tswapal(target_msgh->msg_control);
1769     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1770     target_cmsg_start = target_cmsg;
1771     if (!target_cmsg)
1772         return -TARGET_EFAULT;
1773 
1774     while (cmsg && target_cmsg) {
1775         void *data = CMSG_DATA(cmsg);
1776         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1777 
1778         int len = tswapal(target_cmsg->cmsg_len)
1779             - sizeof(struct target_cmsghdr);
1780 
1781         space += CMSG_SPACE(len);
1782         if (space > msgh->msg_controllen) {
1783             space -= CMSG_SPACE(len);
1784             /* This is a QEMU bug, since we allocated the payload
1785              * area ourselves (unlike overflow in host-to-target
1786              * conversion, which is just the guest giving us a buffer
1787              * that's too small). It can't happen for the payload types
1788              * we currently support; if it becomes an issue in future
1789              * we would need to improve our allocation strategy to
1790              * something more intelligent than "twice the size of the
1791              * target buffer we're reading from".
1792              */
1793             qemu_log_mask(LOG_UNIMP,
1794                           ("Unsupported ancillary data %d/%d: "
1795                            "unhandled msg size\n"),
1796                           tswap32(target_cmsg->cmsg_level),
1797                           tswap32(target_cmsg->cmsg_type));
1798             break;
1799         }
1800 
1801         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1802             cmsg->cmsg_level = SOL_SOCKET;
1803         } else {
1804             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1805         }
1806         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1807         cmsg->cmsg_len = CMSG_LEN(len);
1808 
1809         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1810             int *fd = (int *)data;
1811             int *target_fd = (int *)target_data;
1812             int i, numfds = len / sizeof(int);
1813 
1814             for (i = 0; i < numfds; i++) {
1815                 __get_user(fd[i], target_fd + i);
1816             }
1817         } else if (cmsg->cmsg_level == SOL_SOCKET
1818                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1819             struct ucred *cred = (struct ucred *)data;
1820             struct target_ucred *target_cred =
1821                 (struct target_ucred *)target_data;
1822 
1823             __get_user(cred->pid, &target_cred->pid);
1824             __get_user(cred->uid, &target_cred->uid);
1825             __get_user(cred->gid, &target_cred->gid);
1826         } else if (cmsg->cmsg_level == SOL_ALG) {
1827             uint32_t *dst = (uint32_t *)data;
1828 
1829             memcpy(dst, target_data, len);
1830             /* fix endianness of first 32-bit word */
1831             if (len >= sizeof(uint32_t)) {
1832                 *dst = tswap32(*dst);
1833             }
1834         } else {
1835             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1836                           cmsg->cmsg_level, cmsg->cmsg_type);
1837             memcpy(data, target_data, len);
1838         }
1839 
1840         cmsg = CMSG_NXTHDR(msgh, cmsg);
1841         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1842                                          target_cmsg_start);
1843     }
1844     unlock_user(target_cmsg, target_cmsg_addr, 0);
1845  the_end:
1846     msgh->msg_controllen = space;
1847     return 0;
1848 }
1849 
1850 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1851                                            struct msghdr *msgh)
1852 {
1853     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1854     abi_long msg_controllen;
1855     abi_ulong target_cmsg_addr;
1856     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1857     socklen_t space = 0;
1858 
1859     msg_controllen = tswapal(target_msgh->msg_controllen);
1860     if (msg_controllen < sizeof (struct target_cmsghdr))
1861         goto the_end;
1862     target_cmsg_addr = tswapal(target_msgh->msg_control);
1863     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1864     target_cmsg_start = target_cmsg;
1865     if (!target_cmsg)
1866         return -TARGET_EFAULT;
1867 
1868     while (cmsg && target_cmsg) {
1869         void *data = CMSG_DATA(cmsg);
1870         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1871 
1872         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1873         int tgt_len, tgt_space;
1874 
1875         /* We never copy a half-header but may copy half-data;
1876          * this is Linux's behaviour in put_cmsg(). Note that
1877          * truncation here is a guest problem (which we report
1878          * to the guest via the CTRUNC bit), unlike truncation
1879          * in target_to_host_cmsg, which is a QEMU bug.
1880          */
1881         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1882             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1883             break;
1884         }
1885 
1886         if (cmsg->cmsg_level == SOL_SOCKET) {
1887             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1888         } else {
1889             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1890         }
1891         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1892 
1893         /* Payload types which need a different size of payload on
1894          * the target must adjust tgt_len here.
1895          */
1896         tgt_len = len;
1897         switch (cmsg->cmsg_level) {
1898         case SOL_SOCKET:
1899             switch (cmsg->cmsg_type) {
1900             case SO_TIMESTAMP:
1901                 tgt_len = sizeof(struct target_timeval);
1902                 break;
1903             default:
1904                 break;
1905             }
1906             break;
1907         default:
1908             break;
1909         }
1910 
1911         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1912             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1913             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1914         }
1915 
1916         /* We must now copy-and-convert len bytes of payload
1917          * into tgt_len bytes of destination space. Bear in mind
1918          * that in both source and destination we may be dealing
1919          * with a truncated value!
1920          */
1921         switch (cmsg->cmsg_level) {
1922         case SOL_SOCKET:
1923             switch (cmsg->cmsg_type) {
1924             case SCM_RIGHTS:
1925             {
1926                 int *fd = (int *)data;
1927                 int *target_fd = (int *)target_data;
1928                 int i, numfds = tgt_len / sizeof(int);
1929 
1930                 for (i = 0; i < numfds; i++) {
1931                     __put_user(fd[i], target_fd + i);
1932                 }
1933                 break;
1934             }
1935             case SO_TIMESTAMP:
1936             {
1937                 struct timeval *tv = (struct timeval *)data;
1938                 struct target_timeval *target_tv =
1939                     (struct target_timeval *)target_data;
1940 
1941                 if (len != sizeof(struct timeval) ||
1942                     tgt_len != sizeof(struct target_timeval)) {
1943                     goto unimplemented;
1944                 }
1945 
1946                 /* copy struct timeval to target */
1947                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1948                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1949                 break;
1950             }
1951             case SCM_CREDENTIALS:
1952             {
1953                 struct ucred *cred = (struct ucred *)data;
1954                 struct target_ucred *target_cred =
1955                     (struct target_ucred *)target_data;
1956 
1957                 __put_user(cred->pid, &target_cred->pid);
1958                 __put_user(cred->uid, &target_cred->uid);
1959                 __put_user(cred->gid, &target_cred->gid);
1960                 break;
1961             }
1962             default:
1963                 goto unimplemented;
1964             }
1965             break;
1966 
1967         case SOL_IP:
1968             switch (cmsg->cmsg_type) {
1969             case IP_TTL:
1970             {
1971                 uint32_t *v = (uint32_t *)data;
1972                 uint32_t *t_int = (uint32_t *)target_data;
1973 
1974                 if (len != sizeof(uint32_t) ||
1975                     tgt_len != sizeof(uint32_t)) {
1976                     goto unimplemented;
1977                 }
1978                 __put_user(*v, t_int);
1979                 break;
1980             }
1981             case IP_RECVERR:
1982             {
1983                 struct errhdr_t {
1984                    struct sock_extended_err ee;
1985                    struct sockaddr_in offender;
1986                 };
1987                 struct errhdr_t *errh = (struct errhdr_t *)data;
1988                 struct errhdr_t *target_errh =
1989                     (struct errhdr_t *)target_data;
1990 
1991                 if (len != sizeof(struct errhdr_t) ||
1992                     tgt_len != sizeof(struct errhdr_t)) {
1993                     goto unimplemented;
1994                 }
1995                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1996                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1997                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1998                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1999                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2000                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2001                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2002                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2003                     (void *) &errh->offender, sizeof(errh->offender));
2004                 break;
2005             }
2006             case IP_PKTINFO:
2007             {
2008                 struct in_pktinfo *pkti = data;
2009                 struct target_in_pktinfo *target_pi = target_data;
2010 
2011                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2012                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2013                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2014                 break;
2015             }
2016             default:
2017                 goto unimplemented;
2018             }
2019             break;
2020 
2021         case SOL_IPV6:
2022             switch (cmsg->cmsg_type) {
2023             case IPV6_HOPLIMIT:
2024             {
2025                 uint32_t *v = (uint32_t *)data;
2026                 uint32_t *t_int = (uint32_t *)target_data;
2027 
2028                 if (len != sizeof(uint32_t) ||
2029                     tgt_len != sizeof(uint32_t)) {
2030                     goto unimplemented;
2031                 }
2032                 __put_user(*v, t_int);
2033                 break;
2034             }
2035             case IPV6_RECVERR:
2036             {
2037                 struct errhdr6_t {
2038                    struct sock_extended_err ee;
2039                    struct sockaddr_in6 offender;
2040                 };
2041                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2042                 struct errhdr6_t *target_errh =
2043                     (struct errhdr6_t *)target_data;
2044 
2045                 if (len != sizeof(struct errhdr6_t) ||
2046                     tgt_len != sizeof(struct errhdr6_t)) {
2047                     goto unimplemented;
2048                 }
2049                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2050                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2051                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2052                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2053                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2054                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2055                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2056                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2057                     (void *) &errh->offender, sizeof(errh->offender));
2058                 break;
2059             }
2060             default:
2061                 goto unimplemented;
2062             }
2063             break;
2064 
2065         default:
2066         unimplemented:
2067             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2068                           cmsg->cmsg_level, cmsg->cmsg_type);
2069             memcpy(target_data, data, MIN(len, tgt_len));
2070             if (tgt_len > len) {
2071                 memset(target_data + len, 0, tgt_len - len);
2072             }
2073         }
2074 
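        /*
         * Account for the space consumed in the target control buffer:
         * cmsg_len records the (possibly truncated) converted payload,
         * while the aligned TARGET_CMSG_SPACE amount, clamped to what is
         * left of msg_controllen, is what advances the running total.
         */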
2075         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2076         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2077         if (msg_controllen < tgt_space) {
2078             tgt_space = msg_controllen;
2079         }
2080         msg_controllen -= tgt_space;
2081         space += tgt_space;
2082         cmsg = CMSG_NXTHDR(msgh, cmsg);
2083         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2084                                          target_cmsg_start);
2085     }
2086     unlock_user(target_cmsg, target_cmsg_addr, space);
2087  the_end:
2088     target_msgh->msg_controllen = tswapal(space);
2089     return 0;
2090 }
2091 
2092 /* do_setsockopt() must return target values and target errnos. */
2093 static abi_long do_setsockopt(int sockfd, int level, int optname,
2094                               abi_ulong optval_addr, socklen_t optlen)
2095 {
2096     abi_long ret;
2097     int val;
2098 
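    /*
     * The SOL_SOCKET level and the SO_* option numbers differ between
     * target architectures, so the TARGET_SOL_SOCKET branch below maps
     * each TARGET_SO_* constant to its host SO_* value; the other levels
     * handled here pass level and optname through unchanged.
     */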
2099     switch(level) {
2100     case SOL_TCP:
2101     case SOL_UDP:
2102         /* TCP and UDP options all take an 'int' value.  */
2103         if (optlen < sizeof(uint32_t))
2104             return -TARGET_EINVAL;
2105 
2106         if (get_user_u32(val, optval_addr))
2107             return -TARGET_EFAULT;
2108         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2109         break;
2110     case SOL_IP:
2111         switch(optname) {
2112         case IP_TOS:
2113         case IP_TTL:
2114         case IP_HDRINCL:
2115         case IP_ROUTER_ALERT:
2116         case IP_RECVOPTS:
2117         case IP_RETOPTS:
2118         case IP_PKTINFO:
2119         case IP_MTU_DISCOVER:
2120         case IP_RECVERR:
2121         case IP_RECVTTL:
2122         case IP_RECVTOS:
2123 #ifdef IP_FREEBIND
2124         case IP_FREEBIND:
2125 #endif
2126         case IP_MULTICAST_TTL:
2127         case IP_MULTICAST_LOOP:
2128             val = 0;
2129             if (optlen >= sizeof(uint32_t)) {
2130                 if (get_user_u32(val, optval_addr))
2131                     return -TARGET_EFAULT;
2132             } else if (optlen >= 1) {
2133                 if (get_user_u8(val, optval_addr))
2134                     return -TARGET_EFAULT;
2135             }
2136             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2137             break;
2138         case IP_MULTICAST_IF:
2139         case IP_ADD_MEMBERSHIP:
2140         case IP_DROP_MEMBERSHIP:
2141         {
2142             struct ip_mreqn ip_mreq;
2143             struct target_ip_mreqn *target_smreqn;
2144             int min_size;
2145 
2146             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2147                               sizeof(struct target_ip_mreq));
2148 
2149             if (optname == IP_MULTICAST_IF) {
2150                 min_size = sizeof(struct in_addr);
2151             } else {
2152                 min_size = sizeof(struct target_ip_mreq);
2153             }
2154             if (optlen < min_size ||
2155                 optlen > sizeof (struct target_ip_mreqn)) {
2156                 return -TARGET_EINVAL;
2157             }
2158 
2159             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2160             if (!target_smreqn) {
2161                 return -TARGET_EFAULT;
2162             }
2163             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2164             if (optlen >= sizeof(struct target_ip_mreq)) {
2165                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2166                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2167                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2168                     optlen = sizeof(struct ip_mreqn);
2169                 }
2170             }
2171             unlock_user(target_smreqn, optval_addr, 0);
2172             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2173             break;
2174         }
2175         case IP_BLOCK_SOURCE:
2176         case IP_UNBLOCK_SOURCE:
2177         case IP_ADD_SOURCE_MEMBERSHIP:
2178         case IP_DROP_SOURCE_MEMBERSHIP:
2179         {
2180             struct ip_mreq_source *ip_mreq_source;
2181 
2182             if (optlen != sizeof (struct target_ip_mreq_source))
2183                 return -TARGET_EINVAL;
2184 
2185             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2186             if (!ip_mreq_source) {
2187                 return -TARGET_EFAULT;
2188             }
2189             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2190             unlock_user(ip_mreq_source, optval_addr, 0);
2191             break;
2192         }
2193         default:
2194             goto unimplemented;
2195         }
2196         break;
2197     case SOL_IPV6:
2198         switch (optname) {
2199         case IPV6_MTU_DISCOVER:
2200         case IPV6_MTU:
2201         case IPV6_V6ONLY:
2202         case IPV6_RECVPKTINFO:
2203         case IPV6_UNICAST_HOPS:
2204         case IPV6_MULTICAST_HOPS:
2205         case IPV6_MULTICAST_LOOP:
2206         case IPV6_RECVERR:
2207         case IPV6_RECVHOPLIMIT:
2208         case IPV6_2292HOPLIMIT:
2209         case IPV6_CHECKSUM:
2210         case IPV6_ADDRFORM:
2211         case IPV6_2292PKTINFO:
2212         case IPV6_RECVTCLASS:
2213         case IPV6_RECVRTHDR:
2214         case IPV6_2292RTHDR:
2215         case IPV6_RECVHOPOPTS:
2216         case IPV6_2292HOPOPTS:
2217         case IPV6_RECVDSTOPTS:
2218         case IPV6_2292DSTOPTS:
2219         case IPV6_TCLASS:
2220         case IPV6_ADDR_PREFERENCES:
2221 #ifdef IPV6_RECVPATHMTU
2222         case IPV6_RECVPATHMTU:
2223 #endif
2224 #ifdef IPV6_TRANSPARENT
2225         case IPV6_TRANSPARENT:
2226 #endif
2227 #ifdef IPV6_FREEBIND
2228         case IPV6_FREEBIND:
2229 #endif
2230 #ifdef IPV6_RECVORIGDSTADDR
2231         case IPV6_RECVORIGDSTADDR:
2232 #endif
2233             val = 0;
2234             if (optlen < sizeof(uint32_t)) {
2235                 return -TARGET_EINVAL;
2236             }
2237             if (get_user_u32(val, optval_addr)) {
2238                 return -TARGET_EFAULT;
2239             }
2240             ret = get_errno(setsockopt(sockfd, level, optname,
2241                                        &val, sizeof(val)));
2242             break;
2243         case IPV6_PKTINFO:
2244         {
2245             struct in6_pktinfo pki;
2246 
2247             if (optlen < sizeof(pki)) {
2248                 return -TARGET_EINVAL;
2249             }
2250 
2251             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2252                 return -TARGET_EFAULT;
2253             }
2254 
2255             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2256 
2257             ret = get_errno(setsockopt(sockfd, level, optname,
2258                                        &pki, sizeof(pki)));
2259             break;
2260         }
2261         case IPV6_ADD_MEMBERSHIP:
2262         case IPV6_DROP_MEMBERSHIP:
2263         {
2264             struct ipv6_mreq ipv6mreq;
2265 
2266             if (optlen < sizeof(ipv6mreq)) {
2267                 return -TARGET_EINVAL;
2268             }
2269 
2270             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2271                 return -TARGET_EFAULT;
2272             }
2273 
2274             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2275 
2276             ret = get_errno(setsockopt(sockfd, level, optname,
2277                                        &ipv6mreq, sizeof(ipv6mreq)));
2278             break;
2279         }
2280         default:
2281             goto unimplemented;
2282         }
2283         break;
2284     case SOL_ICMPV6:
2285         switch (optname) {
2286         case ICMPV6_FILTER:
2287         {
2288             struct icmp6_filter icmp6f;
2289 
2290             if (optlen > sizeof(icmp6f)) {
2291                 optlen = sizeof(icmp6f);
2292             }
2293 
2294             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2295                 return -TARGET_EFAULT;
2296             }
2297 
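            /*
             * The ICMPv6 filter is a 256-bit type bitmap held in eight
             * 32-bit words; swap each word to host byte order before
             * handing it to the host setsockopt().
             */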
2298             for (val = 0; val < 8; val++) {
2299                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2300             }
2301 
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &icmp6f, optlen));
2304             break;
2305         }
2306         default:
2307             goto unimplemented;
2308         }
2309         break;
2310     case SOL_RAW:
2311         switch (optname) {
2312         case ICMP_FILTER:
2313         case IPV6_CHECKSUM:
2314             /* these take a u32 value */
2315             if (optlen < sizeof(uint32_t)) {
2316                 return -TARGET_EINVAL;
2317             }
2318 
2319             if (get_user_u32(val, optval_addr)) {
2320                 return -TARGET_EFAULT;
2321             }
2322             ret = get_errno(setsockopt(sockfd, level, optname,
2323                                        &val, sizeof(val)));
2324             break;
2325 
2326         default:
2327             goto unimplemented;
2328         }
2329         break;
2330 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2331     case SOL_ALG:
2332         switch (optname) {
2333         case ALG_SET_KEY:
2334         {
2335             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2336             if (!alg_key) {
2337                 return -TARGET_EFAULT;
2338             }
2339             ret = get_errno(setsockopt(sockfd, level, optname,
2340                                        alg_key, optlen));
2341             unlock_user(alg_key, optval_addr, optlen);
2342             break;
2343         }
2344         case ALG_SET_AEAD_AUTHSIZE:
2345         {
2346             ret = get_errno(setsockopt(sockfd, level, optname,
2347                                        NULL, optlen));
2348             break;
2349         }
2350         default:
2351             goto unimplemented;
2352         }
2353         break;
2354 #endif
2355     case TARGET_SOL_SOCKET:
2356         switch (optname) {
2357         case TARGET_SO_RCVTIMEO:
2358         case TARGET_SO_SNDTIMEO:
2359         {
2360                 struct timeval tv;
2361 
2362                 if (optlen != sizeof(struct target_timeval)) {
2363                     return -TARGET_EINVAL;
2364                 }
2365 
2366                 if (copy_from_user_timeval(&tv, optval_addr)) {
2367                     return -TARGET_EFAULT;
2368                 }
2369 
2370                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2371                                 optname == TARGET_SO_RCVTIMEO ?
2372                                     SO_RCVTIMEO : SO_SNDTIMEO,
2373                                 &tv, sizeof(tv)));
2374                 return ret;
2375         }
2376         case TARGET_SO_ATTACH_FILTER:
2377         {
2378                 struct target_sock_fprog *tfprog;
2379                 struct target_sock_filter *tfilter;
2380                 struct sock_fprog fprog;
2381                 struct sock_filter *filter;
2382                 int i;
2383 
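                /*
                 * The guest passes a target_sock_fprog describing a BPF
                 * program; both the fprog and every sock_filter instruction
                 * are copied and byte-swapped into host format before the
                 * SO_ATTACH_FILTER call, and the temporary copy is freed
                 * afterwards.
                 */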
2384                 if (optlen != sizeof(*tfprog)) {
2385                     return -TARGET_EINVAL;
2386                 }
2387                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2388                     return -TARGET_EFAULT;
2389                 }
2390                 if (!lock_user_struct(VERIFY_READ, tfilter,
2391                                       tswapal(tfprog->filter), 0)) {
2392                     unlock_user_struct(tfprog, optval_addr, 1);
2393                     return -TARGET_EFAULT;
2394                 }
2395 
2396                 fprog.len = tswap16(tfprog->len);
2397                 filter = g_try_new(struct sock_filter, fprog.len);
2398                 if (filter == NULL) {
2399                     unlock_user_struct(tfilter, tfprog->filter, 1);
2400                     unlock_user_struct(tfprog, optval_addr, 1);
2401                     return -TARGET_ENOMEM;
2402                 }
2403                 for (i = 0; i < fprog.len; i++) {
2404                     filter[i].code = tswap16(tfilter[i].code);
2405                     filter[i].jt = tfilter[i].jt;
2406                     filter[i].jf = tfilter[i].jf;
2407                     filter[i].k = tswap32(tfilter[i].k);
2408                 }
2409                 fprog.filter = filter;
2410 
2411                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2412                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2413                 g_free(filter);
2414 
2415                 unlock_user_struct(tfilter, tfprog->filter, 1);
2416                 unlock_user_struct(tfprog, optval_addr, 1);
2417                 return ret;
2418         }
2419         case TARGET_SO_BINDTODEVICE:
2420         {
2421                 char *dev_ifname, *addr_ifname;
2422 
2423                 if (optlen > IFNAMSIZ - 1) {
2424                     optlen = IFNAMSIZ - 1;
2425                 }
2426                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2427                 if (!dev_ifname) {
2428                     return -TARGET_EFAULT;
2429                 }
2430                 optname = SO_BINDTODEVICE;
2431                 addr_ifname = alloca(IFNAMSIZ);
2432                 memcpy(addr_ifname, dev_ifname, optlen);
2433                 addr_ifname[optlen] = 0;
2434                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2435                                            addr_ifname, optlen));
2436                 unlock_user(dev_ifname, optval_addr, 0);
2437                 return ret;
2438         }
2439         case TARGET_SO_LINGER:
2440         {
2441                 struct linger lg;
2442                 struct target_linger *tlg;
2443 
2444                 if (optlen != sizeof(struct target_linger)) {
2445                     return -TARGET_EINVAL;
2446                 }
2447                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2448                     return -TARGET_EFAULT;
2449                 }
2450                 __get_user(lg.l_onoff, &tlg->l_onoff);
2451                 __get_user(lg.l_linger, &tlg->l_linger);
2452                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2453                                 &lg, sizeof(lg)));
2454                 unlock_user_struct(tlg, optval_addr, 0);
2455                 return ret;
2456         }
2457         /* Options with 'int' argument.  */
2458         case TARGET_SO_DEBUG:
2459                 optname = SO_DEBUG;
2460                 break;
2461         case TARGET_SO_REUSEADDR:
2462                 optname = SO_REUSEADDR;
2463                 break;
2464 #ifdef SO_REUSEPORT
2465         case TARGET_SO_REUSEPORT:
2466                 optname = SO_REUSEPORT;
2467                 break;
2468 #endif
2469         case TARGET_SO_TYPE:
2470                 optname = SO_TYPE;
2471                 break;
2472         case TARGET_SO_ERROR:
2473                 optname = SO_ERROR;
2474                 break;
2475         case TARGET_SO_DONTROUTE:
2476                 optname = SO_DONTROUTE;
2477                 break;
2478         case TARGET_SO_BROADCAST:
2479                 optname = SO_BROADCAST;
2480                 break;
2481         case TARGET_SO_SNDBUF:
2482                 optname = SO_SNDBUF;
2483                 break;
2484         case TARGET_SO_SNDBUFFORCE:
2485                 optname = SO_SNDBUFFORCE;
2486                 break;
2487         case TARGET_SO_RCVBUF:
2488                 optname = SO_RCVBUF;
2489                 break;
2490         case TARGET_SO_RCVBUFFORCE:
2491                 optname = SO_RCVBUFFORCE;
2492                 break;
2493         case TARGET_SO_KEEPALIVE:
2494                 optname = SO_KEEPALIVE;
2495                 break;
2496         case TARGET_SO_OOBINLINE:
2497                 optname = SO_OOBINLINE;
2498                 break;
2499         case TARGET_SO_NO_CHECK:
2500                 optname = SO_NO_CHECK;
2501                 break;
2502         case TARGET_SO_PRIORITY:
2503                 optname = SO_PRIORITY;
2504                 break;
2505 #ifdef SO_BSDCOMPAT
2506         case TARGET_SO_BSDCOMPAT:
2507                 optname = SO_BSDCOMPAT;
2508                 break;
2509 #endif
2510         case TARGET_SO_PASSCRED:
2511                 optname = SO_PASSCRED;
2512                 break;
2513         case TARGET_SO_PASSSEC:
2514                 optname = SO_PASSSEC;
2515                 break;
2516         case TARGET_SO_TIMESTAMP:
2517                 optname = SO_TIMESTAMP;
2518                 break;
2519         case TARGET_SO_RCVLOWAT:
2520                 optname = SO_RCVLOWAT;
2521                 break;
2522         default:
2523             goto unimplemented;
2524         }
2525         if (optlen < sizeof(uint32_t))
2526             return -TARGET_EINVAL;
2527 
2528         if (get_user_u32(val, optval_addr))
2529             return -TARGET_EFAULT;
2530         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2531         break;
2532 #ifdef SOL_NETLINK
2533     case SOL_NETLINK:
2534         switch (optname) {
2535         case NETLINK_PKTINFO:
2536         case NETLINK_ADD_MEMBERSHIP:
2537         case NETLINK_DROP_MEMBERSHIP:
2538         case NETLINK_BROADCAST_ERROR:
2539         case NETLINK_NO_ENOBUFS:
2540 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2541         case NETLINK_LISTEN_ALL_NSID:
2542         case NETLINK_CAP_ACK:
2543 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2545         case NETLINK_EXT_ACK:
2546 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2548         case NETLINK_GET_STRICT_CHK:
2549 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2550             break;
2551         default:
2552             goto unimplemented;
2553         }
2554         val = 0;
2555         if (optlen < sizeof(uint32_t)) {
2556             return -TARGET_EINVAL;
2557         }
2558         if (get_user_u32(val, optval_addr)) {
2559             return -TARGET_EFAULT;
2560         }
2561         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2562                                    sizeof(val)));
2563         break;
2564 #endif /* SOL_NETLINK */
2565     default:
2566     unimplemented:
2567         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2568                       level, optname);
2569         ret = -TARGET_ENOPROTOOPT;
2570     }
2571     return ret;
2572 }
2573 
2574 /* do_getsockopt() must return target values and target errnos. */
2575 static abi_long do_getsockopt(int sockfd, int level, int optname,
2576                               abi_ulong optval_addr, abi_ulong optlen)
2577 {
2578     abi_long ret;
2579     int len, val;
2580     socklen_t lv;
2581 
2582     switch(level) {
2583     case TARGET_SOL_SOCKET:
2584         level = SOL_SOCKET;
2585         switch (optname) {
2586         /* These don't just return a single integer */
2587         case TARGET_SO_PEERNAME:
2588             goto unimplemented;
2589         case TARGET_SO_RCVTIMEO: {
2590             struct timeval tv;
2591             socklen_t tvlen;
2592 
2593             optname = SO_RCVTIMEO;
2594 
2595 get_timeout:
2596             if (get_user_u32(len, optlen)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             if (len < 0) {
2600                 return -TARGET_EINVAL;
2601             }
2602 
2603             tvlen = sizeof(tv);
2604             ret = get_errno(getsockopt(sockfd, level, optname,
2605                                        &tv, &tvlen));
2606             if (ret < 0) {
2607                 return ret;
2608             }
2609             if (len > sizeof(struct target_timeval)) {
2610                 len = sizeof(struct target_timeval);
2611             }
2612             if (copy_to_user_timeval(optval_addr, &tv)) {
2613                 return -TARGET_EFAULT;
2614             }
2615             if (put_user_u32(len, optlen)) {
2616                 return -TARGET_EFAULT;
2617             }
2618             break;
2619         }
2620         case TARGET_SO_SNDTIMEO:
2621             optname = SO_SNDTIMEO;
2622             goto get_timeout;
2623         case TARGET_SO_PEERCRED: {
2624             struct ucred cr;
2625             socklen_t crlen;
2626             struct target_ucred *tcr;
2627 
2628             if (get_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             if (len < 0) {
2632                 return -TARGET_EINVAL;
2633             }
2634 
2635             crlen = sizeof(cr);
2636             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2637                                        &cr, &crlen));
2638             if (ret < 0) {
2639                 return ret;
2640             }
2641             if (len > crlen) {
2642                 len = crlen;
2643             }
2644             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2645                 return -TARGET_EFAULT;
2646             }
2647             __put_user(cr.pid, &tcr->pid);
2648             __put_user(cr.uid, &tcr->uid);
2649             __put_user(cr.gid, &tcr->gid);
2650             unlock_user_struct(tcr, optval_addr, 1);
2651             if (put_user_u32(len, optlen)) {
2652                 return -TARGET_EFAULT;
2653             }
2654             break;
2655         }
2656         case TARGET_SO_PEERSEC: {
2657             char *name;
2658 
2659             if (get_user_u32(len, optlen)) {
2660                 return -TARGET_EFAULT;
2661             }
2662             if (len < 0) {
2663                 return -TARGET_EINVAL;
2664             }
2665             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2666             if (!name) {
2667                 return -TARGET_EFAULT;
2668             }
2669             lv = len;
2670             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2671                                        name, &lv));
2672             if (put_user_u32(lv, optlen)) {
2673                 ret = -TARGET_EFAULT;
2674             }
2675             unlock_user(name, optval_addr, lv);
2676             break;
2677         }
2678         case TARGET_SO_LINGER:
2679         {
2680             struct linger lg;
2681             socklen_t lglen;
2682             struct target_linger *tlg;
2683 
2684             if (get_user_u32(len, optlen)) {
2685                 return -TARGET_EFAULT;
2686             }
2687             if (len < 0) {
2688                 return -TARGET_EINVAL;
2689             }
2690 
2691             lglen = sizeof(lg);
2692             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2693                                        &lg, &lglen));
2694             if (ret < 0) {
2695                 return ret;
2696             }
2697             if (len > lglen) {
2698                 len = lglen;
2699             }
2700             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2701                 return -TARGET_EFAULT;
2702             }
2703             __put_user(lg.l_onoff, &tlg->l_onoff);
2704             __put_user(lg.l_linger, &tlg->l_linger);
2705             unlock_user_struct(tlg, optval_addr, 1);
2706             if (put_user_u32(len, optlen)) {
2707                 return -TARGET_EFAULT;
2708             }
2709             break;
2710         }
2711         /* Options with 'int' argument.  */
2712         case TARGET_SO_DEBUG:
2713             optname = SO_DEBUG;
2714             goto int_case;
2715         case TARGET_SO_REUSEADDR:
2716             optname = SO_REUSEADDR;
2717             goto int_case;
2718 #ifdef SO_REUSEPORT
2719         case TARGET_SO_REUSEPORT:
2720             optname = SO_REUSEPORT;
2721             goto int_case;
2722 #endif
2723         case TARGET_SO_TYPE:
2724             optname = SO_TYPE;
2725             goto int_case;
2726         case TARGET_SO_ERROR:
2727             optname = SO_ERROR;
2728             goto int_case;
2729         case TARGET_SO_DONTROUTE:
2730             optname = SO_DONTROUTE;
2731             goto int_case;
2732         case TARGET_SO_BROADCAST:
2733             optname = SO_BROADCAST;
2734             goto int_case;
2735         case TARGET_SO_SNDBUF:
2736             optname = SO_SNDBUF;
2737             goto int_case;
2738         case TARGET_SO_RCVBUF:
2739             optname = SO_RCVBUF;
2740             goto int_case;
2741         case TARGET_SO_KEEPALIVE:
2742             optname = SO_KEEPALIVE;
2743             goto int_case;
2744         case TARGET_SO_OOBINLINE:
2745             optname = SO_OOBINLINE;
2746             goto int_case;
2747         case TARGET_SO_NO_CHECK:
2748             optname = SO_NO_CHECK;
2749             goto int_case;
2750         case TARGET_SO_PRIORITY:
2751             optname = SO_PRIORITY;
2752             goto int_case;
2753 #ifdef SO_BSDCOMPAT
2754         case TARGET_SO_BSDCOMPAT:
2755             optname = SO_BSDCOMPAT;
2756             goto int_case;
2757 #endif
2758         case TARGET_SO_PASSCRED:
2759             optname = SO_PASSCRED;
2760             goto int_case;
2761         case TARGET_SO_TIMESTAMP:
2762             optname = SO_TIMESTAMP;
2763             goto int_case;
2764         case TARGET_SO_RCVLOWAT:
2765             optname = SO_RCVLOWAT;
2766             goto int_case;
2767         case TARGET_SO_ACCEPTCONN:
2768             optname = SO_ACCEPTCONN;
2769             goto int_case;
2770         case TARGET_SO_PROTOCOL:
2771             optname = SO_PROTOCOL;
2772             goto int_case;
2773         case TARGET_SO_DOMAIN:
2774             optname = SO_DOMAIN;
2775             goto int_case;
2776         default:
2777             goto int_case;
2778         }
2779         break;
2780     case SOL_TCP:
2781     case SOL_UDP:
2782         /* TCP and UDP options all take an 'int' value.  */
2783     int_case:
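        /*
         * Common path for integer-valued options: read the value into a
         * host int, translate SO_TYPE/SO_ERROR results to target
         * conventions, then write it back as either 32 bits or a single
         * byte depending on the length the guest supplied, and update
         * *optlen accordingly.
         */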
2784         if (get_user_u32(len, optlen))
2785             return -TARGET_EFAULT;
2786         if (len < 0)
2787             return -TARGET_EINVAL;
2788         lv = sizeof(lv);
2789         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2790         if (ret < 0)
2791             return ret;
2792         switch (optname) {
2793         case SO_TYPE:
2794             val = host_to_target_sock_type(val);
2795             break;
2796         case SO_ERROR:
2797             val = host_to_target_errno(val);
2798             break;
2799         }
2800         if (len > lv)
2801             len = lv;
2802         if (len == 4) {
2803             if (put_user_u32(val, optval_addr))
2804                 return -TARGET_EFAULT;
2805         } else {
2806             if (put_user_u8(val, optval_addr))
2807                 return -TARGET_EFAULT;
2808         }
2809         if (put_user_u32(len, optlen))
2810             return -TARGET_EFAULT;
2811         break;
2812     case SOL_IP:
2813         switch(optname) {
2814         case IP_TOS:
2815         case IP_TTL:
2816         case IP_HDRINCL:
2817         case IP_ROUTER_ALERT:
2818         case IP_RECVOPTS:
2819         case IP_RETOPTS:
2820         case IP_PKTINFO:
2821         case IP_MTU_DISCOVER:
2822         case IP_RECVERR:
2823         case IP_RECVTOS:
2824 #ifdef IP_FREEBIND
2825         case IP_FREEBIND:
2826 #endif
2827         case IP_MULTICAST_TTL:
2828         case IP_MULTICAST_LOOP:
2829             if (get_user_u32(len, optlen))
2830                 return -TARGET_EFAULT;
2831             if (len < 0)
2832                 return -TARGET_EINVAL;
2833             lv = sizeof(lv);
2834             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2835             if (ret < 0)
2836                 return ret;
2837             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2838                 len = 1;
2839                 if (put_user_u32(len, optlen)
2840                     || put_user_u8(val, optval_addr))
2841                     return -TARGET_EFAULT;
2842             } else {
2843                 if (len > sizeof(int))
2844                     len = sizeof(int);
2845                 if (put_user_u32(len, optlen)
2846                     || put_user_u32(val, optval_addr))
2847                     return -TARGET_EFAULT;
2848             }
2849             break;
2850         default:
2851             ret = -TARGET_ENOPROTOOPT;
2852             break;
2853         }
2854         break;
2855     case SOL_IPV6:
2856         switch (optname) {
2857         case IPV6_MTU_DISCOVER:
2858         case IPV6_MTU:
2859         case IPV6_V6ONLY:
2860         case IPV6_RECVPKTINFO:
2861         case IPV6_UNICAST_HOPS:
2862         case IPV6_MULTICAST_HOPS:
2863         case IPV6_MULTICAST_LOOP:
2864         case IPV6_RECVERR:
2865         case IPV6_RECVHOPLIMIT:
2866         case IPV6_2292HOPLIMIT:
2867         case IPV6_CHECKSUM:
2868         case IPV6_ADDRFORM:
2869         case IPV6_2292PKTINFO:
2870         case IPV6_RECVTCLASS:
2871         case IPV6_RECVRTHDR:
2872         case IPV6_2292RTHDR:
2873         case IPV6_RECVHOPOPTS:
2874         case IPV6_2292HOPOPTS:
2875         case IPV6_RECVDSTOPTS:
2876         case IPV6_2292DSTOPTS:
2877         case IPV6_TCLASS:
2878         case IPV6_ADDR_PREFERENCES:
2879 #ifdef IPV6_RECVPATHMTU
2880         case IPV6_RECVPATHMTU:
2881 #endif
2882 #ifdef IPV6_TRANSPARENT
2883         case IPV6_TRANSPARENT:
2884 #endif
2885 #ifdef IPV6_FREEBIND
2886         case IPV6_FREEBIND:
2887 #endif
2888 #ifdef IPV6_RECVORIGDSTADDR
2889         case IPV6_RECVORIGDSTADDR:
2890 #endif
2891             if (get_user_u32(len, optlen))
2892                 return -TARGET_EFAULT;
2893             if (len < 0)
2894                 return -TARGET_EINVAL;
2895             lv = sizeof(lv);
2896             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2897             if (ret < 0)
2898                 return ret;
2899             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2900                 len = 1;
2901                 if (put_user_u32(len, optlen)
2902                     || put_user_u8(val, optval_addr))
2903                     return -TARGET_EFAULT;
2904             } else {
2905                 if (len > sizeof(int))
2906                     len = sizeof(int);
2907                 if (put_user_u32(len, optlen)
2908                     || put_user_u32(val, optval_addr))
2909                     return -TARGET_EFAULT;
2910             }
2911             break;
2912         default:
2913             ret = -TARGET_ENOPROTOOPT;
2914             break;
2915         }
2916         break;
2917 #ifdef SOL_NETLINK
2918     case SOL_NETLINK:
2919         switch (optname) {
2920         case NETLINK_PKTINFO:
2921         case NETLINK_BROADCAST_ERROR:
2922         case NETLINK_NO_ENOBUFS:
2923 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2924         case NETLINK_LISTEN_ALL_NSID:
2925         case NETLINK_CAP_ACK:
2926 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2928         case NETLINK_EXT_ACK:
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2930 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2931         case NETLINK_GET_STRICT_CHK:
2932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2933             if (get_user_u32(len, optlen)) {
2934                 return -TARGET_EFAULT;
2935             }
2936             if (len != sizeof(val)) {
2937                 return -TARGET_EINVAL;
2938             }
2939             lv = len;
2940             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2941             if (ret < 0) {
2942                 return ret;
2943             }
2944             if (put_user_u32(lv, optlen)
2945                 || put_user_u32(val, optval_addr)) {
2946                 return -TARGET_EFAULT;
2947             }
2948             break;
2949 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2950         case NETLINK_LIST_MEMBERSHIPS:
2951         {
2952             uint32_t *results;
2953             int i;
2954             if (get_user_u32(len, optlen)) {
2955                 return -TARGET_EFAULT;
2956             }
2957             if (len < 0) {
2958                 return -TARGET_EINVAL;
2959             }
2960             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2961             if (!results && len > 0) {
2962                 return -TARGET_EFAULT;
2963             }
2964             lv = len;
2965             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2966             if (ret < 0) {
2967                 unlock_user(results, optval_addr, 0);
2968                 return ret;
2969             }
2970             /* swap host endianness to target endianness. */
2971             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2972                 results[i] = tswap32(results[i]);
2973             }
2974             if (put_user_u32(lv, optlen)) {
2975                 return -TARGET_EFAULT;
2976             }
2977             unlock_user(results, optval_addr, 0);
2978             break;
2979         }
2980 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2981         default:
2982             goto unimplemented;
2983         }
2984         break;
2985 #endif /* SOL_NETLINK */
2986     default:
2987     unimplemented:
2988         qemu_log_mask(LOG_UNIMP,
2989                       "getsockopt level=%d optname=%d not yet supported\n",
2990                       level, optname);
2991         ret = -TARGET_EOPNOTSUPP;
2992         break;
2993     }
2994     return ret;
2995 }
2996 
2997 /* Convert a target low/high pair representing a file offset into the
2998  * host low/high pair. This function doesn't handle offsets bigger than
2999  * 64 bits as the kernel doesn't handle them either.
3000  */
3001 static void target_to_host_low_high(abi_ulong tlow,
3002                                     abi_ulong thigh,
3003                                     unsigned long *hlow,
3004                                     unsigned long *hhigh)
3005 {
3006     uint64_t off = tlow |
3007         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3008         TARGET_LONG_BITS / 2;
3009 
3010     *hlow = off;
3011     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3012 }
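/*
 * Worked example (added for illustration, not part of the original file):
 * for a 32-bit target on a 64-bit host, TARGET_LONG_BITS is 32 and
 * HOST_LONG_BITS is 64, so a guest low/high offset pair such as
 *
 *     tlow  = 0x89abcdef, thigh = 0x01234567
 *
 * combines into off = 0x0123456789abcdef, giving *hlow = 0x0123456789abcdef
 * and *hhigh = 0.  On a 32-bit host the same offset is split back into
 * *hlow = 0x89abcdef and *hhigh = 0x01234567.  For 64-bit targets the high
 * word is shifted out entirely, so off is simply tlow.
 */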
3013 
3014 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3015                                 abi_ulong count, int copy)
3016 {
3017     struct target_iovec *target_vec;
3018     struct iovec *vec;
3019     abi_ulong total_len, max_len;
3020     int i;
3021     int err = 0;
3022     bool bad_address = false;
3023 
3024     if (count == 0) {
3025         errno = 0;
3026         return NULL;
3027     }
3028     if (count > IOV_MAX) {
3029         errno = EINVAL;
3030         return NULL;
3031     }
3032 
3033     vec = g_try_new0(struct iovec, count);
3034     if (vec == NULL) {
3035         errno = ENOMEM;
3036         return NULL;
3037     }
3038 
3039     target_vec = lock_user(VERIFY_READ, target_addr,
3040                            count * sizeof(struct target_iovec), 1);
3041     if (target_vec == NULL) {
3042         err = EFAULT;
3043         goto fail2;
3044     }
3045 
3046     /* ??? If host page size > target page size, this will result in a
3047        value larger than what we can actually support.  */
3048     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3049     total_len = 0;
3050 
3051     for (i = 0; i < count; i++) {
3052         abi_ulong base = tswapal(target_vec[i].iov_base);
3053         abi_long len = tswapal(target_vec[i].iov_len);
3054 
3055         if (len < 0) {
3056             err = EINVAL;
3057             goto fail;
3058         } else if (len == 0) {
3059             /* Zero length pointer is ignored.  */
3060             vec[i].iov_base = 0;
3061         } else {
3062             vec[i].iov_base = lock_user(type, base, len, copy);
3063             /* If the first buffer pointer is bad, this is a fault.  But
3064              * subsequent bad buffers will result in a partial write; this
3065              * is realized by filling the vector with null pointers and
3066              * zero lengths. */
3067             if (!vec[i].iov_base) {
3068                 if (i == 0) {
3069                     err = EFAULT;
3070                     goto fail;
3071                 } else {
3072                     bad_address = true;
3073                 }
3074             }
3075             if (bad_address) {
3076                 len = 0;
3077             }
3078             if (len > max_len - total_len) {
3079                 len = max_len - total_len;
3080             }
3081         }
3082         vec[i].iov_len = len;
3083         total_len += len;
3084     }
3085 
3086     unlock_user(target_vec, target_addr, 0);
3087     return vec;
3088 
3089  fail:
3090     while (--i >= 0) {
3091         if (tswapal(target_vec[i].iov_len) > 0) {
3092             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3093         }
3094     }
3095     unlock_user(target_vec, target_addr, 0);
3096  fail2:
3097     g_free(vec);
3098     errno = err;
3099     return NULL;
3100 }
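/*
 * Note added for clarity: lock_iovec() mirrors the kernel's partial-transfer
 * behaviour.  Only a bad iov[0].iov_base faults the whole call; a bad pointer
 * in a later entry just truncates that entry (and every following one) to
 * zero length, so the eventual readv()/writev() performs a short transfer
 * instead of failing.  The running total is also clamped to max_len so the
 * summed iov_len values stay within what the host can handle.
 */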
3101 
3102 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3103                          abi_ulong count, int copy)
3104 {
3105     struct target_iovec *target_vec;
3106     int i;
3107 
3108     target_vec = lock_user(VERIFY_READ, target_addr,
3109                            count * sizeof(struct target_iovec), 1);
3110     if (target_vec) {
3111         for (i = 0; i < count; i++) {
3112             abi_ulong base = tswapal(target_vec[i].iov_base);
3113             abi_long len = tswapal(target_vec[i].iov_len);
3114             if (len < 0) {
3115                 break;
3116             }
3117             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3118         }
3119         unlock_user(target_vec, target_addr, 0);
3120     }
3121 
3122     g_free(vec);
3123 }
3124 
3125 static inline int target_to_host_sock_type(int *type)
3126 {
3127     int host_type = 0;
3128     int target_type = *type;
3129 
3130     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3131     case TARGET_SOCK_DGRAM:
3132         host_type = SOCK_DGRAM;
3133         break;
3134     case TARGET_SOCK_STREAM:
3135         host_type = SOCK_STREAM;
3136         break;
3137     default:
3138         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3139         break;
3140     }
3141     if (target_type & TARGET_SOCK_CLOEXEC) {
3142 #if defined(SOCK_CLOEXEC)
3143         host_type |= SOCK_CLOEXEC;
3144 #else
3145         return -TARGET_EINVAL;
3146 #endif
3147     }
3148     if (target_type & TARGET_SOCK_NONBLOCK) {
3149 #if defined(SOCK_NONBLOCK)
3150         host_type |= SOCK_NONBLOCK;
3151 #elif !defined(O_NONBLOCK)
3152         return -TARGET_EINVAL;
3153 #endif
3154     }
3155     *type = host_type;
3156     return 0;
3157 }
3158 
3159 /* Try to emulate socket type flags after socket creation.  */
3160 static int sock_flags_fixup(int fd, int target_type)
3161 {
3162 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3163     if (target_type & TARGET_SOCK_NONBLOCK) {
3164         int flags = fcntl(fd, F_GETFL);
3165         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3166             close(fd);
3167             return -TARGET_EINVAL;
3168         }
3169     }
3170 #endif
3171     return fd;
3172 }
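/*
 * Note added for clarity: socket-type flags are handled in two steps.
 * target_to_host_sock_type() maps TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK
 * onto the host's SOCK_* bits where those exist; on hosts lacking
 * SOCK_NONBLOCK, sock_flags_fixup() emulates the flag afterwards with
 * fcntl(F_SETFL, O_NONBLOCK).  Either way a guest
 * socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0) ends up non-blocking.
 */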
3173 
3174 /* do_socket() Must return target values and target errnos. */
3175 static abi_long do_socket(int domain, int type, int protocol)
3176 {
3177     int target_type = type;
3178     int ret;
3179 
3180     ret = target_to_host_sock_type(&type);
3181     if (ret) {
3182         return ret;
3183     }
3184 
3185     if (domain == PF_NETLINK && !(
3186 #ifdef CONFIG_RTNETLINK
3187          protocol == NETLINK_ROUTE ||
3188 #endif
3189          protocol == NETLINK_KOBJECT_UEVENT ||
3190          protocol == NETLINK_AUDIT)) {
3191         return -TARGET_EPROTONOSUPPORT;
3192     }
3193 
3194     if (domain == AF_PACKET ||
3195         (domain == AF_INET && type == SOCK_PACKET)) {
3196         protocol = tswap16(protocol);
3197     }
3198 
3199     ret = get_errno(socket(domain, type, protocol));
3200     if (ret >= 0) {
3201         ret = sock_flags_fixup(ret, target_type);
3202         if (type == SOCK_PACKET) {
3203             /* Handle an obsolete case:
3204              * if the socket type is SOCK_PACKET, it is bound by name.
3205              */
3206             fd_trans_register(ret, &target_packet_trans);
3207         } else if (domain == PF_NETLINK) {
3208             switch (protocol) {
3209 #ifdef CONFIG_RTNETLINK
3210             case NETLINK_ROUTE:
3211                 fd_trans_register(ret, &target_netlink_route_trans);
3212                 break;
3213 #endif
3214             case NETLINK_KOBJECT_UEVENT:
3215                 /* nothing to do: messages are strings */
3216                 break;
3217             case NETLINK_AUDIT:
3218                 fd_trans_register(ret, &target_netlink_audit_trans);
3219                 break;
3220             default:
3221                 g_assert_not_reached();
3222             }
3223         }
3224     }
3225     return ret;
3226 }
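/*
 * Note added for clarity (partly an assumption about code outside this
 * file): fd_trans_register() attaches a per-fd translator so data crossing
 * this socket can later be converted between host and target layouts, e.g.
 * byte-swapping rtnetlink messages for NETLINK_ROUTE or handling the
 * obsolete SOCK_PACKET addressing.  The translators are consulted again in
 * do_sendto(), do_recvfrom() and do_sendrecvmsg_locked() through
 * fd_trans_target_to_host_data()/fd_trans_host_to_target_data().
 */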
3227 
3228 /* do_bind() Must return target values and target errnos. */
3229 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3230                         socklen_t addrlen)
3231 {
3232     void *addr;
3233     abi_long ret;
3234 
3235     if ((int)addrlen < 0) {
3236         return -TARGET_EINVAL;
3237     }
3238 
3239     addr = alloca(addrlen+1);
3240 
3241     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3242     if (ret)
3243         return ret;
3244 
3245     return get_errno(bind(sockfd, addr, addrlen));
3246 }
3247 
3248 /* do_connect() Must return target values and target errnos. */
3249 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3250                            socklen_t addrlen)
3251 {
3252     void *addr;
3253     abi_long ret;
3254 
3255     if ((int)addrlen < 0) {
3256         return -TARGET_EINVAL;
3257     }
3258 
3259     addr = alloca(addrlen+1);
3260 
3261     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3262     if (ret)
3263         return ret;
3264 
3265     return get_errno(safe_connect(sockfd, addr, addrlen));
3266 }
3267 
3268 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3269 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3270                                       int flags, int send)
3271 {
3272     abi_long ret, len;
3273     struct msghdr msg;
3274     abi_ulong count;
3275     struct iovec *vec;
3276     abi_ulong target_vec;
3277 
3278     if (msgp->msg_name) {
3279         msg.msg_namelen = tswap32(msgp->msg_namelen);
3280         msg.msg_name = alloca(msg.msg_namelen+1);
3281         ret = target_to_host_sockaddr(fd, msg.msg_name,
3282                                       tswapal(msgp->msg_name),
3283                                       msg.msg_namelen);
3284         if (ret == -TARGET_EFAULT) {
3285             /* For connected sockets msg_name and msg_namelen must
3286              * be ignored, so returning EFAULT immediately is wrong.
3287              * Instead, pass a bad msg_name to the host kernel, and
3288              * let it decide whether to return EFAULT or not.
3289              */
3290             msg.msg_name = (void *)-1;
3291         } else if (ret) {
3292             goto out2;
3293         }
3294     } else {
3295         msg.msg_name = NULL;
3296         msg.msg_namelen = 0;
3297     }
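    /*
     * Note added for clarity (the factor of two is an assumption about the
     * intent): doubling the target's msg_controllen leaves headroom for the
     * host control-message encoding, whose cmsghdr headers and alignment can
     * be larger than the target's (e.g. a 32-bit guest on a 64-bit host);
     * target_to_host_cmsg()/host_to_target_cmsg() convert between the two.
     */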
3298     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3299     msg.msg_control = alloca(msg.msg_controllen);
3300     memset(msg.msg_control, 0, msg.msg_controllen);
3301 
3302     msg.msg_flags = tswap32(msgp->msg_flags);
3303 
3304     count = tswapal(msgp->msg_iovlen);
3305     target_vec = tswapal(msgp->msg_iov);
3306 
3307     if (count > IOV_MAX) {
3308         /* sendmsg/recvmsg return a different errno for this condition than
3309          * readv/writev, so we must catch it here before lock_iovec() does.
3310          */
3311         ret = -TARGET_EMSGSIZE;
3312         goto out2;
3313     }
3314 
3315     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3316                      target_vec, count, send);
3317     if (vec == NULL) {
3318         ret = -host_to_target_errno(errno);
3319         /* allow sending a packet without any iov, e.g. with the MSG_MORE flag */
3320         if (!send || ret) {
3321             goto out2;
3322         }
3323     }
3324     msg.msg_iovlen = count;
3325     msg.msg_iov = vec;
3326 
3327     if (send) {
3328         if (fd_trans_target_to_host_data(fd)) {
3329             void *host_msg;
3330 
3331             host_msg = g_malloc(msg.msg_iov->iov_len);
3332             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3333             ret = fd_trans_target_to_host_data(fd)(host_msg,
3334                                                    msg.msg_iov->iov_len);
3335             if (ret >= 0) {
3336                 msg.msg_iov->iov_base = host_msg;
3337                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3338             }
3339             g_free(host_msg);
3340         } else {
3341             ret = target_to_host_cmsg(&msg, msgp);
3342             if (ret == 0) {
3343                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3344             }
3345         }
3346     } else {
3347         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3348         if (!is_error(ret)) {
3349             len = ret;
3350             if (fd_trans_host_to_target_data(fd)) {
3351                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3352                                                MIN(msg.msg_iov->iov_len, len));
3353             }
3354             if (!is_error(ret)) {
3355                 ret = host_to_target_cmsg(msgp, &msg);
3356             }
3357             if (!is_error(ret)) {
3358                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3359                 msgp->msg_flags = tswap32(msg.msg_flags);
3360                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3361                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3362                                     msg.msg_name, msg.msg_namelen);
3363                     if (ret) {
3364                         goto out;
3365                     }
3366                 }
3367 
3368                 ret = len;
3369             }
3370         }
3371     }
3372 
3373 out:
3374     if (vec) {
3375         unlock_iovec(vec, target_vec, count, !send);
3376     }
3377 out2:
3378     return ret;
3379 }
3380 
3381 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3382                                int flags, int send)
3383 {
3384     abi_long ret;
3385     struct target_msghdr *msgp;
3386 
3387     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3388                           msgp,
3389                           target_msg,
3390                           send ? 1 : 0)) {
3391         return -TARGET_EFAULT;
3392     }
3393     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3394     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3395     return ret;
3396 }
3397 
3398 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3399  * so it might not have this *mmsg-specific flag either.
3400  */
3401 #ifndef MSG_WAITFORONE
3402 #define MSG_WAITFORONE 0x10000
3403 #endif
3404 
3405 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3406                                 unsigned int vlen, unsigned int flags,
3407                                 int send)
3408 {
3409     struct target_mmsghdr *mmsgp;
3410     abi_long ret = 0;
3411     int i;
3412 
3413     if (vlen > UIO_MAXIOV) {
3414         vlen = UIO_MAXIOV;
3415     }
3416 
3417     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3418     if (!mmsgp) {
3419         return -TARGET_EFAULT;
3420     }
3421 
3422     for (i = 0; i < vlen; i++) {
3423         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3424         if (is_error(ret)) {
3425             break;
3426         }
3427         mmsgp[i].msg_len = tswap32(ret);
3428         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3429         if (flags & MSG_WAITFORONE) {
3430             flags |= MSG_DONTWAIT;
3431         }
3432     }
3433 
3434     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3435 
3436     /* Return number of datagrams sent if we sent any at all;
3437      * otherwise return the error.
3438      */
3439     if (i) {
3440         return i;
3441     }
3442     return ret;
3443 }
3444 
3445 /* do_accept4() Must return target values and target errnos. */
3446 static abi_long do_accept4(int fd, abi_ulong target_addr,
3447                            abi_ulong target_addrlen_addr, int flags)
3448 {
3449     socklen_t addrlen, ret_addrlen;
3450     void *addr;
3451     abi_long ret;
3452     int host_flags;
3453 
3454     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3455         return -TARGET_EINVAL;
3456     }
3457 
3458     host_flags = 0;
3459     if (flags & TARGET_SOCK_NONBLOCK) {
3460         host_flags |= SOCK_NONBLOCK;
3461     }
3462     if (flags & TARGET_SOCK_CLOEXEC) {
3463         host_flags |= SOCK_CLOEXEC;
3464     }
3465 
3466     if (target_addr == 0) {
3467         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3468     }
3469 
3470     /* linux returns EFAULT if addrlen pointer is invalid */
3471     if (get_user_u32(addrlen, target_addrlen_addr))
3472         return -TARGET_EFAULT;
3473 
3474     if ((int)addrlen < 0) {
3475         return -TARGET_EINVAL;
3476     }
3477 
3478     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3479         return -TARGET_EFAULT;
3480     }
3481 
3482     addr = alloca(addrlen);
3483 
3484     ret_addrlen = addrlen;
3485     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3486     if (!is_error(ret)) {
3487         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3488         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3489             ret = -TARGET_EFAULT;
3490         }
3491     }
3492     return ret;
3493 }
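/*
 * Note added for clarity: as in the kernel, the returned address is
 * truncated to the buffer size the guest supplied (MIN(addrlen,
 * ret_addrlen)) while the untruncated length is still written back through
 * the addrlen pointer, so the guest can detect truncation.  do_getpeername()
 * and do_getsockname() below follow the same pattern.
 */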
3494 
3495 /* do_getpeername() Must return target values and target errnos. */
3496 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3497                                abi_ulong target_addrlen_addr)
3498 {
3499     socklen_t addrlen, ret_addrlen;
3500     void *addr;
3501     abi_long ret;
3502 
3503     if (get_user_u32(addrlen, target_addrlen_addr))
3504         return -TARGET_EFAULT;
3505 
3506     if ((int)addrlen < 0) {
3507         return -TARGET_EINVAL;
3508     }
3509 
3510     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3511         return -TARGET_EFAULT;
3512     }
3513 
3514     addr = alloca(addrlen);
3515 
3516     ret_addrlen = addrlen;
3517     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3518     if (!is_error(ret)) {
3519         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3520         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3521             ret = -TARGET_EFAULT;
3522         }
3523     }
3524     return ret;
3525 }
3526 
3527 /* do_getsockname() Must return target values and target errnos. */
3528 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3529                                abi_ulong target_addrlen_addr)
3530 {
3531     socklen_t addrlen, ret_addrlen;
3532     void *addr;
3533     abi_long ret;
3534 
3535     if (get_user_u32(addrlen, target_addrlen_addr))
3536         return -TARGET_EFAULT;
3537 
3538     if ((int)addrlen < 0) {
3539         return -TARGET_EINVAL;
3540     }
3541 
3542     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3543         return -TARGET_EFAULT;
3544     }
3545 
3546     addr = alloca(addrlen);
3547 
3548     ret_addrlen = addrlen;
3549     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3550     if (!is_error(ret)) {
3551         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3552         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3553             ret = -TARGET_EFAULT;
3554         }
3555     }
3556     return ret;
3557 }
3558 
3559 /* do_socketpair() Must return target values and target errnos. */
3560 static abi_long do_socketpair(int domain, int type, int protocol,
3561                               abi_ulong target_tab_addr)
3562 {
3563     int tab[2];
3564     abi_long ret;
3565 
3566     target_to_host_sock_type(&type);
3567 
3568     ret = get_errno(socketpair(domain, type, protocol, tab));
3569     if (!is_error(ret)) {
3570         if (put_user_s32(tab[0], target_tab_addr)
3571             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3572             ret = -TARGET_EFAULT;
3573     }
3574     return ret;
3575 }
3576 
3577 /* do_sendto() Must return target values and target errnos. */
3578 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3579                           abi_ulong target_addr, socklen_t addrlen)
3580 {
3581     void *addr;
3582     void *host_msg;
3583     void *copy_msg = NULL;
3584     abi_long ret;
3585 
3586     if ((int)addrlen < 0) {
3587         return -TARGET_EINVAL;
3588     }
3589 
3590     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3591     if (!host_msg)
3592         return -TARGET_EFAULT;
3593     if (fd_trans_target_to_host_data(fd)) {
3594         copy_msg = host_msg;
3595         host_msg = g_malloc(len);
3596         memcpy(host_msg, copy_msg, len);
3597         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3598         if (ret < 0) {
3599             goto fail;
3600         }
3601     }
3602     if (target_addr) {
3603         addr = alloca(addrlen+1);
3604         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3605         if (ret) {
3606             goto fail;
3607         }
3608         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3609     } else {
3610         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3611     }
3612 fail:
3613     if (copy_msg) {
3614         g_free(host_msg);
3615         host_msg = copy_msg;
3616     }
3617     unlock_user(host_msg, msg, 0);
3618     return ret;
3619 }
3620 
3621 /* do_recvfrom() Must return target values and target errnos. */
3622 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3623                             abi_ulong target_addr,
3624                             abi_ulong target_addrlen)
3625 {
3626     socklen_t addrlen, ret_addrlen;
3627     void *addr;
3628     void *host_msg;
3629     abi_long ret;
3630 
3631     if (!msg) {
3632         host_msg = NULL;
3633     } else {
3634         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3635         if (!host_msg) {
3636             return -TARGET_EFAULT;
3637         }
3638     }
3639     if (target_addr) {
3640         if (get_user_u32(addrlen, target_addrlen)) {
3641             ret = -TARGET_EFAULT;
3642             goto fail;
3643         }
3644         if ((int)addrlen < 0) {
3645             ret = -TARGET_EINVAL;
3646             goto fail;
3647         }
3648         addr = alloca(addrlen);
3649         ret_addrlen = addrlen;
3650         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3651                                       addr, &ret_addrlen));
3652     } else {
3653         addr = NULL; /* To keep compiler quiet.  */
3654         addrlen = 0; /* To keep compiler quiet.  */
3655         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3656     }
3657     if (!is_error(ret)) {
3658         if (fd_trans_host_to_target_data(fd)) {
3659             abi_long trans;
3660             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3661             if (is_error(trans)) {
3662                 ret = trans;
3663                 goto fail;
3664             }
3665         }
3666         if (target_addr) {
3667             host_to_target_sockaddr(target_addr, addr,
3668                                     MIN(addrlen, ret_addrlen));
3669             if (put_user_u32(ret_addrlen, target_addrlen)) {
3670                 ret = -TARGET_EFAULT;
3671                 goto fail;
3672             }
3673         }
3674         unlock_user(host_msg, msg, len);
3675     } else {
3676 fail:
3677         unlock_user(host_msg, msg, 0);
3678     }
3679     return ret;
3680 }
3681 
3682 #ifdef TARGET_NR_socketcall
3683 /* do_socketcall() must return target values and target errnos. */
3684 static abi_long do_socketcall(int num, abi_ulong vptr)
3685 {
3686     static const unsigned nargs[] = { /* number of arguments per operation */
3687         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3688         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3689         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3690         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3691         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3692         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3693         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3694         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3695         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3696         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3697         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3698         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3699         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3700         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3701         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3702         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3703         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3704         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3705         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3706         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3707     };
3708     abi_long a[6]; /* max 6 args */
3709     unsigned i;
3710 
3711     /* check the range of the first argument num */
3712     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3713     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3714         return -TARGET_EINVAL;
3715     }
3716     /* ensure we have space for args */
3717     if (nargs[num] > ARRAY_SIZE(a)) {
3718         return -TARGET_EINVAL;
3719     }
3720     /* collect the arguments in a[] according to nargs[] */
3721     for (i = 0; i < nargs[num]; ++i) {
3722         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3723             return -TARGET_EFAULT;
3724         }
3725     }
3726     /* now when we have the args, invoke the appropriate underlying function */
3727     switch (num) {
3728     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3729         return do_socket(a[0], a[1], a[2]);
3730     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3731         return do_bind(a[0], a[1], a[2]);
3732     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3733         return do_connect(a[0], a[1], a[2]);
3734     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3735         return get_errno(listen(a[0], a[1]));
3736     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3737         return do_accept4(a[0], a[1], a[2], 0);
3738     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3739         return do_getsockname(a[0], a[1], a[2]);
3740     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3741         return do_getpeername(a[0], a[1], a[2]);
3742     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3743         return do_socketpair(a[0], a[1], a[2], a[3]);
3744     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3745         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3746     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3747         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3748     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3749         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3750     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3751         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3752     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3753         return get_errno(shutdown(a[0], a[1]));
3754     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3755         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3756     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3757         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3758     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3759         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3760     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3761         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3762     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3763         return do_accept4(a[0], a[1], a[2], a[3]);
3764     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3765         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3766     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3767         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3768     default:
3769         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3770         return -TARGET_EINVAL;
3771     }
3772 }
3773 #endif
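/*
 * Worked example (added for illustration): on targets that multiplex the
 * socket API through socketcall(2), a guest connect(fd, addr, addrlen)
 * reaches this file roughly as
 *
 *     do_socketcall(TARGET_SYS_CONNECT, vptr);
 *
 * nargs[TARGET_SYS_CONNECT] is 3, so three abi_longs are fetched from vptr
 * with get_user_ual() and the call is dispatched to do_connect(a[0], a[1],
 * a[2]), exactly as if the guest had used a separate connect syscall.
 */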
3774 
3775 #ifndef TARGET_SEMID64_DS
3776 /* asm-generic version of this struct */
3777 struct target_semid64_ds
3778 {
3779   struct target_ipc_perm sem_perm;
3780   abi_ulong sem_otime;
3781 #if TARGET_ABI_BITS == 32
3782   abi_ulong __unused1;
3783 #endif
3784   abi_ulong sem_ctime;
3785 #if TARGET_ABI_BITS == 32
3786   abi_ulong __unused2;
3787 #endif
3788   abi_ulong sem_nsems;
3789   abi_ulong __unused3;
3790   abi_ulong __unused4;
3791 };
3792 #endif
3793 
3794 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3795                                                abi_ulong target_addr)
3796 {
3797     struct target_ipc_perm *target_ip;
3798     struct target_semid64_ds *target_sd;
3799 
3800     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3801         return -TARGET_EFAULT;
3802     target_ip = &(target_sd->sem_perm);
3803     host_ip->__key = tswap32(target_ip->__key);
3804     host_ip->uid = tswap32(target_ip->uid);
3805     host_ip->gid = tswap32(target_ip->gid);
3806     host_ip->cuid = tswap32(target_ip->cuid);
3807     host_ip->cgid = tswap32(target_ip->cgid);
3808 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3809     host_ip->mode = tswap32(target_ip->mode);
3810 #else
3811     host_ip->mode = tswap16(target_ip->mode);
3812 #endif
3813 #if defined(TARGET_PPC)
3814     host_ip->__seq = tswap32(target_ip->__seq);
3815 #else
3816     host_ip->__seq = tswap16(target_ip->__seq);
3817 #endif
3818     unlock_user_struct(target_sd, target_addr, 0);
3819     return 0;
3820 }
3821 
3822 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3823                                                struct ipc_perm *host_ip)
3824 {
3825     struct target_ipc_perm *target_ip;
3826     struct target_semid64_ds *target_sd;
3827 
3828     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3829         return -TARGET_EFAULT;
3830     target_ip = &(target_sd->sem_perm);
3831     target_ip->__key = tswap32(host_ip->__key);
3832     target_ip->uid = tswap32(host_ip->uid);
3833     target_ip->gid = tswap32(host_ip->gid);
3834     target_ip->cuid = tswap32(host_ip->cuid);
3835     target_ip->cgid = tswap32(host_ip->cgid);
3836 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3837     target_ip->mode = tswap32(host_ip->mode);
3838 #else
3839     target_ip->mode = tswap16(host_ip->mode);
3840 #endif
3841 #if defined(TARGET_PPC)
3842     target_ip->__seq = tswap32(host_ip->__seq);
3843 #else
3844     target_ip->__seq = tswap16(host_ip->__seq);
3845 #endif
3846     unlock_user_struct(target_sd, target_addr, 1);
3847     return 0;
3848 }
3849 
3850 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3851                                                abi_ulong target_addr)
3852 {
3853     struct target_semid64_ds *target_sd;
3854 
3855     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3856         return -TARGET_EFAULT;
3857     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3858         return -TARGET_EFAULT;
3859     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3860     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3861     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3862     unlock_user_struct(target_sd, target_addr, 0);
3863     return 0;
3864 }
3865 
3866 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3867                                                struct semid_ds *host_sd)
3868 {
3869     struct target_semid64_ds *target_sd;
3870 
3871     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3872         return -TARGET_EFAULT;
3873     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3874         return -TARGET_EFAULT;
3875     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3876     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3877     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3878     unlock_user_struct(target_sd, target_addr, 1);
3879     return 0;
3880 }
3881 
3882 struct target_seminfo {
3883     int semmap;
3884     int semmni;
3885     int semmns;
3886     int semmnu;
3887     int semmsl;
3888     int semopm;
3889     int semume;
3890     int semusz;
3891     int semvmx;
3892     int semaem;
3893 };
3894 
3895 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3896                                               struct seminfo *host_seminfo)
3897 {
3898     struct target_seminfo *target_seminfo;
3899     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3900         return -TARGET_EFAULT;
3901     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3902     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3903     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3904     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3905     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3906     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3907     __put_user(host_seminfo->semume, &target_seminfo->semume);
3908     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3909     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3910     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3911     unlock_user_struct(target_seminfo, target_addr, 1);
3912     return 0;
3913 }
3914 
3915 union semun {
3916 	int val;
3917 	struct semid_ds *buf;
3918 	unsigned short *array;
3919 	struct seminfo *__buf;
3920 };
3921 
3922 union target_semun {
3923 	int val;
3924 	abi_ulong buf;
3925 	abi_ulong array;
3926 	abi_ulong __buf;
3927 };
3928 
3929 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3930                                                abi_ulong target_addr)
3931 {
3932     int nsems;
3933     unsigned short *array;
3934     union semun semun;
3935     struct semid_ds semid_ds;
3936     int i, ret;
3937 
3938     semun.buf = &semid_ds;
3939 
3940     ret = semctl(semid, 0, IPC_STAT, semun);
3941     if (ret == -1)
3942         return get_errno(ret);
3943 
3944     nsems = semid_ds.sem_nsems;
3945 
3946     *host_array = g_try_new(unsigned short, nsems);
3947     if (!*host_array) {
3948         return -TARGET_ENOMEM;
3949     }
3950     array = lock_user(VERIFY_READ, target_addr,
3951                       nsems*sizeof(unsigned short), 1);
3952     if (!array) {
3953         g_free(*host_array);
3954         return -TARGET_EFAULT;
3955     }
3956 
3957     for(i=0; i<nsems; i++) {
3958         __get_user((*host_array)[i], &array[i]);
3959     }
3960     unlock_user(array, target_addr, 0);
3961 
3962     return 0;
3963 }
3964 
3965 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3966                                                unsigned short **host_array)
3967 {
3968     int nsems;
3969     unsigned short *array;
3970     union semun semun;
3971     struct semid_ds semid_ds;
3972     int i, ret;
3973 
3974     semun.buf = &semid_ds;
3975 
3976     ret = semctl(semid, 0, IPC_STAT, semun);
3977     if (ret == -1)
3978         return get_errno(ret);
3979 
3980     nsems = semid_ds.sem_nsems;
3981 
3982     array = lock_user(VERIFY_WRITE, target_addr,
3983                       nsems*sizeof(unsigned short), 0);
3984     if (!array)
3985         return -TARGET_EFAULT;
3986 
3987     for(i=0; i<nsems; i++) {
3988         __put_user((*host_array)[i], &array[i]);
3989     }
3990     g_free(*host_array);
3991     unlock_user(array, target_addr, 1);
3992 
3993     return 0;
3994 }
3995 
3996 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3997                                  abi_ulong target_arg)
3998 {
3999     union target_semun target_su = { .buf = target_arg };
4000     union semun arg;
4001     struct semid_ds dsarg;
4002     unsigned short *array = NULL;
4003     struct seminfo seminfo;
4004     abi_long ret = -TARGET_EINVAL;
4005     abi_long err;
4006     cmd &= 0xff;
4007 
4008     switch( cmd ) {
4009 	case GETVAL:
4010 	case SETVAL:
4011             /* In 64 bit cross-endian situations, we will erroneously pick up
4012              * the wrong half of the union for the "val" element.  To rectify
4013              * this, the entire 8-byte structure is byteswapped, followed by
4014 	     * a swap of the 4 byte val field. In other cases, the data is
4015 	     * already in proper host byte order. */
4016 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4017 		target_su.buf = tswapal(target_su.buf);
4018 		arg.val = tswap32(target_su.val);
4019 	    } else {
4020 		arg.val = target_su.val;
4021 	    }
4022             ret = get_errno(semctl(semid, semnum, cmd, arg));
4023             break;
4024 	case GETALL:
4025 	case SETALL:
4026             err = target_to_host_semarray(semid, &array, target_su.array);
4027             if (err)
4028                 return err;
4029             arg.array = array;
4030             ret = get_errno(semctl(semid, semnum, cmd, arg));
4031             err = host_to_target_semarray(semid, target_su.array, &array);
4032             if (err)
4033                 return err;
4034             break;
4035 	case IPC_STAT:
4036 	case IPC_SET:
4037 	case SEM_STAT:
4038             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4039             if (err)
4040                 return err;
4041             arg.buf = &dsarg;
4042             ret = get_errno(semctl(semid, semnum, cmd, arg));
4043             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4044             if (err)
4045                 return err;
4046             break;
4047 	case IPC_INFO:
4048 	case SEM_INFO:
4049             arg.__buf = &seminfo;
4050             ret = get_errno(semctl(semid, semnum, cmd, arg));
4051             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4052             if (err)
4053                 return err;
4054             break;
4055 	case IPC_RMID:
4056 	case GETPID:
4057 	case GETNCNT:
4058 	case GETZCNT:
4059             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4060             break;
4061     }
4062 
4063     return ret;
4064 }
4065 
4066 struct target_sembuf {
4067     unsigned short sem_num;
4068     short sem_op;
4069     short sem_flg;
4070 };
4071 
4072 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4073                                              abi_ulong target_addr,
4074                                              unsigned nsops)
4075 {
4076     struct target_sembuf *target_sembuf;
4077     int i;
4078 
4079     target_sembuf = lock_user(VERIFY_READ, target_addr,
4080                               nsops*sizeof(struct target_sembuf), 1);
4081     if (!target_sembuf)
4082         return -TARGET_EFAULT;
4083 
4084     for(i=0; i<nsops; i++) {
4085         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4086         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4087         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4088     }
4089 
4090     unlock_user(target_sembuf, target_addr, 0);
4091 
4092     return 0;
4093 }
4094 
4095 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4096     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4097 
4098 /*
4099  * This macro is required to handle the s390 variant, which passes the
4100  * arguments in a different order than the default.
4101  */
4102 #ifdef __s390x__
4103 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4104   (__nsops), (__timeout), (__sops)
4105 #else
4106 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4107   (__nsops), 0, (__sops), (__timeout)
4108 #endif
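/*
 * Illustrative expansions (added for clarity) of the fallback call below:
 *
 *   generic:  safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 *   s390x:    safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 *
 * i.e. on s390x the timeout occupies the slot that the generic ABI leaves
 * unused, and there is no sixth argument.
 */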
4109 
4110 static inline abi_long do_semtimedop(int semid,
4111                                      abi_long ptr,
4112                                      unsigned nsops,
4113                                      abi_long timeout, bool time64)
4114 {
4115     struct sembuf *sops;
4116     struct timespec ts, *pts = NULL;
4117     abi_long ret;
4118 
4119     if (timeout) {
4120         pts = &ts;
4121         if (time64) {
4122             if (target_to_host_timespec64(pts, timeout)) {
4123                 return -TARGET_EFAULT;
4124             }
4125         } else {
4126             if (target_to_host_timespec(pts, timeout)) {
4127                 return -TARGET_EFAULT;
4128             }
4129         }
4130     }
4131 
4132     if (nsops > TARGET_SEMOPM) {
4133         return -TARGET_E2BIG;
4134     }
4135 
4136     sops = g_new(struct sembuf, nsops);
4137 
4138     if (target_to_host_sembuf(sops, ptr, nsops)) {
4139         g_free(sops);
4140         return -TARGET_EFAULT;
4141     }
4142 
4143     ret = -TARGET_ENOSYS;
4144 #ifdef __NR_semtimedop
4145     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4146 #endif
4147 #ifdef __NR_ipc
4148     if (ret == -TARGET_ENOSYS) {
4149         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4150                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4151     }
4152 #endif
4153     g_free(sops);
4154     return ret;
4155 }
4156 #endif
4157 
4158 struct target_msqid_ds
4159 {
4160     struct target_ipc_perm msg_perm;
4161     abi_ulong msg_stime;
4162 #if TARGET_ABI_BITS == 32
4163     abi_ulong __unused1;
4164 #endif
4165     abi_ulong msg_rtime;
4166 #if TARGET_ABI_BITS == 32
4167     abi_ulong __unused2;
4168 #endif
4169     abi_ulong msg_ctime;
4170 #if TARGET_ABI_BITS == 32
4171     abi_ulong __unused3;
4172 #endif
4173     abi_ulong __msg_cbytes;
4174     abi_ulong msg_qnum;
4175     abi_ulong msg_qbytes;
4176     abi_ulong msg_lspid;
4177     abi_ulong msg_lrpid;
4178     abi_ulong __unused4;
4179     abi_ulong __unused5;
4180 };
4181 
4182 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4183                                                abi_ulong target_addr)
4184 {
4185     struct target_msqid_ds *target_md;
4186 
4187     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4188         return -TARGET_EFAULT;
4189     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4190         return -TARGET_EFAULT;
4191     host_md->msg_stime = tswapal(target_md->msg_stime);
4192     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4193     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4194     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4195     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4196     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4197     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4198     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4199     unlock_user_struct(target_md, target_addr, 0);
4200     return 0;
4201 }
4202 
4203 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4204                                                struct msqid_ds *host_md)
4205 {
4206     struct target_msqid_ds *target_md;
4207 
4208     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4209         return -TARGET_EFAULT;
4210     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4211         return -TARGET_EFAULT;
4212     target_md->msg_stime = tswapal(host_md->msg_stime);
4213     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4214     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4215     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4216     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4217     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4218     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4219     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4220     unlock_user_struct(target_md, target_addr, 1);
4221     return 0;
4222 }
4223 
4224 struct target_msginfo {
4225     int msgpool;
4226     int msgmap;
4227     int msgmax;
4228     int msgmnb;
4229     int msgmni;
4230     int msgssz;
4231     int msgtql;
4232     unsigned short int msgseg;
4233 };
4234 
4235 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4236                                               struct msginfo *host_msginfo)
4237 {
4238     struct target_msginfo *target_msginfo;
4239     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4240         return -TARGET_EFAULT;
4241     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4242     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4243     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4244     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4245     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4246     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4247     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4248     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4249     unlock_user_struct(target_msginfo, target_addr, 1);
4250     return 0;
4251 }
4252 
4253 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4254 {
4255     struct msqid_ds dsarg;
4256     struct msginfo msginfo;
4257     abi_long ret = -TARGET_EINVAL;
4258 
4259     cmd &= 0xff;
4260 
4261     switch (cmd) {
4262     case IPC_STAT:
4263     case IPC_SET:
4264     case MSG_STAT:
4265         if (target_to_host_msqid_ds(&dsarg,ptr))
4266             return -TARGET_EFAULT;
4267         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4268         if (host_to_target_msqid_ds(ptr,&dsarg))
4269             return -TARGET_EFAULT;
4270         break;
4271     case IPC_RMID:
4272         ret = get_errno(msgctl(msgid, cmd, NULL));
4273         break;
4274     case IPC_INFO:
4275     case MSG_INFO:
4276         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4277         if (host_to_target_msginfo(ptr, &msginfo))
4278             return -TARGET_EFAULT;
4279         break;
4280     }
4281 
4282     return ret;
4283 }
4284 
4285 struct target_msgbuf {
4286     abi_long mtype;
4287     char	mtext[1];
4288 };
4289 
4290 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4291                                  ssize_t msgsz, int msgflg)
4292 {
4293     struct target_msgbuf *target_mb;
4294     struct msgbuf *host_mb;
4295     abi_long ret = 0;
4296 
4297     if (msgsz < 0) {
4298         return -TARGET_EINVAL;
4299     }
4300 
4301     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4302         return -TARGET_EFAULT;
4303     host_mb = g_try_malloc(msgsz + sizeof(long));
4304     if (!host_mb) {
4305         unlock_user_struct(target_mb, msgp, 0);
4306         return -TARGET_ENOMEM;
4307     }
4308     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4309     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4310     ret = -TARGET_ENOSYS;
4311 #ifdef __NR_msgsnd
4312     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4313 #endif
4314 #ifdef __NR_ipc
4315     if (ret == -TARGET_ENOSYS) {
4316 #ifdef __s390x__
4317         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4318                                  host_mb));
4319 #else
4320         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4321                                  host_mb, 0));
4322 #endif
4323     }
4324 #endif
4325     g_free(host_mb);
4326     unlock_user_struct(target_mb, msgp, 0);
4327 
4328     return ret;
4329 }
4330 
4331 #ifdef __NR_ipc
4332 #if defined(__sparc__)
4333 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4334 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4335 #elif defined(__s390x__)
4336 /* The s390 sys_ipc variant has only five parameters.  */
4337 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4338     ((long int[]){(long int)__msgp, __msgtyp})
4339 #else
4340 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4341     ((long int[]){(long int)__msgp, __msgtyp}), 0
4342 #endif
4343 #endif
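/*
 * Illustrative expansions (added for clarity) of the msgrcv fallback below:
 *
 *   generic:  safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *                      (long int[]){(long int)host_mb, msgtyp}, 0);
 *   s390x:    the same, but without the trailing 0 argument;
 *   sparc:    safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *                      host_mb, msgtyp);   (no pointer/type kludge)
 */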
4344 
4345 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4346                                  ssize_t msgsz, abi_long msgtyp,
4347                                  int msgflg)
4348 {
4349     struct target_msgbuf *target_mb;
4350     char *target_mtext;
4351     struct msgbuf *host_mb;
4352     abi_long ret = 0;
4353 
4354     if (msgsz < 0) {
4355         return -TARGET_EINVAL;
4356     }
4357 
4358     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4359         return -TARGET_EFAULT;
4360 
4361     host_mb = g_try_malloc(msgsz + sizeof(long));
4362     if (!host_mb) {
4363         ret = -TARGET_ENOMEM;
4364         goto end;
4365     }
4366     ret = -TARGET_ENOSYS;
4367 #ifdef __NR_msgrcv
4368     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4369 #endif
4370 #ifdef __NR_ipc
4371     if (ret == -TARGET_ENOSYS) {
4372         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4373                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4374     }
4375 #endif
4376 
4377     if (ret > 0) {
4378         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4379         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4380         if (!target_mtext) {
4381             ret = -TARGET_EFAULT;
4382             goto end;
4383         }
4384         memcpy(target_mb->mtext, host_mb->mtext, ret);
4385         unlock_user(target_mtext, target_mtext_addr, ret);
4386     }
4387 
4388     target_mb->mtype = tswapal(host_mb->mtype);
4389 
4390 end:
4391     if (target_mb)
4392         unlock_user_struct(target_mb, msgp, 1);
4393     g_free(host_mb);
4394     return ret;
4395 }
4396 
4397 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4398                                                abi_ulong target_addr)
4399 {
4400     struct target_shmid_ds *target_sd;
4401 
4402     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4403         return -TARGET_EFAULT;
4404     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4405         return -TARGET_EFAULT;
4406     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4407     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4408     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4409     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4410     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4411     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4412     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4413     unlock_user_struct(target_sd, target_addr, 0);
4414     return 0;
4415 }
4416 
4417 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4418                                                struct shmid_ds *host_sd)
4419 {
4420     struct target_shmid_ds *target_sd;
4421 
4422     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4423         return -TARGET_EFAULT;
4424     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4425         return -TARGET_EFAULT;
4426     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4427     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4428     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4429     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4430     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4431     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4432     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4433     unlock_user_struct(target_sd, target_addr, 1);
4434     return 0;
4435 }
4436 
4437 struct  target_shminfo {
4438     abi_ulong shmmax;
4439     abi_ulong shmmin;
4440     abi_ulong shmmni;
4441     abi_ulong shmseg;
4442     abi_ulong shmall;
4443 };
4444 
4445 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4446                                               struct shminfo *host_shminfo)
4447 {
4448     struct target_shminfo *target_shminfo;
4449     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4450         return -TARGET_EFAULT;
4451     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4452     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4453     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4454     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4455     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4456     unlock_user_struct(target_shminfo, target_addr, 1);
4457     return 0;
4458 }
4459 
4460 struct target_shm_info {
4461     int used_ids;
4462     abi_ulong shm_tot;
4463     abi_ulong shm_rss;
4464     abi_ulong shm_swp;
4465     abi_ulong swap_attempts;
4466     abi_ulong swap_successes;
4467 };
4468 
4469 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4470                                                struct shm_info *host_shm_info)
4471 {
4472     struct target_shm_info *target_shm_info;
4473     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4474         return -TARGET_EFAULT;
4475     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4476     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4477     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4478     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4479     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4480     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4481     unlock_user_struct(target_shm_info, target_addr, 1);
4482     return 0;
4483 }
4484 
4485 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4486 {
4487     struct shmid_ds dsarg;
4488     struct shminfo shminfo;
4489     struct shm_info shm_info;
4490     abi_long ret = -TARGET_EINVAL;
4491 
4492     cmd &= 0xff;
4493 
4494     switch(cmd) {
4495     case IPC_STAT:
4496     case IPC_SET:
4497     case SHM_STAT:
4498         if (target_to_host_shmid_ds(&dsarg, buf))
4499             return -TARGET_EFAULT;
4500         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4501         if (host_to_target_shmid_ds(buf, &dsarg))
4502             return -TARGET_EFAULT;
4503         break;
4504     case IPC_INFO:
4505         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4506         if (host_to_target_shminfo(buf, &shminfo))
4507             return -TARGET_EFAULT;
4508         break;
4509     case SHM_INFO:
4510         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4511         if (host_to_target_shm_info(buf, &shm_info))
4512             return -TARGET_EFAULT;
4513         break;
4514     case IPC_RMID:
4515     case SHM_LOCK:
4516     case SHM_UNLOCK:
4517         ret = get_errno(shmctl(shmid, cmd, NULL));
4518         break;
4519     }
4520 
4521     return ret;
4522 }
4523 
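/*
 * Editor's illustrative sketch -- not part of syscall.c.  This is the
 * guest-side shmctl() usage that do_shmctl() above services: for IPC_STAT
 * the kernel fills a struct shmid_ds, which host_to_target_shmid_ds()
 * then copies back into the guest's layout.  Plain SysV shared-memory
 * API; error handling trimmed.
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static void shm_stat_example(void)
{
    struct shmid_ds ds;
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

    if (id >= 0 && shmctl(id, IPC_STAT, &ds) == 0) {
        printf("segment size %zu, attach count %lu\n",
               (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch);
    }
    if (id >= 0) {
        shmctl(id, IPC_RMID, NULL);   /* serviced by the IPC_RMID case above */
    }
}
#endif
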
4524 #ifdef TARGET_NR_ipc
4525 /* ??? This only works with linear mappings.  */
4526 /* do_ipc() must return target values and target errnos. */
4527 static abi_long do_ipc(CPUArchState *cpu_env,
4528                        unsigned int call, abi_long first,
4529                        abi_long second, abi_long third,
4530                        abi_long ptr, abi_long fifth)
4531 {
4532     int version;
4533     abi_long ret = 0;
4534 
4535     version = call >> 16;
4536     call &= 0xffff;
4537 
4538     switch (call) {
4539     case IPCOP_semop:
4540         ret = do_semtimedop(first, ptr, second, 0, false);
4541         break;
4542     case IPCOP_semtimedop:
4543     /*
4544      * The s390 sys_ipc variant has only five parameters instead of six
4545      * (as in the default variant); the only difference is the handling of
4546      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4547      * to a struct timespec, whereas the generic variant uses the fifth parameter.
4548      */
4549 #if defined(TARGET_S390X)
4550         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4551 #else
4552         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4553 #endif
4554         break;
4555 
4556     case IPCOP_semget:
4557         ret = get_errno(semget(first, second, third));
4558         break;
4559 
4560     case IPCOP_semctl: {
4561         /* The semun argument to semctl is passed by value, so dereference the
4562          * ptr argument. */
4563         abi_ulong atptr;
4564         get_user_ual(atptr, ptr);
4565         ret = do_semctl(first, second, third, atptr);
4566         break;
4567     }
4568 
4569     case IPCOP_msgget:
4570         ret = get_errno(msgget(first, second));
4571         break;
4572 
4573     case IPCOP_msgsnd:
4574         ret = do_msgsnd(first, ptr, second, third);
4575         break;
4576 
4577     case IPCOP_msgctl:
4578         ret = do_msgctl(first, second, ptr);
4579         break;
4580 
4581     case IPCOP_msgrcv:
4582         switch (version) {
4583         case 0:
4584             {
4585                 struct target_ipc_kludge {
4586                     abi_long msgp;
4587                     abi_long msgtyp;
4588                 } *tmp;
4589 
4590                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4591                     ret = -TARGET_EFAULT;
4592                     break;
4593                 }
4594 
4595                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4596 
4597                 unlock_user_struct(tmp, ptr, 0);
4598                 break;
4599             }
4600         default:
4601             ret = do_msgrcv(first, ptr, second, fifth, third);
4602         }
4603         break;
4604 
4605     case IPCOP_shmat:
4606         switch (version) {
4607         default:
4608         {
4609             abi_ulong raddr;
4610             raddr = target_shmat(cpu_env, first, ptr, second);
4611             if (is_error(raddr))
4612                 return get_errno(raddr);
4613             if (put_user_ual(raddr, third))
4614                 return -TARGET_EFAULT;
4615             break;
4616         }
4617         case 1:
4618             ret = -TARGET_EINVAL;
4619             break;
4620         }
4621 	break;
4622     case IPCOP_shmdt:
4623         ret = target_shmdt(ptr);
4624 	break;
4625 
4626     case IPCOP_shmget:
4627 	/* IPC_* flag values are the same on all linux platforms */
4628 	ret = get_errno(shmget(first, second, third));
4629 	break;
4630 
4631 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4632     case IPCOP_shmctl:
4633         ret = do_shmctl(first, second, ptr);
4634         break;
4635     default:
4636         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4637                       call, version);
4638 	ret = -TARGET_ENOSYS;
4639 	break;
4640     }
4641     return ret;
4642 }
4643 #endif
4644 
4645 /* kernel structure types definitions */
4646 
4647 #define STRUCT(name, ...) STRUCT_ ## name,
4648 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4649 enum {
4650 #include "syscall_types.h"
4651 STRUCT_MAX
4652 };
4653 #undef STRUCT
4654 #undef STRUCT_SPECIAL
4655 
4656 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4657 #define STRUCT_SPECIAL(name)
4658 #include "syscall_types.h"
4659 #undef STRUCT
4660 #undef STRUCT_SPECIAL
4661 
4662 #define MAX_STRUCT_SIZE 4096
4663 
4664 #ifdef CONFIG_FIEMAP
4665 /* So fiemap access checks don't overflow on 32 bit systems.
4666  * This is very slightly smaller than the limit imposed by
4667  * the underlying kernel.
4668  */
4669 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4670                             / sizeof(struct fiemap_extent))
4671 
4672 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4673                                        int fd, int cmd, abi_long arg)
4674 {
4675     /* The parameter for this ioctl is a struct fiemap followed
4676      * by an array of struct fiemap_extent whose size is set
4677      * in fiemap->fm_extent_count. The array is filled in by the
4678      * ioctl.
4679      */
4680     int target_size_in, target_size_out;
4681     struct fiemap *fm;
4682     const argtype *arg_type = ie->arg_type;
4683     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4684     void *argptr, *p;
4685     abi_long ret;
4686     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4687     uint32_t outbufsz;
4688     int free_fm = 0;
4689 
4690     assert(arg_type[0] == TYPE_PTR);
4691     assert(ie->access == IOC_RW);
4692     arg_type++;
4693     target_size_in = thunk_type_size(arg_type, 0);
4694     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4695     if (!argptr) {
4696         return -TARGET_EFAULT;
4697     }
4698     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4699     unlock_user(argptr, arg, 0);
4700     fm = (struct fiemap *)buf_temp;
4701     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4702         return -TARGET_EINVAL;
4703     }
4704 
4705     outbufsz = sizeof (*fm) +
4706         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4707 
4708     if (outbufsz > MAX_STRUCT_SIZE) {
4709         /* We can't fit all the extents into the fixed size buffer.
4710          * Allocate one that is large enough and use it instead.
4711          */
4712         fm = g_try_malloc(outbufsz);
4713         if (!fm) {
4714             return -TARGET_ENOMEM;
4715         }
4716         memcpy(fm, buf_temp, sizeof(struct fiemap));
4717         free_fm = 1;
4718     }
4719     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4720     if (!is_error(ret)) {
4721         target_size_out = target_size_in;
4722         /* An extent_count of 0 means we were only counting the extents
4723          * so there are no structs to copy
4724          */
4725         if (fm->fm_extent_count != 0) {
4726             target_size_out += fm->fm_mapped_extents * extent_size;
4727         }
4728         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4729         if (!argptr) {
4730             ret = -TARGET_EFAULT;
4731         } else {
4732             /* Convert the struct fiemap */
4733             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4734             if (fm->fm_extent_count != 0) {
4735                 p = argptr + target_size_in;
4736                 /* ...and then all the struct fiemap_extents */
4737                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4738                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4739                                   THUNK_TARGET);
4740                     p += extent_size;
4741                 }
4742             }
4743             unlock_user(argptr, arg, target_size_out);
4744         }
4745     }
4746     if (free_fm) {
4747         g_free(fm);
4748     }
4749     return ret;
4750 }
4751 #endif
4752 
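/*
 * Editor's illustrative sketch -- not part of syscall.c.  It shows the
 * two-step FS_IOC_FIEMAP pattern that do_ioctl_fs_ioc_fiemap() above
 * translates: a first call with fm_extent_count == 0 only counts the
 * extents, then a second call supplies a buffer large enough for that
 * many struct fiemap_extent records.  Standard Linux UAPI headers
 * assumed; error handling trimmed.
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static struct fiemap *fiemap_whole_file(int fd)
{
    struct fiemap probe, *fm;

    memset(&probe, 0, sizeof(probe));
    probe.fm_length = ~0ULL;                 /* map the whole file */
    probe.fm_flags = FIEMAP_FLAG_SYNC;
    if (ioctl(fd, FS_IOC_FIEMAP, &probe) < 0) {      /* counting pass */
        return NULL;
    }

    fm = calloc(1, sizeof(*fm) +
                probe.fm_mapped_extents * sizeof(struct fiemap_extent));
    if (!fm) {
        return NULL;
    }
    fm->fm_length = ~0ULL;
    fm->fm_flags = FIEMAP_FLAG_SYNC;
    fm->fm_extent_count = probe.fm_mapped_extents;   /* array gets filled */
    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        free(fm);
        return NULL;
    }
    return fm;
}
#endif
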
4753 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4754                                 int fd, int cmd, abi_long arg)
4755 {
4756     const argtype *arg_type = ie->arg_type;
4757     int target_size;
4758     void *argptr;
4759     int ret;
4760     struct ifconf *host_ifconf;
4761     uint32_t outbufsz;
4762     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4763     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4764     int target_ifreq_size;
4765     int nb_ifreq;
4766     int free_buf = 0;
4767     int i;
4768     int target_ifc_len;
4769     abi_long target_ifc_buf;
4770     int host_ifc_len;
4771     char *host_ifc_buf;
4772 
4773     assert(arg_type[0] == TYPE_PTR);
4774     assert(ie->access == IOC_RW);
4775 
4776     arg_type++;
4777     target_size = thunk_type_size(arg_type, 0);
4778 
4779     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4780     if (!argptr)
4781         return -TARGET_EFAULT;
4782     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4783     unlock_user(argptr, arg, 0);
4784 
4785     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4786     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4787     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4788 
4789     if (target_ifc_buf != 0) {
4790         target_ifc_len = host_ifconf->ifc_len;
4791         nb_ifreq = target_ifc_len / target_ifreq_size;
4792         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4793 
4794         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4795         if (outbufsz > MAX_STRUCT_SIZE) {
4796             /*
4797              * We can't fit all the extents into the fixed size buffer.
4798              * Allocate one that is large enough and use it instead.
4799              */
4800             host_ifconf = g_try_malloc(outbufsz);
4801             if (!host_ifconf) {
4802                 return -TARGET_ENOMEM;
4803             }
4804             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4805             free_buf = 1;
4806         }
4807         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4808 
4809         host_ifconf->ifc_len = host_ifc_len;
4810     } else {
4811       host_ifc_buf = NULL;
4812     }
4813     host_ifconf->ifc_buf = host_ifc_buf;
4814 
4815     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4816     if (!is_error(ret)) {
4817 	/* convert host ifc_len to target ifc_len */
4818 
4819         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4820         target_ifc_len = nb_ifreq * target_ifreq_size;
4821         host_ifconf->ifc_len = target_ifc_len;
4822 
4823 	/* restore target ifc_buf */
4824 
4825         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4826 
4827 	/* copy struct ifconf to target user */
4828 
4829         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4830         if (!argptr)
4831             return -TARGET_EFAULT;
4832         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4833         unlock_user(argptr, arg, target_size);
4834 
4835         if (target_ifc_buf != 0) {
4836             /* copy ifreq[] to target user */
4837             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4838             for (i = 0; i < nb_ifreq ; i++) {
4839                 thunk_convert(argptr + i * target_ifreq_size,
4840                               host_ifc_buf + i * sizeof(struct ifreq),
4841                               ifreq_arg_type, THUNK_TARGET);
4842             }
4843             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4844         }
4845     }
4846 
4847     if (free_buf) {
4848         g_free(host_ifconf);
4849     }
4850 
4851     return ret;
4852 }
4853 
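/*
 * Editor's illustrative sketch -- not part of syscall.c.  A guest-side
 * SIOCGIFCONF request of the shape do_ioctl_ifconf() above handles: the
 * caller supplies ifc_len, and the kernel rewrites it to the number of
 * bytes actually filled in, which is the value the handler rescales
 * between host and target struct ifreq sizes.  "sock" is assumed to be
 * an AF_INET datagram socket; error handling trimmed.
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static void list_interfaces(int sock)
{
    struct ifreq reqs[16];
    struct ifconf ifc;
    int i, n;

    memset(&ifc, 0, sizeof(ifc));
    ifc.ifc_len = sizeof(reqs);
    ifc.ifc_req = reqs;
    if (ioctl(sock, SIOCGIFCONF, &ifc) < 0) {
        return;
    }
    n = ifc.ifc_len / sizeof(struct ifreq);   /* kernel-adjusted length */
    for (i = 0; i < n; i++) {
        printf("%s\n", reqs[i].ifr_name);
    }
}
#endif
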
4854 #if defined(CONFIG_USBFS)
4855 #if HOST_LONG_BITS > 64
4856 #error USBDEVFS thunks do not support >64 bit hosts yet.
4857 #endif
4858 struct live_urb {
4859     uint64_t target_urb_adr;
4860     uint64_t target_buf_adr;
4861     char *target_buf_ptr;
4862     struct usbdevfs_urb host_urb;
4863 };
4864 
4865 static GHashTable *usbdevfs_urb_hashtable(void)
4866 {
4867     static GHashTable *urb_hashtable;
4868 
4869     if (!urb_hashtable) {
4870         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4871     }
4872     return urb_hashtable;
4873 }
4874 
4875 static void urb_hashtable_insert(struct live_urb *urb)
4876 {
4877     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4878     g_hash_table_insert(urb_hashtable, urb, urb);
4879 }
4880 
4881 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4882 {
4883     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4884     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4885 }
4886 
4887 static void urb_hashtable_remove(struct live_urb *urb)
4888 {
4889     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4890     g_hash_table_remove(urb_hashtable, urb);
4891 }
4892 
4893 static abi_long
4894 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4895                           int fd, int cmd, abi_long arg)
4896 {
4897     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4898     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4899     struct live_urb *lurb;
4900     void *argptr;
4901     uint64_t hurb;
4902     int target_size;
4903     uintptr_t target_urb_adr;
4904     abi_long ret;
4905 
4906     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4907 
4908     memset(buf_temp, 0, sizeof(uint64_t));
4909     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4910     if (is_error(ret)) {
4911         return ret;
4912     }
4913 
4914     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4915     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4916     if (!lurb->target_urb_adr) {
4917         return -TARGET_EFAULT;
4918     }
4919     urb_hashtable_remove(lurb);
4920     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4921         lurb->host_urb.buffer_length);
4922     lurb->target_buf_ptr = NULL;
4923 
4924     /* restore the guest buffer pointer */
4925     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4926 
4927     /* update the guest urb struct */
4928     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4929     if (!argptr) {
4930         g_free(lurb);
4931         return -TARGET_EFAULT;
4932     }
4933     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4934     unlock_user(argptr, lurb->target_urb_adr, target_size);
4935 
4936     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4937     /* write back the urb handle */
4938     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4939     if (!argptr) {
4940         g_free(lurb);
4941         return -TARGET_EFAULT;
4942     }
4943 
4944     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4945     target_urb_adr = lurb->target_urb_adr;
4946     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4947     unlock_user(argptr, arg, target_size);
4948 
4949     g_free(lurb);
4950     return ret;
4951 }
4952 
4953 static abi_long
4954 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4955                              uint8_t *buf_temp __attribute__((unused)),
4956                              int fd, int cmd, abi_long arg)
4957 {
4958     struct live_urb *lurb;
4959 
4960     /* map target address back to host URB with metadata. */
4961     lurb = urb_hashtable_lookup(arg);
4962     if (!lurb) {
4963         return -TARGET_EFAULT;
4964     }
4965     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4966 }
4967 
4968 static abi_long
4969 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4970                             int fd, int cmd, abi_long arg)
4971 {
4972     const argtype *arg_type = ie->arg_type;
4973     int target_size;
4974     abi_long ret;
4975     void *argptr;
4976     int rw_dir;
4977     struct live_urb *lurb;
4978 
4979     /*
4980      * each submitted URB needs to map to a unique ID for the
4981      * kernel, and that unique ID needs to be a pointer to
4982      * host memory.  hence, we need to malloc for each URB.
4983      * isochronous transfers have a variable length struct.
4984      */
4985     arg_type++;
4986     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4987 
4988     /* construct host copy of urb and metadata */
4989     lurb = g_try_new0(struct live_urb, 1);
4990     if (!lurb) {
4991         return -TARGET_ENOMEM;
4992     }
4993 
4994     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4995     if (!argptr) {
4996         g_free(lurb);
4997         return -TARGET_EFAULT;
4998     }
4999     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5000     unlock_user(argptr, arg, 0);
5001 
5002     lurb->target_urb_adr = arg;
5003     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5004 
5005     /* buffer space used depends on endpoint type so lock the entire buffer */
5006     /* control type urbs should check the buffer contents for true direction */
5007     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5008     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5009         lurb->host_urb.buffer_length, 1);
5010     if (lurb->target_buf_ptr == NULL) {
5011         g_free(lurb);
5012         return -TARGET_EFAULT;
5013     }
5014 
5015     /* update buffer pointer in host copy */
5016     lurb->host_urb.buffer = lurb->target_buf_ptr;
5017 
5018     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5019     if (is_error(ret)) {
5020         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5021         g_free(lurb);
5022     } else {
5023         urb_hashtable_insert(lurb);
5024     }
5025 
5026     return ret;
5027 }
5028 #endif /* CONFIG_USBFS */
5029 
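/*
 * Editor's illustrative sketch -- not part of syscall.c.  It spells out
 * the pointer arithmetic used by do_ioctl_usbdevfs_reapurb() above: the
 * kernel hands back the address of the embedded host_urb, and subtracting
 * its offset within struct live_urb recovers the wrapper carrying the
 * guest metadata (the classic container_of pattern).
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stddef.h>
#include <stdint.h>

#define live_urb_from_host_urb(urb_ptr)                          \
    ((struct live_urb *)((uintptr_t)(urb_ptr) -                  \
                         offsetof(struct live_urb, host_urb)))

/* usage: struct live_urb *lurb = live_urb_from_host_urb(reaped_urb); */
#endif
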
5030 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5031                             int cmd, abi_long arg)
5032 {
5033     void *argptr;
5034     struct dm_ioctl *host_dm;
5035     abi_long guest_data;
5036     uint32_t guest_data_size;
5037     int target_size;
5038     const argtype *arg_type = ie->arg_type;
5039     abi_long ret;
5040     void *big_buf = NULL;
5041     char *host_data;
5042 
5043     arg_type++;
5044     target_size = thunk_type_size(arg_type, 0);
5045     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5046     if (!argptr) {
5047         ret = -TARGET_EFAULT;
5048         goto out;
5049     }
5050     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5051     unlock_user(argptr, arg, 0);
5052 
5053     /* buf_temp is too small, so fetch things into a bigger buffer */
5054     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5055     memcpy(big_buf, buf_temp, target_size);
5056     buf_temp = big_buf;
5057     host_dm = big_buf;
5058 
5059     guest_data = arg + host_dm->data_start;
5060     if ((guest_data - arg) < 0) {
5061         ret = -TARGET_EINVAL;
5062         goto out;
5063     }
5064     guest_data_size = host_dm->data_size - host_dm->data_start;
5065     host_data = (char*)host_dm + host_dm->data_start;
5066 
5067     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5068     if (!argptr) {
5069         ret = -TARGET_EFAULT;
5070         goto out;
5071     }
5072 
5073     switch (ie->host_cmd) {
5074     case DM_REMOVE_ALL:
5075     case DM_LIST_DEVICES:
5076     case DM_DEV_CREATE:
5077     case DM_DEV_REMOVE:
5078     case DM_DEV_SUSPEND:
5079     case DM_DEV_STATUS:
5080     case DM_DEV_WAIT:
5081     case DM_TABLE_STATUS:
5082     case DM_TABLE_CLEAR:
5083     case DM_TABLE_DEPS:
5084     case DM_LIST_VERSIONS:
5085         /* no input data */
5086         break;
5087     case DM_DEV_RENAME:
5088     case DM_DEV_SET_GEOMETRY:
5089         /* data contains only strings */
5090         memcpy(host_data, argptr, guest_data_size);
5091         break;
5092     case DM_TARGET_MSG:
5093         memcpy(host_data, argptr, guest_data_size);
5094         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5095         break;
5096     case DM_TABLE_LOAD:
5097     {
5098         void *gspec = argptr;
5099         void *cur_data = host_data;
5100         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5101         int spec_size = thunk_type_size(dm_arg_type, 0);
5102         int i;
5103 
5104         for (i = 0; i < host_dm->target_count; i++) {
5105             struct dm_target_spec *spec = cur_data;
5106             uint32_t next;
5107             int slen;
5108 
5109             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5110             slen = strlen((char*)gspec + spec_size) + 1;
5111             next = spec->next;
5112             spec->next = sizeof(*spec) + slen;
5113             strcpy((char*)&spec[1], gspec + spec_size);
5114             gspec += next;
5115             cur_data += spec->next;
5116         }
5117         break;
5118     }
5119     default:
5120         ret = -TARGET_EINVAL;
5121         unlock_user(argptr, guest_data, 0);
5122         goto out;
5123     }
5124     unlock_user(argptr, guest_data, 0);
5125 
5126     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5127     if (!is_error(ret)) {
5128         guest_data = arg + host_dm->data_start;
5129         guest_data_size = host_dm->data_size - host_dm->data_start;
5130         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5131         switch (ie->host_cmd) {
5132         case DM_REMOVE_ALL:
5133         case DM_DEV_CREATE:
5134         case DM_DEV_REMOVE:
5135         case DM_DEV_RENAME:
5136         case DM_DEV_SUSPEND:
5137         case DM_DEV_STATUS:
5138         case DM_TABLE_LOAD:
5139         case DM_TABLE_CLEAR:
5140         case DM_TARGET_MSG:
5141         case DM_DEV_SET_GEOMETRY:
5142             /* no return data */
5143             break;
5144         case DM_LIST_DEVICES:
5145         {
5146             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5147             uint32_t remaining_data = guest_data_size;
5148             void *cur_data = argptr;
5149             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5150             int nl_size = 12; /* can't use thunk_size due to alignment */
5151 
5152             while (1) {
5153                 uint32_t next = nl->next;
5154                 if (next) {
5155                     nl->next = nl_size + (strlen(nl->name) + 1);
5156                 }
5157                 if (remaining_data < nl->next) {
5158                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5159                     break;
5160                 }
5161                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5162                 strcpy(cur_data + nl_size, nl->name);
5163                 cur_data += nl->next;
5164                 remaining_data -= nl->next;
5165                 if (!next) {
5166                     break;
5167                 }
5168                 nl = (void*)nl + next;
5169             }
5170             break;
5171         }
5172         case DM_DEV_WAIT:
5173         case DM_TABLE_STATUS:
5174         {
5175             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5176             void *cur_data = argptr;
5177             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5178             int spec_size = thunk_type_size(dm_arg_type, 0);
5179             int i;
5180 
5181             for (i = 0; i < host_dm->target_count; i++) {
5182                 uint32_t next = spec->next;
5183                 int slen = strlen((char*)&spec[1]) + 1;
5184                 spec->next = (cur_data - argptr) + spec_size + slen;
5185                 if (guest_data_size < spec->next) {
5186                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5187                     break;
5188                 }
5189                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5190                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5191                 cur_data = argptr + spec->next;
5192                 spec = (void*)host_dm + host_dm->data_start + next;
5193             }
5194             break;
5195         }
5196         case DM_TABLE_DEPS:
5197         {
5198             void *hdata = (void*)host_dm + host_dm->data_start;
5199             int count = *(uint32_t*)hdata;
5200             uint64_t *hdev = hdata + 8;
5201             uint64_t *gdev = argptr + 8;
5202             int i;
5203 
5204             *(uint32_t*)argptr = tswap32(count);
5205             for (i = 0; i < count; i++) {
5206                 *gdev = tswap64(*hdev);
5207                 gdev++;
5208                 hdev++;
5209             }
5210             break;
5211         }
5212         case DM_LIST_VERSIONS:
5213         {
5214             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5215             uint32_t remaining_data = guest_data_size;
5216             void *cur_data = argptr;
5217             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5218             int vers_size = thunk_type_size(dm_arg_type, 0);
5219 
5220             while (1) {
5221                 uint32_t next = vers->next;
5222                 if (next) {
5223                     vers->next = vers_size + (strlen(vers->name) + 1);
5224                 }
5225                 if (remaining_data < vers->next) {
5226                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5227                     break;
5228                 }
5229                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5230                 strcpy(cur_data + vers_size, vers->name);
5231                 cur_data += vers->next;
5232                 remaining_data -= vers->next;
5233                 if (!next) {
5234                     break;
5235                 }
5236                 vers = (void*)vers + next;
5237             }
5238             break;
5239         }
5240         default:
5241             unlock_user(argptr, guest_data, 0);
5242             ret = -TARGET_EINVAL;
5243             goto out;
5244         }
5245         unlock_user(argptr, guest_data, guest_data_size);
5246 
5247         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5248         if (!argptr) {
5249             ret = -TARGET_EFAULT;
5250             goto out;
5251         }
5252         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5253         unlock_user(argptr, arg, target_size);
5254     }
5255 out:
5256     g_free(big_buf);
5257     return ret;
5258 }
5259 
5260 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5261                                int cmd, abi_long arg)
5262 {
5263     void *argptr;
5264     int target_size;
5265     const argtype *arg_type = ie->arg_type;
5266     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5267     abi_long ret;
5268 
5269     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5270     struct blkpg_partition host_part;
5271 
5272     /* Read and convert blkpg */
5273     arg_type++;
5274     target_size = thunk_type_size(arg_type, 0);
5275     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5276     if (!argptr) {
5277         ret = -TARGET_EFAULT;
5278         goto out;
5279     }
5280     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5281     unlock_user(argptr, arg, 0);
5282 
5283     switch (host_blkpg->op) {
5284     case BLKPG_ADD_PARTITION:
5285     case BLKPG_DEL_PARTITION:
5286         /* payload is struct blkpg_partition */
5287         break;
5288     default:
5289         /* Unknown opcode */
5290         ret = -TARGET_EINVAL;
5291         goto out;
5292     }
5293 
5294     /* Read and convert blkpg->data */
5295     arg = (abi_long)(uintptr_t)host_blkpg->data;
5296     target_size = thunk_type_size(part_arg_type, 0);
5297     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5298     if (!argptr) {
5299         ret = -TARGET_EFAULT;
5300         goto out;
5301     }
5302     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5303     unlock_user(argptr, arg, 0);
5304 
5305     /* Swizzle the data pointer to our local copy and call! */
5306     host_blkpg->data = &host_part;
5307     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5308 
5309 out:
5310     return ret;
5311 }
5312 
5313 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5314                                 int fd, int cmd, abi_long arg)
5315 {
5316     const argtype *arg_type = ie->arg_type;
5317     const StructEntry *se;
5318     const argtype *field_types;
5319     const int *dst_offsets, *src_offsets;
5320     int target_size;
5321     void *argptr;
5322     abi_ulong *target_rt_dev_ptr = NULL;
5323     unsigned long *host_rt_dev_ptr = NULL;
5324     abi_long ret;
5325     int i;
5326 
5327     assert(ie->access == IOC_W);
5328     assert(*arg_type == TYPE_PTR);
5329     arg_type++;
5330     assert(*arg_type == TYPE_STRUCT);
5331     target_size = thunk_type_size(arg_type, 0);
5332     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5333     if (!argptr) {
5334         return -TARGET_EFAULT;
5335     }
5336     arg_type++;
5337     assert(*arg_type == (int)STRUCT_rtentry);
5338     se = struct_entries + *arg_type++;
5339     assert(se->convert[0] == NULL);
5340     /* convert struct here to be able to catch rt_dev string */
5341     field_types = se->field_types;
5342     dst_offsets = se->field_offsets[THUNK_HOST];
5343     src_offsets = se->field_offsets[THUNK_TARGET];
5344     for (i = 0; i < se->nb_fields; i++) {
5345         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5346             assert(*field_types == TYPE_PTRVOID);
5347             target_rt_dev_ptr = argptr + src_offsets[i];
5348             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5349             if (*target_rt_dev_ptr != 0) {
5350                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5351                                                   tswapal(*target_rt_dev_ptr));
5352                 if (!*host_rt_dev_ptr) {
5353                     unlock_user(argptr, arg, 0);
5354                     return -TARGET_EFAULT;
5355                 }
5356             } else {
5357                 *host_rt_dev_ptr = 0;
5358             }
5359             field_types++;
5360             continue;
5361         }
5362         field_types = thunk_convert(buf_temp + dst_offsets[i],
5363                                     argptr + src_offsets[i],
5364                                     field_types, THUNK_HOST);
5365     }
5366     unlock_user(argptr, arg, 0);
5367 
5368     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5369 
5370     assert(host_rt_dev_ptr != NULL);
5371     assert(target_rt_dev_ptr != NULL);
5372     if (*host_rt_dev_ptr != 0) {
5373         unlock_user((void *)*host_rt_dev_ptr,
5374                     *target_rt_dev_ptr, 0);
5375     }
5376     return ret;
5377 }
5378 
5379 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5380                                      int fd, int cmd, abi_long arg)
5381 {
5382     int sig = target_to_host_signal(arg);
5383     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5384 }
5385 
5386 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5387                                     int fd, int cmd, abi_long arg)
5388 {
5389     struct timeval tv;
5390     abi_long ret;
5391 
5392     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5393     if (is_error(ret)) {
5394         return ret;
5395     }
5396 
5397     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5398         if (copy_to_user_timeval(arg, &tv)) {
5399             return -TARGET_EFAULT;
5400         }
5401     } else {
5402         if (copy_to_user_timeval64(arg, &tv)) {
5403             return -TARGET_EFAULT;
5404         }
5405     }
5406 
5407     return ret;
5408 }
5409 
5410 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5411                                       int fd, int cmd, abi_long arg)
5412 {
5413     struct timespec ts;
5414     abi_long ret;
5415 
5416     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5417     if (is_error(ret)) {
5418         return ret;
5419     }
5420 
5421     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5422         if (host_to_target_timespec(arg, &ts)) {
5423             return -TARGET_EFAULT;
5424         }
5425     } else {
5426         if (host_to_target_timespec64(arg, &ts)) {
5427             return -TARGET_EFAULT;
5428         }
5429     }
5430 
5431     return ret;
5432 }
5433 
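/*
 * Editor's illustrative sketch -- not part of syscall.c.  Guest-side use
 * of SIOCGSTAMP as translated by the two handlers above: after a datagram
 * has been received, the ioctl reports the kernel's timestamp for that
 * packet.  The _OLD and 64-bit command variants differ only in the
 * timeval layout copied back to the guest.  Error handling trimmed.
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <linux/sockios.h>

static void stamp_last_packet(int udp_sock)
{
    char buf[1500];
    struct timeval tv;

    if (recv(udp_sock, buf, sizeof(buf), 0) < 0) {
        return;
    }
    if (ioctl(udp_sock, SIOCGSTAMP, &tv) == 0) {
        printf("received at %ld.%06ld\n",
               (long)tv.tv_sec, (long)tv.tv_usec);
    }
}
#endif
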
5434 #ifdef TIOCGPTPEER
5435 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5436                                      int fd, int cmd, abi_long arg)
5437 {
5438     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5439     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5440 }
5441 #endif
5442 
5443 #ifdef HAVE_DRM_H
5444 
5445 static void unlock_drm_version(struct drm_version *host_ver,
5446                                struct target_drm_version *target_ver,
5447                                bool copy)
5448 {
5449     unlock_user(host_ver->name, target_ver->name,
5450                                 copy ? host_ver->name_len : 0);
5451     unlock_user(host_ver->date, target_ver->date,
5452                                 copy ? host_ver->date_len : 0);
5453     unlock_user(host_ver->desc, target_ver->desc,
5454                                 copy ? host_ver->desc_len : 0);
5455 }
5456 
5457 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5458                                           struct target_drm_version *target_ver)
5459 {
5460     memset(host_ver, 0, sizeof(*host_ver));
5461 
5462     __get_user(host_ver->name_len, &target_ver->name_len);
5463     if (host_ver->name_len) {
5464         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5465                                    target_ver->name_len, 0);
5466         if (!host_ver->name) {
5467             return -EFAULT;
5468         }
5469     }
5470 
5471     __get_user(host_ver->date_len, &target_ver->date_len);
5472     if (host_ver->date_len) {
5473         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5474                                    target_ver->date_len, 0);
5475         if (!host_ver->date) {
5476             goto err;
5477         }
5478     }
5479 
5480     __get_user(host_ver->desc_len, &target_ver->desc_len);
5481     if (host_ver->desc_len) {
5482         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5483                                    target_ver->desc_len, 0);
5484         if (!host_ver->desc) {
5485             goto err;
5486         }
5487     }
5488 
5489     return 0;
5490 err:
5491     unlock_drm_version(host_ver, target_ver, false);
5492     return -EFAULT;
5493 }
5494 
5495 static inline void host_to_target_drmversion(
5496                                           struct target_drm_version *target_ver,
5497                                           struct drm_version *host_ver)
5498 {
5499     __put_user(host_ver->version_major, &target_ver->version_major);
5500     __put_user(host_ver->version_minor, &target_ver->version_minor);
5501     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5502     __put_user(host_ver->name_len, &target_ver->name_len);
5503     __put_user(host_ver->date_len, &target_ver->date_len);
5504     __put_user(host_ver->desc_len, &target_ver->desc_len);
5505     unlock_drm_version(host_ver, target_ver, true);
5506 }
5507 
5508 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5509                              int fd, int cmd, abi_long arg)
5510 {
5511     struct drm_version *ver;
5512     struct target_drm_version *target_ver;
5513     abi_long ret;
5514 
5515     switch (ie->host_cmd) {
5516     case DRM_IOCTL_VERSION:
5517         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5518             return -TARGET_EFAULT;
5519         }
5520         ver = (struct drm_version *)buf_temp;
5521         ret = target_to_host_drmversion(ver, target_ver);
5522         if (!is_error(ret)) {
5523             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5524             if (is_error(ret)) {
5525                 unlock_drm_version(ver, target_ver, false);
5526             } else {
5527                 host_to_target_drmversion(target_ver, ver);
5528             }
5529         }
5530         unlock_user_struct(target_ver, arg, 0);
5531         return ret;
5532     }
5533     return -TARGET_ENOSYS;
5534 }
5535 
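/*
 * Editor's illustrative sketch -- not part of syscall.c.  The usual
 * two-pass DRM_IOCTL_VERSION sequence that do_ioctl_drm() above supports:
 * a first call with zeroed lengths asks the kernel how large name/date/
 * desc are, then the caller allocates buffers and repeats the ioctl.
 * The header path and error handling are simplified assumptions here.
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int query_drm_version(int drm_fd, struct drm_version *v)
{
    memset(v, 0, sizeof(*v));
    if (ioctl(drm_fd, DRM_IOCTL_VERSION, v) < 0) {   /* sizing pass */
        return -1;
    }
    v->name = malloc(v->name_len + 1);
    v->date = malloc(v->date_len + 1);
    v->desc = malloc(v->desc_len + 1);
    if (!v->name || !v->date || !v->desc) {
        return -1;
    }
    return ioctl(drm_fd, DRM_IOCTL_VERSION, v);      /* data pass */
}
#endif
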
5536 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5537                                            struct drm_i915_getparam *gparam,
5538                                            int fd, abi_long arg)
5539 {
5540     abi_long ret;
5541     int value;
5542     struct target_drm_i915_getparam *target_gparam;
5543 
5544     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5545         return -TARGET_EFAULT;
5546     }
5547 
5548     __get_user(gparam->param, &target_gparam->param);
5549     gparam->value = &value;
5550     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5551     put_user_s32(value, target_gparam->value);
5552 
5553     unlock_user_struct(target_gparam, arg, 0);
5554     return ret;
5555 }
5556 
5557 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5558                                   int fd, int cmd, abi_long arg)
5559 {
5560     switch (ie->host_cmd) {
5561     case DRM_IOCTL_I915_GETPARAM:
5562         return do_ioctl_drm_i915_getparam(ie,
5563                                           (struct drm_i915_getparam *)buf_temp,
5564                                           fd, arg);
5565     default:
5566         return -TARGET_ENOSYS;
5567     }
5568 }
5569 
5570 #endif
5571 
5572 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5573                                         int fd, int cmd, abi_long arg)
5574 {
5575     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5576     struct tun_filter *target_filter;
5577     char *target_addr;
5578 
5579     assert(ie->access == IOC_W);
5580 
5581     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5582     if (!target_filter) {
5583         return -TARGET_EFAULT;
5584     }
5585     filter->flags = tswap16(target_filter->flags);
5586     filter->count = tswap16(target_filter->count);
5587     unlock_user(target_filter, arg, 0);
5588 
5589     if (filter->count) {
5590         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5591             MAX_STRUCT_SIZE) {
5592             return -TARGET_EFAULT;
5593         }
5594 
5595         target_addr = lock_user(VERIFY_READ,
5596                                 arg + offsetof(struct tun_filter, addr),
5597                                 filter->count * ETH_ALEN, 1);
5598         if (!target_addr) {
5599             return -TARGET_EFAULT;
5600         }
5601         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5602         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5603     }
5604 
5605     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5606 }
5607 
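/*
 * Editor's illustrative sketch -- not part of syscall.c.  A guest-side
 * TUNSETTXFILTER call of the shape do_ioctl_TUNSETTXFILTER() above
 * unpacks: a fixed struct tun_filter header followed by filter->count
 * MAC addresses.  "tap_fd" is assumed to be an open tap device; error
 * handling trimmed.
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

static int set_tap_filter(int tap_fd, const unsigned char mac[ETH_ALEN])
{
    size_t sz = sizeof(struct tun_filter) + ETH_ALEN;
    struct tun_filter *flt = calloc(1, sz);
    int ret;

    if (!flt) {
        return -1;
    }
    flt->count = 1;                     /* one address follows the header */
    memcpy(flt->addr[0], mac, ETH_ALEN);
    ret = ioctl(tap_fd, TUNSETTXFILTER, flt);
    free(flt);
    return ret;
}
#endif
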
5608 IOCTLEntry ioctl_entries[] = {
5609 #define IOCTL(cmd, access, ...) \
5610     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5611 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5612     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5613 #define IOCTL_IGNORE(cmd) \
5614     { TARGET_ ## cmd, 0, #cmd },
5615 #include "ioctls.h"
5616     { 0, 0, },
5617 };
5618 
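/*
 * Editor's note (hedged illustration, not part of syscall.c): each line of
 * ioctls.h expands through the macros above into one IOCTLEntry consumed
 * by do_ioctl() below.  For a hypothetical read-only ioctl FOOGETVAL that
 * takes a pointer to an int, the three flavours would expand roughly as:
 *
 *   IOCTL(FOOGETVAL, IOC_R, MK_PTR(TYPE_INT))
 *     -> { TARGET_FOOGETVAL, FOOGETVAL, "FOOGETVAL", IOC_R, 0,
 *          { MK_PTR(TYPE_INT) } },
 *   IOCTL_SPECIAL(FOOGETVAL, IOC_R, do_ioctl_foogetval, MK_PTR(TYPE_INT))
 *     -> the same entry with the custom handler in the do_ioctl slot, so
 *        do_ioctl() dispatches to it instead of the generic thunk path,
 *   IOCTL_IGNORE(FOOGETVAL)
 *     -> { TARGET_FOOGETVAL, 0, "FOOGETVAL" }, i.e. host_cmd == 0, which
 *        do_ioctl() reports as -TARGET_ENOTTY.
 *
 * FOOGETVAL, do_ioctl_foogetval and MK_PTR(TYPE_INT) are made-up names
 * used purely for illustration.
 */
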
5619 /* ??? Implement proper locking for ioctls.  */
5620 /* do_ioctl() must return target values and target errnos. */
5621 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5622 {
5623     const IOCTLEntry *ie;
5624     const argtype *arg_type;
5625     abi_long ret;
5626     uint8_t buf_temp[MAX_STRUCT_SIZE];
5627     int target_size;
5628     void *argptr;
5629 
5630     ie = ioctl_entries;
5631     for(;;) {
5632         if (ie->target_cmd == 0) {
5633             qemu_log_mask(
5634                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5635             return -TARGET_ENOTTY;
5636         }
5637         if (ie->target_cmd == cmd)
5638             break;
5639         ie++;
5640     }
5641     arg_type = ie->arg_type;
5642     if (ie->do_ioctl) {
5643         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5644     } else if (!ie->host_cmd) {
5645         /* Some architectures define BSD ioctls in their headers
5646            that are not implemented in Linux.  */
5647         return -TARGET_ENOTTY;
5648     }
5649 
5650     switch(arg_type[0]) {
5651     case TYPE_NULL:
5652         /* no argument */
5653         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5654         break;
5655     case TYPE_PTRVOID:
5656     case TYPE_INT:
5657     case TYPE_LONG:
5658     case TYPE_ULONG:
5659         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5660         break;
5661     case TYPE_PTR:
5662         arg_type++;
5663         target_size = thunk_type_size(arg_type, 0);
5664         switch(ie->access) {
5665         case IOC_R:
5666             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5667             if (!is_error(ret)) {
5668                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5669                 if (!argptr)
5670                     return -TARGET_EFAULT;
5671                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5672                 unlock_user(argptr, arg, target_size);
5673             }
5674             break;
5675         case IOC_W:
5676             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5677             if (!argptr)
5678                 return -TARGET_EFAULT;
5679             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5680             unlock_user(argptr, arg, 0);
5681             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5682             break;
5683         default:
5684         case IOC_RW:
5685             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5686             if (!argptr)
5687                 return -TARGET_EFAULT;
5688             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5689             unlock_user(argptr, arg, 0);
5690             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5691             if (!is_error(ret)) {
5692                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5693                 if (!argptr)
5694                     return -TARGET_EFAULT;
5695                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5696                 unlock_user(argptr, arg, target_size);
5697             }
5698             break;
5699         }
5700         break;
5701     default:
5702         qemu_log_mask(LOG_UNIMP,
5703                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5704                       (long)cmd, arg_type[0]);
5705         ret = -TARGET_ENOTTY;
5706         break;
5707     }
5708     return ret;
5709 }
5710 
5711 static const bitmask_transtbl iflag_tbl[] = {
5712         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5713         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5714         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5715         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5716         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5717         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5718         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5719         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5720         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5721         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5722         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5723         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5724         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5725         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5726         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5727 };
5728 
5729 static const bitmask_transtbl oflag_tbl[] = {
5730 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5731 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5732 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5733 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5734 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5735 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5736 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5737 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5738 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5739 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5740 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5741 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5742 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5743 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5744 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5745 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5746 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5747 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5748 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5749 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5750 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5751 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5752 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5753 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5754 };
5755 
5756 static const bitmask_transtbl cflag_tbl[] = {
5757 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5758 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5759 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5760 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5761 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5762 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5763 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5764 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5765 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5766 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5767 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5768 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5769 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5770 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5771 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5772 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5773 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5774 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5775 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5776 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5777 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5778 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5779 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5780 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5781 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5782 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5783 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5784 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5785 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5786 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5787 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5788 };
5789 
5790 static const bitmask_transtbl lflag_tbl[] = {
5791   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5792   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5793   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5794   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5795   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5796   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5797   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5798   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5799   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5800   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5801   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5802   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5803   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5804   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5805   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5806   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5807 };
5808 
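/*
 * Editor's sketch -- not part of syscall.c.  It shows how a
 * bitmask_transtbl such as the termios tables above can be applied: each
 * entry pairs a target field mask/value with the corresponding host
 * mask/value, so multi-bit fields like CBAUD or CSIZE translate as a
 * whole.  The field names are inferred from the initializers above; the
 * real target_to_host_bitmask() lives elsewhere in QEMU and may differ
 * in detail.
 */
#if 0   /* standalone example, not compiled as part of this file */
#include <stddef.h>

static unsigned int translate_bitmask(unsigned int target_flags,
                                      const bitmask_transtbl *tbl,
                                      size_t len)
{
    unsigned int host_flags = 0;
    size_t i;

    for (i = 0; i < len; i++) {
        /* an entry matches when the masked target field equals its value */
        if ((target_flags & tbl[i].target_mask) == tbl[i].target_bits) {
            host_flags |= tbl[i].host_bits;
        }
    }
    return host_flags;
}
#endif
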
5809 static void target_to_host_termios (void *dst, const void *src)
5810 {
5811     struct host_termios *host = dst;
5812     const struct target_termios *target = src;
5813 
5814     host->c_iflag =
5815         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5816     host->c_oflag =
5817         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5818     host->c_cflag =
5819         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5820     host->c_lflag =
5821         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5822     host->c_line = target->c_line;
5823 
5824     memset(host->c_cc, 0, sizeof(host->c_cc));
5825     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5826     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5827     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5828     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5829     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5830     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5831     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5832     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5833     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5834     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5835     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5836     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5837     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5838     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5839     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5840     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5841     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5842 }
5843 
5844 static void host_to_target_termios (void *dst, const void *src)
5845 {
5846     struct target_termios *target = dst;
5847     const struct host_termios *host = src;
5848 
5849     target->c_iflag =
5850         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5851     target->c_oflag =
5852         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5853     target->c_cflag =
5854         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5855     target->c_lflag =
5856         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5857     target->c_line = host->c_line;
5858 
5859     memset(target->c_cc, 0, sizeof(target->c_cc));
5860     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5861     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5862     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5863     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5864     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5865     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5866     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5867     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5868     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5869     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5870     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5871     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5872     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5873     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5874     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5875     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5876     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5877 }
5878 
5879 static const StructEntry struct_termios_def = {
5880     .convert = { host_to_target_termios, target_to_host_termios },
5881     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5882     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5883     .print = print_termios,
5884 };
5885 
5886 /* If the host does not provide these bits, they may be safely discarded. */
5887 #ifndef MAP_SYNC
5888 #define MAP_SYNC 0
5889 #endif
5890 #ifndef MAP_UNINITIALIZED
5891 #define MAP_UNINITIALIZED 0
5892 #endif
5893 
5894 static const bitmask_transtbl mmap_flags_tbl[] = {
5895     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5896     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5897       MAP_ANONYMOUS, MAP_ANONYMOUS },
5898     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5899       MAP_GROWSDOWN, MAP_GROWSDOWN },
5900     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5901       MAP_DENYWRITE, MAP_DENYWRITE },
5902     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5903       MAP_EXECUTABLE, MAP_EXECUTABLE },
5904     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5905     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5906       MAP_NORESERVE, MAP_NORESERVE },
5907     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5908     /* MAP_STACK had been ignored by the kernel for quite some time.
5909        Recognize it for the target insofar as we do not want to pass
5910        it through to the host.  */
5911     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5912     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5913     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5914     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5915       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5916     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5917       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5918 };
5919 
5920 /*
5921  * Arrange for legacy / undefined architecture specific flags to be
5922  * ignored by mmap handling code.
5923  */
5924 #ifndef TARGET_MAP_32BIT
5925 #define TARGET_MAP_32BIT 0
5926 #endif
5927 #ifndef TARGET_MAP_HUGE_2MB
5928 #define TARGET_MAP_HUGE_2MB 0
5929 #endif
5930 #ifndef TARGET_MAP_HUGE_1GB
5931 #define TARGET_MAP_HUGE_1GB 0
5932 #endif
5933 
5934 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5935                         int target_flags, int fd, off_t offset)
5936 {
5937     /*
5938      * The historical set of flags that all mmap types implicitly support.
5939      */
5940     enum {
5941         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5942                                | TARGET_MAP_PRIVATE
5943                                | TARGET_MAP_FIXED
5944                                | TARGET_MAP_ANONYMOUS
5945                                | TARGET_MAP_DENYWRITE
5946                                | TARGET_MAP_EXECUTABLE
5947                                | TARGET_MAP_UNINITIALIZED
5948                                | TARGET_MAP_GROWSDOWN
5949                                | TARGET_MAP_LOCKED
5950                                | TARGET_MAP_NORESERVE
5951                                | TARGET_MAP_POPULATE
5952                                | TARGET_MAP_NONBLOCK
5953                                | TARGET_MAP_STACK
5954                                | TARGET_MAP_HUGETLB
5955                                | TARGET_MAP_32BIT
5956                                | TARGET_MAP_HUGE_2MB
5957                                | TARGET_MAP_HUGE_1GB
5958     };
5959     int host_flags;
5960 
5961     switch (target_flags & TARGET_MAP_TYPE) {
5962     case TARGET_MAP_PRIVATE:
5963         host_flags = MAP_PRIVATE;
5964         break;
5965     case TARGET_MAP_SHARED:
5966         host_flags = MAP_SHARED;
5967         break;
5968     case TARGET_MAP_SHARED_VALIDATE:
5969         /*
5970          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5971          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5972          */
5973         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5974             return -TARGET_EOPNOTSUPP;
5975         }
5976         host_flags = MAP_SHARED_VALIDATE;
5977         if (target_flags & TARGET_MAP_SYNC) {
5978             host_flags |= MAP_SYNC;
5979         }
5980         break;
5981     default:
5982         return -TARGET_EINVAL;
5983     }
5984     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5985 
5986     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5987 }
5988 
5989 /*
5990  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5991  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5992  */
5993 #if defined(TARGET_I386)
5994 
5995 /* NOTE: there is really one LDT for all the threads */
5996 static uint8_t *ldt_table;
5997 
5998 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5999 {
6000     int size;
6001     void *p;
6002 
6003     if (!ldt_table)
6004         return 0;
6005     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6006     if (size > bytecount)
6007         size = bytecount;
6008     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6009     if (!p)
6010         return -TARGET_EFAULT;
6011     /* ??? Should this be byteswapped?  */
6012     memcpy(p, ldt_table, size);
6013     unlock_user(p, ptr, size);
6014     return size;
6015 }
6016 
6017 /* XXX: add locking support */
6018 static abi_long write_ldt(CPUX86State *env,
6019                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6020 {
6021     struct target_modify_ldt_ldt_s ldt_info;
6022     struct target_modify_ldt_ldt_s *target_ldt_info;
6023     int seg_32bit, contents, read_exec_only, limit_in_pages;
6024     int seg_not_present, useable, lm;
6025     uint32_t *lp, entry_1, entry_2;
6026 
6027     if (bytecount != sizeof(ldt_info))
6028         return -TARGET_EINVAL;
6029     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6030         return -TARGET_EFAULT;
6031     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6032     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6033     ldt_info.limit = tswap32(target_ldt_info->limit);
6034     ldt_info.flags = tswap32(target_ldt_info->flags);
6035     unlock_user_struct(target_ldt_info, ptr, 0);
6036 
6037     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6038         return -TARGET_EINVAL;
6039     seg_32bit = ldt_info.flags & 1;
6040     contents = (ldt_info.flags >> 1) & 3;
6041     read_exec_only = (ldt_info.flags >> 3) & 1;
6042     limit_in_pages = (ldt_info.flags >> 4) & 1;
6043     seg_not_present = (ldt_info.flags >> 5) & 1;
6044     useable = (ldt_info.flags >> 6) & 1;
6045 #ifdef TARGET_ABI32
6046     lm = 0;
6047 #else
6048     lm = (ldt_info.flags >> 7) & 1;
6049 #endif
6050     if (contents == 3) {
6051         if (oldmode)
6052             return -TARGET_EINVAL;
6053         if (seg_not_present == 0)
6054             return -TARGET_EINVAL;
6055     }
6056     /* allocate the LDT */
6057     if (!ldt_table) {
6058         env->ldt.base = target_mmap(0,
6059                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6060                                     PROT_READ|PROT_WRITE,
6061                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6062         if (env->ldt.base == -1)
6063             return -TARGET_ENOMEM;
6064         memset(g2h_untagged(env->ldt.base), 0,
6065                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6066         env->ldt.limit = 0xffff;
6067         ldt_table = g2h_untagged(env->ldt.base);
6068     }
6069 
6070     /* NOTE: same code as Linux kernel */
6071     /* Allow LDTs to be cleared by the user. */
6072     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6073         if (oldmode ||
6074             (contents == 0		&&
6075              read_exec_only == 1	&&
6076              seg_32bit == 0		&&
6077              limit_in_pages == 0	&&
6078              seg_not_present == 1	&&
6079              useable == 0 )) {
6080             entry_1 = 0;
6081             entry_2 = 0;
6082             goto install;
6083         }
6084     }
6085 
6086     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6087         (ldt_info.limit & 0x0ffff);
6088     entry_2 = (ldt_info.base_addr & 0xff000000) |
6089         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6090         (ldt_info.limit & 0xf0000) |
6091         ((read_exec_only ^ 1) << 9) |
6092         (contents << 10) |
6093         ((seg_not_present ^ 1) << 15) |
6094         (seg_32bit << 22) |
6095         (limit_in_pages << 23) |
6096         (lm << 21) |
6097         0x7000;
6098     if (!oldmode)
6099         entry_2 |= (useable << 20);
6100 
6101     /* Install the new entry ...  */
6102 install:
6103     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6104     lp[0] = tswap32(entry_1);
6105     lp[1] = tswap32(entry_2);
6106     return 0;
6107 }
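/*
 * A worked example of the packing above (a sketch following the standard
 * x86 descriptor layout): for base_addr 0x12345678 and limit 0xfffff,
 * entry_1 = (0x5678 << 16) | 0xffff = 0x5678ffff, i.e. base bits 15:0 next
 * to limit bits 15:0, while entry_2 collects base 31:24 and 23:16,
 * limit 19:16, the flag-derived access bits and the constant 0x7000
 * (descriptor type and DPL 3).
 */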
6108 
6109 /* specific and weird i386 syscalls */
6110 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6111                               unsigned long bytecount)
6112 {
6113     abi_long ret;
6114 
6115     switch (func) {
6116     case 0:
6117         ret = read_ldt(ptr, bytecount);
6118         break;
6119     case 1:
6120         ret = write_ldt(env, ptr, bytecount, 1);
6121         break;
6122     case 0x11:
6123         ret = write_ldt(env, ptr, bytecount, 0);
6124         break;
6125     default:
6126         ret = -TARGET_ENOSYS;
6127         break;
6128     }
6129     return ret;
6130 }
6131 
6132 #if defined(TARGET_ABI32)
6133 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6134 {
6135     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6136     struct target_modify_ldt_ldt_s ldt_info;
6137     struct target_modify_ldt_ldt_s *target_ldt_info;
6138     int seg_32bit, contents, read_exec_only, limit_in_pages;
6139     int seg_not_present, useable, lm;
6140     uint32_t *lp, entry_1, entry_2;
6141     int i;
6142 
6143     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6144     if (!target_ldt_info)
6145         return -TARGET_EFAULT;
6146     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6147     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6148     ldt_info.limit = tswap32(target_ldt_info->limit);
6149     ldt_info.flags = tswap32(target_ldt_info->flags);
6150     if (ldt_info.entry_number == -1) {
6151         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6152             if (gdt_table[i] == 0) {
6153                 ldt_info.entry_number = i;
6154                 target_ldt_info->entry_number = tswap32(i);
6155                 break;
6156             }
6157         }
6158     }
6159     unlock_user_struct(target_ldt_info, ptr, 1);
6160 
6161     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6162         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6163            return -TARGET_EINVAL;
6164     seg_32bit = ldt_info.flags & 1;
6165     contents = (ldt_info.flags >> 1) & 3;
6166     read_exec_only = (ldt_info.flags >> 3) & 1;
6167     limit_in_pages = (ldt_info.flags >> 4) & 1;
6168     seg_not_present = (ldt_info.flags >> 5) & 1;
6169     useable = (ldt_info.flags >> 6) & 1;
6170 #ifdef TARGET_ABI32
6171     lm = 0;
6172 #else
6173     lm = (ldt_info.flags >> 7) & 1;
6174 #endif
6175 
6176     if (contents == 3) {
6177         if (seg_not_present == 0)
6178             return -TARGET_EINVAL;
6179     }
6180 
6181     /* NOTE: same code as Linux kernel */
6182     /* Allow LDTs to be cleared by the user. */
6183     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6184         if ((contents == 0             &&
6185              read_exec_only == 1       &&
6186              seg_32bit == 0            &&
6187              limit_in_pages == 0       &&
6188              seg_not_present == 1      &&
6189              useable == 0 )) {
6190             entry_1 = 0;
6191             entry_2 = 0;
6192             goto install;
6193         }
6194     }
6195 
6196     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6197         (ldt_info.limit & 0x0ffff);
6198     entry_2 = (ldt_info.base_addr & 0xff000000) |
6199         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6200         (ldt_info.limit & 0xf0000) |
6201         ((read_exec_only ^ 1) << 9) |
6202         (contents << 10) |
6203         ((seg_not_present ^ 1) << 15) |
6204         (seg_32bit << 22) |
6205         (limit_in_pages << 23) |
6206         (useable << 20) |
6207         (lm << 21) |
6208         0x7000;
6209 
6210     /* Install the new entry ...  */
6211 install:
6212     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6213     lp[0] = tswap32(entry_1);
6214     lp[1] = tswap32(entry_2);
6215     return 0;
6216 }
6217 
6218 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6219 {
6220     struct target_modify_ldt_ldt_s *target_ldt_info;
6221     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6222     uint32_t base_addr, limit, flags;
6223     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6224     int seg_not_present, useable, lm;
6225     uint32_t *lp, entry_1, entry_2;
6226 
6227     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6228     if (!target_ldt_info)
6229         return -TARGET_EFAULT;
6230     idx = tswap32(target_ldt_info->entry_number);
6231     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6232         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6233         unlock_user_struct(target_ldt_info, ptr, 1);
6234         return -TARGET_EINVAL;
6235     }
6236     lp = (uint32_t *)(gdt_table + idx);
6237     entry_1 = tswap32(lp[0]);
6238     entry_2 = tswap32(lp[1]);
6239 
6240     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6241     contents = (entry_2 >> 10) & 3;
6242     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6243     seg_32bit = (entry_2 >> 22) & 1;
6244     limit_in_pages = (entry_2 >> 23) & 1;
6245     useable = (entry_2 >> 20) & 1;
6246 #ifdef TARGET_ABI32
6247     lm = 0;
6248 #else
6249     lm = (entry_2 >> 21) & 1;
6250 #endif
6251     flags = (seg_32bit << 0) | (contents << 1) |
6252         (read_exec_only << 3) | (limit_in_pages << 4) |
6253         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6254     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6255     base_addr = (entry_1 >> 16) |
6256         (entry_2 & 0xff000000) |
6257         ((entry_2 & 0xff) << 16);
6258     target_ldt_info->base_addr = tswapal(base_addr);
6259     target_ldt_info->limit = tswap32(limit);
6260     target_ldt_info->flags = tswap32(flags);
6261     unlock_user_struct(target_ldt_info, ptr, 1);
6262     return 0;
6263 }
6264 
6265 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6266 {
6267     return -TARGET_ENOSYS;
6268 }
6269 #else
6270 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6271 {
6272     abi_long ret = 0;
6273     abi_ulong val;
6274     int idx;
6275 
6276     switch(code) {
6277     case TARGET_ARCH_SET_GS:
6278     case TARGET_ARCH_SET_FS:
6279         if (code == TARGET_ARCH_SET_GS)
6280             idx = R_GS;
6281         else
6282             idx = R_FS;
6283         cpu_x86_load_seg(env, idx, 0);
6284         env->segs[idx].base = addr;
6285         break;
6286     case TARGET_ARCH_GET_GS:
6287     case TARGET_ARCH_GET_FS:
6288         if (code == TARGET_ARCH_GET_GS)
6289             idx = R_GS;
6290         else
6291             idx = R_FS;
6292         val = env->segs[idx].base;
6293         if (put_user(val, addr, abi_ulong))
6294             ret = -TARGET_EFAULT;
6295         break;
6296     default:
6297         ret = -TARGET_EINVAL;
6298         break;
6299     }
6300     return ret;
6301 }
6302 #endif /* defined(TARGET_ABI32) */
6303 #endif /* defined(TARGET_I386) */
6304 
6305 /*
6306  * These constants are generic.  Supply any that are missing from the host.
6307  */
6308 #ifndef PR_SET_NAME
6309 # define PR_SET_NAME    15
6310 # define PR_GET_NAME    16
6311 #endif
6312 #ifndef PR_SET_FP_MODE
6313 # define PR_SET_FP_MODE 45
6314 # define PR_GET_FP_MODE 46
6315 # define PR_FP_MODE_FR   (1 << 0)
6316 # define PR_FP_MODE_FRE  (1 << 1)
6317 #endif
6318 #ifndef PR_SVE_SET_VL
6319 # define PR_SVE_SET_VL  50
6320 # define PR_SVE_GET_VL  51
6321 # define PR_SVE_VL_LEN_MASK  0xffff
6322 # define PR_SVE_VL_INHERIT   (1 << 17)
6323 #endif
6324 #ifndef PR_PAC_RESET_KEYS
6325 # define PR_PAC_RESET_KEYS  54
6326 # define PR_PAC_APIAKEY   (1 << 0)
6327 # define PR_PAC_APIBKEY   (1 << 1)
6328 # define PR_PAC_APDAKEY   (1 << 2)
6329 # define PR_PAC_APDBKEY   (1 << 3)
6330 # define PR_PAC_APGAKEY   (1 << 4)
6331 #endif
6332 #ifndef PR_SET_TAGGED_ADDR_CTRL
6333 # define PR_SET_TAGGED_ADDR_CTRL 55
6334 # define PR_GET_TAGGED_ADDR_CTRL 56
6335 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6336 #endif
6337 #ifndef PR_SET_IO_FLUSHER
6338 # define PR_SET_IO_FLUSHER 57
6339 # define PR_GET_IO_FLUSHER 58
6340 #endif
6341 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6342 # define PR_SET_SYSCALL_USER_DISPATCH 59
6343 #endif
6344 #ifndef PR_SME_SET_VL
6345 # define PR_SME_SET_VL  63
6346 # define PR_SME_GET_VL  64
6347 # define PR_SME_VL_LEN_MASK  0xffff
6348 # define PR_SME_VL_INHERIT   (1 << 17)
6349 #endif
6350 
6351 #include "target_prctl.h"
6352 
6353 static abi_long do_prctl_inval0(CPUArchState *env)
6354 {
6355     return -TARGET_EINVAL;
6356 }
6357 
6358 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6359 {
6360     return -TARGET_EINVAL;
6361 }
6362 
6363 #ifndef do_prctl_get_fp_mode
6364 #define do_prctl_get_fp_mode do_prctl_inval0
6365 #endif
6366 #ifndef do_prctl_set_fp_mode
6367 #define do_prctl_set_fp_mode do_prctl_inval1
6368 #endif
6369 #ifndef do_prctl_sve_get_vl
6370 #define do_prctl_sve_get_vl do_prctl_inval0
6371 #endif
6372 #ifndef do_prctl_sve_set_vl
6373 #define do_prctl_sve_set_vl do_prctl_inval1
6374 #endif
6375 #ifndef do_prctl_reset_keys
6376 #define do_prctl_reset_keys do_prctl_inval1
6377 #endif
6378 #ifndef do_prctl_set_tagged_addr_ctrl
6379 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6380 #endif
6381 #ifndef do_prctl_get_tagged_addr_ctrl
6382 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6383 #endif
6384 #ifndef do_prctl_get_unalign
6385 #define do_prctl_get_unalign do_prctl_inval1
6386 #endif
6387 #ifndef do_prctl_set_unalign
6388 #define do_prctl_set_unalign do_prctl_inval1
6389 #endif
6390 #ifndef do_prctl_sme_get_vl
6391 #define do_prctl_sme_get_vl do_prctl_inval0
6392 #endif
6393 #ifndef do_prctl_sme_set_vl
6394 #define do_prctl_sme_set_vl do_prctl_inval1
6395 #endif
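/*
 * Any hook not provided by the target's target_prctl.h above falls back to
 * one of the do_prctl_inval* stubs and simply reports EINVAL to the guest.
 */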
6396 
6397 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6398                          abi_long arg3, abi_long arg4, abi_long arg5)
6399 {
6400     abi_long ret;
6401 
6402     switch (option) {
6403     case PR_GET_PDEATHSIG:
6404         {
6405             int deathsig;
6406             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6407                                   arg3, arg4, arg5));
6408             if (!is_error(ret) &&
6409                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6410                 return -TARGET_EFAULT;
6411             }
6412             return ret;
6413         }
6414     case PR_SET_PDEATHSIG:
6415         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6416                                arg3, arg4, arg5));
6417     case PR_GET_NAME:
6418         {
6419             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6420             if (!name) {
6421                 return -TARGET_EFAULT;
6422             }
6423             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6424                                   arg3, arg4, arg5));
6425             unlock_user(name, arg2, 16);
6426             return ret;
6427         }
6428     case PR_SET_NAME:
6429         {
6430             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6431             if (!name) {
6432                 return -TARGET_EFAULT;
6433             }
6434             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6435                                   arg3, arg4, arg5));
6436             unlock_user(name, arg2, 0);
6437             return ret;
6438         }
6439     case PR_GET_FP_MODE:
6440         return do_prctl_get_fp_mode(env);
6441     case PR_SET_FP_MODE:
6442         return do_prctl_set_fp_mode(env, arg2);
6443     case PR_SVE_GET_VL:
6444         return do_prctl_sve_get_vl(env);
6445     case PR_SVE_SET_VL:
6446         return do_prctl_sve_set_vl(env, arg2);
6447     case PR_SME_GET_VL:
6448         return do_prctl_sme_get_vl(env);
6449     case PR_SME_SET_VL:
6450         return do_prctl_sme_set_vl(env, arg2);
6451     case PR_PAC_RESET_KEYS:
6452         if (arg3 || arg4 || arg5) {
6453             return -TARGET_EINVAL;
6454         }
6455         return do_prctl_reset_keys(env, arg2);
6456     case PR_SET_TAGGED_ADDR_CTRL:
6457         if (arg3 || arg4 || arg5) {
6458             return -TARGET_EINVAL;
6459         }
6460         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6461     case PR_GET_TAGGED_ADDR_CTRL:
6462         if (arg2 || arg3 || arg4 || arg5) {
6463             return -TARGET_EINVAL;
6464         }
6465         return do_prctl_get_tagged_addr_ctrl(env);
6466 
6467     case PR_GET_UNALIGN:
6468         return do_prctl_get_unalign(env, arg2);
6469     case PR_SET_UNALIGN:
6470         return do_prctl_set_unalign(env, arg2);
6471 
6472     case PR_CAP_AMBIENT:
6473     case PR_CAPBSET_READ:
6474     case PR_CAPBSET_DROP:
6475     case PR_GET_DUMPABLE:
6476     case PR_SET_DUMPABLE:
6477     case PR_GET_KEEPCAPS:
6478     case PR_SET_KEEPCAPS:
6479     case PR_GET_SECUREBITS:
6480     case PR_SET_SECUREBITS:
6481     case PR_GET_TIMING:
6482     case PR_SET_TIMING:
6483     case PR_GET_TIMERSLACK:
6484     case PR_SET_TIMERSLACK:
6485     case PR_MCE_KILL:
6486     case PR_MCE_KILL_GET:
6487     case PR_GET_NO_NEW_PRIVS:
6488     case PR_SET_NO_NEW_PRIVS:
6489     case PR_GET_IO_FLUSHER:
6490     case PR_SET_IO_FLUSHER:
6491     case PR_SET_CHILD_SUBREAPER:
6492     case PR_GET_SPECULATION_CTRL:
6493     case PR_SET_SPECULATION_CTRL:
6494         /* Some prctl options have no pointer arguments; pass them on as-is. */
6495         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6496 
6497     case PR_GET_CHILD_SUBREAPER:
6498         {
6499             int val;
6500             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6501                                   arg3, arg4, arg5));
6502             if (!is_error(ret) && put_user_s32(val, arg2)) {
6503                 return -TARGET_EFAULT;
6504             }
6505             return ret;
6506         }
6507 
6508     case PR_GET_TID_ADDRESS:
6509         {
6510             TaskState *ts = get_task_state(env_cpu(env));
6511             return put_user_ual(ts->child_tidptr, arg2);
6512         }
6513 
6514     case PR_GET_FPEXC:
6515     case PR_SET_FPEXC:
6516         /* Was used for SPE on PowerPC. */
6517         return -TARGET_EINVAL;
6518 
6519     case PR_GET_ENDIAN:
6520     case PR_SET_ENDIAN:
6521     case PR_GET_FPEMU:
6522     case PR_SET_FPEMU:
6523     case PR_SET_MM:
6524     case PR_GET_SECCOMP:
6525     case PR_SET_SECCOMP:
6526     case PR_SET_SYSCALL_USER_DISPATCH:
6527     case PR_GET_THP_DISABLE:
6528     case PR_SET_THP_DISABLE:
6529     case PR_GET_TSC:
6530     case PR_SET_TSC:
6531         /* Refuse these so the guest cannot disable things QEMU needs. */
6532         return -TARGET_EINVAL;
6533 
6534     default:
6535         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6536                       option);
6537         return -TARGET_EINVAL;
6538     }
6539 }
6540 
6541 #define NEW_STACK_SIZE 0x40000
6542 
6543 
6544 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6545 typedef struct {
6546     CPUArchState *env;
6547     pthread_mutex_t mutex;
6548     pthread_cond_t cond;
6549     pthread_t thread;
6550     uint32_t tid;
6551     abi_ulong child_tidptr;
6552     abi_ulong parent_tidptr;
6553     sigset_t sigmask;
6554 } new_thread_info;
6555 
6556 static void *clone_func(void *arg)
6557 {
6558     new_thread_info *info = arg;
6559     CPUArchState *env;
6560     CPUState *cpu;
6561     TaskState *ts;
6562 
6563     rcu_register_thread();
6564     tcg_register_thread();
6565     env = info->env;
6566     cpu = env_cpu(env);
6567     thread_cpu = cpu;
6568     ts = get_task_state(cpu);
6569     info->tid = sys_gettid();
6570     task_settid(ts);
6571     if (info->child_tidptr)
6572         put_user_u32(info->tid, info->child_tidptr);
6573     if (info->parent_tidptr)
6574         put_user_u32(info->tid, info->parent_tidptr);
6575     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6576     /* Enable signals.  */
6577     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6578     /* Signal to the parent that we're ready.  */
6579     pthread_mutex_lock(&info->mutex);
6580     pthread_cond_broadcast(&info->cond);
6581     pthread_mutex_unlock(&info->mutex);
6582     /* Wait until the parent has finished initializing the tls state.  */
6583     pthread_mutex_lock(&clone_lock);
6584     pthread_mutex_unlock(&clone_lock);
6585     cpu_loop(env);
6586     /* never exits */
6587     return NULL;
6588 }
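/*
 * The startup handshake in outline: the parent holds clone_lock while it
 * finishes setting up the new TaskState/TLS, the child announces its tid
 * and signals info->cond, and then briefly takes clone_lock itself purely
 * as a barrier so it cannot enter cpu_loop() before the parent is done.
 */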
6589 
6590 /* do_fork() must return host values and target errnos (unlike most
6591    do_*() functions). */
6592 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6593                    abi_ulong parent_tidptr, target_ulong newtls,
6594                    abi_ulong child_tidptr)
6595 {
6596     CPUState *cpu = env_cpu(env);
6597     int ret;
6598     TaskState *ts;
6599     CPUState *new_cpu;
6600     CPUArchState *new_env;
6601     sigset_t sigmask;
6602 
6603     flags &= ~CLONE_IGNORED_FLAGS;
6604 
6605     /* Emulate vfork() with fork() */
6606     if (flags & CLONE_VFORK)
6607         flags &= ~(CLONE_VFORK | CLONE_VM);
6608 
6609     if (flags & CLONE_VM) {
6610         TaskState *parent_ts = get_task_state(cpu);
6611         new_thread_info info;
6612         pthread_attr_t attr;
6613 
6614         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6615             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6616             return -TARGET_EINVAL;
6617         }
6618 
6619         ts = g_new0(TaskState, 1);
6620         init_task_state(ts);
6621 
6622         /* Grab a mutex so that thread setup appears atomic.  */
6623         pthread_mutex_lock(&clone_lock);
6624 
6625         /*
6626          * If this is our first additional thread, we need to ensure we
6627          * generate code for parallel execution and flush old translations.
6628          * Do this now so that the copy gets CF_PARALLEL too.
6629          */
6630         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6631             tcg_cflags_set(cpu, CF_PARALLEL);
6632             tb_flush(cpu);
6633         }
6634 
6635         /* we create a new CPU instance. */
6636         new_env = cpu_copy(env);
6637         /* Init regs that differ from the parent.  */
6638         cpu_clone_regs_child(new_env, newsp, flags);
6639         cpu_clone_regs_parent(env, flags);
6640         new_cpu = env_cpu(new_env);
6641         new_cpu->opaque = ts;
6642         ts->bprm = parent_ts->bprm;
6643         ts->info = parent_ts->info;
6644         ts->signal_mask = parent_ts->signal_mask;
6645 
6646         if (flags & CLONE_CHILD_CLEARTID) {
6647             ts->child_tidptr = child_tidptr;
6648         }
6649 
6650         if (flags & CLONE_SETTLS) {
6651             cpu_set_tls (new_env, newtls);
6652         }
6653 
6654         memset(&info, 0, sizeof(info));
6655         pthread_mutex_init(&info.mutex, NULL);
6656         pthread_mutex_lock(&info.mutex);
6657         pthread_cond_init(&info.cond, NULL);
6658         info.env = new_env;
6659         if (flags & CLONE_CHILD_SETTID) {
6660             info.child_tidptr = child_tidptr;
6661         }
6662         if (flags & CLONE_PARENT_SETTID) {
6663             info.parent_tidptr = parent_tidptr;
6664         }
6665 
6666         ret = pthread_attr_init(&attr);
6667         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6668         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6669         /* It is not safe to deliver signals until the child has finished
6670            initializing, so temporarily block all signals.  */
6671         sigfillset(&sigmask);
6672         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6673         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6674 
6675         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6676         /* TODO: Free new CPU state if thread creation failed.  */
6677 
6678         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6679         pthread_attr_destroy(&attr);
6680         if (ret == 0) {
6681             /* Wait for the child to initialize.  */
6682             pthread_cond_wait(&info.cond, &info.mutex);
6683             ret = info.tid;
6684         } else {
6685             ret = -1;
6686         }
6687         pthread_mutex_unlock(&info.mutex);
6688         pthread_cond_destroy(&info.cond);
6689         pthread_mutex_destroy(&info.mutex);
6690         pthread_mutex_unlock(&clone_lock);
6691     } else {
6692         /* if no CLONE_VM, we consider it is a fork */
6693         if (flags & CLONE_INVALID_FORK_FLAGS) {
6694             return -TARGET_EINVAL;
6695         }
6696 
6697         /* We can't support custom termination signals */
6698         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6699             return -TARGET_EINVAL;
6700         }
6701 
6702 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6703         if (flags & CLONE_PIDFD) {
6704             return -TARGET_EINVAL;
6705         }
6706 #endif
6707 
6708         /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6709         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6710             return -TARGET_EINVAL;
6711         }
6712 
6713         if (block_signals()) {
6714             return -QEMU_ERESTARTSYS;
6715         }
6716 
6717         fork_start();
6718         ret = fork();
6719         if (ret == 0) {
6720             /* Child Process.  */
6721             cpu_clone_regs_child(env, newsp, flags);
6722             fork_end(ret);
6723             /* There is a race condition here.  The parent process could
6724                theoretically read the TID in the child process before the child
6725                tid is set.  This would require using either ptrace
6726                (not implemented) or having *_tidptr to point at a shared memory
6727                mapping.  We can't repeat the spinlock hack used above because
6728                the child process gets its own copy of the lock.  */
6729             if (flags & CLONE_CHILD_SETTID)
6730                 put_user_u32(sys_gettid(), child_tidptr);
6731             if (flags & CLONE_PARENT_SETTID)
6732                 put_user_u32(sys_gettid(), parent_tidptr);
6733             ts = get_task_state(cpu);
6734             if (flags & CLONE_SETTLS)
6735                 cpu_set_tls (env, newtls);
6736             if (flags & CLONE_CHILD_CLEARTID)
6737                 ts->child_tidptr = child_tidptr;
6738         } else {
6739             cpu_clone_regs_parent(env, flags);
6740             if (flags & CLONE_PIDFD) {
6741                 int pid_fd = 0;
6742 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6743                 int pid_child = ret;
6744                 pid_fd = pidfd_open(pid_child, 0);
6745                 if (pid_fd >= 0) {
6746                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6747                                                | FD_CLOEXEC);
6748                 } else {
6749                         pid_fd = 0;
6750                 }
6751 #endif
6752                 put_user_u32(pid_fd, parent_tidptr);
6753             }
6754             fork_end(ret);
6755         }
6756         g_assert(!cpu_in_exclusive_context(cpu));
6757     }
6758     return ret;
6759 }
6760 
6761 /* Warning: does not handle Linux-specific flags... */
6762 static int target_to_host_fcntl_cmd(int cmd)
6763 {
6764     int ret;
6765 
6766     switch(cmd) {
6767     case TARGET_F_DUPFD:
6768     case TARGET_F_GETFD:
6769     case TARGET_F_SETFD:
6770     case TARGET_F_GETFL:
6771     case TARGET_F_SETFL:
6772     case TARGET_F_OFD_GETLK:
6773     case TARGET_F_OFD_SETLK:
6774     case TARGET_F_OFD_SETLKW:
6775         ret = cmd;
6776         break;
6777     case TARGET_F_GETLK:
6778         ret = F_GETLK;
6779         break;
6780     case TARGET_F_SETLK:
6781         ret = F_SETLK;
6782         break;
6783     case TARGET_F_SETLKW:
6784         ret = F_SETLKW;
6785         break;
6786     case TARGET_F_GETOWN:
6787         ret = F_GETOWN;
6788         break;
6789     case TARGET_F_SETOWN:
6790         ret = F_SETOWN;
6791         break;
6792     case TARGET_F_GETSIG:
6793         ret = F_GETSIG;
6794         break;
6795     case TARGET_F_SETSIG:
6796         ret = F_SETSIG;
6797         break;
6798 #if TARGET_ABI_BITS == 32
6799     case TARGET_F_GETLK64:
6800         ret = F_GETLK;
6801         break;
6802     case TARGET_F_SETLK64:
6803         ret = F_SETLK;
6804         break;
6805     case TARGET_F_SETLKW64:
6806         ret = F_SETLKW;
6807         break;
6808 #endif
6809     case TARGET_F_SETLEASE:
6810         ret = F_SETLEASE;
6811         break;
6812     case TARGET_F_GETLEASE:
6813         ret = F_GETLEASE;
6814         break;
6815 #ifdef F_DUPFD_CLOEXEC
6816     case TARGET_F_DUPFD_CLOEXEC:
6817         ret = F_DUPFD_CLOEXEC;
6818         break;
6819 #endif
6820     case TARGET_F_NOTIFY:
6821         ret = F_NOTIFY;
6822         break;
6823 #ifdef F_GETOWN_EX
6824     case TARGET_F_GETOWN_EX:
6825         ret = F_GETOWN_EX;
6826         break;
6827 #endif
6828 #ifdef F_SETOWN_EX
6829     case TARGET_F_SETOWN_EX:
6830         ret = F_SETOWN_EX;
6831         break;
6832 #endif
6833 #ifdef F_SETPIPE_SZ
6834     case TARGET_F_SETPIPE_SZ:
6835         ret = F_SETPIPE_SZ;
6836         break;
6837     case TARGET_F_GETPIPE_SZ:
6838         ret = F_GETPIPE_SZ;
6839         break;
6840 #endif
6841 #ifdef F_ADD_SEALS
6842     case TARGET_F_ADD_SEALS:
6843         ret = F_ADD_SEALS;
6844         break;
6845     case TARGET_F_GET_SEALS:
6846         ret = F_GET_SEALS;
6847         break;
6848 #endif
6849     default:
6850         ret = -TARGET_EINVAL;
6851         break;
6852     }
6853 
6854 #if defined(__powerpc64__)
6855     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6856      * the kernel does not support. The glibc fcntl call actually adjusts
6857      * them to 5, 6 and 7 before making the syscall(). Since we make the
6858      * syscall directly, adjust to what is supported by the kernel.
6859      */
6860     if (ret >= F_GETLK && ret <= F_SETLKW) {
6861         ret -= F_GETLK - 5;
6862     }
6863 #endif
6864 
6865     return ret;
6866 }
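/*
 * Concretely (on ppc64 hosts): glibc's F_GETLK/F_SETLK/F_SETLKW are 12/13/14,
 * so after the adjustment above a TARGET_F_SETLK request is issued to the
 * kernel as 6, matching the values the raw syscall expects.
 */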
6867 
6868 #define FLOCK_TRANSTBL \
6869     switch (type) { \
6870     TRANSTBL_CONVERT(F_RDLCK); \
6871     TRANSTBL_CONVERT(F_WRLCK); \
6872     TRANSTBL_CONVERT(F_UNLCK); \
6873     }
6874 
6875 static int target_to_host_flock(int type)
6876 {
6877 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6878     FLOCK_TRANSTBL
6879 #undef  TRANSTBL_CONVERT
6880     return -TARGET_EINVAL;
6881 }
6882 
6883 static int host_to_target_flock(int type)
6884 {
6885 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6886     FLOCK_TRANSTBL
6887 #undef  TRANSTBL_CONVERT
6888     /* If we don't know how to convert the value coming
6889      * from the host, copy it to the target field as-is.
6890      */
6891     return type;
6892 }
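/*
 * FLOCK_TRANSTBL is a small X-macro: with TRANSTBL_CONVERT(a) defined as
 * "case TARGET_##a: return a" it expands to the target-to-host switch above,
 * and with the reversed definition to the host-to-target one, so the list
 * of lock types only has to be written once.
 */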
6893 
6894 static inline abi_long copy_from_user_flock(struct flock *fl,
6895                                             abi_ulong target_flock_addr)
6896 {
6897     struct target_flock *target_fl;
6898     int l_type;
6899 
6900     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6901         return -TARGET_EFAULT;
6902     }
6903 
6904     __get_user(l_type, &target_fl->l_type);
6905     l_type = target_to_host_flock(l_type);
6906     if (l_type < 0) {
6907         return l_type;
6908     }
6909     fl->l_type = l_type;
6910     __get_user(fl->l_whence, &target_fl->l_whence);
6911     __get_user(fl->l_start, &target_fl->l_start);
6912     __get_user(fl->l_len, &target_fl->l_len);
6913     __get_user(fl->l_pid, &target_fl->l_pid);
6914     unlock_user_struct(target_fl, target_flock_addr, 0);
6915     return 0;
6916 }
6917 
6918 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6919                                           const struct flock *fl)
6920 {
6921     struct target_flock *target_fl;
6922     short l_type;
6923 
6924     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6925         return -TARGET_EFAULT;
6926     }
6927 
6928     l_type = host_to_target_flock(fl->l_type);
6929     __put_user(l_type, &target_fl->l_type);
6930     __put_user(fl->l_whence, &target_fl->l_whence);
6931     __put_user(fl->l_start, &target_fl->l_start);
6932     __put_user(fl->l_len, &target_fl->l_len);
6933     __put_user(fl->l_pid, &target_fl->l_pid);
6934     unlock_user_struct(target_fl, target_flock_addr, 1);
6935     return 0;
6936 }
6937 
6938 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6939 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6940 
6941 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6942 struct target_oabi_flock64 {
6943     abi_short l_type;
6944     abi_short l_whence;
6945     abi_llong l_start;
6946     abi_llong l_len;
6947     abi_int   l_pid;
6948 } QEMU_PACKED;
6949 
6950 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6951                                                    abi_ulong target_flock_addr)
6952 {
6953     struct target_oabi_flock64 *target_fl;
6954     int l_type;
6955 
6956     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     __get_user(l_type, &target_fl->l_type);
6961     l_type = target_to_host_flock(l_type);
6962     if (l_type < 0) {
6963         return l_type;
6964     }
6965     fl->l_type = l_type;
6966     __get_user(fl->l_whence, &target_fl->l_whence);
6967     __get_user(fl->l_start, &target_fl->l_start);
6968     __get_user(fl->l_len, &target_fl->l_len);
6969     __get_user(fl->l_pid, &target_fl->l_pid);
6970     unlock_user_struct(target_fl, target_flock_addr, 0);
6971     return 0;
6972 }
6973 
6974 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6975                                                  const struct flock *fl)
6976 {
6977     struct target_oabi_flock64 *target_fl;
6978     short l_type;
6979 
6980     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6981         return -TARGET_EFAULT;
6982     }
6983 
6984     l_type = host_to_target_flock(fl->l_type);
6985     __put_user(l_type, &target_fl->l_type);
6986     __put_user(fl->l_whence, &target_fl->l_whence);
6987     __put_user(fl->l_start, &target_fl->l_start);
6988     __put_user(fl->l_len, &target_fl->l_len);
6989     __put_user(fl->l_pid, &target_fl->l_pid);
6990     unlock_user_struct(target_fl, target_flock_addr, 1);
6991     return 0;
6992 }
6993 #endif
6994 
6995 static inline abi_long copy_from_user_flock64(struct flock *fl,
6996                                               abi_ulong target_flock_addr)
6997 {
6998     struct target_flock64 *target_fl;
6999     int l_type;
7000 
7001     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7002         return -TARGET_EFAULT;
7003     }
7004 
7005     __get_user(l_type, &target_fl->l_type);
7006     l_type = target_to_host_flock(l_type);
7007     if (l_type < 0) {
7008         return l_type;
7009     }
7010     fl->l_type = l_type;
7011     __get_user(fl->l_whence, &target_fl->l_whence);
7012     __get_user(fl->l_start, &target_fl->l_start);
7013     __get_user(fl->l_len, &target_fl->l_len);
7014     __get_user(fl->l_pid, &target_fl->l_pid);
7015     unlock_user_struct(target_fl, target_flock_addr, 0);
7016     return 0;
7017 }
7018 
7019 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7020                                             const struct flock *fl)
7021 {
7022     struct target_flock64 *target_fl;
7023     short l_type;
7024 
7025     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7026         return -TARGET_EFAULT;
7027     }
7028 
7029     l_type = host_to_target_flock(fl->l_type);
7030     __put_user(l_type, &target_fl->l_type);
7031     __put_user(fl->l_whence, &target_fl->l_whence);
7032     __put_user(fl->l_start, &target_fl->l_start);
7033     __put_user(fl->l_len, &target_fl->l_len);
7034     __put_user(fl->l_pid, &target_fl->l_pid);
7035     unlock_user_struct(target_fl, target_flock_addr, 1);
7036     return 0;
7037 }
7038 
7039 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7040 {
7041     struct flock fl;
7042 #ifdef F_GETOWN_EX
7043     struct f_owner_ex fox;
7044     struct target_f_owner_ex *target_fox;
7045 #endif
7046     abi_long ret;
7047     int host_cmd = target_to_host_fcntl_cmd(cmd);
7048 
7049     if (host_cmd == -TARGET_EINVAL)
7050         return host_cmd;
7051 
7052     switch(cmd) {
7053     case TARGET_F_GETLK:
7054         ret = copy_from_user_flock(&fl, arg);
7055         if (ret) {
7056             return ret;
7057         }
7058         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7059         if (ret == 0) {
7060             ret = copy_to_user_flock(arg, &fl);
7061         }
7062         break;
7063 
7064     case TARGET_F_SETLK:
7065     case TARGET_F_SETLKW:
7066         ret = copy_from_user_flock(&fl, arg);
7067         if (ret) {
7068             return ret;
7069         }
7070         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7071         break;
7072 
7073     case TARGET_F_GETLK64:
7074     case TARGET_F_OFD_GETLK:
7075         ret = copy_from_user_flock64(&fl, arg);
7076         if (ret) {
7077             return ret;
7078         }
7079         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7080         if (ret == 0) {
7081             ret = copy_to_user_flock64(arg, &fl);
7082         }
7083         break;
7084     case TARGET_F_SETLK64:
7085     case TARGET_F_SETLKW64:
7086     case TARGET_F_OFD_SETLK:
7087     case TARGET_F_OFD_SETLKW:
7088         ret = copy_from_user_flock64(&fl, arg);
7089         if (ret) {
7090             return ret;
7091         }
7092         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7093         break;
7094 
7095     case TARGET_F_GETFL:
7096         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7097         if (ret >= 0) {
7098             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7099             /* Tell 32-bit guests the fd uses O_LARGEFILE on 64-bit hosts. */
7100             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7101                 ret |= TARGET_O_LARGEFILE;
7102             }
7103         }
7104         break;
7105 
7106     case TARGET_F_SETFL:
7107         ret = get_errno(safe_fcntl(fd, host_cmd,
7108                                    target_to_host_bitmask(arg,
7109                                                           fcntl_flags_tbl)));
7110         break;
7111 
7112 #ifdef F_GETOWN_EX
7113     case TARGET_F_GETOWN_EX:
7114         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7115         if (ret >= 0) {
7116             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7117                 return -TARGET_EFAULT;
7118             target_fox->type = tswap32(fox.type);
7119             target_fox->pid = tswap32(fox.pid);
7120             unlock_user_struct(target_fox, arg, 1);
7121         }
7122         break;
7123 #endif
7124 
7125 #ifdef F_SETOWN_EX
7126     case TARGET_F_SETOWN_EX:
7127         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7128             return -TARGET_EFAULT;
7129         fox.type = tswap32(target_fox->type);
7130         fox.pid = tswap32(target_fox->pid);
7131         unlock_user_struct(target_fox, arg, 0);
7132         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7133         break;
7134 #endif
7135 
7136     case TARGET_F_SETSIG:
7137         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7138         break;
7139 
7140     case TARGET_F_GETSIG:
7141         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7142         break;
7143 
7144     case TARGET_F_SETOWN:
7145     case TARGET_F_GETOWN:
7146     case TARGET_F_SETLEASE:
7147     case TARGET_F_GETLEASE:
7148     case TARGET_F_SETPIPE_SZ:
7149     case TARGET_F_GETPIPE_SZ:
7150     case TARGET_F_ADD_SEALS:
7151     case TARGET_F_GET_SEALS:
7152         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7153         break;
7154 
7155     default:
7156         ret = get_errno(safe_fcntl(fd, cmd, arg));
7157         break;
7158     }
7159     return ret;
7160 }
7161 
7162 #ifdef USE_UID16
7163 
7164 static inline int high2lowuid(int uid)
7165 {
7166     if (uid > 65535)
7167         return 65534;
7168     else
7169         return uid;
7170 }
7171 
7172 static inline int high2lowgid(int gid)
7173 {
7174     if (gid > 65535)
7175         return 65534;
7176     else
7177         return gid;
7178 }
7179 
7180 static inline int low2highuid(int uid)
7181 {
7182     if ((int16_t)uid == -1)
7183         return -1;
7184     else
7185         return uid;
7186 }
7187 
7188 static inline int low2highgid(int gid)
7189 {
7190     if ((int16_t)gid == -1)
7191         return -1;
7192     else
7193         return gid;
7194 }
7195 static inline int tswapid(int id)
7196 {
7197     return tswap16(id);
7198 }
7199 
7200 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7201 
7202 #else /* !USE_UID16 */
7203 static inline int high2lowuid(int uid)
7204 {
7205     return uid;
7206 }
7207 static inline int high2lowgid(int gid)
7208 {
7209     return gid;
7210 }
7211 static inline int low2highuid(int uid)
7212 {
7213     return uid;
7214 }
7215 static inline int low2highgid(int gid)
7216 {
7217     return gid;
7218 }
7219 static inline int tswapid(int id)
7220 {
7221     return tswap32(id);
7222 }
7223 
7224 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7225 
7226 #endif /* USE_UID16 */
7227 
7228 /* We must do direct syscalls for setting UID/GID, because we want to
7229  * implement the Linux system call semantics of "change only for this thread",
7230  * not the libc/POSIX semantics of "change for all threads in process".
7231  * (See http://ewontfix.com/17/ for more details.)
7232  * We use the 32-bit version of the syscalls if present; if it is not
7233  * then either the host architecture supports 32-bit UIDs natively with
7234  * the standard syscall, or the 16-bit UID is the best we can do.
7235  */
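/*
 * For example, a guest thread calling setuid(1000) must change the UID of
 * that thread only; going through the host libc's setuid() would instead
 * broadcast the change to every QEMU thread via its own signal-based
 * mechanism (see the link above), which is why the raw sys_setuid() and
 * friends below are used.
 */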
7236 #ifdef __NR_setuid32
7237 #define __NR_sys_setuid __NR_setuid32
7238 #else
7239 #define __NR_sys_setuid __NR_setuid
7240 #endif
7241 #ifdef __NR_setgid32
7242 #define __NR_sys_setgid __NR_setgid32
7243 #else
7244 #define __NR_sys_setgid __NR_setgid
7245 #endif
7246 #ifdef __NR_setresuid32
7247 #define __NR_sys_setresuid __NR_setresuid32
7248 #else
7249 #define __NR_sys_setresuid __NR_setresuid
7250 #endif
7251 #ifdef __NR_setresgid32
7252 #define __NR_sys_setresgid __NR_setresgid32
7253 #else
7254 #define __NR_sys_setresgid __NR_setresgid
7255 #endif
7256 #ifdef __NR_setgroups32
7257 #define __NR_sys_setgroups __NR_setgroups32
7258 #else
7259 #define __NR_sys_setgroups __NR_setgroups
7260 #endif
7261 #ifdef __NR_sys_setreuid32
7262 #define __NR_sys_setreuid __NR_setreuid32
7263 #else
7264 #define __NR_sys_setreuid __NR_setreuid
7265 #endif
7266 #ifdef __NR_sys_setregid32
7267 #define __NR_sys_setregid __NR_setregid32
7268 #else
7269 #define __NR_sys_setregid __NR_setregid
7270 #endif
7271 
7272 _syscall1(int, sys_setuid, uid_t, uid)
7273 _syscall1(int, sys_setgid, gid_t, gid)
7274 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7275 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7276 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7277 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7278 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7279 
7280 void syscall_init(void)
7281 {
7282     IOCTLEntry *ie;
7283     const argtype *arg_type;
7284     int size;
7285 
7286     thunk_init(STRUCT_MAX);
7287 
7288 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7289 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7290 #include "syscall_types.h"
7291 #undef STRUCT
7292 #undef STRUCT_SPECIAL
7293 
7294     /* we patch the ioctl size if necessary. We rely on the fact that
7295        no ioctl has all the bits at '1' in the size field */
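    /*
     * For example, an entry whose size field is all-ones (TARGET_IOC_SIZEMASK)
     * is a placeholder: its real size is taken from the pointed-to thunk type,
     * so a 24-byte struct ends up encoded as (24 << TARGET_IOC_SIZESHIFT) in
     * the patched target_cmd.
     */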
7296     ie = ioctl_entries;
7297     while (ie->target_cmd != 0) {
7298         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7299             TARGET_IOC_SIZEMASK) {
7300             arg_type = ie->arg_type;
7301             if (arg_type[0] != TYPE_PTR) {
7302                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7303                         ie->target_cmd);
7304                 exit(1);
7305             }
7306             arg_type++;
7307             size = thunk_type_size(arg_type, 0);
7308             ie->target_cmd = (ie->target_cmd &
7309                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7310                 (size << TARGET_IOC_SIZESHIFT);
7311         }
7312 
7313         /* automatic consistency check if same arch */
7314 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7315     (defined(__x86_64__) && defined(TARGET_X86_64))
7316         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7317             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7318                     ie->name, ie->target_cmd, ie->host_cmd);
7319         }
7320 #endif
7321         ie++;
7322     }
7323 }
7324 
7325 #ifdef TARGET_NR_truncate64
7326 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7327                                          abi_long arg2,
7328                                          abi_long arg3,
7329                                          abi_long arg4)
7330 {
7331     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7332         arg2 = arg3;
7333         arg3 = arg4;
7334     }
7335     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7336 }
7337 #endif
7338 
7339 #ifdef TARGET_NR_ftruncate64
7340 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7341                                           abi_long arg2,
7342                                           abi_long arg3,
7343                                           abi_long arg4)
7344 {
7345     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7346         arg2 = arg3;
7347         arg3 = arg4;
7348     }
7349     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7350 }
7351 #endif
7352 
7353 #if defined(TARGET_NR_timer_settime) || \
7354     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7355 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7356                                                  abi_ulong target_addr)
7357 {
7358     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7359                                 offsetof(struct target_itimerspec,
7360                                          it_interval)) ||
7361         target_to_host_timespec(&host_its->it_value, target_addr +
7362                                 offsetof(struct target_itimerspec,
7363                                          it_value))) {
7364         return -TARGET_EFAULT;
7365     }
7366 
7367     return 0;
7368 }
7369 #endif
7370 
7371 #if defined(TARGET_NR_timer_settime64) || \
7372     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7373 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7374                                                    abi_ulong target_addr)
7375 {
7376     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7377                                   offsetof(struct target__kernel_itimerspec,
7378                                            it_interval)) ||
7379         target_to_host_timespec64(&host_its->it_value, target_addr +
7380                                   offsetof(struct target__kernel_itimerspec,
7381                                            it_value))) {
7382         return -TARGET_EFAULT;
7383     }
7384 
7385     return 0;
7386 }
7387 #endif
7388 
7389 #if ((defined(TARGET_NR_timerfd_gettime) || \
7390       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7391       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7392 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7393                                                  struct itimerspec *host_its)
7394 {
7395     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7396                                                        it_interval),
7397                                 &host_its->it_interval) ||
7398         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7399                                                        it_value),
7400                                 &host_its->it_value)) {
7401         return -TARGET_EFAULT;
7402     }
7403     return 0;
7404 }
7405 #endif
7406 
7407 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7408       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7409       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7410 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7411                                                    struct itimerspec *host_its)
7412 {
7413     if (host_to_target_timespec64(target_addr +
7414                                   offsetof(struct target__kernel_itimerspec,
7415                                            it_interval),
7416                                   &host_its->it_interval) ||
7417         host_to_target_timespec64(target_addr +
7418                                   offsetof(struct target__kernel_itimerspec,
7419                                            it_value),
7420                                   &host_its->it_value)) {
7421         return -TARGET_EFAULT;
7422     }
7423     return 0;
7424 }
7425 #endif
7426 
7427 #if defined(TARGET_NR_adjtimex) || \
7428     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7429 static inline abi_long target_to_host_timex(struct timex *host_tx,
7430                                             abi_long target_addr)
7431 {
7432     struct target_timex *target_tx;
7433 
7434     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7435         return -TARGET_EFAULT;
7436     }
7437 
7438     __get_user(host_tx->modes, &target_tx->modes);
7439     __get_user(host_tx->offset, &target_tx->offset);
7440     __get_user(host_tx->freq, &target_tx->freq);
7441     __get_user(host_tx->maxerror, &target_tx->maxerror);
7442     __get_user(host_tx->esterror, &target_tx->esterror);
7443     __get_user(host_tx->status, &target_tx->status);
7444     __get_user(host_tx->constant, &target_tx->constant);
7445     __get_user(host_tx->precision, &target_tx->precision);
7446     __get_user(host_tx->tolerance, &target_tx->tolerance);
7447     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7448     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7449     __get_user(host_tx->tick, &target_tx->tick);
7450     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7451     __get_user(host_tx->jitter, &target_tx->jitter);
7452     __get_user(host_tx->shift, &target_tx->shift);
7453     __get_user(host_tx->stabil, &target_tx->stabil);
7454     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7455     __get_user(host_tx->calcnt, &target_tx->calcnt);
7456     __get_user(host_tx->errcnt, &target_tx->errcnt);
7457     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7458     __get_user(host_tx->tai, &target_tx->tai);
7459 
7460     unlock_user_struct(target_tx, target_addr, 0);
7461     return 0;
7462 }
7463 
7464 static inline abi_long host_to_target_timex(abi_long target_addr,
7465                                             struct timex *host_tx)
7466 {
7467     struct target_timex *target_tx;
7468 
7469     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7470         return -TARGET_EFAULT;
7471     }
7472 
7473     __put_user(host_tx->modes, &target_tx->modes);
7474     __put_user(host_tx->offset, &target_tx->offset);
7475     __put_user(host_tx->freq, &target_tx->freq);
7476     __put_user(host_tx->maxerror, &target_tx->maxerror);
7477     __put_user(host_tx->esterror, &target_tx->esterror);
7478     __put_user(host_tx->status, &target_tx->status);
7479     __put_user(host_tx->constant, &target_tx->constant);
7480     __put_user(host_tx->precision, &target_tx->precision);
7481     __put_user(host_tx->tolerance, &target_tx->tolerance);
7482     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7483     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7484     __put_user(host_tx->tick, &target_tx->tick);
7485     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7486     __put_user(host_tx->jitter, &target_tx->jitter);
7487     __put_user(host_tx->shift, &target_tx->shift);
7488     __put_user(host_tx->stabil, &target_tx->stabil);
7489     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7490     __put_user(host_tx->calcnt, &target_tx->calcnt);
7491     __put_user(host_tx->errcnt, &target_tx->errcnt);
7492     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7493     __put_user(host_tx->tai, &target_tx->tai);
7494 
7495     unlock_user_struct(target_tx, target_addr, 1);
7496     return 0;
7497 }
7498 #endif
7499 
7500 
7501 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7502 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7503                                               abi_long target_addr)
7504 {
7505     struct target__kernel_timex *target_tx;
7506 
7507     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7508                                  offsetof(struct target__kernel_timex,
7509                                           time))) {
7510         return -TARGET_EFAULT;
7511     }
7512 
7513     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7514         return -TARGET_EFAULT;
7515     }
7516 
7517     __get_user(host_tx->modes, &target_tx->modes);
7518     __get_user(host_tx->offset, &target_tx->offset);
7519     __get_user(host_tx->freq, &target_tx->freq);
7520     __get_user(host_tx->maxerror, &target_tx->maxerror);
7521     __get_user(host_tx->esterror, &target_tx->esterror);
7522     __get_user(host_tx->status, &target_tx->status);
7523     __get_user(host_tx->constant, &target_tx->constant);
7524     __get_user(host_tx->precision, &target_tx->precision);
7525     __get_user(host_tx->tolerance, &target_tx->tolerance);
7526     __get_user(host_tx->tick, &target_tx->tick);
7527     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7528     __get_user(host_tx->jitter, &target_tx->jitter);
7529     __get_user(host_tx->shift, &target_tx->shift);
7530     __get_user(host_tx->stabil, &target_tx->stabil);
7531     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7532     __get_user(host_tx->calcnt, &target_tx->calcnt);
7533     __get_user(host_tx->errcnt, &target_tx->errcnt);
7534     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7535     __get_user(host_tx->tai, &target_tx->tai);
7536 
7537     unlock_user_struct(target_tx, target_addr, 0);
7538     return 0;
7539 }
7540 
7541 static inline abi_long host_to_target_timex64(abi_long target_addr,
7542                                               struct timex *host_tx)
7543 {
7544     struct target__kernel_timex *target_tx;
7545 
7546     if (copy_to_user_timeval64(target_addr +
7547                                offsetof(struct target__kernel_timex, time),
7548                                &host_tx->time)) {
7549         return -TARGET_EFAULT;
7550     }
7551 
7552     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7553         return -TARGET_EFAULT;
7554     }
7555 
7556     __put_user(host_tx->modes, &target_tx->modes);
7557     __put_user(host_tx->offset, &target_tx->offset);
7558     __put_user(host_tx->freq, &target_tx->freq);
7559     __put_user(host_tx->maxerror, &target_tx->maxerror);
7560     __put_user(host_tx->esterror, &target_tx->esterror);
7561     __put_user(host_tx->status, &target_tx->status);
7562     __put_user(host_tx->constant, &target_tx->constant);
7563     __put_user(host_tx->precision, &target_tx->precision);
7564     __put_user(host_tx->tolerance, &target_tx->tolerance);
7565     __put_user(host_tx->tick, &target_tx->tick);
7566     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7567     __put_user(host_tx->jitter, &target_tx->jitter);
7568     __put_user(host_tx->shift, &target_tx->shift);
7569     __put_user(host_tx->stabil, &target_tx->stabil);
7570     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7571     __put_user(host_tx->calcnt, &target_tx->calcnt);
7572     __put_user(host_tx->errcnt, &target_tx->errcnt);
7573     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7574     __put_user(host_tx->tai, &target_tx->tai);
7575 
7576     unlock_user_struct(target_tx, target_addr, 1);
7577     return 0;
7578 }
7579 #endif
7580 
7581 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7582 #define sigev_notify_thread_id _sigev_un._tid
7583 #endif
7584 
7585 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7586                                                abi_ulong target_addr)
7587 {
7588     struct target_sigevent *target_sevp;
7589 
7590     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7591         return -TARGET_EFAULT;
7592     }
7593 
7594     /* This union is awkward on 64 bit systems because it has a 32 bit
7595      * integer and a pointer in it; we follow the conversion approach
7596      * used for handling sigval types in signal.c so the guest should get
7597      * the correct value back even if we did a 64 bit byteswap and it's
7598      * using the 32 bit integer.
7599      */
7600     host_sevp->sigev_value.sival_ptr =
7601         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7602     host_sevp->sigev_signo =
7603         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7604     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7605     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7606 
7607     unlock_user_struct(target_sevp, target_addr, 1);
7608     return 0;
7609 }
7610 
7611 #if defined(TARGET_NR_mlockall)
7612 static inline int target_to_host_mlockall_arg(int arg)
7613 {
7614     int result = 0;
7615 
7616     if (arg & TARGET_MCL_CURRENT) {
7617         result |= MCL_CURRENT;
7618     }
7619     if (arg & TARGET_MCL_FUTURE) {
7620         result |= MCL_FUTURE;
7621     }
7622 #ifdef MCL_ONFAULT
7623     if (arg & TARGET_MCL_ONFAULT) {
7624         result |= MCL_ONFAULT;
7625     }
7626 #endif
7627 
7628     return result;
7629 }
7630 #endif
7631 
7632 static inline int target_to_host_msync_arg(abi_long arg)
7633 {
7634     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7635            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7636            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7637            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7638 }
7639 
7640 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7641      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7642      defined(TARGET_NR_newfstatat))
7643 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7644                                              abi_ulong target_addr,
7645                                              struct stat *host_st)
7646 {
7647 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7648     if (cpu_env->eabi) {
7649         struct target_eabi_stat64 *target_st;
7650 
7651         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7652             return -TARGET_EFAULT;
7653         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7654         __put_user(host_st->st_dev, &target_st->st_dev);
7655         __put_user(host_st->st_ino, &target_st->st_ino);
7656 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7657         __put_user(host_st->st_ino, &target_st->__st_ino);
7658 #endif
7659         __put_user(host_st->st_mode, &target_st->st_mode);
7660         __put_user(host_st->st_nlink, &target_st->st_nlink);
7661         __put_user(host_st->st_uid, &target_st->st_uid);
7662         __put_user(host_st->st_gid, &target_st->st_gid);
7663         __put_user(host_st->st_rdev, &target_st->st_rdev);
7664         __put_user(host_st->st_size, &target_st->st_size);
7665         __put_user(host_st->st_blksize, &target_st->st_blksize);
7666         __put_user(host_st->st_blocks, &target_st->st_blocks);
7667         __put_user(host_st->st_atime, &target_st->target_st_atime);
7668         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7669         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7670 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7671         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7672         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7673         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7674 #endif
7675         unlock_user_struct(target_st, target_addr, 1);
7676     } else
7677 #endif
7678     {
7679 #if defined(TARGET_HAS_STRUCT_STAT64)
7680         struct target_stat64 *target_st;
7681 #else
7682         struct target_stat *target_st;
7683 #endif
7684 
7685         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7686             return -TARGET_EFAULT;
7687         memset(target_st, 0, sizeof(*target_st));
7688         __put_user(host_st->st_dev, &target_st->st_dev);
7689         __put_user(host_st->st_ino, &target_st->st_ino);
7690 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7691         __put_user(host_st->st_ino, &target_st->__st_ino);
7692 #endif
7693         __put_user(host_st->st_mode, &target_st->st_mode);
7694         __put_user(host_st->st_nlink, &target_st->st_nlink);
7695         __put_user(host_st->st_uid, &target_st->st_uid);
7696         __put_user(host_st->st_gid, &target_st->st_gid);
7697         __put_user(host_st->st_rdev, &target_st->st_rdev);
7698         /* XXX: better use of kernel struct */
7699         __put_user(host_st->st_size, &target_st->st_size);
7700         __put_user(host_st->st_blksize, &target_st->st_blksize);
7701         __put_user(host_st->st_blocks, &target_st->st_blocks);
7702         __put_user(host_st->st_atime, &target_st->target_st_atime);
7703         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7704         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7705 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7706         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7707         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7708         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7709 #endif
7710         unlock_user_struct(target_st, target_addr, 1);
7711     }
7712 
7713     return 0;
7714 }
7715 #endif
7716 
7717 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7718 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7719                                             abi_ulong target_addr)
7720 {
7721     struct target_statx *target_stx;
7722 
7723     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7724         return -TARGET_EFAULT;
7725     }
7726     memset(target_stx, 0, sizeof(*target_stx));
7727 
7728     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7729     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7730     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7731     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7732     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7733     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7734     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7735     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7736     __put_user(host_stx->stx_size, &target_stx->stx_size);
7737     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7738     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7739     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7740     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7741     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7742     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7743     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7744     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7745     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7746     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7747     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7748     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7749     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7750     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7751 
7752     unlock_user_struct(target_stx, target_addr, 1);
7753 
7754     return 0;
7755 }
7756 #endif
7757 
7758 static int do_sys_futex(int *uaddr, int op, int val,
7759                          const struct timespec *timeout, int *uaddr2,
7760                          int val3)
7761 {
7762 #if HOST_LONG_BITS == 64
7763 #if defined(__NR_futex)
7764     /* A 64-bit host always has a 64-bit time_t; no _time64 variant is defined. */
7765     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7766 
7767 #endif
7768 #else /* HOST_LONG_BITS == 64 */
7769 #if defined(__NR_futex_time64)
7770     if (sizeof(timeout->tv_sec) == 8) {
7771         /* _time64 function on 32bit arch */
7772         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7773     }
7774 #endif
7775 #if defined(__NR_futex)
7776     /* old function on 32bit arch */
7777     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7778 #endif
7779 #endif /* HOST_LONG_BITS == 64 */
7780     g_assert_not_reached();
7781 }
7782 
7783 static int do_safe_futex(int *uaddr, int op, int val,
7784                          const struct timespec *timeout, int *uaddr2,
7785                          int val3)
7786 {
7787 #if HOST_LONG_BITS == 64
7788 #if defined(__NR_futex)
7789     /* A 64-bit host always has a 64-bit time_t; no _time64 variant is defined. */
7790     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7791 #endif
7792 #else /* HOST_LONG_BITS == 64 */
7793 #if defined(__NR_futex_time64)
7794     if (sizeof(timeout->tv_sec) == 8) {
7795         /* _time64 function on 32bit arch */
7796         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7797                                            val3));
7798     }
7799 #endif
7800 #if defined(__NR_futex)
7801     /* old function on 32bit arch */
7802     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7803 #endif
7804 #endif /* HOST_LONG_BITS == 64 */
7805     return -TARGET_ENOSYS;
7806 }
7807 
7808 /* ??? Using host futex calls even when target atomic operations
7809    are not really atomic probably breaks things.  However, implementing
7810    futexes locally would make futexes shared between multiple processes
7811    tricky.  In any case they are probably useless, because guest atomic
7812    operations won't work either.  */
7813 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7814 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7815                     int op, int val, target_ulong timeout,
7816                     target_ulong uaddr2, int val3)
7817 {
7818     struct timespec ts, *pts = NULL;
7819     void *haddr2 = NULL;
7820     int base_op;
7821 
7822     /* We assume FUTEX_* constants are the same on both host and target. */
7823 #ifdef FUTEX_CMD_MASK
7824     base_op = op & FUTEX_CMD_MASK;
7825 #else
7826     base_op = op;
7827 #endif
7828     switch (base_op) {
7829     case FUTEX_WAIT:
7830     case FUTEX_WAIT_BITSET:
7831         val = tswap32(val);
7832         break;
7833     case FUTEX_WAIT_REQUEUE_PI:
7834         val = tswap32(val);
7835         haddr2 = g2h(cpu, uaddr2);
7836         break;
7837     case FUTEX_LOCK_PI:
7838     case FUTEX_LOCK_PI2:
7839         break;
7840     case FUTEX_WAKE:
7841     case FUTEX_WAKE_BITSET:
7842     case FUTEX_TRYLOCK_PI:
7843     case FUTEX_UNLOCK_PI:
7844         timeout = 0;
7845         break;
7846     case FUTEX_FD:
7847         val = target_to_host_signal(val);
7848         timeout = 0;
7849         break;
7850     case FUTEX_CMP_REQUEUE:
7851     case FUTEX_CMP_REQUEUE_PI:
7852         val3 = tswap32(val3);
7853         /* fall through */
7854     case FUTEX_REQUEUE:
7855     case FUTEX_WAKE_OP:
7856         /*
7857          * For these, the 4th argument is not TIMEOUT, but VAL2.
7858          * But the prototype of do_safe_futex takes a pointer, so
7859          * insert casts to satisfy the compiler.  We do not need
7860          * to tswap VAL2 since it's not compared to guest memory.
7861           */
7862         pts = (struct timespec *)(uintptr_t)timeout;
7863         timeout = 0;
7864         haddr2 = g2h(cpu, uaddr2);
7865         break;
7866     default:
7867         return -TARGET_ENOSYS;
7868     }
7869     if (timeout) {
7870         pts = &ts;
7871         if (time64
7872             ? target_to_host_timespec64(pts, timeout)
7873             : target_to_host_timespec(pts, timeout)) {
7874             return -TARGET_EFAULT;
7875         }
7876     }
7877     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7878 }
7879 #endif
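/*
 * Worked example of the VAL2 handling above (illustrative, with made-up
 * numbers): a guest call
 *
 *     futex(uaddr, FUTEX_CMP_REQUEUE, 1, 5, uaddr2, expected)
 *
 * reaches do_futex() with timeout == 5.  Since 5 is a count of waiters
 * to requeue rather than a guest address, no timespec conversion is
 * done; the value is cast to (struct timespec *)(uintptr_t)5 and the
 * host kernel reinterprets that pointer argument as the integer 5.
 * Only val3 is byte-swapped, because it is compared against guest
 * memory.
 */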
7880 
7881 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7882 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7883                                      abi_long handle, abi_long mount_id,
7884                                      abi_long flags)
7885 {
7886     struct file_handle *target_fh;
7887     struct file_handle *fh;
7888     int mid = 0;
7889     abi_long ret;
7890     char *name;
7891     unsigned int size, total_size;
7892 
7893     if (get_user_s32(size, handle)) {
7894         return -TARGET_EFAULT;
7895     }
7896 
7897     name = lock_user_string(pathname);
7898     if (!name) {
7899         return -TARGET_EFAULT;
7900     }
7901 
7902     total_size = sizeof(struct file_handle) + size;
7903     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7904     if (!target_fh) {
7905         unlock_user(name, pathname, 0);
7906         return -TARGET_EFAULT;
7907     }
7908 
7909     fh = g_malloc0(total_size);
7910     fh->handle_bytes = size;
7911 
7912     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7913     unlock_user(name, pathname, 0);
7914 
7915     /* man name_to_handle_at(2):
7916      * Other than the use of the handle_bytes field, the caller should treat
7917      * the file_handle structure as an opaque data type
7918      */
7919 
7920     memcpy(target_fh, fh, total_size);
7921     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7922     target_fh->handle_type = tswap32(fh->handle_type);
7923     g_free(fh);
7924     unlock_user(target_fh, handle, total_size);
7925 
7926     if (put_user_s32(mid, mount_id)) {
7927         return -TARGET_EFAULT;
7928     }
7929 
7930     return ret;
7931 
7932 }
7933 #endif
7934 
7935 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7936 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7937                                      abi_long flags)
7938 {
7939     struct file_handle *target_fh;
7940     struct file_handle *fh;
7941     unsigned int size, total_size;
7942     abi_long ret;
7943 
7944     if (get_user_s32(size, handle)) {
7945         return -TARGET_EFAULT;
7946     }
7947 
7948     total_size = sizeof(struct file_handle) + size;
7949     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7950     if (!target_fh) {
7951         return -TARGET_EFAULT;
7952     }
7953 
7954     fh = g_memdup(target_fh, total_size);
7955     fh->handle_bytes = size;
7956     fh->handle_type = tswap32(target_fh->handle_type);
7957 
7958     ret = get_errno(open_by_handle_at(mount_fd, fh,
7959                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7960 
7961     g_free(fh);
7962 
7963     unlock_user(target_fh, handle, total_size);
7964 
7965     return ret;
7966 }
7967 #endif
7968 
7969 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7970 
7971 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7972 {
7973     int host_flags;
7974     target_sigset_t *target_mask;
7975     sigset_t host_mask;
7976     abi_long ret;
7977 
7978     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7979         return -TARGET_EINVAL;
7980     }
7981     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7982         return -TARGET_EFAULT;
7983     }
7984 
7985     target_to_host_sigset(&host_mask, target_mask);
7986 
7987     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7988 
7989     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7990     if (ret >= 0) {
7991         fd_trans_register(ret, &target_signalfd_trans);
7992     }
7993 
7994     unlock_user_struct(target_mask, mask, 0);
7995 
7996     return ret;
7997 }
7998 #endif
7999 
8000 /* Map host to target signal numbers for the wait family of syscalls.
8001    Assume all other status bits are the same.  */
8002 int host_to_target_waitstatus(int status)
8003 {
8004     if (WIFSIGNALED(status)) {
8005         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8006     }
8007     if (WIFSTOPPED(status)) {
8008         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8009                | (status & 0xff);
8010     }
8011     return status;
8012 }
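/*
 * Worked example (illustrative): a child killed by a host signal with a
 * core dump has status (host_signal | 0x80).  The low 7 bits are
 * replaced with the target's number for that signal and everything
 * else, including the 0x80 core-dump flag, survives via
 * (status & ~0x7f).  A stopped child has status
 * (host_signal << 8) | 0x7f, so the translated signal lands in bits
 * 8..15 while (status & 0xff) keeps the 0x7f stop marker.
 */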
8013 
8014 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8015 {
8016     CPUState *cpu = env_cpu(cpu_env);
8017     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8018     int i;
8019 
8020     for (i = 0; i < bprm->argc; i++) {
8021         size_t len = strlen(bprm->argv[i]) + 1;
8022 
8023         if (write(fd, bprm->argv[i], len) != len) {
8024             return -1;
8025         }
8026     }
8027 
8028     return 0;
8029 }
8030 
8031 struct open_self_maps_data {
8032     TaskState *ts;
8033     IntervalTreeRoot *host_maps;
8034     int fd;
8035     bool smaps;
8036 };
8037 
8038 /*
8039  * Subroutine to output one line of /proc/self/maps,
8040  * or one region of /proc/self/smaps.
8041  */
8042 
8043 #ifdef TARGET_HPPA
8044 # define test_stack(S, E, L)  (E == L)
8045 #else
8046 # define test_stack(S, E, L)  (S == L)
8047 #endif
8048 
8049 static void open_self_maps_4(const struct open_self_maps_data *d,
8050                              const MapInfo *mi, abi_ptr start,
8051                              abi_ptr end, unsigned flags)
8052 {
8053     const struct image_info *info = d->ts->info;
8054     const char *path = mi->path;
8055     uint64_t offset;
8056     int fd = d->fd;
8057     int count;
8058 
8059     if (test_stack(start, end, info->stack_limit)) {
8060         path = "[stack]";
8061     } else if (start == info->brk) {
8062         path = "[heap]";
8063     } else if (start == info->vdso) {
8064         path = "[vdso]";
8065 #ifdef TARGET_X86_64
8066     } else if (start == TARGET_VSYSCALL_PAGE) {
8067         path = "[vsyscall]";
8068 #endif
8069     }
8070 
8071     /* Except for the null device (MAP_ANON), adjust this fragment's offset. */
8072     offset = mi->offset;
8073     if (mi->dev) {
8074         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8075         offset += hstart - mi->itree.start;
8076     }
8077 
8078     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8079                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8080                     start, end,
8081                     (flags & PAGE_READ) ? 'r' : '-',
8082                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8083                     (flags & PAGE_EXEC) ? 'x' : '-',
8084                     mi->is_priv ? 'p' : 's',
8085                     offset, major(mi->dev), minor(mi->dev),
8086                     (uint64_t)mi->inode);
8087     if (path) {
8088         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8089     } else {
8090         dprintf(fd, "\n");
8091     }
8092 
8093     if (d->smaps) {
8094         unsigned long size = end - start;
8095         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8096         unsigned long size_kb = size >> 10;
8097 
8098         dprintf(fd, "Size:                  %lu kB\n"
8099                 "KernelPageSize:        %lu kB\n"
8100                 "MMUPageSize:           %lu kB\n"
8101                 "Rss:                   0 kB\n"
8102                 "Pss:                   0 kB\n"
8103                 "Pss_Dirty:             0 kB\n"
8104                 "Shared_Clean:          0 kB\n"
8105                 "Shared_Dirty:          0 kB\n"
8106                 "Private_Clean:         0 kB\n"
8107                 "Private_Dirty:         0 kB\n"
8108                 "Referenced:            0 kB\n"
8109                 "Anonymous:             %lu kB\n"
8110                 "LazyFree:              0 kB\n"
8111                 "AnonHugePages:         0 kB\n"
8112                 "ShmemPmdMapped:        0 kB\n"
8113                 "FilePmdMapped:         0 kB\n"
8114                 "Shared_Hugetlb:        0 kB\n"
8115                 "Private_Hugetlb:       0 kB\n"
8116                 "Swap:                  0 kB\n"
8117                 "SwapPss:               0 kB\n"
8118                 "Locked:                0 kB\n"
8119                 "THPeligible:    0\n"
8120                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8121                 size_kb, page_size_kb, page_size_kb,
8122                 (flags & PAGE_ANON ? size_kb : 0),
8123                 (flags & PAGE_READ) ? " rd" : "",
8124                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8125                 (flags & PAGE_EXEC) ? " ex" : "",
8126                 mi->is_priv ? "" : " sh",
8127                 (flags & PAGE_READ) ? " mr" : "",
8128                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8129                 (flags & PAGE_EXEC) ? " me" : "",
8130                 mi->is_priv ? "" : " ms");
8131     }
8132 }
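/*
 * Example of one maps line produced above (values are illustrative):
 *
 *   00400000-00452000 r-xp 00000000 08:02 173521    /usr/bin/guest-app
 *
 * i.e. start-end, r/w/x permissions plus 'p' or 's' for private versus
 * shared, the file offset of this fragment, device major:minor, inode,
 * and finally the path (or "[stack]", "[heap]", "[vdso]"), padded with
 * (73 - count) spaces so the name column lines up.
 */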
8133 
8134 /*
8135  * Callback for walk_memory_regions, when read_self_maps() fails.
8136  * Proceed without the benefit of host /proc/self/maps cross-check.
8137  */
8138 static int open_self_maps_3(void *opaque, vaddr guest_start,
8139                             vaddr guest_end, int flags)
8140 {
8141     static const MapInfo mi = { .is_priv = true };
8142 
8143     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8144     return 0;
8145 }
8146 
8147 /*
8148  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8149  */
8150 static int open_self_maps_2(void *opaque, vaddr guest_start,
8151                             vaddr guest_end, int flags)
8152 {
8153     const struct open_self_maps_data *d = opaque;
8154     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8155     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8156 
8157 #ifdef TARGET_X86_64
8158     /*
8159      * Because of the extremely high position of the page within the guest
8160      * virtual address space, this is not backed by host memory at all.
8161      * Therefore the loop below would fail.  This is the only instance
8162      * of not having host backing memory.
8163      */
8164     if (guest_start == TARGET_VSYSCALL_PAGE) {
8165         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8166     }
8167 #endif
8168 
8169     while (1) {
8170         IntervalTreeNode *n =
8171             interval_tree_iter_first(d->host_maps, host_start, host_start);
8172         MapInfo *mi = container_of(n, MapInfo, itree);
8173         uintptr_t this_hlast = MIN(host_last, n->last);
8174         target_ulong this_gend = h2g(this_hlast) + 1;
8175 
8176         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8177 
8178         if (this_hlast == host_last) {
8179             return 0;
8180         }
8181         host_start = this_hlast + 1;
8182         guest_start = h2g(host_start);
8183     }
8184 }
8185 
8186 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8187 {
8188     struct open_self_maps_data d = {
8189         .ts = get_task_state(env_cpu(env)),
8190         .fd = fd,
8191         .smaps = smaps
8192     };
8193 
8194     mmap_lock();
8195     d.host_maps = read_self_maps();
8196     if (d.host_maps) {
8197         walk_memory_regions(&d, open_self_maps_2);
8198         free_self_maps(d.host_maps);
8199     } else {
8200         walk_memory_regions(&d, open_self_maps_3);
8201     }
8202     mmap_unlock();
8203     return 0;
8204 }
8205 
8206 static int open_self_maps(CPUArchState *cpu_env, int fd)
8207 {
8208     return open_self_maps_1(cpu_env, fd, false);
8209 }
8210 
8211 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8212 {
8213     return open_self_maps_1(cpu_env, fd, true);
8214 }
8215 
8216 static int open_self_stat(CPUArchState *cpu_env, int fd)
8217 {
8218     CPUState *cpu = env_cpu(cpu_env);
8219     TaskState *ts = get_task_state(cpu);
8220     g_autoptr(GString) buf = g_string_new(NULL);
8221     int i;
8222 
8223     for (i = 0; i < 44; i++) {
8224         if (i == 0) {
8225             /* pid */
8226             g_string_printf(buf, FMT_pid " ", getpid());
8227         } else if (i == 1) {
8228             /* app name */
8229             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8230             bin = bin ? bin + 1 : ts->bprm->argv[0];
8231             g_string_printf(buf, "(%.15s) ", bin);
8232         } else if (i == 2) {
8233             /* task state */
8234             g_string_assign(buf, "R "); /* we are running right now */
8235         } else if (i == 3) {
8236             /* ppid */
8237             g_string_printf(buf, FMT_pid " ", getppid());
8238         } else if (i == 19) {
8239             /* num_threads */
8240             int cpus = 0;
8241             WITH_RCU_READ_LOCK_GUARD() {
8242                 CPUState *cpu_iter;
8243                 CPU_FOREACH(cpu_iter) {
8244                     cpus++;
8245                 }
8246             }
8247             g_string_printf(buf, "%d ", cpus);
8248         } else if (i == 21) {
8249             /* starttime */
8250             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8251         } else if (i == 27) {
8252             /* stack bottom */
8253             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8254         } else {
8255             /* for the rest, there is MasterCard */
8256             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8257         }
8258 
8259         if (write(fd, buf->str, buf->len) != buf->len) {
8260             return -1;
8261         }
8262     }
8263 
8264     return 0;
8265 }
8266 
8267 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8268 {
8269     CPUState *cpu = env_cpu(cpu_env);
8270     TaskState *ts = get_task_state(cpu);
8271     abi_ulong auxv = ts->info->saved_auxv;
8272     abi_ulong len = ts->info->auxv_len;
8273     char *ptr;
8274 
8275     /*
8276      * The auxiliary vector is stored on the target process stack.
8277      * Read in the whole auxv vector and copy it to the file.
8278      */
8279     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8280     if (ptr != NULL) {
8281         while (len > 0) {
8282             ssize_t r;
8283             r = write(fd, ptr, len);
8284             if (r <= 0) {
8285                 break;
8286             }
8287             len -= r;
8288             ptr += r;
8289         }
8290         lseek(fd, 0, SEEK_SET);
8291         unlock_user(ptr, auxv, len);
8292     }
8293 
8294     return 0;
8295 }
8296 
8297 static int is_proc_myself(const char *filename, const char *entry)
8298 {
8299     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8300         filename += strlen("/proc/");
8301         if (!strncmp(filename, "self/", strlen("self/"))) {
8302             filename += strlen("self/");
8303         } else if (*filename >= '1' && *filename <= '9') {
8304             char myself[80];
8305             snprintf(myself, sizeof(myself), "%d/", getpid());
8306             if (!strncmp(filename, myself, strlen(myself))) {
8307                 filename += strlen(myself);
8308             } else {
8309                 return 0;
8310             }
8311         } else {
8312             return 0;
8313         }
8314         if (!strcmp(filename, entry)) {
8315             return 1;
8316         }
8317     }
8318     return 0;
8319 }
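/*
 * Usage sketch (illustrative): is_proc_myself("/proc/self/exe", "exe")
 * returns 1, as does is_proc_myself("/proc/1234/exe", "exe") when 1234
 * is this process's pid.  Any other pid, or a path outside /proc/,
 * returns 0.
 */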
8320 
8321 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8322                       const char *fmt, int code)
8323 {
8324     if (logfile) {
8325         CPUState *cs = env_cpu(env);
8326 
8327         fprintf(logfile, fmt, code);
8328         fprintf(logfile, "Failing executable: %s\n", exec_path);
8329         cpu_dump_state(cs, logfile, 0);
8330         open_self_maps(env, fileno(logfile));
8331     }
8332 }
8333 
8334 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8335 {
8336     /* dump to console */
8337     excp_dump_file(stderr, env, fmt, code);
8338 
8339     /* dump to log file */
8340     if (qemu_log_separate()) {
8341         FILE *logfile = qemu_log_trylock();
8342 
8343         excp_dump_file(logfile, env, fmt, code);
8344         qemu_log_unlock(logfile);
8345     }
8346 }
8347 
8348 #include "target_proc.h"
8349 
8350 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8351     defined(HAVE_ARCH_PROC_CPUINFO) || \
8352     defined(HAVE_ARCH_PROC_HARDWARE)
8353 static int is_proc(const char *filename, const char *entry)
8354 {
8355     return strcmp(filename, entry) == 0;
8356 }
8357 #endif
8358 
8359 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8360 static int open_net_route(CPUArchState *cpu_env, int fd)
8361 {
8362     FILE *fp;
8363     char *line = NULL;
8364     size_t len = 0;
8365     ssize_t read;
8366 
8367     fp = fopen("/proc/net/route", "r");
8368     if (fp == NULL) {
8369         return -1;
8370     }
8371 
8372     /* read header */
8373 
8374     read = getline(&line, &len, fp);
8375     dprintf(fd, "%s", line);
8376 
8377     /* read routes */
8378 
8379     while ((read = getline(&line, &len, fp)) != -1) {
8380         char iface[16];
8381         uint32_t dest, gw, mask;
8382         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8383         int fields;
8384 
8385         fields = sscanf(line,
8386                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8387                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8388                         &mask, &mtu, &window, &irtt);
8389         if (fields != 11) {
8390             continue;
8391         }
8392         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8393                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8394                 metric, tswap32(mask), mtu, window, irtt);
8395     }
8396 
8397     free(line);
8398     fclose(fp);
8399 
8400     return 0;
8401 }
8402 #endif
8403 
8404 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8405                               const char *fname, int flags, mode_t mode,
8406                               int openat2_resolve, bool safe)
8407 {
8408     g_autofree char *proc_name = NULL;
8409     const char *pathname;
8410     struct fake_open {
8411         const char *filename;
8412         int (*fill)(CPUArchState *cpu_env, int fd);
8413         int (*cmp)(const char *s1, const char *s2);
8414     };
8415     const struct fake_open *fake_open;
8416     static const struct fake_open fakes[] = {
8417         { "maps", open_self_maps, is_proc_myself },
8418         { "smaps", open_self_smaps, is_proc_myself },
8419         { "stat", open_self_stat, is_proc_myself },
8420         { "auxv", open_self_auxv, is_proc_myself },
8421         { "cmdline", open_self_cmdline, is_proc_myself },
8422 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8423         { "/proc/net/route", open_net_route, is_proc },
8424 #endif
8425 #if defined(HAVE_ARCH_PROC_CPUINFO)
8426         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8427 #endif
8428 #if defined(HAVE_ARCH_PROC_HARDWARE)
8429         { "/proc/hardware", open_hardware, is_proc },
8430 #endif
8431         { NULL, NULL, NULL }
8432     };
8433 
8434     /* If this is a file from the /proc/ filesystem, expand the full name. */
8435     proc_name = realpath(fname, NULL);
8436     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8437         pathname = proc_name;
8438     } else {
8439         pathname = fname;
8440     }
8441 
8442     if (is_proc_myself(pathname, "exe")) {
8443         /* Honor openat2 resolve flags */
8444         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8445             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8446             errno = ELOOP;
8447             return -1;
8448         }
8449         if (safe) {
8450             return safe_openat(dirfd, exec_path, flags, mode);
8451         } else {
8452             return openat(dirfd, exec_path, flags, mode);
8453         }
8454     }
8455 
8456     for (fake_open = fakes; fake_open->filename; fake_open++) {
8457         if (fake_open->cmp(pathname, fake_open->filename)) {
8458             break;
8459         }
8460     }
8461 
8462     if (fake_open->filename) {
8463         const char *tmpdir;
8464         char filename[PATH_MAX];
8465         int fd, r;
8466 
8467         fd = memfd_create("qemu-open", 0);
8468         if (fd < 0) {
8469             if (errno != ENOSYS) {
8470                 return fd;
8471             }
8472             /* create temporary file to map stat to */
8473             tmpdir = getenv("TMPDIR");
8474             if (!tmpdir)
8475                 tmpdir = "/tmp";
8476             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8477             fd = mkstemp(filename);
8478             if (fd < 0) {
8479                 return fd;
8480             }
8481             unlink(filename);
8482         }
8483 
8484         if ((r = fake_open->fill(cpu_env, fd))) {
8485             int e = errno;
8486             close(fd);
8487             errno = e;
8488             return r;
8489         }
8490         lseek(fd, 0, SEEK_SET);
8491 
8492         return fd;
8493     }
8494 
8495     return -2;
8496 }
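/*
 * Return convention of maybe_do_fake_open(), relied on by the callers
 * below: -2 means "not a faked path, fall through to the real
 * openat/openat2"; any other negative value is a failure with errno
 * set; a non-negative value is a ready-to-use fd whose contents were
 * synthesised into a memfd (or an unlinked temporary file when
 * memfd_create() is not available).
 */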
8497 
8498 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8499                     int flags, mode_t mode, bool safe)
8500 {
8501     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8502     if (fd > -2) {
8503         return fd;
8504     }
8505 
8506     if (safe) {
8507         return safe_openat(dirfd, path(pathname), flags, mode);
8508     } else {
8509         return openat(dirfd, path(pathname), flags, mode);
8510     }
8511 }
8512 
8513 
8514 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8515                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8516                       abi_ulong guest_size)
8517 {
8518     struct open_how_ver0 how = {0};
8519     char *pathname;
8520     int ret;
8521 
8522     if (guest_size < sizeof(struct target_open_how_ver0)) {
8523         return -TARGET_EINVAL;
8524     }
8525     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8526     if (ret) {
8527         if (ret == -TARGET_E2BIG) {
8528             qemu_log_mask(LOG_UNIMP,
8529                           "Unimplemented openat2 open_how size: "
8530                           TARGET_ABI_FMT_lu "\n", guest_size);
8531         }
8532         return ret;
8533     }
8534     pathname = lock_user_string(guest_pathname);
8535     if (!pathname) {
8536         return -TARGET_EFAULT;
8537     }
8538 
8539     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8540     how.mode = tswap64(how.mode);
8541     how.resolve = tswap64(how.resolve);
8542     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8543                                 how.resolve, true);
8544     if (fd > -2) {
8545         ret = get_errno(fd);
8546     } else {
8547         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8548                                      sizeof(struct open_how_ver0)));
8549     }
8550 
8551     fd_trans_unregister(ret);
8552     unlock_user(pathname, guest_pathname, 0);
8553     return ret;
8554 }
8555 
8556 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8557 {
8558     ssize_t ret;
8559 
8560     if (!pathname || !buf) {
8561         errno = EFAULT;
8562         return -1;
8563     }
8564 
8565     if (!bufsiz) {
8566         /* Short circuit this for the magic exe check. */
8567         errno = EINVAL;
8568         return -1;
8569     }
8570 
8571     if (is_proc_myself((const char *)pathname, "exe")) {
8572         /*
8573          * Don't worry about sign mismatch as earlier mapping
8574          * logic would have thrown a bad address error.
8575          */
8576         ret = MIN(strlen(exec_path), bufsiz);
8577         /* We cannot NUL terminate the string. */
8578         memcpy(buf, exec_path, ret);
8579     } else {
8580         ret = readlink(path(pathname), buf, bufsiz);
8581     }
8582 
8583     return ret;
8584 }
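/*
 * Example of the magic /proc/self/exe branch above (illustrative): with
 * exec_path "/usr/bin/guest-app" and bufsiz 4, only "/usr" is copied
 * and 4 is returned, without a trailing NUL -- the same truncation
 * behaviour readlink(2) has for a too-small buffer.
 */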
8585 
8586 static int do_execv(CPUArchState *cpu_env, int dirfd,
8587                     abi_long pathname, abi_long guest_argp,
8588                     abi_long guest_envp, int flags, bool is_execveat)
8589 {
8590     int ret;
8591     char **argp, **envp;
8592     int argc, envc;
8593     abi_ulong gp;
8594     abi_ulong addr;
8595     char **q;
8596     void *p;
8597 
8598     argc = 0;
8599 
8600     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8601         if (get_user_ual(addr, gp)) {
8602             return -TARGET_EFAULT;
8603         }
8604         if (!addr) {
8605             break;
8606         }
8607         argc++;
8608     }
8609     envc = 0;
8610     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8611         if (get_user_ual(addr, gp)) {
8612             return -TARGET_EFAULT;
8613         }
8614         if (!addr) {
8615             break;
8616         }
8617         envc++;
8618     }
8619 
8620     argp = g_new0(char *, argc + 1);
8621     envp = g_new0(char *, envc + 1);
8622 
8623     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8624         if (get_user_ual(addr, gp)) {
8625             goto execve_efault;
8626         }
8627         if (!addr) {
8628             break;
8629         }
8630         *q = lock_user_string(addr);
8631         if (!*q) {
8632             goto execve_efault;
8633         }
8634     }
8635     *q = NULL;
8636 
8637     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8638         if (get_user_ual(addr, gp)) {
8639             goto execve_efault;
8640         }
8641         if (!addr) {
8642             break;
8643         }
8644         *q = lock_user_string(addr);
8645         if (!*q) {
8646             goto execve_efault;
8647         }
8648     }
8649     *q = NULL;
8650 
8651     /*
8652      * Although execve() is not an interruptible syscall it is
8653      * a special case where we must use the safe_syscall wrapper:
8654      * if we allow a signal to happen before we make the host
8655      * syscall then we will 'lose' it, because at the point of
8656      * execve the process leaves QEMU's control. So we use the
8657      * safe syscall wrapper to ensure that we either take the
8658      * signal as a guest signal, or else it does not happen
8659      * before the execve completes and makes it the other
8660      * program's problem.
8661      */
8662     p = lock_user_string(pathname);
8663     if (!p) {
8664         goto execve_efault;
8665     }
8666 
8667     const char *exe = p;
8668     if (is_proc_myself(p, "exe")) {
8669         exe = exec_path;
8670     }
8671     ret = is_execveat
8672         ? safe_execveat(dirfd, exe, argp, envp, flags)
8673         : safe_execve(exe, argp, envp);
8674     ret = get_errno(ret);
8675 
8676     unlock_user(p, pathname, 0);
8677 
8678     goto execve_end;
8679 
8680 execve_efault:
8681     ret = -TARGET_EFAULT;
8682 
8683 execve_end:
8684     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8685         if (get_user_ual(addr, gp) || !addr) {
8686             break;
8687         }
8688         unlock_user(*q, addr, 0);
8689     }
8690     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8691         if (get_user_ual(addr, gp) || !addr) {
8692             break;
8693         }
8694         unlock_user(*q, addr, 0);
8695     }
8696 
8697     g_free(argp);
8698     g_free(envp);
8699     return ret;
8700 }
8701 
8702 #define TIMER_MAGIC 0x0caf0000
8703 #define TIMER_MAGIC_MASK 0xffff0000
8704 
8705 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8706 static target_timer_t get_timer_id(abi_long arg)
8707 {
8708     target_timer_t timerid = arg;
8709 
8710     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8711         return -TARGET_EINVAL;
8712     }
8713 
8714     timerid &= 0xffff;
8715 
8716     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8717         return -TARGET_EINVAL;
8718     }
8719 
8720     return timerid;
8721 }
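/*
 * Worked example (illustrative): a guest-visible timer ID of 0x0caf0003
 * passes the magic check (its top 16 bits equal TIMER_MAGIC) and
 * decodes to index 3.  Any value whose top 16 bits differ from
 * TIMER_MAGIC, or whose low 16 bits fall outside g_posix_timers,
 * yields -TARGET_EINVAL.
 */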
8722 
8723 static int target_to_host_cpu_mask(unsigned long *host_mask,
8724                                    size_t host_size,
8725                                    abi_ulong target_addr,
8726                                    size_t target_size)
8727 {
8728     unsigned target_bits = sizeof(abi_ulong) * 8;
8729     unsigned host_bits = sizeof(*host_mask) * 8;
8730     abi_ulong *target_mask;
8731     unsigned i, j;
8732 
8733     assert(host_size >= target_size);
8734 
8735     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8736     if (!target_mask) {
8737         return -TARGET_EFAULT;
8738     }
8739     memset(host_mask, 0, host_size);
8740 
8741     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8742         unsigned bit = i * target_bits;
8743         abi_ulong val;
8744 
8745         __get_user(val, &target_mask[i]);
8746         for (j = 0; j < target_bits; j++, bit++) {
8747             if (val & (1UL << j)) {
8748                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8749             }
8750         }
8751     }
8752 
8753     unlock_user(target_mask, target_addr, 0);
8754     return 0;
8755 }
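/*
 * Worked example of the bit remapping above (a sketch assuming a 32-bit
 * abi_ulong and a 64-bit host unsigned long): guest word i, bit j
 * describes CPU number i * 32 + j, so guest word 1, bit 5 (CPU 37)
 * sets bit 37 % 64 == 37 of host word 37 / 64 == 0.
 * host_to_target_cpu_mask() below applies the same index arithmetic in
 * the opposite direction.
 */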
8756 
8757 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8758                                    size_t host_size,
8759                                    abi_ulong target_addr,
8760                                    size_t target_size)
8761 {
8762     unsigned target_bits = sizeof(abi_ulong) * 8;
8763     unsigned host_bits = sizeof(*host_mask) * 8;
8764     abi_ulong *target_mask;
8765     unsigned i, j;
8766 
8767     assert(host_size >= target_size);
8768 
8769     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8770     if (!target_mask) {
8771         return -TARGET_EFAULT;
8772     }
8773 
8774     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8775         unsigned bit = i * target_bits;
8776         abi_ulong val = 0;
8777 
8778         for (j = 0; j < target_bits; j++, bit++) {
8779             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8780                 val |= 1UL << j;
8781             }
8782         }
8783         __put_user(val, &target_mask[i]);
8784     }
8785 
8786     unlock_user(target_mask, target_addr, target_size);
8787     return 0;
8788 }
8789 
8790 #ifdef TARGET_NR_getdents
8791 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8792 {
8793     g_autofree void *hdirp = NULL;
8794     void *tdirp;
8795     int hlen, hoff, toff;
8796     int hreclen, treclen;
8797     off_t prev_diroff = 0;
8798 
8799     hdirp = g_try_malloc(count);
8800     if (!hdirp) {
8801         return -TARGET_ENOMEM;
8802     }
8803 
8804 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8805     hlen = sys_getdents(dirfd, hdirp, count);
8806 #else
8807     hlen = sys_getdents64(dirfd, hdirp, count);
8808 #endif
8809 
8810     hlen = get_errno(hlen);
8811     if (is_error(hlen)) {
8812         return hlen;
8813     }
8814 
8815     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8816     if (!tdirp) {
8817         return -TARGET_EFAULT;
8818     }
8819 
8820     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8821 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8822         struct linux_dirent *hde = hdirp + hoff;
8823 #else
8824         struct linux_dirent64 *hde = hdirp + hoff;
8825 #endif
8826         struct target_dirent *tde = tdirp + toff;
8827         int namelen;
8828         uint8_t type;
8829 
8830         namelen = strlen(hde->d_name);
8831         hreclen = hde->d_reclen;
8832         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8833         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8834 
8835         if (toff + treclen > count) {
8836             /*
8837              * If the host struct is smaller than the target struct, or
8838              * requires less alignment and thus packs into less space,
8839              * then the host can return more entries than we can pass
8840              * on to the guest.
8841              */
8842             if (toff == 0) {
8843                 toff = -TARGET_EINVAL; /* result buffer is too small */
8844                 break;
8845             }
8846             /*
8847              * Return what we have, resetting the file pointer to the
8848              * location of the first record not returned.
8849              */
8850             lseek(dirfd, prev_diroff, SEEK_SET);
8851             break;
8852         }
8853 
8854         prev_diroff = hde->d_off;
8855         tde->d_ino = tswapal(hde->d_ino);
8856         tde->d_off = tswapal(hde->d_off);
8857         tde->d_reclen = tswap16(treclen);
8858         memcpy(tde->d_name, hde->d_name, namelen + 1);
8859 
8860         /*
8861          * The getdents type is in what was formerly a padding byte at the
8862          * end of the structure.
8863          */
8864 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8865         type = *((uint8_t *)hde + hreclen - 1);
8866 #else
8867         type = hde->d_type;
8868 #endif
8869         *((uint8_t *)tde + treclen - 1) = type;
8870     }
8871 
8872     unlock_user(tdirp, arg2, toff);
8873     return toff;
8874 }
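/*
 * Sizing example for the repacking above (a sketch assuming a 32-bit
 * ABI where abi_ulong is 4 bytes): for the name "hello", namelen is 5
 * and treclen is offsetof(struct target_dirent, d_name) (4 + 4 + 2 =
 * 10) + 5 + 2 = 17, rounded up to the 4-byte alignment of the struct,
 * i.e. 20 bytes.  The "+ 2" covers the NUL terminator plus the d_type
 * byte stored in the record's final byte.
 */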
8875 #endif /* TARGET_NR_getdents */
8876 
8877 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8878 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8879 {
8880     g_autofree void *hdirp = NULL;
8881     void *tdirp;
8882     int hlen, hoff, toff;
8883     int hreclen, treclen;
8884     off_t prev_diroff = 0;
8885 
8886     hdirp = g_try_malloc(count);
8887     if (!hdirp) {
8888         return -TARGET_ENOMEM;
8889     }
8890 
8891     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8892     if (is_error(hlen)) {
8893         return hlen;
8894     }
8895 
8896     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8897     if (!tdirp) {
8898         return -TARGET_EFAULT;
8899     }
8900 
8901     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8902         struct linux_dirent64 *hde = hdirp + hoff;
8903         struct target_dirent64 *tde = tdirp + toff;
8904         int namelen;
8905 
8906         namelen = strlen(hde->d_name) + 1;
8907         hreclen = hde->d_reclen;
8908         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8909         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8910 
8911         if (toff + treclen > count) {
8912             /*
8913              * If the host struct is smaller than the target struct, or
8914              * requires less alignment and thus packs into less space,
8915              * then the host can return more entries than we can pass
8916              * on to the guest.
8917              */
8918             if (toff == 0) {
8919                 toff = -TARGET_EINVAL; /* result buffer is too small */
8920                 break;
8921             }
8922             /*
8923              * Return what we have, resetting the file pointer to the
8924              * location of the first record not returned.
8925              */
8926             lseek(dirfd, prev_diroff, SEEK_SET);
8927             break;
8928         }
8929 
8930         prev_diroff = hde->d_off;
8931         tde->d_ino = tswap64(hde->d_ino);
8932         tde->d_off = tswap64(hde->d_off);
8933         tde->d_reclen = tswap16(treclen);
8934         tde->d_type = hde->d_type;
8935         memcpy(tde->d_name, hde->d_name, namelen);
8936     }
8937 
8938     unlock_user(tdirp, arg2, toff);
8939     return toff;
8940 }
8941 #endif /* TARGET_NR_getdents64 */
8942 
8943 #if defined(TARGET_NR_riscv_hwprobe)
8944 
8945 #define RISCV_HWPROBE_KEY_MVENDORID     0
8946 #define RISCV_HWPROBE_KEY_MARCHID       1
8947 #define RISCV_HWPROBE_KEY_MIMPID        2
8948 
8949 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8950 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8951 
8952 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8953 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8954 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8955 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8956 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8957 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8958 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8959 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8960 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8961 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8962 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8963 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8964 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8965 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8966 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8967 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8968 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8969 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8970 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8971 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8972 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8973 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8974 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8975 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8976 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8977 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8978 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8979 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8980 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8981 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8982 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8983 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8984 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8985 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8986 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8987 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8988 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8989 
8990 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8991 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8992 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8993 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8994 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8995 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8996 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8997 
8998 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8999 
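/*
 * Guest-facing key/value pair, equivalent to the kernel's
 * struct riscv_hwprobe (s64 key, u64 value) but declared with target
 * ABI types so it can be read and written directly in guest memory.
 */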
9000 struct riscv_hwprobe {
9001     abi_llong  key;
9002     abi_ullong value;
9003 };
9004 
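/*
 * Fill each guest-supplied {key, value} pair in place: recognised keys
 * get a value derived from the current CPU configuration, and pairs
 * with an unrecognised key have their key overwritten with -1.
 */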
9005 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9006                                     struct riscv_hwprobe *pair,
9007                                     size_t pair_count)
9008 {
9009     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9010 
9011     for (; pair_count > 0; pair_count--, pair++) {
9012         abi_llong key;
9013         abi_ullong value;
9014         __put_user(0, &pair->value);
9015         __get_user(key, &pair->key);
9016         switch (key) {
9017         case RISCV_HWPROBE_KEY_MVENDORID:
9018             __put_user(cfg->mvendorid, &pair->value);
9019             break;
9020         case RISCV_HWPROBE_KEY_MARCHID:
9021             __put_user(cfg->marchid, &pair->value);
9022             break;
9023         case RISCV_HWPROBE_KEY_MIMPID:
9024             __put_user(cfg->mimpid, &pair->value);
9025             break;
9026         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9027             value = riscv_has_ext(env, RVI) &&
9028                     riscv_has_ext(env, RVM) &&
9029                     riscv_has_ext(env, RVA) ?
9030                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9031             __put_user(value, &pair->value);
9032             break;
9033         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9034             value = riscv_has_ext(env, RVF) &&
9035                     riscv_has_ext(env, RVD) ?
9036                     RISCV_HWPROBE_IMA_FD : 0;
9037             value |= riscv_has_ext(env, RVC) ?
9038                      RISCV_HWPROBE_IMA_C : 0;
9039             value |= riscv_has_ext(env, RVV) ?
9040                      RISCV_HWPROBE_IMA_V : 0;
9041             value |= cfg->ext_zba ?
9042                      RISCV_HWPROBE_EXT_ZBA : 0;
9043             value |= cfg->ext_zbb ?
9044                      RISCV_HWPROBE_EXT_ZBB : 0;
9045             value |= cfg->ext_zbs ?
9046                      RISCV_HWPROBE_EXT_ZBS : 0;
9047             value |= cfg->ext_zicboz ?
9048                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9049             value |= cfg->ext_zbc ?
9050                      RISCV_HWPROBE_EXT_ZBC : 0;
9051             value |= cfg->ext_zbkb ?
9052                      RISCV_HWPROBE_EXT_ZBKB : 0;
9053             value |= cfg->ext_zbkc ?
9054                      RISCV_HWPROBE_EXT_ZBKC : 0;
9055             value |= cfg->ext_zbkx ?
9056                      RISCV_HWPROBE_EXT_ZBKX : 0;
9057             value |= cfg->ext_zknd ?
9058                      RISCV_HWPROBE_EXT_ZKND : 0;
9059             value |= cfg->ext_zkne ?
9060                      RISCV_HWPROBE_EXT_ZKNE : 0;
9061             value |= cfg->ext_zknh ?
9062                      RISCV_HWPROBE_EXT_ZKNH : 0;
9063             value |= cfg->ext_zksed ?
9064                      RISCV_HWPROBE_EXT_ZKSED : 0;
9065             value |= cfg->ext_zksh ?
9066                      RISCV_HWPROBE_EXT_ZKSH : 0;
9067             value |= cfg->ext_zkt ?
9068                      RISCV_HWPROBE_EXT_ZKT : 0;
9069             value |= cfg->ext_zvbb ?
9070                      RISCV_HWPROBE_EXT_ZVBB : 0;
9071             value |= cfg->ext_zvbc ?
9072                      RISCV_HWPROBE_EXT_ZVBC : 0;
9073             value |= cfg->ext_zvkb ?
9074                      RISCV_HWPROBE_EXT_ZVKB : 0;
9075             value |= cfg->ext_zvkg ?
9076                      RISCV_HWPROBE_EXT_ZVKG : 0;
9077             value |= cfg->ext_zvkned ?
9078                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9079             value |= cfg->ext_zvknha ?
9080                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9081             value |= cfg->ext_zvknhb ?
9082                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9083             value |= cfg->ext_zvksed ?
9084                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9085             value |= cfg->ext_zvksh ?
9086                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9087             value |= cfg->ext_zvkt ?
9088                      RISCV_HWPROBE_EXT_ZVKT : 0;
9089             value |= cfg->ext_zfh ?
9090                      RISCV_HWPROBE_EXT_ZFH : 0;
9091             value |= cfg->ext_zfhmin ?
9092                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9093             value |= cfg->ext_zihintntl ?
9094                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9095             value |= cfg->ext_zvfh ?
9096                      RISCV_HWPROBE_EXT_ZVFH : 0;
9097             value |= cfg->ext_zvfhmin ?
9098                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9099             value |= cfg->ext_zfa ?
9100                      RISCV_HWPROBE_EXT_ZFA : 0;
9101             value |= cfg->ext_ztso ?
9102                      RISCV_HWPROBE_EXT_ZTSO : 0;
9103             value |= cfg->ext_zacas ?
9104                      RISCV_HWPROBE_EXT_ZACAS : 0;
9105             value |= cfg->ext_zicond ?
9106                      RISCV_HWPROBE_EXT_ZICOND : 0;
9107             __put_user(value, &pair->value);
9108             break;
9109         case RISCV_HWPROBE_KEY_CPUPERF_0:
9110             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9111             break;
9112         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9113             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9114             __put_user(value, &pair->value);
9115             break;
9116         default:
9117             __put_user(-1, &pair->key);
9118             break;
9119         }
9120     }
9121 }
9122 
9123 /*
9124  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9125  * If the cpumask_t has no bits set: -EINVAL.
9126  * Otherwise (the cpumask_t has at least one bit set): 0.
9127  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9128  * nor bound the search by cpumask_size().
9129  */
9130 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9131 {
9132     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9133     int ret = -TARGET_EFAULT;
9134 
9135     if (p) {
9136         ret = -TARGET_EINVAL;
9137         /*
9138          * Since we only care about the empty/non-empty state of the cpumask_t
9139          * not the individual bits, we do not need to repartition the bits
9140          * from target abi_ulong to host unsigned long.
9141          *
9142          * Note that the kernel does not round up cpusetsize to a multiple of
9143          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9144          * it copies exactly cpusetsize bytes into a zeroed buffer.
9145          */
9146         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9147             if (p[i]) {
9148                 ret = 0;
9149                 break;
9150             }
9151         }
9152         unlock_user(p, target_cpus, 0);
9153     }
9154     return ret;
9155 }
9156 
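/*
 * riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags):
 * arg1 = guest pointer to the array of riscv_hwprobe pairs,
 * arg2 = number of pairs, arg3/arg4 = optional cpu set (size, pointer),
 * arg5 = flags, which must be zero.
 */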
9157 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9158                                  abi_long arg2, abi_long arg3,
9159                                  abi_long arg4, abi_long arg5)
9160 {
9161     int ret;
9162     struct riscv_hwprobe *host_pairs;
9163 
9164     /* flags must be 0 */
9165     if (arg5 != 0) {
9166         return -TARGET_EINVAL;
9167     }
9168 
9169     /* check cpu_set */
9170     if (arg3 != 0) {
9171         ret = nonempty_cpu_set(arg3, arg4);
9172         if (ret != 0) {
9173             return ret;
9174         }
9175     } else if (arg4 != 0) {
9176         return -TARGET_EINVAL;
9177     }
9178 
9179     /* no pairs */
9180     if (arg2 == 0) {
9181         return 0;
9182     }
9183 
9184     host_pairs = lock_user(VERIFY_WRITE, arg1,
9185                            sizeof(*host_pairs) * (size_t)arg2, 0);
9186     if (host_pairs == NULL) {
9187         return -TARGET_EFAULT;
9188     }
9189     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9190     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9191     return 0;
9192 }
9193 #endif /* TARGET_NR_riscv_hwprobe */
9194 
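/*
 * Raw host syscall stubs (via the _syscallN() macros) for syscalls that
 * may not have wrappers in the host C library.
 */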
9195 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9196 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9197 #endif
9198 
9199 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9200 #define __NR_sys_open_tree __NR_open_tree
9201 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9202           unsigned int, __flags)
9203 #endif
9204 
9205 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9206 #define __NR_sys_move_mount __NR_move_mount
9207 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9208            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9209 #endif
9210 
9211 /* This is an internal helper for do_syscall so that it is easier
9212  * to have a single return point, where actions such as logging
9213  * of syscall results can be performed.
9214  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9215  */
9216 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9217                             abi_long arg2, abi_long arg3, abi_long arg4,
9218                             abi_long arg5, abi_long arg6, abi_long arg7,
9219                             abi_long arg8)
9220 {
9221     CPUState *cpu = env_cpu(cpu_env);
9222     abi_long ret;
9223 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9224     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9225     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9226     || defined(TARGET_NR_statx)
9227     struct stat st;
9228 #endif
9229 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9230     || defined(TARGET_NR_fstatfs)
9231     struct statfs stfs;
9232 #endif
9233     void *p;
9234 
9235     switch(num) {
9236     case TARGET_NR_exit:
9237         /* In old applications this may be used to implement _exit(2).
9238            However, in threaded applications it is used for thread termination,
9239            and _exit_group is used for application termination.
9240            Do thread termination if we have more than one thread.  */
9241 
9242         if (block_signals()) {
9243             return -QEMU_ERESTARTSYS;
9244         }
9245 
9246         pthread_mutex_lock(&clone_lock);
9247 
9248         if (CPU_NEXT(first_cpu)) {
9249             TaskState *ts = get_task_state(cpu);
9250 
9251             if (ts->child_tidptr) {
9252                 put_user_u32(0, ts->child_tidptr);
9253                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9254                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9255             }
9256 
9257             object_unparent(OBJECT(cpu));
9258             object_unref(OBJECT(cpu));
9259             /*
9260              * At this point the CPU should be unrealized and removed
9261              * from cpu lists. We can clean-up the rest of the thread
9262              * data without the lock held.
9263              */
9264 
9265             pthread_mutex_unlock(&clone_lock);
9266 
9267             thread_cpu = NULL;
9268             g_free(ts);
9269             rcu_unregister_thread();
9270             pthread_exit(NULL);
9271         }
9272 
9273         pthread_mutex_unlock(&clone_lock);
9274         preexit_cleanup(cpu_env, arg1);
9275         _exit(arg1);
9276         return 0; /* avoid warning */
9277     case TARGET_NR_read:
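        /*
         * read(fd, NULL, 0) is valid and must not fault, so pass it
         * straight to the host rather than letting lock_user() on a
         * NULL guest pointer fail with -TARGET_EFAULT.
         */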
9278         if (arg2 == 0 && arg3 == 0) {
9279             return get_errno(safe_read(arg1, 0, 0));
9280         } else {
9281             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9282                 return -TARGET_EFAULT;
9283             ret = get_errno(safe_read(arg1, p, arg3));
9284             if (ret >= 0 &&
9285                 fd_trans_host_to_target_data(arg1)) {
9286                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9287             }
9288             unlock_user(p, arg2, ret);
9289         }
9290         return ret;
9291     case TARGET_NR_write:
9292         if (arg2 == 0 && arg3 == 0) {
9293             return get_errno(safe_write(arg1, 0, 0));
9294         }
9295         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9296             return -TARGET_EFAULT;
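        /*
         * If this fd has a data translator, it may modify the buffer, so
         * run it on a private copy rather than on the guest pages that
         * were locked read-only above.
         */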
9297         if (fd_trans_target_to_host_data(arg1)) {
9298             void *copy = g_malloc(arg3);
9299             memcpy(copy, p, arg3);
9300             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9301             if (ret >= 0) {
9302                 ret = get_errno(safe_write(arg1, copy, ret));
9303             }
9304             g_free(copy);
9305         } else {
9306             ret = get_errno(safe_write(arg1, p, arg3));
9307         }
9308         unlock_user(p, arg2, 0);
9309         return ret;
9310 
9311 #ifdef TARGET_NR_open
9312     case TARGET_NR_open:
9313         if (!(p = lock_user_string(arg1)))
9314             return -TARGET_EFAULT;
9315         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9316                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9317                                   arg3, true));
9318         fd_trans_unregister(ret);
9319         unlock_user(p, arg1, 0);
9320         return ret;
9321 #endif
9322     case TARGET_NR_openat:
9323         if (!(p = lock_user_string(arg2)))
9324             return -TARGET_EFAULT;
9325         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9326                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9327                                   arg4, true));
9328         fd_trans_unregister(ret);
9329         unlock_user(p, arg2, 0);
9330         return ret;
9331     case TARGET_NR_openat2:
9332         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9333         return ret;
9334 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9335     case TARGET_NR_name_to_handle_at:
9336         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9337         return ret;
9338 #endif
9339 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9340     case TARGET_NR_open_by_handle_at:
9341         ret = do_open_by_handle_at(arg1, arg2, arg3);
9342         fd_trans_unregister(ret);
9343         return ret;
9344 #endif
9345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9346     case TARGET_NR_pidfd_open:
9347         return get_errno(pidfd_open(arg1, arg2));
9348 #endif
9349 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9350     case TARGET_NR_pidfd_send_signal:
9351         {
9352             siginfo_t uinfo, *puinfo;
9353 
9354             if (arg3) {
9355                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9356                 if (!p) {
9357                     return -TARGET_EFAULT;
9358                  }
9359                  target_to_host_siginfo(&uinfo, p);
9360                  unlock_user(p, arg3, 0);
9361                  puinfo = &uinfo;
9362             } else {
9363                  puinfo = NULL;
9364             }
9365             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9366                                               puinfo, arg4));
9367         }
9368         return ret;
9369 #endif
9370 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9371     case TARGET_NR_pidfd_getfd:
9372         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9373 #endif
9374     case TARGET_NR_close:
9375         fd_trans_unregister(arg1);
9376         return get_errno(close(arg1));
9377 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9378     case TARGET_NR_close_range:
9379         ret = get_errno(sys_close_range(arg1, arg2, arg3));
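        /*
         * Unregister fd translators only for descriptors that were
         * really closed; CLOSE_RANGE_CLOEXEC merely marks them
         * close-on-exec, so their translators must be kept.
         */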
9380         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9381             abi_long fd, maxfd;
9382             maxfd = MIN(arg2, target_fd_max);
9383             for (fd = arg1; fd < maxfd; fd++) {
9384                 fd_trans_unregister(fd);
9385             }
9386         }
9387         return ret;
9388 #endif
9389 
9390     case TARGET_NR_brk:
9391         return do_brk(arg1);
9392 #ifdef TARGET_NR_fork
9393     case TARGET_NR_fork:
9394         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9395 #endif
9396 #ifdef TARGET_NR_waitpid
9397     case TARGET_NR_waitpid:
9398         {
9399             int status;
9400             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9401             if (!is_error(ret) && arg2 && ret
9402                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9403                 return -TARGET_EFAULT;
9404         }
9405         return ret;
9406 #endif
9407 #ifdef TARGET_NR_waitid
9408     case TARGET_NR_waitid:
9409         {
9410             struct rusage ru;
9411             siginfo_t info;
9412 
9413             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9414                                         arg4, (arg5 ? &ru : NULL)));
9415             if (!is_error(ret)) {
9416                 if (arg3) {
9417                     p = lock_user(VERIFY_WRITE, arg3,
9418                                   sizeof(target_siginfo_t), 0);
9419                     if (!p) {
9420                         return -TARGET_EFAULT;
9421                     }
9422                     host_to_target_siginfo(p, &info);
9423                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9424                 }
9425                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9426                     return -TARGET_EFAULT;
9427                 }
9428             }
9429         }
9430         return ret;
9431 #endif
9432 #ifdef TARGET_NR_creat /* not on alpha */
9433     case TARGET_NR_creat:
9434         if (!(p = lock_user_string(arg1)))
9435             return -TARGET_EFAULT;
9436         ret = get_errno(creat(p, arg2));
9437         fd_trans_unregister(ret);
9438         unlock_user(p, arg1, 0);
9439         return ret;
9440 #endif
9441 #ifdef TARGET_NR_link
9442     case TARGET_NR_link:
9443         {
9444             void * p2;
9445             p = lock_user_string(arg1);
9446             p2 = lock_user_string(arg2);
9447             if (!p || !p2)
9448                 ret = -TARGET_EFAULT;
9449             else
9450                 ret = get_errno(link(p, p2));
9451             unlock_user(p2, arg2, 0);
9452             unlock_user(p, arg1, 0);
9453         }
9454         return ret;
9455 #endif
9456 #if defined(TARGET_NR_linkat)
9457     case TARGET_NR_linkat:
9458         {
9459             void * p2 = NULL;
9460             if (!arg2 || !arg4)
9461                 return -TARGET_EFAULT;
9462             p  = lock_user_string(arg2);
9463             p2 = lock_user_string(arg4);
9464             if (!p || !p2)
9465                 ret = -TARGET_EFAULT;
9466             else
9467                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9468             unlock_user(p, arg2, 0);
9469             unlock_user(p2, arg4, 0);
9470         }
9471         return ret;
9472 #endif
9473 #ifdef TARGET_NR_unlink
9474     case TARGET_NR_unlink:
9475         if (!(p = lock_user_string(arg1)))
9476             return -TARGET_EFAULT;
9477         ret = get_errno(unlink(p));
9478         unlock_user(p, arg1, 0);
9479         return ret;
9480 #endif
9481 #if defined(TARGET_NR_unlinkat)
9482     case TARGET_NR_unlinkat:
9483         if (!(p = lock_user_string(arg2)))
9484             return -TARGET_EFAULT;
9485         ret = get_errno(unlinkat(arg1, p, arg3));
9486         unlock_user(p, arg2, 0);
9487         return ret;
9488 #endif
9489     case TARGET_NR_execveat:
9490         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9491     case TARGET_NR_execve:
9492         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9493     case TARGET_NR_chdir:
9494         if (!(p = lock_user_string(arg1)))
9495             return -TARGET_EFAULT;
9496         ret = get_errno(chdir(p));
9497         unlock_user(p, arg1, 0);
9498         return ret;
9499 #ifdef TARGET_NR_time
9500     case TARGET_NR_time:
9501         {
9502             time_t host_time;
9503             ret = get_errno(time(&host_time));
9504             if (!is_error(ret)
9505                 && arg1
9506                 && put_user_sal(host_time, arg1))
9507                 return -TARGET_EFAULT;
9508         }
9509         return ret;
9510 #endif
9511 #ifdef TARGET_NR_mknod
9512     case TARGET_NR_mknod:
9513         if (!(p = lock_user_string(arg1)))
9514             return -TARGET_EFAULT;
9515         ret = get_errno(mknod(p, arg2, arg3));
9516         unlock_user(p, arg1, 0);
9517         return ret;
9518 #endif
9519 #if defined(TARGET_NR_mknodat)
9520     case TARGET_NR_mknodat:
9521         if (!(p = lock_user_string(arg2)))
9522             return -TARGET_EFAULT;
9523         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9524         unlock_user(p, arg2, 0);
9525         return ret;
9526 #endif
9527 #ifdef TARGET_NR_chmod
9528     case TARGET_NR_chmod:
9529         if (!(p = lock_user_string(arg1)))
9530             return -TARGET_EFAULT;
9531         ret = get_errno(chmod(p, arg2));
9532         unlock_user(p, arg1, 0);
9533         return ret;
9534 #endif
9535 #ifdef TARGET_NR_lseek
9536     case TARGET_NR_lseek:
9537         return get_errno(lseek(arg1, arg2, arg3));
9538 #endif
9539 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9540     /* Alpha specific */
9541     case TARGET_NR_getxpid:
9542         cpu_env->ir[IR_A4] = getppid();
9543         return get_errno(getpid());
9544 #endif
9545 #ifdef TARGET_NR_getpid
9546     case TARGET_NR_getpid:
9547         return get_errno(getpid());
9548 #endif
9549     case TARGET_NR_mount:
9550         {
9551             /* need to look at the data field */
9552             void *p2, *p3;
9553 
9554             if (arg1) {
9555                 p = lock_user_string(arg1);
9556                 if (!p) {
9557                     return -TARGET_EFAULT;
9558                 }
9559             } else {
9560                 p = NULL;
9561             }
9562 
9563             p2 = lock_user_string(arg2);
9564             if (!p2) {
9565                 if (arg1) {
9566                     unlock_user(p, arg1, 0);
9567                 }
9568                 return -TARGET_EFAULT;
9569             }
9570 
9571             if (arg3) {
9572                 p3 = lock_user_string(arg3);
9573                 if (!p3) {
9574                     if (arg1) {
9575                         unlock_user(p, arg1, 0);
9576                     }
9577                     unlock_user(p2, arg2, 0);
9578                     return -TARGET_EFAULT;
9579                 }
9580             } else {
9581                 p3 = NULL;
9582             }
9583 
9584             /* FIXME - arg5 should be locked, but it isn't clear how to
9585              * do that since it's not guaranteed to be a NULL-terminated
9586              * string.
9587              */
9588             if (!arg5) {
9589                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9590             } else {
9591                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9592             }
9593             ret = get_errno(ret);
9594 
9595             if (arg1) {
9596                 unlock_user(p, arg1, 0);
9597             }
9598             unlock_user(p2, arg2, 0);
9599             if (arg3) {
9600                 unlock_user(p3, arg3, 0);
9601             }
9602         }
9603         return ret;
9604 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9605 #if defined(TARGET_NR_umount)
9606     case TARGET_NR_umount:
9607 #endif
9608 #if defined(TARGET_NR_oldumount)
9609     case TARGET_NR_oldumount:
9610 #endif
9611         if (!(p = lock_user_string(arg1)))
9612             return -TARGET_EFAULT;
9613         ret = get_errno(umount(p));
9614         unlock_user(p, arg1, 0);
9615         return ret;
9616 #endif
9617 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9618     case TARGET_NR_move_mount:
9619         {
9620             void *p2, *p4;
9621 
9622             if (!arg2 || !arg4) {
9623                 return -TARGET_EFAULT;
9624             }
9625 
9626             p2 = lock_user_string(arg2);
9627             if (!p2) {
9628                 return -TARGET_EFAULT;
9629             }
9630 
9631             p4 = lock_user_string(arg4);
9632             if (!p4) {
9633                 unlock_user(p2, arg2, 0);
9634                 return -TARGET_EFAULT;
9635             }
9636             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9637 
9638             unlock_user(p2, arg2, 0);
9639             unlock_user(p4, arg4, 0);
9640 
9641             return ret;
9642         }
9643 #endif
9644 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9645     case TARGET_NR_open_tree:
9646         {
9647             void *p2;
9648             int host_flags;
9649 
9650             if (!arg2) {
9651                 return -TARGET_EFAULT;
9652             }
9653 
9654             p2 = lock_user_string(arg2);
9655             if (!p2) {
9656                 return -TARGET_EFAULT;
9657             }
9658 
9659             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9660             if (arg3 & TARGET_O_CLOEXEC) {
9661                 host_flags |= O_CLOEXEC;
9662             }
9663 
9664             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9665 
9666             unlock_user(p2, arg2, 0);
9667 
9668             return ret;
9669         }
9670 #endif
9671 #ifdef TARGET_NR_stime /* not on alpha */
9672     case TARGET_NR_stime:
9673         {
9674             struct timespec ts;
9675             ts.tv_nsec = 0;
9676             if (get_user_sal(ts.tv_sec, arg1)) {
9677                 return -TARGET_EFAULT;
9678             }
9679             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9680         }
9681 #endif
9682 #ifdef TARGET_NR_alarm /* not on alpha */
9683     case TARGET_NR_alarm:
9684         return alarm(arg1);
9685 #endif
9686 #ifdef TARGET_NR_pause /* not on alpha */
9687     case TARGET_NR_pause:
9688         if (!block_signals()) {
9689             sigsuspend(&get_task_state(cpu)->signal_mask);
9690         }
9691         return -TARGET_EINTR;
9692 #endif
9693 #ifdef TARGET_NR_utime
9694     case TARGET_NR_utime:
9695         {
9696             struct utimbuf tbuf, *host_tbuf;
9697             struct target_utimbuf *target_tbuf;
9698             if (arg2) {
9699                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9700                     return -TARGET_EFAULT;
9701                 tbuf.actime = tswapal(target_tbuf->actime);
9702                 tbuf.modtime = tswapal(target_tbuf->modtime);
9703                 unlock_user_struct(target_tbuf, arg2, 0);
9704                 host_tbuf = &tbuf;
9705             } else {
9706                 host_tbuf = NULL;
9707             }
9708             if (!(p = lock_user_string(arg1)))
9709                 return -TARGET_EFAULT;
9710             ret = get_errno(utime(p, host_tbuf));
9711             unlock_user(p, arg1, 0);
9712         }
9713         return ret;
9714 #endif
9715 #ifdef TARGET_NR_utimes
9716     case TARGET_NR_utimes:
9717         {
9718             struct timeval *tvp, tv[2];
9719             if (arg2) {
9720                 if (copy_from_user_timeval(&tv[0], arg2)
9721                     || copy_from_user_timeval(&tv[1],
9722                                               arg2 + sizeof(struct target_timeval)))
9723                     return -TARGET_EFAULT;
9724                 tvp = tv;
9725             } else {
9726                 tvp = NULL;
9727             }
9728             if (!(p = lock_user_string(arg1)))
9729                 return -TARGET_EFAULT;
9730             ret = get_errno(utimes(p, tvp));
9731             unlock_user(p, arg1, 0);
9732         }
9733         return ret;
9734 #endif
9735 #if defined(TARGET_NR_futimesat)
9736     case TARGET_NR_futimesat:
9737         {
9738             struct timeval *tvp, tv[2];
9739             if (arg3) {
9740                 if (copy_from_user_timeval(&tv[0], arg3)
9741                     || copy_from_user_timeval(&tv[1],
9742                                               arg3 + sizeof(struct target_timeval)))
9743                     return -TARGET_EFAULT;
9744                 tvp = tv;
9745             } else {
9746                 tvp = NULL;
9747             }
9748             if (!(p = lock_user_string(arg2))) {
9749                 return -TARGET_EFAULT;
9750             }
9751             ret = get_errno(futimesat(arg1, path(p), tvp));
9752             unlock_user(p, arg2, 0);
9753         }
9754         return ret;
9755 #endif
9756 #ifdef TARGET_NR_access
9757     case TARGET_NR_access:
9758         if (!(p = lock_user_string(arg1))) {
9759             return -TARGET_EFAULT;
9760         }
9761         ret = get_errno(access(path(p), arg2));
9762         unlock_user(p, arg1, 0);
9763         return ret;
9764 #endif
9765 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9766     case TARGET_NR_faccessat:
9767         if (!(p = lock_user_string(arg2))) {
9768             return -TARGET_EFAULT;
9769         }
9770         ret = get_errno(faccessat(arg1, p, arg3, 0));
9771         unlock_user(p, arg2, 0);
9772         return ret;
9773 #endif
9774 #if defined(TARGET_NR_faccessat2)
9775     case TARGET_NR_faccessat2:
9776         if (!(p = lock_user_string(arg2))) {
9777             return -TARGET_EFAULT;
9778         }
9779         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9780         unlock_user(p, arg2, 0);
9781         return ret;
9782 #endif
9783 #ifdef TARGET_NR_nice /* not on alpha */
9784     case TARGET_NR_nice:
9785         return get_errno(nice(arg1));
9786 #endif
9787     case TARGET_NR_sync:
9788         sync();
9789         return 0;
9790 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9791     case TARGET_NR_syncfs:
9792         return get_errno(syncfs(arg1));
9793 #endif
9794     case TARGET_NR_kill:
9795         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9796 #ifdef TARGET_NR_rename
9797     case TARGET_NR_rename:
9798         {
9799             void *p2;
9800             p = lock_user_string(arg1);
9801             p2 = lock_user_string(arg2);
9802             if (!p || !p2)
9803                 ret = -TARGET_EFAULT;
9804             else
9805                 ret = get_errno(rename(p, p2));
9806             unlock_user(p2, arg2, 0);
9807             unlock_user(p, arg1, 0);
9808         }
9809         return ret;
9810 #endif
9811 #if defined(TARGET_NR_renameat)
9812     case TARGET_NR_renameat:
9813         {
9814             void *p2;
9815             p  = lock_user_string(arg2);
9816             p2 = lock_user_string(arg4);
9817             if (!p || !p2)
9818                 ret = -TARGET_EFAULT;
9819             else
9820                 ret = get_errno(renameat(arg1, p, arg3, p2));
9821             unlock_user(p2, arg4, 0);
9822             unlock_user(p, arg2, 0);
9823         }
9824         return ret;
9825 #endif
9826 #if defined(TARGET_NR_renameat2)
9827     case TARGET_NR_renameat2:
9828         {
9829             void *p2;
9830             p  = lock_user_string(arg2);
9831             p2 = lock_user_string(arg4);
9832             if (!p || !p2) {
9833                 ret = -TARGET_EFAULT;
9834             } else {
9835                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9836             }
9837             unlock_user(p2, arg4, 0);
9838             unlock_user(p, arg2, 0);
9839         }
9840         return ret;
9841 #endif
9842 #ifdef TARGET_NR_mkdir
9843     case TARGET_NR_mkdir:
9844         if (!(p = lock_user_string(arg1)))
9845             return -TARGET_EFAULT;
9846         ret = get_errno(mkdir(p, arg2));
9847         unlock_user(p, arg1, 0);
9848         return ret;
9849 #endif
9850 #if defined(TARGET_NR_mkdirat)
9851     case TARGET_NR_mkdirat:
9852         if (!(p = lock_user_string(arg2)))
9853             return -TARGET_EFAULT;
9854         ret = get_errno(mkdirat(arg1, p, arg3));
9855         unlock_user(p, arg2, 0);
9856         return ret;
9857 #endif
9858 #ifdef TARGET_NR_rmdir
9859     case TARGET_NR_rmdir:
9860         if (!(p = lock_user_string(arg1)))
9861             return -TARGET_EFAULT;
9862         ret = get_errno(rmdir(p));
9863         unlock_user(p, arg1, 0);
9864         return ret;
9865 #endif
9866     case TARGET_NR_dup:
9867         ret = get_errno(dup(arg1));
9868         if (ret >= 0) {
9869             fd_trans_dup(arg1, ret);
9870         }
9871         return ret;
9872 #ifdef TARGET_NR_pipe
9873     case TARGET_NR_pipe:
9874         return do_pipe(cpu_env, arg1, 0, 0);
9875 #endif
9876 #ifdef TARGET_NR_pipe2
9877     case TARGET_NR_pipe2:
9878         return do_pipe(cpu_env, arg1,
9879                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9880 #endif
9881     case TARGET_NR_times:
9882         {
9883             struct target_tms *tmsp;
9884             struct tms tms;
9885             ret = get_errno(times(&tms));
9886             if (arg1) {
9887                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9888                 if (!tmsp)
9889                     return -TARGET_EFAULT;
9890                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9891                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9892                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9893                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9894             }
9895             if (!is_error(ret))
9896                 ret = host_to_target_clock_t(ret);
9897         }
9898         return ret;
9899     case TARGET_NR_acct:
9900         if (arg1 == 0) {
9901             ret = get_errno(acct(NULL));
9902         } else {
9903             if (!(p = lock_user_string(arg1))) {
9904                 return -TARGET_EFAULT;
9905             }
9906             ret = get_errno(acct(path(p)));
9907             unlock_user(p, arg1, 0);
9908         }
9909         return ret;
9910 #ifdef TARGET_NR_umount2
9911     case TARGET_NR_umount2:
9912         if (!(p = lock_user_string(arg1)))
9913             return -TARGET_EFAULT;
9914         ret = get_errno(umount2(p, arg2));
9915         unlock_user(p, arg1, 0);
9916         return ret;
9917 #endif
9918     case TARGET_NR_ioctl:
9919         return do_ioctl(arg1, arg2, arg3);
9920 #ifdef TARGET_NR_fcntl
9921     case TARGET_NR_fcntl:
9922         return do_fcntl(arg1, arg2, arg3);
9923 #endif
9924     case TARGET_NR_setpgid:
9925         return get_errno(setpgid(arg1, arg2));
9926     case TARGET_NR_umask:
9927         return get_errno(umask(arg1));
9928     case TARGET_NR_chroot:
9929         if (!(p = lock_user_string(arg1)))
9930             return -TARGET_EFAULT;
9931         ret = get_errno(chroot(p));
9932         unlock_user(p, arg1, 0);
9933         return ret;
9934 #ifdef TARGET_NR_dup2
9935     case TARGET_NR_dup2:
9936         ret = get_errno(dup2(arg1, arg2));
9937         if (ret >= 0) {
9938             fd_trans_dup(arg1, arg2);
9939         }
9940         return ret;
9941 #endif
9942 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9943     case TARGET_NR_dup3:
9944     {
9945         int host_flags;
9946 
9947         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9948             return -TARGET_EINVAL;
9949         }
9950         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9951         ret = get_errno(dup3(arg1, arg2, host_flags));
9952         if (ret >= 0) {
9953             fd_trans_dup(arg1, arg2);
9954         }
9955         return ret;
9956     }
9957 #endif
9958 #ifdef TARGET_NR_getppid /* not on alpha */
9959     case TARGET_NR_getppid:
9960         return get_errno(getppid());
9961 #endif
9962 #ifdef TARGET_NR_getpgrp
9963     case TARGET_NR_getpgrp:
9964         return get_errno(getpgrp());
9965 #endif
9966     case TARGET_NR_setsid:
9967         return get_errno(setsid());
9968 #ifdef TARGET_NR_sigaction
9969     case TARGET_NR_sigaction:
9970         {
9971 #if defined(TARGET_MIPS)
9972             struct target_sigaction act, oact, *pact, *old_act;
9973 
9974             if (arg2) {
9975                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9976                     return -TARGET_EFAULT;
9977                 act._sa_handler = old_act->_sa_handler;
9978                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9979                 act.sa_flags = old_act->sa_flags;
9980                 unlock_user_struct(old_act, arg2, 0);
9981                 pact = &act;
9982             } else {
9983                 pact = NULL;
9984             }
9985 
9986             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9987 
9988             if (!is_error(ret) && arg3) {
9989                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9990                     return -TARGET_EFAULT;
9991                 old_act->_sa_handler = oact._sa_handler;
9992                 old_act->sa_flags = oact.sa_flags;
9993                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9994                 old_act->sa_mask.sig[1] = 0;
9995                 old_act->sa_mask.sig[2] = 0;
9996                 old_act->sa_mask.sig[3] = 0;
9997                 unlock_user_struct(old_act, arg3, 1);
9998             }
9999 #else
10000             struct target_old_sigaction *old_act;
10001             struct target_sigaction act, oact, *pact;
10002             if (arg2) {
10003                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10004                     return -TARGET_EFAULT;
10005                 act._sa_handler = old_act->_sa_handler;
10006                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10007                 act.sa_flags = old_act->sa_flags;
10008 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10009                 act.sa_restorer = old_act->sa_restorer;
10010 #endif
10011                 unlock_user_struct(old_act, arg2, 0);
10012                 pact = &act;
10013             } else {
10014                 pact = NULL;
10015             }
10016             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10017             if (!is_error(ret) && arg3) {
10018                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10019                     return -TARGET_EFAULT;
10020                 old_act->_sa_handler = oact._sa_handler;
10021                 old_act->sa_mask = oact.sa_mask.sig[0];
10022                 old_act->sa_flags = oact.sa_flags;
10023 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10024                 old_act->sa_restorer = oact.sa_restorer;
10025 #endif
10026                 unlock_user_struct(old_act, arg3, 1);
10027             }
10028 #endif
10029         }
10030         return ret;
10031 #endif
10032     case TARGET_NR_rt_sigaction:
10033         {
10034             /*
10035              * For Alpha and SPARC this is a 5 argument syscall, with
10036              * a 'restorer' parameter which must be copied into the
10037              * sa_restorer field of the sigaction struct.
10038              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10039              * and arg5 is the sigsetsize.
10040              */
10041 #if defined(TARGET_ALPHA)
10042             target_ulong sigsetsize = arg4;
10043             target_ulong restorer = arg5;
10044 #elif defined(TARGET_SPARC)
10045             target_ulong restorer = arg4;
10046             target_ulong sigsetsize = arg5;
10047 #else
10048             target_ulong sigsetsize = arg4;
10049             target_ulong restorer = 0;
10050 #endif
10051             struct target_sigaction *act = NULL;
10052             struct target_sigaction *oact = NULL;
10053 
10054             if (sigsetsize != sizeof(target_sigset_t)) {
10055                 return -TARGET_EINVAL;
10056             }
10057             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10058                 return -TARGET_EFAULT;
10059             }
10060             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10061                 ret = -TARGET_EFAULT;
10062             } else {
10063                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10064                 if (oact) {
10065                     unlock_user_struct(oact, arg3, 1);
10066                 }
10067             }
10068             if (act) {
10069                 unlock_user_struct(act, arg2, 0);
10070             }
10071         }
10072         return ret;
10073 #ifdef TARGET_NR_sgetmask /* not on alpha */
10074     case TARGET_NR_sgetmask:
10075         {
10076             sigset_t cur_set;
10077             abi_ulong target_set;
10078             ret = do_sigprocmask(0, NULL, &cur_set);
10079             if (!ret) {
10080                 host_to_target_old_sigset(&target_set, &cur_set);
10081                 ret = target_set;
10082             }
10083         }
10084         return ret;
10085 #endif
10086 #ifdef TARGET_NR_ssetmask /* not on alpha */
10087     case TARGET_NR_ssetmask:
10088         {
10089             sigset_t set, oset;
10090             abi_ulong target_set = arg1;
10091             target_to_host_old_sigset(&set, &target_set);
10092             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10093             if (!ret) {
10094                 host_to_target_old_sigset(&target_set, &oset);
10095                 ret = target_set;
10096             }
10097         }
10098         return ret;
10099 #endif
10100 #ifdef TARGET_NR_sigprocmask
10101     case TARGET_NR_sigprocmask:
10102         {
10103 #if defined(TARGET_ALPHA)
10104             sigset_t set, oldset;
10105             abi_ulong mask;
10106             int how;
10107 
10108             switch (arg1) {
10109             case TARGET_SIG_BLOCK:
10110                 how = SIG_BLOCK;
10111                 break;
10112             case TARGET_SIG_UNBLOCK:
10113                 how = SIG_UNBLOCK;
10114                 break;
10115             case TARGET_SIG_SETMASK:
10116                 how = SIG_SETMASK;
10117                 break;
10118             default:
10119                 return -TARGET_EINVAL;
10120             }
10121             mask = arg2;
10122             target_to_host_old_sigset(&set, &mask);
10123 
10124             ret = do_sigprocmask(how, &set, &oldset);
10125             if (!is_error(ret)) {
10126                 host_to_target_old_sigset(&mask, &oldset);
10127                 ret = mask;
10128                 cpu_env->ir[IR_V0] = 0; /* force no error */
10129             }
10130 #else
10131             sigset_t set, oldset, *set_ptr;
10132             int how;
10133 
10134             if (arg2) {
10135                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10136                 if (!p) {
10137                     return -TARGET_EFAULT;
10138                 }
10139                 target_to_host_old_sigset(&set, p);
10140                 unlock_user(p, arg2, 0);
10141                 set_ptr = &set;
10142                 switch (arg1) {
10143                 case TARGET_SIG_BLOCK:
10144                     how = SIG_BLOCK;
10145                     break;
10146                 case TARGET_SIG_UNBLOCK:
10147                     how = SIG_UNBLOCK;
10148                     break;
10149                 case TARGET_SIG_SETMASK:
10150                     how = SIG_SETMASK;
10151                     break;
10152                 default:
10153                     return -TARGET_EINVAL;
10154                 }
10155             } else {
10156                 how = 0;
10157                 set_ptr = NULL;
10158             }
10159             ret = do_sigprocmask(how, set_ptr, &oldset);
10160             if (!is_error(ret) && arg3) {
10161                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10162                     return -TARGET_EFAULT;
10163                 host_to_target_old_sigset(p, &oldset);
10164                 unlock_user(p, arg3, sizeof(target_sigset_t));
10165             }
10166 #endif
10167         }
10168         return ret;
10169 #endif
10170     case TARGET_NR_rt_sigprocmask:
10171         {
10172             int how = arg1;
10173             sigset_t set, oldset, *set_ptr;
10174 
10175             if (arg4 != sizeof(target_sigset_t)) {
10176                 return -TARGET_EINVAL;
10177             }
10178 
10179             if (arg2) {
10180                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10181                 if (!p) {
10182                     return -TARGET_EFAULT;
10183                 }
10184                 target_to_host_sigset(&set, p);
10185                 unlock_user(p, arg2, 0);
10186                 set_ptr = &set;
10187                 switch(how) {
10188                 case TARGET_SIG_BLOCK:
10189                     how = SIG_BLOCK;
10190                     break;
10191                 case TARGET_SIG_UNBLOCK:
10192                     how = SIG_UNBLOCK;
10193                     break;
10194                 case TARGET_SIG_SETMASK:
10195                     how = SIG_SETMASK;
10196                     break;
10197                 default:
10198                     return -TARGET_EINVAL;
10199                 }
10200             } else {
10201                 how = 0;
10202                 set_ptr = NULL;
10203             }
10204             ret = do_sigprocmask(how, set_ptr, &oldset);
10205             if (!is_error(ret) && arg3) {
10206                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10207                     return -TARGET_EFAULT;
10208                 host_to_target_sigset(p, &oldset);
10209                 unlock_user(p, arg3, sizeof(target_sigset_t));
10210             }
10211         }
10212         return ret;
10213 #ifdef TARGET_NR_sigpending
10214     case TARGET_NR_sigpending:
10215         {
10216             sigset_t set;
10217             ret = get_errno(sigpending(&set));
10218             if (!is_error(ret)) {
10219                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10220                     return -TARGET_EFAULT;
10221                 host_to_target_old_sigset(p, &set);
10222                 unlock_user(p, arg1, sizeof(target_sigset_t));
10223             }
10224         }
10225         return ret;
10226 #endif
10227     case TARGET_NR_rt_sigpending:
10228         {
10229             sigset_t set;
10230 
10231             /* Yes, this check is >, not != like most. We follow the kernel's
10232              * logic here: the kernel implements NR_sigpending through the
10233              * same code path, and in that case the old_sigset_t is smaller
10234              * in size.
10235              */
10236             if (arg2 > sizeof(target_sigset_t)) {
10237                 return -TARGET_EINVAL;
10238             }
10239 
10240             ret = get_errno(sigpending(&set));
10241             if (!is_error(ret)) {
10242                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10243                     return -TARGET_EFAULT;
10244                 host_to_target_sigset(p, &set);
10245                 unlock_user(p, arg1, sizeof(target_sigset_t));
10246             }
10247         }
10248         return ret;
10249 #ifdef TARGET_NR_sigsuspend
10250     case TARGET_NR_sigsuspend:
10251         {
10252             sigset_t *set;
10253 
10254 #if defined(TARGET_ALPHA)
10255             TaskState *ts = get_task_state(cpu);
10256             /* target_to_host_old_sigset will bswap back */
10257             abi_ulong mask = tswapal(arg1);
10258             set = &ts->sigsuspend_mask;
10259             target_to_host_old_sigset(set, &mask);
10260 #else
10261             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10262             if (ret != 0) {
10263                 return ret;
10264             }
10265 #endif
10266             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10267             finish_sigsuspend_mask(ret);
10268         }
10269         return ret;
10270 #endif
10271     case TARGET_NR_rt_sigsuspend:
10272         {
10273             sigset_t *set;
10274 
10275             ret = process_sigsuspend_mask(&set, arg1, arg2);
10276             if (ret != 0) {
10277                 return ret;
10278             }
10279             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10280             finish_sigsuspend_mask(ret);
10281         }
10282         return ret;
10283 #ifdef TARGET_NR_rt_sigtimedwait
10284     case TARGET_NR_rt_sigtimedwait:
10285         {
10286             sigset_t set;
10287             struct timespec uts, *puts;
10288             siginfo_t uinfo;
10289 
10290             if (arg4 != sizeof(target_sigset_t)) {
10291                 return -TARGET_EINVAL;
10292             }
10293 
10294             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10295                 return -TARGET_EFAULT;
10296             target_to_host_sigset(&set, p);
10297             unlock_user(p, arg1, 0);
10298             if (arg3) {
10299                 puts = &uts;
10300                 if (target_to_host_timespec(puts, arg3)) {
10301                     return -TARGET_EFAULT;
10302                 }
10303             } else {
10304                 puts = NULL;
10305             }
10306             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10307                                                  SIGSET_T_SIZE));
10308             if (!is_error(ret)) {
10309                 if (arg2) {
10310                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10311                                   0);
10312                     if (!p) {
10313                         return -TARGET_EFAULT;
10314                     }
10315                     host_to_target_siginfo(p, &uinfo);
10316                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10317                 }
10318                 ret = host_to_target_signal(ret);
10319             }
10320         }
10321         return ret;
10322 #endif
10323 #ifdef TARGET_NR_rt_sigtimedwait_time64
10324     case TARGET_NR_rt_sigtimedwait_time64:
10325         {
10326             sigset_t set;
10327             struct timespec uts, *puts;
10328             siginfo_t uinfo;
10329 
10330             if (arg4 != sizeof(target_sigset_t)) {
10331                 return -TARGET_EINVAL;
10332             }
10333 
10334             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10335             if (!p) {
10336                 return -TARGET_EFAULT;
10337             }
10338             target_to_host_sigset(&set, p);
10339             unlock_user(p, arg1, 0);
10340             if (arg3) {
10341                 puts = &uts;
10342                 if (target_to_host_timespec64(puts, arg3)) {
10343                     return -TARGET_EFAULT;
10344                 }
10345             } else {
10346                 puts = NULL;
10347             }
10348             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10349                                                  SIGSET_T_SIZE));
10350             if (!is_error(ret)) {
10351                 if (arg2) {
10352                     p = lock_user(VERIFY_WRITE, arg2,
10353                                   sizeof(target_siginfo_t), 0);
10354                     if (!p) {
10355                         return -TARGET_EFAULT;
10356                     }
10357                     host_to_target_siginfo(p, &uinfo);
10358                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10359                 }
10360                 ret = host_to_target_signal(ret);
10361             }
10362         }
10363         return ret;
10364 #endif
10365     case TARGET_NR_rt_sigqueueinfo:
10366         {
10367             siginfo_t uinfo;
10368 
10369             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10370             if (!p) {
10371                 return -TARGET_EFAULT;
10372             }
10373             target_to_host_siginfo(&uinfo, p);
10374             unlock_user(p, arg3, 0);
10375             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10376         }
10377         return ret;
10378     case TARGET_NR_rt_tgsigqueueinfo:
10379         {
10380             siginfo_t uinfo;
10381 
10382             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10383             if (!p) {
10384                 return -TARGET_EFAULT;
10385             }
10386             target_to_host_siginfo(&uinfo, p);
10387             unlock_user(p, arg4, 0);
10388             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10389         }
10390         return ret;
10391 #ifdef TARGET_NR_sigreturn
10392     case TARGET_NR_sigreturn:
10393         if (block_signals()) {
10394             return -QEMU_ERESTARTSYS;
10395         }
10396         return do_sigreturn(cpu_env);
10397 #endif
10398     case TARGET_NR_rt_sigreturn:
10399         if (block_signals()) {
10400             return -QEMU_ERESTARTSYS;
10401         }
10402         return do_rt_sigreturn(cpu_env);
10403     case TARGET_NR_sethostname:
10404         if (!(p = lock_user_string(arg1)))
10405             return -TARGET_EFAULT;
10406         ret = get_errno(sethostname(p, arg2));
10407         unlock_user(p, arg1, 0);
10408         return ret;
10409 #ifdef TARGET_NR_setrlimit
10410     case TARGET_NR_setrlimit:
10411         {
10412             int resource = target_to_host_resource(arg1);
10413             struct target_rlimit *target_rlim;
10414             struct rlimit rlim;
10415             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10416                 return -TARGET_EFAULT;
10417             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10418             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10419             unlock_user_struct(target_rlim, arg2, 0);
10420             /*
10421              * If we just passed through resource limit settings for memory then
10422              * they would also apply to QEMU's own allocations, and QEMU will
10423              * crash or hang or die if its allocations fail. Ideally we would
10424              * track the guest allocations in QEMU and apply the limits ourselves.
10425              * For now, just tell the guest the call succeeded but don't actually
10426              * limit anything.
10427              */
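                  /*
                   * e.g. a guest setrlimit(RLIMIT_AS, {1 MiB, 1 MiB}) is reported
                   * as successful below, but QEMU's own address-space limit is
                   * left untouched.
                   */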
10428             if (resource != RLIMIT_AS &&
10429                 resource != RLIMIT_DATA &&
10430                 resource != RLIMIT_STACK) {
10431                 return get_errno(setrlimit(resource, &rlim));
10432             } else {
10433                 return 0;
10434             }
10435         }
10436 #endif
10437 #ifdef TARGET_NR_getrlimit
10438     case TARGET_NR_getrlimit:
10439         {
10440             int resource = target_to_host_resource(arg1);
10441             struct target_rlimit *target_rlim;
10442             struct rlimit rlim;
10443 
10444             ret = get_errno(getrlimit(resource, &rlim));
10445             if (!is_error(ret)) {
10446                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10447                     return -TARGET_EFAULT;
10448                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10449                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10450                 unlock_user_struct(target_rlim, arg2, 1);
10451             }
10452         }
10453         return ret;
10454 #endif
10455     case TARGET_NR_getrusage:
10456         {
10457             struct rusage rusage;
10458             ret = get_errno(getrusage(arg1, &rusage));
10459             if (!is_error(ret)) {
10460                 ret = host_to_target_rusage(arg2, &rusage);
10461             }
10462         }
10463         return ret;
10464 #if defined(TARGET_NR_gettimeofday)
10465     case TARGET_NR_gettimeofday:
10466         {
10467             struct timeval tv;
10468             struct timezone tz;
10469 
10470             ret = get_errno(gettimeofday(&tv, &tz));
10471             if (!is_error(ret)) {
10472                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10473                     return -TARGET_EFAULT;
10474                 }
10475                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10476                     return -TARGET_EFAULT;
10477                 }
10478             }
10479         }
10480         return ret;
10481 #endif
10482 #if defined(TARGET_NR_settimeofday)
10483     case TARGET_NR_settimeofday:
10484         {
10485             struct timeval tv, *ptv = NULL;
10486             struct timezone tz, *ptz = NULL;
10487 
10488             if (arg1) {
10489                 if (copy_from_user_timeval(&tv, arg1)) {
10490                     return -TARGET_EFAULT;
10491                 }
10492                 ptv = &tv;
10493             }
10494 
10495             if (arg2) {
10496                 if (copy_from_user_timezone(&tz, arg2)) {
10497                     return -TARGET_EFAULT;
10498                 }
10499                 ptz = &tz;
10500             }
10501 
10502             return get_errno(settimeofday(ptv, ptz));
10503         }
10504 #endif
10505 #if defined(TARGET_NR_select)
10506     case TARGET_NR_select:
10507 #if defined(TARGET_WANT_NI_OLD_SELECT)
10508         /* Some architectures used to implement old_select here,
10509          * but now return ENOSYS for it.
10510          */
10511         ret = -TARGET_ENOSYS;
10512 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10513         ret = do_old_select(arg1);
10514 #else
10515         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10516 #endif
10517         return ret;
10518 #endif
10519 #ifdef TARGET_NR_pselect6
10520     case TARGET_NR_pselect6:
10521         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10522 #endif
10523 #ifdef TARGET_NR_pselect6_time64
10524     case TARGET_NR_pselect6_time64:
10525         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10526 #endif
10527 #ifdef TARGET_NR_symlink
10528     case TARGET_NR_symlink:
10529         {
10530             void *p2;
10531             p = lock_user_string(arg1);
10532             p2 = lock_user_string(arg2);
10533             if (!p || !p2)
10534                 ret = -TARGET_EFAULT;
10535             else
10536                 ret = get_errno(symlink(p, p2));
10537             unlock_user(p2, arg2, 0);
10538             unlock_user(p, arg1, 0);
10539         }
10540         return ret;
10541 #endif
10542 #if defined(TARGET_NR_symlinkat)
10543     case TARGET_NR_symlinkat:
10544         {
10545             void *p2;
10546             p  = lock_user_string(arg1);
10547             p2 = lock_user_string(arg3);
10548             if (!p || !p2)
10549                 ret = -TARGET_EFAULT;
10550             else
10551                 ret = get_errno(symlinkat(p, arg2, p2));
10552             unlock_user(p2, arg3, 0);
10553             unlock_user(p, arg1, 0);
10554         }
10555         return ret;
10556 #endif
10557 #ifdef TARGET_NR_readlink
10558     case TARGET_NR_readlink:
10559         {
10560             void *p2;
10561             p = lock_user_string(arg1);
10562             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10563             ret = get_errno(do_guest_readlink(p, p2, arg3));
10564             unlock_user(p2, arg2, ret);
10565             unlock_user(p, arg1, 0);
10566         }
10567         return ret;
10568 #endif
10569 #if defined(TARGET_NR_readlinkat)
10570     case TARGET_NR_readlinkat:
10571         {
10572             void *p2;
10573             p  = lock_user_string(arg2);
10574             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10575             if (!p || !p2) {
10576                 ret = -TARGET_EFAULT;
10577             } else if (!arg4) {
10578                 /* Short circuit this for the magic exe check. */
10579                 ret = -TARGET_EINVAL;
10580             } else if (is_proc_myself((const char *)p, "exe")) {
10581                 /*
10582                  * Don't worry about sign mismatch as earlier mapping
10583                  * logic would have thrown a bad address error.
10584                  */
10585                 ret = MIN(strlen(exec_path), arg4);
10586                 /* We cannot NUL terminate the string. */
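                      /*
                       * e.g. for a (hypothetical) exec_path "/usr/bin/guest" and a
                       * 4-byte buffer only "/usr" is copied back, which matches
                       * readlink(2): it does not NUL-terminate either.
                       */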
10587                 memcpy(p2, exec_path, ret);
10588             } else {
10589                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10590             }
10591             unlock_user(p2, arg3, ret);
10592             unlock_user(p, arg2, 0);
10593         }
10594         return ret;
10595 #endif
10596 #ifdef TARGET_NR_swapon
10597     case TARGET_NR_swapon:
10598         if (!(p = lock_user_string(arg1)))
10599             return -TARGET_EFAULT;
10600         ret = get_errno(swapon(p, arg2));
10601         unlock_user(p, arg1, 0);
10602         return ret;
10603 #endif
10604     case TARGET_NR_reboot:
10605         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10606             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 */
10607             p = lock_user_string(arg4);
10608             if (!p) {
10609                 return -TARGET_EFAULT;
10610             }
10611             ret = get_errno(reboot(arg1, arg2, arg3, p));
10612             unlock_user(p, arg4, 0);
10613         } else {
10614             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10615         }
10616         return ret;
10617 #ifdef TARGET_NR_mmap
10618     case TARGET_NR_mmap:
10619 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
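              /*
               * Old-style mmap: instead of six register arguments the guest passes
               * a single pointer to a block of six abi_ulongs in guest order
               * (addr, length, prot, flags, fd, offset), unpacked below.
               */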
10620         {
10621             abi_ulong *v;
10622             abi_ulong v1, v2, v3, v4, v5, v6;
10623             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10624                 return -TARGET_EFAULT;
10625             v1 = tswapal(v[0]);
10626             v2 = tswapal(v[1]);
10627             v3 = tswapal(v[2]);
10628             v4 = tswapal(v[3]);
10629             v5 = tswapal(v[4]);
10630             v6 = tswapal(v[5]);
10631             unlock_user(v, arg1, 0);
10632             return do_mmap(v1, v2, v3, v4, v5, v6);
10633         }
10634 #else
10635         /* mmap pointers are always untagged */
10636         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10637 #endif
10638 #endif
10639 #ifdef TARGET_NR_mmap2
10640     case TARGET_NR_mmap2:
10641 #ifndef MMAP_SHIFT
10642 #define MMAP_SHIFT 12
10643 #endif
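              /*
               * mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes
               * (4096 by default), so e.g. a guest offset argument of 3 becomes
               * byte offset 3 << 12 == 12288 before it reaches do_mmap().
               */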
10644         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10645                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10646 #endif
10647     case TARGET_NR_munmap:
10648         arg1 = cpu_untagged_addr(cpu, arg1);
10649         return get_errno(target_munmap(arg1, arg2));
10650     case TARGET_NR_mprotect:
10651         arg1 = cpu_untagged_addr(cpu, arg1);
10652         {
10653             TaskState *ts = get_task_state(cpu);
10654             /* Special hack to detect libc making the stack executable.  */
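                  /*
                   * PROT_GROWSDOWN asks the kernel to apply the protection change
                   * down to the start of the growable stack region; mimic that by
                   * widening [arg1, arg1 + arg2) to start at ts->info->stack_limit.
                   */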
10655             if ((arg3 & PROT_GROWSDOWN)
10656                 && arg1 >= ts->info->stack_limit
10657                 && arg1 <= ts->info->start_stack) {
10658                 arg3 &= ~PROT_GROWSDOWN;
10659                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10660                 arg1 = ts->info->stack_limit;
10661             }
10662         }
10663         return get_errno(target_mprotect(arg1, arg2, arg3));
10664 #ifdef TARGET_NR_mremap
10665     case TARGET_NR_mremap:
10666         arg1 = cpu_untagged_addr(cpu, arg1);
10667         /* mremap new_addr (arg5) is always untagged */
10668         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10669 #endif
10670         /* ??? msync/mlock/munlock are broken for softmmu.  */
10671 #ifdef TARGET_NR_msync
10672     case TARGET_NR_msync:
10673         return get_errno(msync(g2h(cpu, arg1), arg2,
10674                                target_to_host_msync_arg(arg3)));
10675 #endif
10676 #ifdef TARGET_NR_mlock
10677     case TARGET_NR_mlock:
10678         return get_errno(mlock(g2h(cpu, arg1), arg2));
10679 #endif
10680 #ifdef TARGET_NR_munlock
10681     case TARGET_NR_munlock:
10682         return get_errno(munlock(g2h(cpu, arg1), arg2));
10683 #endif
10684 #ifdef TARGET_NR_mlockall
10685     case TARGET_NR_mlockall:
10686         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10687 #endif
10688 #ifdef TARGET_NR_munlockall
10689     case TARGET_NR_munlockall:
10690         return get_errno(munlockall());
10691 #endif
10692 #ifdef TARGET_NR_truncate
10693     case TARGET_NR_truncate:
10694         if (!(p = lock_user_string(arg1)))
10695             return -TARGET_EFAULT;
10696         ret = get_errno(truncate(p, arg2));
10697         unlock_user(p, arg1, 0);
10698         return ret;
10699 #endif
10700 #ifdef TARGET_NR_ftruncate
10701     case TARGET_NR_ftruncate:
10702         return get_errno(ftruncate(arg1, arg2));
10703 #endif
10704     case TARGET_NR_fchmod:
10705         return get_errno(fchmod(arg1, arg2));
10706 #if defined(TARGET_NR_fchmodat)
10707     case TARGET_NR_fchmodat:
10708         if (!(p = lock_user_string(arg2)))
10709             return -TARGET_EFAULT;
10710         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10711         unlock_user(p, arg2, 0);
10712         return ret;
10713 #endif
10714     case TARGET_NR_getpriority:
10715         /* Note that negative values are valid for getpriority, so we must
10716            differentiate based on errno settings.  */
10717         errno = 0;
10718         ret = getpriority(arg1, arg2);
10719         if (ret == -1 && errno != 0) {
10720             return -host_to_target_errno(errno);
10721         }
10722 #ifdef TARGET_ALPHA
10723         /* Return value is the unbiased priority.  Signal no error.  */
10724         cpu_env->ir[IR_V0] = 0;
10725 #else
10726         /* Return value is a biased priority to avoid negative numbers.  */
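              /* e.g. a host nice of -5 is reported to the guest as 20 - (-5) = 25. */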
10727         ret = 20 - ret;
10728 #endif
10729         return ret;
10730     case TARGET_NR_setpriority:
10731         return get_errno(setpriority(arg1, arg2, arg3));
10732 #ifdef TARGET_NR_statfs
10733     case TARGET_NR_statfs:
10734         if (!(p = lock_user_string(arg1))) {
10735             return -TARGET_EFAULT;
10736         }
10737         ret = get_errno(statfs(path(p), &stfs));
10738         unlock_user(p, arg1, 0);
10739     convert_statfs:
10740         if (!is_error(ret)) {
10741             struct target_statfs *target_stfs;
10742 
10743             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10744                 return -TARGET_EFAULT;
10745             __put_user(stfs.f_type, &target_stfs->f_type);
10746             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10747             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10748             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10749             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10750             __put_user(stfs.f_files, &target_stfs->f_files);
10751             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10752             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10753             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10754             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10755             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10756 #ifdef _STATFS_F_FLAGS
10757             __put_user(stfs.f_flags, &target_stfs->f_flags);
10758 #else
10759             __put_user(0, &target_stfs->f_flags);
10760 #endif
10761             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10762             unlock_user_struct(target_stfs, arg2, 1);
10763         }
10764         return ret;
10765 #endif
10766 #ifdef TARGET_NR_fstatfs
10767     case TARGET_NR_fstatfs:
10768         ret = get_errno(fstatfs(arg1, &stfs));
10769         goto convert_statfs;
10770 #endif
10771 #ifdef TARGET_NR_statfs64
10772     case TARGET_NR_statfs64:
10773         if (!(p = lock_user_string(arg1))) {
10774             return -TARGET_EFAULT;
10775         }
10776         ret = get_errno(statfs(path(p), &stfs));
10777         unlock_user(p, arg1, 0);
10778     convert_statfs64:
10779         if (!is_error(ret)) {
10780             struct target_statfs64 *target_stfs;
10781 
10782             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10783                 return -TARGET_EFAULT;
10784             __put_user(stfs.f_type, &target_stfs->f_type);
10785             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10786             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10787             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10788             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10789             __put_user(stfs.f_files, &target_stfs->f_files);
10790             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10791             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10792             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10793             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10794             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10795 #ifdef _STATFS_F_FLAGS
10796             __put_user(stfs.f_flags, &target_stfs->f_flags);
10797 #else
10798             __put_user(0, &target_stfs->f_flags);
10799 #endif
10800             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10801             unlock_user_struct(target_stfs, arg3, 1);
10802         }
10803         return ret;
10804     case TARGET_NR_fstatfs64:
10805         ret = get_errno(fstatfs(arg1, &stfs));
10806         goto convert_statfs64;
10807 #endif
10808 #ifdef TARGET_NR_socketcall
10809     case TARGET_NR_socketcall:
10810         return do_socketcall(arg1, arg2);
10811 #endif
10812 #ifdef TARGET_NR_accept
10813     case TARGET_NR_accept:
10814         return do_accept4(arg1, arg2, arg3, 0);
10815 #endif
10816 #ifdef TARGET_NR_accept4
10817     case TARGET_NR_accept4:
10818         return do_accept4(arg1, arg2, arg3, arg4);
10819 #endif
10820 #ifdef TARGET_NR_bind
10821     case TARGET_NR_bind:
10822         return do_bind(arg1, arg2, arg3);
10823 #endif
10824 #ifdef TARGET_NR_connect
10825     case TARGET_NR_connect:
10826         return do_connect(arg1, arg2, arg3);
10827 #endif
10828 #ifdef TARGET_NR_getpeername
10829     case TARGET_NR_getpeername:
10830         return do_getpeername(arg1, arg2, arg3);
10831 #endif
10832 #ifdef TARGET_NR_getsockname
10833     case TARGET_NR_getsockname:
10834         return do_getsockname(arg1, arg2, arg3);
10835 #endif
10836 #ifdef TARGET_NR_getsockopt
10837     case TARGET_NR_getsockopt:
10838         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10839 #endif
10840 #ifdef TARGET_NR_listen
10841     case TARGET_NR_listen:
10842         return get_errno(listen(arg1, arg2));
10843 #endif
10844 #ifdef TARGET_NR_recv
10845     case TARGET_NR_recv:
10846         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10847 #endif
10848 #ifdef TARGET_NR_recvfrom
10849     case TARGET_NR_recvfrom:
10850         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10851 #endif
10852 #ifdef TARGET_NR_recvmsg
10853     case TARGET_NR_recvmsg:
10854         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10855 #endif
10856 #ifdef TARGET_NR_send
10857     case TARGET_NR_send:
10858         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10859 #endif
10860 #ifdef TARGET_NR_sendmsg
10861     case TARGET_NR_sendmsg:
10862         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10863 #endif
10864 #ifdef TARGET_NR_sendmmsg
10865     case TARGET_NR_sendmmsg:
10866         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10867 #endif
10868 #ifdef TARGET_NR_recvmmsg
10869     case TARGET_NR_recvmmsg:
10870         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10871 #endif
10872 #ifdef TARGET_NR_sendto
10873     case TARGET_NR_sendto:
10874         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10875 #endif
10876 #ifdef TARGET_NR_shutdown
10877     case TARGET_NR_shutdown:
10878         return get_errno(shutdown(arg1, arg2));
10879 #endif
10880 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10881     case TARGET_NR_getrandom:
10882         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10883         if (!p) {
10884             return -TARGET_EFAULT;
10885         }
10886         ret = get_errno(getrandom(p, arg2, arg3));
10887         unlock_user(p, arg1, ret);
10888         return ret;
10889 #endif
10890 #ifdef TARGET_NR_socket
10891     case TARGET_NR_socket:
10892         return do_socket(arg1, arg2, arg3);
10893 #endif
10894 #ifdef TARGET_NR_socketpair
10895     case TARGET_NR_socketpair:
10896         return do_socketpair(arg1, arg2, arg3, arg4);
10897 #endif
10898 #ifdef TARGET_NR_setsockopt
10899     case TARGET_NR_setsockopt:
10900         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10901 #endif
10902 #if defined(TARGET_NR_syslog)
10903     case TARGET_NR_syslog:
10904         {
10905             int len = arg3;
10906 
10907             switch (arg1) {
10908             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10909             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10910             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10911             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10912             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10913             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10914             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10915             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10916                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10917             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10918             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10919             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10920                 {
10921                     if (len < 0) {
10922                         return -TARGET_EINVAL;
10923                     }
10924                     if (len == 0) {
10925                         return 0;
10926                     }
10927                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10928                     if (!p) {
10929                         return -TARGET_EFAULT;
10930                     }
10931                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10932                     unlock_user(p, arg2, arg3);
10933                 }
10934                 return ret;
10935             default:
10936                 return -TARGET_EINVAL;
10937             }
10938         }
10939         break;
10940 #endif
10941     case TARGET_NR_setitimer:
10942         {
10943             struct itimerval value, ovalue, *pvalue;
10944 
10945             if (arg2) {
10946                 pvalue = &value;
10947                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10948                     || copy_from_user_timeval(&pvalue->it_value,
10949                                               arg2 + sizeof(struct target_timeval)))
10950                     return -TARGET_EFAULT;
10951             } else {
10952                 pvalue = NULL;
10953             }
10954             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10955             if (!is_error(ret) && arg3) {
10956                 if (copy_to_user_timeval(arg3,
10957                                          &ovalue.it_interval)
10958                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10959                                             &ovalue.it_value))
10960                     return -TARGET_EFAULT;
10961             }
10962         }
10963         return ret;
10964     case TARGET_NR_getitimer:
10965         {
10966             struct itimerval value;
10967 
10968             ret = get_errno(getitimer(arg1, &value));
10969             if (!is_error(ret) && arg2) {
10970                 if (copy_to_user_timeval(arg2,
10971                                          &value.it_interval)
10972                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10973                                             &value.it_value))
10974                     return -TARGET_EFAULT;
10975             }
10976         }
10977         return ret;
10978 #ifdef TARGET_NR_stat
10979     case TARGET_NR_stat:
10980         if (!(p = lock_user_string(arg1))) {
10981             return -TARGET_EFAULT;
10982         }
10983         ret = get_errno(stat(path(p), &st));
10984         unlock_user(p, arg1, 0);
10985         goto do_stat;
10986 #endif
10987 #ifdef TARGET_NR_lstat
10988     case TARGET_NR_lstat:
10989         if (!(p = lock_user_string(arg1))) {
10990             return -TARGET_EFAULT;
10991         }
10992         ret = get_errno(lstat(path(p), &st));
10993         unlock_user(p, arg1, 0);
10994         goto do_stat;
10995 #endif
10996 #ifdef TARGET_NR_fstat
10997     case TARGET_NR_fstat:
10998         {
10999             ret = get_errno(fstat(arg1, &st));
11000 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
11001         do_stat:
11002 #endif
11003             if (!is_error(ret)) {
11004                 struct target_stat *target_st;
11005 
11006                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11007                     return -TARGET_EFAULT;
11008                 memset(target_st, 0, sizeof(*target_st));
11009                 __put_user(st.st_dev, &target_st->st_dev);
11010                 __put_user(st.st_ino, &target_st->st_ino);
11011                 __put_user(st.st_mode, &target_st->st_mode);
11012                 __put_user(st.st_uid, &target_st->st_uid);
11013                 __put_user(st.st_gid, &target_st->st_gid);
11014                 __put_user(st.st_nlink, &target_st->st_nlink);
11015                 __put_user(st.st_rdev, &target_st->st_rdev);
11016                 __put_user(st.st_size, &target_st->st_size);
11017                 __put_user(st.st_blksize, &target_st->st_blksize);
11018                 __put_user(st.st_blocks, &target_st->st_blocks);
11019                 __put_user(st.st_atime, &target_st->target_st_atime);
11020                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11021                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11022 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11023                 __put_user(st.st_atim.tv_nsec,
11024                            &target_st->target_st_atime_nsec);
11025                 __put_user(st.st_mtim.tv_nsec,
11026                            &target_st->target_st_mtime_nsec);
11027                 __put_user(st.st_ctim.tv_nsec,
11028                            &target_st->target_st_ctime_nsec);
11029 #endif
11030                 unlock_user_struct(target_st, arg2, 1);
11031             }
11032         }
11033         return ret;
11034 #endif
11035     case TARGET_NR_vhangup:
11036         return get_errno(vhangup());
11037 #ifdef TARGET_NR_syscall
11038     case TARGET_NR_syscall:
11039         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11040                           arg6, arg7, arg8, 0);
11041 #endif
11042 #if defined(TARGET_NR_wait4)
11043     case TARGET_NR_wait4:
11044         {
11045             int status;
11046             abi_long status_ptr = arg2;
11047             struct rusage rusage, *rusage_ptr;
11048             abi_ulong target_rusage = arg4;
11049             abi_long rusage_err;
11050             if (target_rusage)
11051                 rusage_ptr = &rusage;
11052             else
11053                 rusage_ptr = NULL;
11054             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11055             if (!is_error(ret)) {
11056                 if (status_ptr && ret) {
11057                     status = host_to_target_waitstatus(status);
11058                     if (put_user_s32(status, status_ptr))
11059                         return -TARGET_EFAULT;
11060                 }
11061                 if (target_rusage) {
11062                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11063                     if (rusage_err) {
11064                         ret = rusage_err;
11065                     }
11066                 }
11067             }
11068         }
11069         return ret;
11070 #endif
11071 #ifdef TARGET_NR_swapoff
11072     case TARGET_NR_swapoff:
11073         if (!(p = lock_user_string(arg1)))
11074             return -TARGET_EFAULT;
11075         ret = get_errno(swapoff(p));
11076         unlock_user(p, arg1, 0);
11077         return ret;
11078 #endif
11079     case TARGET_NR_sysinfo:
11080         {
11081             struct target_sysinfo *target_value;
11082             struct sysinfo value;
11083             ret = get_errno(sysinfo(&value));
11084             if (!is_error(ret) && arg1)
11085             {
11086                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11087                     return -TARGET_EFAULT;
11088                 __put_user(value.uptime, &target_value->uptime);
11089                 __put_user(value.loads[0], &target_value->loads[0]);
11090                 __put_user(value.loads[1], &target_value->loads[1]);
11091                 __put_user(value.loads[2], &target_value->loads[2]);
11092                 __put_user(value.totalram, &target_value->totalram);
11093                 __put_user(value.freeram, &target_value->freeram);
11094                 __put_user(value.sharedram, &target_value->sharedram);
11095                 __put_user(value.bufferram, &target_value->bufferram);
11096                 __put_user(value.totalswap, &target_value->totalswap);
11097                 __put_user(value.freeswap, &target_value->freeswap);
11098                 __put_user(value.procs, &target_value->procs);
11099                 __put_user(value.totalhigh, &target_value->totalhigh);
11100                 __put_user(value.freehigh, &target_value->freehigh);
11101                 __put_user(value.mem_unit, &target_value->mem_unit);
11102                 unlock_user_struct(target_value, arg1, 1);
11103             }
11104         }
11105         return ret;
11106 #ifdef TARGET_NR_ipc
11107     case TARGET_NR_ipc:
11108         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11109 #endif
11110 #ifdef TARGET_NR_semget
11111     case TARGET_NR_semget:
11112         return get_errno(semget(arg1, arg2, arg3));
11113 #endif
11114 #ifdef TARGET_NR_semop
11115     case TARGET_NR_semop:
11116         return do_semtimedop(arg1, arg2, arg3, 0, false);
11117 #endif
11118 #ifdef TARGET_NR_semtimedop
11119     case TARGET_NR_semtimedop:
11120         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11121 #endif
11122 #ifdef TARGET_NR_semtimedop_time64
11123     case TARGET_NR_semtimedop_time64:
11124         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11125 #endif
11126 #ifdef TARGET_NR_semctl
11127     case TARGET_NR_semctl:
11128         return do_semctl(arg1, arg2, arg3, arg4);
11129 #endif
11130 #ifdef TARGET_NR_msgctl
11131     case TARGET_NR_msgctl:
11132         return do_msgctl(arg1, arg2, arg3);
11133 #endif
11134 #ifdef TARGET_NR_msgget
11135     case TARGET_NR_msgget:
11136         return get_errno(msgget(arg1, arg2));
11137 #endif
11138 #ifdef TARGET_NR_msgrcv
11139     case TARGET_NR_msgrcv:
11140         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11141 #endif
11142 #ifdef TARGET_NR_msgsnd
11143     case TARGET_NR_msgsnd:
11144         return do_msgsnd(arg1, arg2, arg3, arg4);
11145 #endif
11146 #ifdef TARGET_NR_shmget
11147     case TARGET_NR_shmget:
11148         return get_errno(shmget(arg1, arg2, arg3));
11149 #endif
11150 #ifdef TARGET_NR_shmctl
11151     case TARGET_NR_shmctl:
11152         return do_shmctl(arg1, arg2, arg3);
11153 #endif
11154 #ifdef TARGET_NR_shmat
11155     case TARGET_NR_shmat:
11156         return target_shmat(cpu_env, arg1, arg2, arg3);
11157 #endif
11158 #ifdef TARGET_NR_shmdt
11159     case TARGET_NR_shmdt:
11160         return target_shmdt(arg1);
11161 #endif
11162     case TARGET_NR_fsync:
11163         return get_errno(fsync(arg1));
11164     case TARGET_NR_clone:
11165         /* Linux manages to have three different orderings for its
11166          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11167          * match the kernel's CONFIG_CLONE_* settings.
11168          * Microblaze is further special in that it uses a sixth
11169          * implicit argument to clone for the TLS pointer.
11170          */
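              /*
               * For reference, the kernel orderings are roughly:
               *   default:     clone(flags, newsp, ptid, ctid, tls)
               *   BACKWARDS:   clone(flags, newsp, ptid, tls, ctid)
               *   BACKWARDS2:  clone(newsp, flags, ptid, ctid, tls)
               * and the argument shuffles below map each of them onto do_fork().
               */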
11171 #if defined(TARGET_MICROBLAZE)
11172         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11173 #elif defined(TARGET_CLONE_BACKWARDS)
11174         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11175 #elif defined(TARGET_CLONE_BACKWARDS2)
11176         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11177 #else
11178         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11179 #endif
11180         return ret;
11181 #ifdef __NR_exit_group
11182         /* new thread calls */
11183     case TARGET_NR_exit_group:
11184         preexit_cleanup(cpu_env, arg1);
11185         return get_errno(exit_group(arg1));
11186 #endif
11187     case TARGET_NR_setdomainname:
11188         if (!(p = lock_user_string(arg1)))
11189             return -TARGET_EFAULT;
11190         ret = get_errno(setdomainname(p, arg2));
11191         unlock_user(p, arg1, 0);
11192         return ret;
11193     case TARGET_NR_uname:
11194         /* no need to transcode because we use the linux syscall */
11195         {
11196             struct new_utsname * buf;
11197 
11198             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11199                 return -TARGET_EFAULT;
11200             ret = get_errno(sys_uname(buf));
11201             if (!is_error(ret)) {
11202                 /* Overwrite the native machine name with whatever is being
11203                    emulated. */
11204                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11205                           sizeof(buf->machine));
11206                 /* Allow the user to override the reported release.  */
11207                 if (qemu_uname_release && *qemu_uname_release) {
11208                     g_strlcpy(buf->release, qemu_uname_release,
11209                               sizeof(buf->release));
11210                 }
11211             }
11212             unlock_user_struct(buf, arg1, 1);
11213         }
11214         return ret;
11215 #ifdef TARGET_I386
11216     case TARGET_NR_modify_ldt:
11217         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11218 #if !defined(TARGET_X86_64)
11219     case TARGET_NR_vm86:
11220         return do_vm86(cpu_env, arg1, arg2);
11221 #endif
11222 #endif
11223 #if defined(TARGET_NR_adjtimex)
11224     case TARGET_NR_adjtimex:
11225         {
11226             struct timex host_buf;
11227 
11228             if (target_to_host_timex(&host_buf, arg1) != 0) {
11229                 return -TARGET_EFAULT;
11230             }
11231             ret = get_errno(adjtimex(&host_buf));
11232             if (!is_error(ret)) {
11233                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11234                     return -TARGET_EFAULT;
11235                 }
11236             }
11237         }
11238         return ret;
11239 #endif
11240 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11241     case TARGET_NR_clock_adjtime:
11242         {
11243             struct timex htx;
11244 
11245             if (target_to_host_timex(&htx, arg2) != 0) {
11246                 return -TARGET_EFAULT;
11247             }
11248             ret = get_errno(clock_adjtime(arg1, &htx));
11249             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11250                 return -TARGET_EFAULT;
11251             }
11252         }
11253         return ret;
11254 #endif
11255 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11256     case TARGET_NR_clock_adjtime64:
11257         {
11258             struct timex htx;
11259 
11260             if (target_to_host_timex64(&htx, arg2) != 0) {
11261                 return -TARGET_EFAULT;
11262             }
11263             ret = get_errno(clock_adjtime(arg1, &htx));
11264             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11265                     return -TARGET_EFAULT;
11266             }
11267         }
11268         return ret;
11269 #endif
11270     case TARGET_NR_getpgid:
11271         return get_errno(getpgid(arg1));
11272     case TARGET_NR_fchdir:
11273         return get_errno(fchdir(arg1));
11274     case TARGET_NR_personality:
11275         return get_errno(personality(arg1));
11276 #ifdef TARGET_NR__llseek /* Not on alpha */
11277     case TARGET_NR__llseek:
11278         {
11279             int64_t res;
11280 #if !defined(__NR_llseek)
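                  /*
                   * No host _llseek: rebuild the 64-bit offset from its two 32-bit
                   * halves (e.g. high == 1, low == 0 gives 1ULL << 32 == 4 GiB) and
                   * fall back to plain lseek.
                   */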
11281             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11282             if (res == -1) {
11283                 ret = get_errno(res);
11284             } else {
11285                 ret = 0;
11286             }
11287 #else
11288             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11289 #endif
11290             if ((ret == 0) && put_user_s64(res, arg4)) {
11291                 return -TARGET_EFAULT;
11292             }
11293         }
11294         return ret;
11295 #endif
11296 #ifdef TARGET_NR_getdents
11297     case TARGET_NR_getdents:
11298         return do_getdents(arg1, arg2, arg3);
11299 #endif /* TARGET_NR_getdents */
11300 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11301     case TARGET_NR_getdents64:
11302         return do_getdents64(arg1, arg2, arg3);
11303 #endif /* TARGET_NR_getdents64 */
11304 #if defined(TARGET_NR__newselect)
11305     case TARGET_NR__newselect:
11306         return do_select(arg1, arg2, arg3, arg4, arg5);
11307 #endif
11308 #ifdef TARGET_NR_poll
11309     case TARGET_NR_poll:
11310         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11311 #endif
11312 #ifdef TARGET_NR_ppoll
11313     case TARGET_NR_ppoll:
11314         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11315 #endif
11316 #ifdef TARGET_NR_ppoll_time64
11317     case TARGET_NR_ppoll_time64:
11318         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11319 #endif
11320     case TARGET_NR_flock:
11321         /* NOTE: the flock constant seems to be the same for every
11322            Linux platform */
11323         return get_errno(safe_flock(arg1, arg2));
11324     case TARGET_NR_readv:
11325         {
11326             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11327             if (vec != NULL) {
11328                 ret = get_errno(safe_readv(arg1, vec, arg3));
11329                 unlock_iovec(vec, arg2, arg3, 1);
11330             } else {
11331                 ret = -host_to_target_errno(errno);
11332             }
11333         }
11334         return ret;
11335     case TARGET_NR_writev:
11336         {
11337             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11338             if (vec != NULL) {
11339                 ret = get_errno(safe_writev(arg1, vec, arg3));
11340                 unlock_iovec(vec, arg2, arg3, 0);
11341             } else {
11342                 ret = -host_to_target_errno(errno);
11343             }
11344         }
11345         return ret;
11346 #if defined(TARGET_NR_preadv)
11347     case TARGET_NR_preadv:
11348         {
11349             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11350             if (vec != NULL) {
11351                 unsigned long low, high;
11352 
11353                 target_to_host_low_high(arg4, arg5, &low, &high);
11354                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11355                 unlock_iovec(vec, arg2, arg3, 1);
11356             } else {
11357                 ret = -host_to_target_errno(errno);
11358             }
11359         }
11360         return ret;
11361 #endif
11362 #if defined(TARGET_NR_pwritev)
11363     case TARGET_NR_pwritev:
11364         {
11365             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11366             if (vec != NULL) {
11367                 unsigned long low, high;
11368 
11369                 target_to_host_low_high(arg4, arg5, &low, &high);
11370                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11371                 unlock_iovec(vec, arg2, arg3, 0);
11372             } else {
11373                 ret = -host_to_target_errno(errno);
11374             }
11375         }
11376         return ret;
11377 #endif
11378     case TARGET_NR_getsid:
11379         return get_errno(getsid(arg1));
11380 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11381     case TARGET_NR_fdatasync:
11382         return get_errno(fdatasync(arg1));
11383 #endif
11384     case TARGET_NR_sched_getaffinity:
11385         {
11386             unsigned int mask_size;
11387             unsigned long *mask;
11388 
11389             /*
11390              * sched_getaffinity needs multiples of ulong, so need to take
11391              * care of mismatches between target ulong and host ulong sizes.
11392              */
11393             if (arg2 & (sizeof(abi_ulong) - 1)) {
11394                 return -TARGET_EINVAL;
11395             }
11396             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
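                  /*
                   * e.g. a 32-bit guest passing arg2 == 4 on a 64-bit host rounds
                   * mask_size up to sizeof(unsigned long) == 8 here.
                   */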
11397 
11398             mask = alloca(mask_size);
11399             memset(mask, 0, mask_size);
11400             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11401 
11402             if (!is_error(ret)) {
11403                 if (ret > arg2) {
11404                     /* More data returned than the caller's buffer will fit.
11405                      * This only happens if sizeof(abi_long) < sizeof(long)
11406                      * and the caller passed us a buffer holding an odd number
11407                      * of abi_longs. If the host kernel is actually using the
11408                      * extra 4 bytes then fail EINVAL; otherwise we can just
11409                      * ignore them and only copy the interesting part.
11410                      */
11411                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11412                     if (numcpus > arg2 * 8) {
11413                         return -TARGET_EINVAL;
11414                     }
11415                     ret = arg2;
11416                 }
11417 
11418                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11419                     return -TARGET_EFAULT;
11420                 }
11421             }
11422         }
11423         return ret;
11424     case TARGET_NR_sched_setaffinity:
11425         {
11426             unsigned int mask_size;
11427             unsigned long *mask;
11428 
11429             /*
11430              * sched_setaffinity needs multiples of ulong, so need to take
11431              * care of mismatches between target ulong and host ulong sizes.
11432              */
11433             if (arg2 & (sizeof(abi_ulong) - 1)) {
11434                 return -TARGET_EINVAL;
11435             }
11436             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11437             mask = alloca(mask_size);
11438 
11439             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11440             if (ret) {
11441                 return ret;
11442             }
11443 
11444             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11445         }
11446     case TARGET_NR_getcpu:
11447         {
11448             unsigned cpuid, node;
11449             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11450                                        arg2 ? &node : NULL,
11451                                        NULL));
11452             if (is_error(ret)) {
11453                 return ret;
11454             }
11455             if (arg1 && put_user_u32(cpuid, arg1)) {
11456                 return -TARGET_EFAULT;
11457             }
11458             if (arg2 && put_user_u32(node, arg2)) {
11459                 return -TARGET_EFAULT;
11460             }
11461         }
11462         return ret;
11463     case TARGET_NR_sched_setparam:
11464         {
11465             struct target_sched_param *target_schp;
11466             struct sched_param schp;
11467 
11468             if (arg2 == 0) {
11469                 return -TARGET_EINVAL;
11470             }
11471             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11472                 return -TARGET_EFAULT;
11473             }
11474             schp.sched_priority = tswap32(target_schp->sched_priority);
11475             unlock_user_struct(target_schp, arg2, 0);
11476             return get_errno(sys_sched_setparam(arg1, &schp));
11477         }
11478     case TARGET_NR_sched_getparam:
11479         {
11480             struct target_sched_param *target_schp;
11481             struct sched_param schp;
11482 
11483             if (arg2 == 0) {
11484                 return -TARGET_EINVAL;
11485             }
11486             ret = get_errno(sys_sched_getparam(arg1, &schp));
11487             if (!is_error(ret)) {
11488                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11489                     return -TARGET_EFAULT;
11490                 }
11491                 target_schp->sched_priority = tswap32(schp.sched_priority);
11492                 unlock_user_struct(target_schp, arg2, 1);
11493             }
11494         }
11495         return ret;
11496     case TARGET_NR_sched_setscheduler:
11497         {
11498             struct target_sched_param *target_schp;
11499             struct sched_param schp;
11500             if (arg3 == 0) {
11501                 return -TARGET_EINVAL;
11502             }
11503             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11504                 return -TARGET_EFAULT;
11505             }
11506             schp.sched_priority = tswap32(target_schp->sched_priority);
11507             unlock_user_struct(target_schp, arg3, 0);
11508             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11509         }
11510     case TARGET_NR_sched_getscheduler:
11511         return get_errno(sys_sched_getscheduler(arg1));
11512     case TARGET_NR_sched_getattr:
11513         {
11514             struct target_sched_attr *target_scha;
11515             struct sched_attr scha;
11516             if (arg2 == 0) {
11517                 return -TARGET_EINVAL;
11518             }
11519             if (arg3 > sizeof(scha)) {
11520                 arg3 = sizeof(scha);
11521             }
11522             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11523             if (!is_error(ret)) {
11524                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11525                 if (!target_scha) {
11526                     return -TARGET_EFAULT;
11527                 }
11528                 target_scha->size = tswap32(scha.size);
11529                 target_scha->sched_policy = tswap32(scha.sched_policy);
11530                 target_scha->sched_flags = tswap64(scha.sched_flags);
11531                 target_scha->sched_nice = tswap32(scha.sched_nice);
11532                 target_scha->sched_priority = tswap32(scha.sched_priority);
11533                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11534                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11535                 target_scha->sched_period = tswap64(scha.sched_period);
11536                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11537                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11538                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11539                 }
11540                 unlock_user(target_scha, arg2, arg3);
11541             }
11542             return ret;
11543         }
11544     case TARGET_NR_sched_setattr:
11545         {
11546             struct target_sched_attr *target_scha;
11547             struct sched_attr scha;
11548             uint32_t size;
11549             int zeroed;
11550             if (arg2 == 0) {
11551                 return -TARGET_EINVAL;
11552             }
11553             if (get_user_u32(size, arg2)) {
11554                 return -TARGET_EFAULT;
11555             }
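                  /*
                   * sched_attr is an extensible struct and the guest supplies its
                   * idea of the size: 0 means the original layout (fields up to
                   * sched_util_min), and anything below that minimum, or with
                   * non-zero bytes past the part we understand, is answered with
                   * E2BIG and the size we do support written back to arg2.
                   */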
11556             if (!size) {
11557                 size = offsetof(struct target_sched_attr, sched_util_min);
11558             }
11559             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11560                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11561                     return -TARGET_EFAULT;
11562                 }
11563                 return -TARGET_E2BIG;
11564             }
11565 
11566             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11567             if (zeroed < 0) {
11568                 return zeroed;
11569             } else if (zeroed == 0) {
11570                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11571                     return -TARGET_EFAULT;
11572                 }
11573                 return -TARGET_E2BIG;
11574             }
11575             if (size > sizeof(struct target_sched_attr)) {
11576                 size = sizeof(struct target_sched_attr);
11577             }
11578 
11579             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11580             if (!target_scha) {
11581                 return -TARGET_EFAULT;
11582             }
11583             scha.size = size;
11584             scha.sched_policy = tswap32(target_scha->sched_policy);
11585             scha.sched_flags = tswap64(target_scha->sched_flags);
11586             scha.sched_nice = tswap32(target_scha->sched_nice);
11587             scha.sched_priority = tswap32(target_scha->sched_priority);
11588             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11589             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11590             scha.sched_period = tswap64(target_scha->sched_period);
11591             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11592                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11593                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11594             }
11595             unlock_user(target_scha, arg2, 0);
11596             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11597         }
11598     case TARGET_NR_sched_yield:
11599         return get_errno(sched_yield());
11600     case TARGET_NR_sched_get_priority_max:
11601         return get_errno(sched_get_priority_max(arg1));
11602     case TARGET_NR_sched_get_priority_min:
11603         return get_errno(sched_get_priority_min(arg1));
11604 #ifdef TARGET_NR_sched_rr_get_interval
11605     case TARGET_NR_sched_rr_get_interval:
11606         {
11607             struct timespec ts;
11608             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11609             if (!is_error(ret)) {
11610                 ret = host_to_target_timespec(arg2, &ts);
11611             }
11612         }
11613         return ret;
11614 #endif
11615 #ifdef TARGET_NR_sched_rr_get_interval_time64
11616     case TARGET_NR_sched_rr_get_interval_time64:
11617         {
11618             struct timespec ts;
11619             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11620             if (!is_error(ret)) {
11621                 ret = host_to_target_timespec64(arg2, &ts);
11622             }
11623         }
11624         return ret;
11625 #endif
11626 #if defined(TARGET_NR_nanosleep)
11627     case TARGET_NR_nanosleep:
11628         {
11629             struct timespec req, rem;
11630             target_to_host_timespec(&req, arg1);
11631             ret = get_errno(safe_nanosleep(&req, &rem));
11632             if (is_error(ret) && arg2) {
11633                 host_to_target_timespec(arg2, &rem);
11634             }
11635         }
11636         return ret;
11637 #endif
11638     case TARGET_NR_prctl:
11639         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11641 #ifdef TARGET_NR_arch_prctl
11642     case TARGET_NR_arch_prctl:
11643         return do_arch_prctl(cpu_env, arg1, arg2);
11644 #endif
11645 #ifdef TARGET_NR_pread64
11646     case TARGET_NR_pread64:
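              /*
               * On ABIs that pass 64-bit syscall arguments in aligned register
               * pairs (e.g. 32-bit ARM EABI), a padding slot precedes the 64-bit
               * offset, so its low/high halves arrive one argument later; the
               * shift below compensates for that.
               */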
11647         if (regpairs_aligned(cpu_env, num)) {
11648             arg4 = arg5;
11649             arg5 = arg6;
11650         }
11651         if (arg2 == 0 && arg3 == 0) {
11652             /* Special-case NULL buffer and zero length, which should succeed */
11653             p = 0;
11654         } else {
11655             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11656             if (!p) {
11657                 return -TARGET_EFAULT;
11658             }
11659         }
11660         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11661         unlock_user(p, arg2, ret);
11662         return ret;
11663     case TARGET_NR_pwrite64:
11664         if (regpairs_aligned(cpu_env, num)) {
11665             arg4 = arg5;
11666             arg5 = arg6;
11667         }
11668         if (arg2 == 0 && arg3 == 0) {
11669             /* Special-case NULL buffer and zero length, which should succeed */
11670             p = 0;
11671         } else {
11672             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11673             if (!p) {
11674                 return -TARGET_EFAULT;
11675             }
11676         }
11677         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11678         unlock_user(p, arg2, 0);
11679         return ret;
11680 #endif
11681     case TARGET_NR_getcwd:
11682         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11683             return -TARGET_EFAULT;
11684         ret = get_errno(sys_getcwd1(p, arg2));
11685         unlock_user(p, arg1, ret);
11686         return ret;
11687     case TARGET_NR_capget:
11688     case TARGET_NR_capset:
11689     {
11690         struct target_user_cap_header *target_header;
11691         struct target_user_cap_data *target_data = NULL;
11692         struct __user_cap_header_struct header;
11693         struct __user_cap_data_struct data[2];
11694         struct __user_cap_data_struct *dataptr = NULL;
11695         int i, target_datalen;
11696         int data_items = 1;
11697 
11698         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11699             return -TARGET_EFAULT;
11700         }
11701         header.version = tswap32(target_header->version);
11702         header.pid = tswap32(target_header->pid);
11703 
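              /*
               * _LINUX_CAPABILITY_VERSION is the v1 ABI with a single 32-bit
               * capability set per field; the v2/v3 ABIs instead take an array of
               * two __user_cap_data_struct entries to cover 64 capability bits.
               */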
11704         if (header.version != _LINUX_CAPABILITY_VERSION) {
11705             /* Version 2 and up takes pointer to two user_data structs */
11706             data_items = 2;
11707         }
11708 
11709         target_datalen = sizeof(*target_data) * data_items;
11710 
11711         if (arg2) {
11712             if (num == TARGET_NR_capget) {
11713                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11714             } else {
11715                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11716             }
11717             if (!target_data) {
11718                 unlock_user_struct(target_header, arg1, 0);
11719                 return -TARGET_EFAULT;
11720             }
11721 
11722             if (num == TARGET_NR_capset) {
11723                 for (i = 0; i < data_items; i++) {
11724                     data[i].effective = tswap32(target_data[i].effective);
11725                     data[i].permitted = tswap32(target_data[i].permitted);
11726                     data[i].inheritable = tswap32(target_data[i].inheritable);
11727                 }
11728             }
11729 
11730             dataptr = data;
11731         }
11732 
11733         if (num == TARGET_NR_capget) {
11734             ret = get_errno(capget(&header, dataptr));
11735         } else {
11736             ret = get_errno(capset(&header, dataptr));
11737         }
11738 
11739         /* The kernel always updates version for both capget and capset */
11740         target_header->version = tswap32(header.version);
11741         unlock_user_struct(target_header, arg1, 1);
11742 
11743         if (arg2) {
11744             if (num == TARGET_NR_capget) {
11745                 for (i = 0; i < data_items; i++) {
11746                     target_data[i].effective = tswap32(data[i].effective);
11747                     target_data[i].permitted = tswap32(data[i].permitted);
11748                     target_data[i].inheritable = tswap32(data[i].inheritable);
11749                 }
11750                 unlock_user(target_data, arg2, target_datalen);
11751             } else {
11752                 unlock_user(target_data, arg2, 0);
11753             }
11754         }
11755         return ret;
11756     }
11757     case TARGET_NR_sigaltstack:
11758         return do_sigaltstack(arg1, arg2, cpu_env);
11759 
11760 #ifdef CONFIG_SENDFILE
11761 #ifdef TARGET_NR_sendfile
11762     case TARGET_NR_sendfile:
11763     {
11764         off_t *offp = NULL;
11765         off_t off;
11766         if (arg3) {
11767             ret = get_user_sal(off, arg3);
11768             if (is_error(ret)) {
11769                 return ret;
11770             }
11771             offp = &off;
11772         }
11773         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11774         if (!is_error(ret) && arg3) {
11775             abi_long ret2 = put_user_sal(off, arg3);
11776             if (is_error(ret2)) {
11777                 ret = ret2;
11778             }
11779         }
11780         return ret;
11781     }
11782 #endif
11783 #ifdef TARGET_NR_sendfile64
11784     case TARGET_NR_sendfile64:
11785     {
11786         off_t *offp = NULL;
11787         off_t off;
11788         if (arg3) {
11789             ret = get_user_s64(off, arg3);
11790             if (is_error(ret)) {
11791                 return ret;
11792             }
11793             offp = &off;
11794         }
11795         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11796         if (!is_error(ret) && arg3) {
11797             abi_long ret2 = put_user_s64(off, arg3);
11798             if (is_error(ret2)) {
11799                 ret = ret2;
11800             }
11801         }
11802         return ret;
11803     }
11804 #endif
11805 #endif
11806 #ifdef TARGET_NR_vfork
11807     case TARGET_NR_vfork:
11808         return get_errno(do_fork(cpu_env,
11809                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11810                          0, 0, 0, 0));
11811 #endif
11812 #ifdef TARGET_NR_ugetrlimit
11813     case TARGET_NR_ugetrlimit:
11814     {
11815         struct rlimit rlim;
11816         int resource = target_to_host_resource(arg1);
11817         ret = get_errno(getrlimit(resource, &rlim));
11818         if (!is_error(ret)) {
11819             struct target_rlimit *target_rlim;
11820             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11821                 return -TARGET_EFAULT;
11822             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11823             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11824             unlock_user_struct(target_rlim, arg2, 1);
11825         }
11826         return ret;
11827     }
11828 #endif
11829 #ifdef TARGET_NR_truncate64
11830     case TARGET_NR_truncate64:
11831         if (!(p = lock_user_string(arg1)))
11832             return -TARGET_EFAULT;
11833         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11834         unlock_user(p, arg1, 0);
11835         return ret;
11836 #endif
11837 #ifdef TARGET_NR_ftruncate64
11838     case TARGET_NR_ftruncate64:
11839         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11840 #endif
11841 #ifdef TARGET_NR_stat64
11842     case TARGET_NR_stat64:
11843         if (!(p = lock_user_string(arg1))) {
11844             return -TARGET_EFAULT;
11845         }
11846         ret = get_errno(stat(path(p), &st));
11847         unlock_user(p, arg1, 0);
11848         if (!is_error(ret))
11849             ret = host_to_target_stat64(cpu_env, arg2, &st);
11850         return ret;
11851 #endif
11852 #ifdef TARGET_NR_lstat64
11853     case TARGET_NR_lstat64:
11854         if (!(p = lock_user_string(arg1))) {
11855             return -TARGET_EFAULT;
11856         }
11857         ret = get_errno(lstat(path(p), &st));
11858         unlock_user(p, arg1, 0);
11859         if (!is_error(ret))
11860             ret = host_to_target_stat64(cpu_env, arg2, &st);
11861         return ret;
11862 #endif
11863 #ifdef TARGET_NR_fstat64
11864     case TARGET_NR_fstat64:
11865         ret = get_errno(fstat(arg1, &st));
11866         if (!is_error(ret))
11867             ret = host_to_target_stat64(cpu_env, arg2, &st);
11868         return ret;
11869 #endif
11870 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11871 #ifdef TARGET_NR_fstatat64
11872     case TARGET_NR_fstatat64:
11873 #endif
11874 #ifdef TARGET_NR_newfstatat
11875     case TARGET_NR_newfstatat:
11876 #endif
11877         if (!(p = lock_user_string(arg2))) {
11878             return -TARGET_EFAULT;
11879         }
11880         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11881         unlock_user(p, arg2, 0);
11882         if (!is_error(ret))
11883             ret = host_to_target_stat64(cpu_env, arg3, &st);
11884         return ret;
11885 #endif
11886 #if defined(TARGET_NR_statx)
11887     case TARGET_NR_statx:
11888         {
11889             struct target_statx *target_stx;
11890             int dirfd = arg1;
11891             int flags = arg3;
11892 
11893             p = lock_user_string(arg2);
11894             if (p == NULL) {
11895                 return -TARGET_EFAULT;
11896             }
11897 #if defined(__NR_statx)
11898             {
11899                 /*
11900                  * It is assumed that struct statx is architecture independent.
11901                  */
11902                 struct target_statx host_stx;
11903                 int mask = arg4;
11904 
11905                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11906                 if (!is_error(ret)) {
11907                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11908                         unlock_user(p, arg2, 0);
11909                         return -TARGET_EFAULT;
11910                     }
11911                 }
11912 
11913                 if (ret != -TARGET_ENOSYS) {
11914                     unlock_user(p, arg2, 0);
11915                     return ret;
11916                 }
11917             }
11918 #endif
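                  /*
                   * Fall back to fstatat() when the host statx() is absent or
                   * reports ENOSYS, filling in only those statx fields that a
                   * plain struct stat can provide.
                   */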
11919             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11920             unlock_user(p, arg2, 0);
11921 
11922             if (!is_error(ret)) {
11923                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11924                     return -TARGET_EFAULT;
11925                 }
11926                 memset(target_stx, 0, sizeof(*target_stx));
11927                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11928                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11929                 __put_user(st.st_ino, &target_stx->stx_ino);
11930                 __put_user(st.st_mode, &target_stx->stx_mode);
11931                 __put_user(st.st_uid, &target_stx->stx_uid);
11932                 __put_user(st.st_gid, &target_stx->stx_gid);
11933                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11934                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11935                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11936                 __put_user(st.st_size, &target_stx->stx_size);
11937                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11938                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11939                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11940                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11941                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11942                 unlock_user_struct(target_stx, arg5, 1);
11943             }
11944         }
11945         return ret;
11946 #endif
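          /*
           * The legacy (non-"32") UID/GID syscalls use 16-bit IDs on targets
           * that still have them; the low2high and high2low helpers convert
           * between the 16-bit guest values and the host's full-width IDs.
           */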
11947 #ifdef TARGET_NR_lchown
11948     case TARGET_NR_lchown:
11949         if (!(p = lock_user_string(arg1)))
11950             return -TARGET_EFAULT;
11951         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11952         unlock_user(p, arg1, 0);
11953         return ret;
11954 #endif
11955 #ifdef TARGET_NR_getuid
11956     case TARGET_NR_getuid:
11957         return get_errno(high2lowuid(getuid()));
11958 #endif
11959 #ifdef TARGET_NR_getgid
11960     case TARGET_NR_getgid:
11961         return get_errno(high2lowgid(getgid()));
11962 #endif
11963 #ifdef TARGET_NR_geteuid
11964     case TARGET_NR_geteuid:
11965         return get_errno(high2lowuid(geteuid()));
11966 #endif
11967 #ifdef TARGET_NR_getegid
11968     case TARGET_NR_getegid:
11969         return get_errno(high2lowgid(getegid()));
11970 #endif
11971     case TARGET_NR_setreuid:
11972         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11973     case TARGET_NR_setregid:
11974         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11975     case TARGET_NR_getgroups:
11976         { /* the same code as for TARGET_NR_getgroups32 */
11977             int gidsetsize = arg1;
11978             target_id *target_grouplist;
11979             g_autofree gid_t *grouplist = NULL;
11980             int i;
11981 
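                  /*
                   * A gidsetsize of 0 is a valid query for the number of
                   * supplementary groups; in that case no guest buffer is
                   * touched and getgroups() is called with a NULL list.
                   */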
11982             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11983                 return -TARGET_EINVAL;
11984             }
11985             if (gidsetsize > 0) {
11986                 grouplist = g_try_new(gid_t, gidsetsize);
11987                 if (!grouplist) {
11988                     return -TARGET_ENOMEM;
11989                 }
11990             }
11991             ret = get_errno(getgroups(gidsetsize, grouplist));
11992             if (!is_error(ret) && gidsetsize > 0) {
11993                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11994                                              gidsetsize * sizeof(target_id), 0);
11995                 if (!target_grouplist) {
11996                     return -TARGET_EFAULT;
11997                 }
11998                 for (i = 0; i < ret; i++) {
11999                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
12000                 }
12001                 unlock_user(target_grouplist, arg2,
12002                             gidsetsize * sizeof(target_id));
12003             }
12004             return ret;
12005         }
12006     case TARGET_NR_setgroups:
12007         { /* the same code as for TARGET_NR_setgroups32 */
12008             int gidsetsize = arg1;
12009             target_id *target_grouplist;
12010             g_autofree gid_t *grouplist = NULL;
12011             int i;
12012 
12013             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12014                 return -TARGET_EINVAL;
12015             }
12016             if (gidsetsize > 0) {
12017                 grouplist = g_try_new(gid_t, gidsetsize);
12018                 if (!grouplist) {
12019                     return -TARGET_ENOMEM;
12020                 }
12021                 target_grouplist = lock_user(VERIFY_READ, arg2,
12022                                              gidsetsize * sizeof(target_id), 1);
12023                 if (!target_grouplist) {
12024                     return -TARGET_EFAULT;
12025                 }
12026                 for (i = 0; i < gidsetsize; i++) {
12027                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12028                 }
12029                 unlock_user(target_grouplist, arg2,
12030                             gidsetsize * sizeof(target_id));
12031             }
12032             return get_errno(sys_setgroups(gidsetsize, grouplist));
12033         }
12034     case TARGET_NR_fchown:
12035         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12036 #if defined(TARGET_NR_fchownat)
12037     case TARGET_NR_fchownat:
12038         if (!(p = lock_user_string(arg2)))
12039             return -TARGET_EFAULT;
12040         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12041                                  low2highgid(arg4), arg5));
12042         unlock_user(p, arg2, 0);
12043         return ret;
12044 #endif
12045 #ifdef TARGET_NR_setresuid
12046     case TARGET_NR_setresuid:
12047         return get_errno(sys_setresuid(low2highuid(arg1),
12048                                        low2highuid(arg2),
12049                                        low2highuid(arg3)));
12050 #endif
12051 #ifdef TARGET_NR_getresuid
12052     case TARGET_NR_getresuid:
12053         {
12054             uid_t ruid, euid, suid;
12055             ret = get_errno(getresuid(&ruid, &euid, &suid));
12056             if (!is_error(ret)) {
12057                 if (put_user_id(high2lowuid(ruid), arg1)
12058                     || put_user_id(high2lowuid(euid), arg2)
12059                     || put_user_id(high2lowuid(suid), arg3))
12060                     return -TARGET_EFAULT;
12061             }
12062         }
12063         return ret;
12064 #endif
12065 #ifdef TARGET_NR_setresgid
12066     case TARGET_NR_setresgid:
12067         return get_errno(sys_setresgid(low2highgid(arg1),
12068                                        low2highgid(arg2),
12069                                        low2highgid(arg3)));
12070 #endif
12071 #ifdef TARGET_NR_getresgid
12072     case TARGET_NR_getresgid:
12073         {
12074             gid_t rgid, egid, sgid;
12075             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12076             if (!is_error(ret)) {
12077                 if (put_user_id(high2lowgid(rgid), arg1)
12078                     || put_user_id(high2lowgid(egid), arg2)
12079                     || put_user_id(high2lowgid(sgid), arg3))
12080                     return -TARGET_EFAULT;
12081             }
12082         }
12083         return ret;
12084 #endif
12085 #ifdef TARGET_NR_chown
12086     case TARGET_NR_chown:
12087         if (!(p = lock_user_string(arg1)))
12088             return -TARGET_EFAULT;
12089         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12090         unlock_user(p, arg1, 0);
12091         return ret;
12092 #endif
12093     case TARGET_NR_setuid:
12094         return get_errno(sys_setuid(low2highuid(arg1)));
12095     case TARGET_NR_setgid:
12096         return get_errno(sys_setgid(low2highgid(arg1)));
12097     case TARGET_NR_setfsuid:
12098         return get_errno(setfsuid(arg1));
12099     case TARGET_NR_setfsgid:
12100         return get_errno(setfsgid(arg1));
12101 
12102 #ifdef TARGET_NR_lchown32
12103     case TARGET_NR_lchown32:
12104         if (!(p = lock_user_string(arg1)))
12105             return -TARGET_EFAULT;
12106         ret = get_errno(lchown(p, arg2, arg3));
12107         unlock_user(p, arg1, 0);
12108         return ret;
12109 #endif
12110 #ifdef TARGET_NR_getuid32
12111     case TARGET_NR_getuid32:
12112         return get_errno(getuid());
12113 #endif
12114 
12115 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12116    /* Alpha specific */
12117     case TARGET_NR_getxuid:
12118          {
12119             uid_t euid;
12120             euid = geteuid();
12121             cpu_env->ir[IR_A4] = euid;
12122          }
12123         return get_errno(getuid());
12124 #endif
12125 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12126    /* Alpha specific */
12127     case TARGET_NR_getxgid:
12128          {
12129             gid_t egid;
12130             egid = getegid();
12131             cpu_env->ir[IR_A4] = egid;
12132          }
12133         return get_errno(getgid());
12134 #endif
12135 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12136     /* Alpha specific */
12137     case TARGET_NR_osf_getsysinfo:
12138         ret = -TARGET_EOPNOTSUPP;
12139         switch (arg1) {
12140           case TARGET_GSI_IEEE_FP_CONTROL:
12141             {
12142                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12143                 uint64_t swcr = cpu_env->swcr;
12144 
12145                 swcr &= ~SWCR_STATUS_MASK;
12146                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12147 
12148                 if (put_user_u64(swcr, arg2))
12149                     return -TARGET_EFAULT;
12150                 ret = 0;
12151             }
12152             break;
12153 
12154           /* case GSI_IEEE_STATE_AT_SIGNAL:
12155              -- Not implemented in linux kernel.
12156              case GSI_UACPROC:
12157              -- Retrieves current unaligned access state; not much used.
12158              case GSI_PROC_TYPE:
12159              -- Retrieves implver information; surely not used.
12160              case GSI_GET_HWRPB:
12161              -- Grabs a copy of the HWRPB; surely not used.
12162           */
12163         }
12164         return ret;
12165 #endif
12166 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12167     /* Alpha specific */
12168     case TARGET_NR_osf_setsysinfo:
12169         ret = -TARGET_EOPNOTSUPP;
12170         switch (arg1) {
12171           case TARGET_SSI_IEEE_FP_CONTROL:
12172             {
12173                 uint64_t swcr, fpcr;
12174 
12175                 if (get_user_u64(swcr, arg2)) {
12176                     return -TARGET_EFAULT;
12177                 }
12178 
12179                 /*
12180                  * The kernel calls swcr_update_status to update the
12181                  * status bits from the fpcr at every point that it
12182                  * could be queried.  Therefore, we store the status
12183                  * bits only in FPCR.
12184                  */
12185                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12186 
12187                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12188                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12189                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12190                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12191                 ret = 0;
12192             }
12193             break;
12194 
12195           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12196             {
12197                 uint64_t exc, fpcr, fex;
12198 
12199                 if (get_user_u64(exc, arg2)) {
12200                     return -TARGET_EFAULT;
12201                 }
12202                 exc &= SWCR_STATUS_MASK;
12203                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12204 
12205                 /* Old exceptions are not signaled.  */
12206                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12207                 fex = exc & ~fex;
12208                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12209                 fex &= (cpu_env)->swcr;
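                      /*
                       * fex now holds only the newly raised exception bits
                       * whose traps are enabled, i.e. exactly those that must
                       * generate SIGFPE below.
                       */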
12210 
12211                 /* Update the hardware fpcr.  */
12212                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12213                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12214 
12215                 if (fex) {
12216                     int si_code = TARGET_FPE_FLTUNK;
12217                     target_siginfo_t info;
12218 
12219                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12220                         si_code = TARGET_FPE_FLTUND;
12221                     }
12222                     if (fex & SWCR_TRAP_ENABLE_INE) {
12223                         si_code = TARGET_FPE_FLTRES;
12224                     }
12225                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12226                         si_code = TARGET_FPE_FLTUND;
12227                     }
12228                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12229                         si_code = TARGET_FPE_FLTOVF;
12230                     }
12231                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12232                         si_code = TARGET_FPE_FLTDIV;
12233                     }
12234                     if (fex & SWCR_TRAP_ENABLE_INV) {
12235                         si_code = TARGET_FPE_FLTINV;
12236                     }
12237 
12238                     info.si_signo = SIGFPE;
12239                     info.si_errno = 0;
12240                     info.si_code = si_code;
12241                     info._sifields._sigfault._addr = (cpu_env)->pc;
12242                     queue_signal(cpu_env, info.si_signo,
12243                                  QEMU_SI_FAULT, &info);
12244                 }
12245                 ret = 0;
12246             }
12247             break;
12248 
12249           /* case SSI_NVPAIRS:
12250              -- Used with SSIN_UACPROC to enable unaligned accesses.
12251              case SSI_IEEE_STATE_AT_SIGNAL:
12252              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12253              -- Not implemented in linux kernel
12254           */
12255         }
12256         return ret;
12257 #endif
12258 #ifdef TARGET_NR_osf_sigprocmask
12259     /* Alpha specific.  */
12260     case TARGET_NR_osf_sigprocmask:
12261         {
12262             abi_ulong mask;
12263             int how;
12264             sigset_t set, oldset;
12265 
12266             switch(arg1) {
12267             case TARGET_SIG_BLOCK:
12268                 how = SIG_BLOCK;
12269                 break;
12270             case TARGET_SIG_UNBLOCK:
12271                 how = SIG_UNBLOCK;
12272                 break;
12273             case TARGET_SIG_SETMASK:
12274                 how = SIG_SETMASK;
12275                 break;
12276             default:
12277                 return -TARGET_EINVAL;
12278             }
12279             mask = arg2;
12280             target_to_host_old_sigset(&set, &mask);
12281             ret = do_sigprocmask(how, &set, &oldset);
12282             if (!ret) {
12283                 host_to_target_old_sigset(&mask, &oldset);
12284                 ret = mask;
12285             }
12286         }
12287         return ret;
12288 #endif
12289 
12290 #ifdef TARGET_NR_getgid32
12291     case TARGET_NR_getgid32:
12292         return get_errno(getgid());
12293 #endif
12294 #ifdef TARGET_NR_geteuid32
12295     case TARGET_NR_geteuid32:
12296         return get_errno(geteuid());
12297 #endif
12298 #ifdef TARGET_NR_getegid32
12299     case TARGET_NR_getegid32:
12300         return get_errno(getegid());
12301 #endif
12302 #ifdef TARGET_NR_setreuid32
12303     case TARGET_NR_setreuid32:
12304         return get_errno(sys_setreuid(arg1, arg2));
12305 #endif
12306 #ifdef TARGET_NR_setregid32
12307     case TARGET_NR_setregid32:
12308         return get_errno(sys_setregid(arg1, arg2));
12309 #endif
12310 #ifdef TARGET_NR_getgroups32
12311     case TARGET_NR_getgroups32:
12312         { /* the same code as for TARGET_NR_getgroups */
12313             int gidsetsize = arg1;
12314             uint32_t *target_grouplist;
12315             g_autofree gid_t *grouplist = NULL;
12316             int i;
12317 
12318             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12319                 return -TARGET_EINVAL;
12320             }
12321             if (gidsetsize > 0) {
12322                 grouplist = g_try_new(gid_t, gidsetsize);
12323                 if (!grouplist) {
12324                     return -TARGET_ENOMEM;
12325                 }
12326             }
12327             ret = get_errno(getgroups(gidsetsize, grouplist));
12328             if (!is_error(ret) && gidsetsize > 0) {
12329                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12330                                              gidsetsize * 4, 0);
12331                 if (!target_grouplist) {
12332                     return -TARGET_EFAULT;
12333                 }
12334                 for (i = 0; i < ret; i++) {
12335                     target_grouplist[i] = tswap32(grouplist[i]);
12336                 }
12337                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12338             }
12339             return ret;
12340         }
12341 #endif
12342 #ifdef TARGET_NR_setgroups32
12343     case TARGET_NR_setgroups32:
12344         { /* the same code as for TARGET_NR_setgroups */
12345             int gidsetsize = arg1;
12346             uint32_t *target_grouplist;
12347             g_autofree gid_t *grouplist = NULL;
12348             int i;
12349 
12350             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12351                 return -TARGET_EINVAL;
12352             }
12353             if (gidsetsize > 0) {
12354                 grouplist = g_try_new(gid_t, gidsetsize);
12355                 if (!grouplist) {
12356                     return -TARGET_ENOMEM;
12357                 }
12358                 target_grouplist = lock_user(VERIFY_READ, arg2,
12359                                              gidsetsize * 4, 1);
12360                 if (!target_grouplist) {
12361                     return -TARGET_EFAULT;
12362                 }
12363                 for (i = 0; i < gidsetsize; i++) {
12364                     grouplist[i] = tswap32(target_grouplist[i]);
12365                 }
12366                 unlock_user(target_grouplist, arg2, 0);
12367             }
12368             return get_errno(sys_setgroups(gidsetsize, grouplist));
12369         }
12370 #endif
12371 #ifdef TARGET_NR_fchown32
12372     case TARGET_NR_fchown32:
12373         return get_errno(fchown(arg1, arg2, arg3));
12374 #endif
12375 #ifdef TARGET_NR_setresuid32
12376     case TARGET_NR_setresuid32:
12377         return get_errno(sys_setresuid(arg1, arg2, arg3));
12378 #endif
12379 #ifdef TARGET_NR_getresuid32
12380     case TARGET_NR_getresuid32:
12381         {
12382             uid_t ruid, euid, suid;
12383             ret = get_errno(getresuid(&ruid, &euid, &suid));
12384             if (!is_error(ret)) {
12385                 if (put_user_u32(ruid, arg1)
12386                     || put_user_u32(euid, arg2)
12387                     || put_user_u32(suid, arg3))
12388                     return -TARGET_EFAULT;
12389             }
12390         }
12391         return ret;
12392 #endif
12393 #ifdef TARGET_NR_setresgid32
12394     case TARGET_NR_setresgid32:
12395         return get_errno(sys_setresgid(arg1, arg2, arg3));
12396 #endif
12397 #ifdef TARGET_NR_getresgid32
12398     case TARGET_NR_getresgid32:
12399         {
12400             gid_t rgid, egid, sgid;
12401             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12402             if (!is_error(ret)) {
12403                 if (put_user_u32(rgid, arg1)
12404                     || put_user_u32(egid, arg2)
12405                     || put_user_u32(sgid, arg3))
12406                     return -TARGET_EFAULT;
12407             }
12408         }
12409         return ret;
12410 #endif
12411 #ifdef TARGET_NR_chown32
12412     case TARGET_NR_chown32:
12413         if (!(p = lock_user_string(arg1)))
12414             return -TARGET_EFAULT;
12415         ret = get_errno(chown(p, arg2, arg3));
12416         unlock_user(p, arg1, 0);
12417         return ret;
12418 #endif
12419 #ifdef TARGET_NR_setuid32
12420     case TARGET_NR_setuid32:
12421         return get_errno(sys_setuid(arg1));
12422 #endif
12423 #ifdef TARGET_NR_setgid32
12424     case TARGET_NR_setgid32:
12425         return get_errno(sys_setgid(arg1));
12426 #endif
12427 #ifdef TARGET_NR_setfsuid32
12428     case TARGET_NR_setfsuid32:
12429         return get_errno(setfsuid(arg1));
12430 #endif
12431 #ifdef TARGET_NR_setfsgid32
12432     case TARGET_NR_setfsgid32:
12433         return get_errno(setfsgid(arg1));
12434 #endif
12435 #ifdef TARGET_NR_mincore
12436     case TARGET_NR_mincore:
12437         {
12438             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12439             if (!a) {
12440                 return -TARGET_ENOMEM;
12441             }
12442             p = lock_user_string(arg3);
12443             if (!p) {
12444                 ret = -TARGET_EFAULT;
12445             } else {
12446                 ret = get_errno(mincore(a, arg2, p));
12447                 unlock_user(p, arg3, ret);
12448             }
12449             unlock_user(a, arg1, 0);
12450         }
12451         return ret;
12452 #endif
12453 #ifdef TARGET_NR_arm_fadvise64_64
12454     case TARGET_NR_arm_fadvise64_64:
12455         /* arm_fadvise64_64 looks like fadvise64_64 but
12456          * with different argument order: fd, advice, offset, len
12457          * rather than the usual fd, offset, len, advice.
12458          * Note that offset and len are both 64-bit so appear as
12459          * pairs of 32-bit registers.
12460          */
12461         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12462                             target_offset64(arg5, arg6), arg2);
12463         return -host_to_target_errno(ret);
12464 #endif
12465 
12466 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12467 
12468 #ifdef TARGET_NR_fadvise64_64
12469     case TARGET_NR_fadvise64_64:
12470 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12471         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12472         ret = arg2;
12473         arg2 = arg3;
12474         arg3 = arg4;
12475         arg4 = arg5;
12476         arg5 = arg6;
12477         arg6 = ret;
12478 #else
12479         /* 6 args: fd, offset (high, low), len (high, low), advice */
12480         if (regpairs_aligned(cpu_env, num)) {
12481             /* offset is in (3,4), len in (5,6) and advice in 7 */
12482             arg2 = arg3;
12483             arg3 = arg4;
12484             arg4 = arg5;
12485             arg5 = arg6;
12486             arg6 = arg7;
12487         }
12488 #endif
12489         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12490                             target_offset64(arg4, arg5), arg6);
12491         return -host_to_target_errno(ret);
12492 #endif
12493 
12494 #ifdef TARGET_NR_fadvise64
12495     case TARGET_NR_fadvise64:
12496         /* 5 args: fd, offset (high, low), len, advice */
12497         if (regpairs_aligned(cpu_env, num)) {
12498             /* offset is in (3,4), len in 5 and advice in 6 */
12499             arg2 = arg3;
12500             arg3 = arg4;
12501             arg4 = arg5;
12502             arg5 = arg6;
12503         }
12504         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12505         return -host_to_target_errno(ret);
12506 #endif
12507 
12508 #else /* not a 32-bit ABI */
12509 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12510 #ifdef TARGET_NR_fadvise64_64
12511     case TARGET_NR_fadvise64_64:
12512 #endif
12513 #ifdef TARGET_NR_fadvise64
12514     case TARGET_NR_fadvise64:
12515 #endif
12516 #ifdef TARGET_S390X
12517         switch (arg4) {
12518         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12519         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12520         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12521         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12522         default: break;
12523         }
12524 #endif
12525         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12526 #endif
12527 #endif /* end of 64-bit ABI fadvise handling */
12528 
12529 #ifdef TARGET_NR_madvise
12530     case TARGET_NR_madvise:
12531         return target_madvise(arg1, arg2, arg3);
12532 #endif
12533 #ifdef TARGET_NR_fcntl64
12534     case TARGET_NR_fcntl64:
12535     {
12536         int cmd;
12537         struct flock fl;
12538         from_flock64_fn *copyfrom = copy_from_user_flock64;
12539         to_flock64_fn *copyto = copy_to_user_flock64;
12540 
12541 #ifdef TARGET_ARM
12542         if (!cpu_env->eabi) {
12543             copyfrom = copy_from_user_oabi_flock64;
12544             copyto = copy_to_user_oabi_flock64;
12545         }
12546 #endif
12547 
12548         cmd = target_to_host_fcntl_cmd(arg2);
12549         if (cmd == -TARGET_EINVAL) {
12550             return cmd;
12551         }
12552 
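              /*
               * The *LK64 commands carry a struct flock64 that must be
               * converted between guest and host layouts; everything else is
               * handled by the generic do_fcntl() path.
               */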
12553         switch(arg2) {
12554         case TARGET_F_GETLK64:
12555             ret = copyfrom(&fl, arg3);
12556             if (ret) {
12557                 break;
12558             }
12559             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12560             if (ret == 0) {
12561                 ret = copyto(arg3, &fl);
12562             }
12563             break;
12564 
12565         case TARGET_F_SETLK64:
12566         case TARGET_F_SETLKW64:
12567             ret = copyfrom(&fl, arg3);
12568             if (ret) {
12569                 break;
12570             }
12571             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12572             break;
12573         default:
12574             ret = do_fcntl(arg1, arg2, arg3);
12575             break;
12576         }
12577         return ret;
12578     }
12579 #endif
12580 #ifdef TARGET_NR_cacheflush
12581     case TARGET_NR_cacheflush:
12582         /* self-modifying code is handled automatically, so nothing needed */
12583         return 0;
12584 #endif
12585 #ifdef TARGET_NR_getpagesize
12586     case TARGET_NR_getpagesize:
12587         return TARGET_PAGE_SIZE;
12588 #endif
12589     case TARGET_NR_gettid:
12590         return get_errno(sys_gettid());
12591 #ifdef TARGET_NR_readahead
12592     case TARGET_NR_readahead:
12593 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
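              /*
               * On 32-bit ABIs the 64-bit offset arrives as a register pair;
               * targets whose ABIs require such pairs to be register-aligned
               * pass a padding argument first, so shift the arguments down
               * before reassembling the offset.
               */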
12594         if (regpairs_aligned(cpu_env, num)) {
12595             arg2 = arg3;
12596             arg3 = arg4;
12597             arg4 = arg5;
12598         }
12599         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12600 #else
12601         ret = get_errno(readahead(arg1, arg2, arg3));
12602 #endif
12603         return ret;
12604 #endif
12605 #ifdef CONFIG_ATTR
12606 #ifdef TARGET_NR_setxattr
12607     case TARGET_NR_listxattr:
12608     case TARGET_NR_llistxattr:
12609     {
12610         void *b = 0;
12611         if (arg2) {
12612             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12613             if (!b) {
12614                 return -TARGET_EFAULT;
12615             }
12616         }
12617         p = lock_user_string(arg1);
12618         if (p) {
12619             if (num == TARGET_NR_listxattr) {
12620                 ret = get_errno(listxattr(p, b, arg3));
12621             } else {
12622                 ret = get_errno(llistxattr(p, b, arg3));
12623             }
12624         } else {
12625             ret = -TARGET_EFAULT;
12626         }
12627         unlock_user(p, arg1, 0);
12628         unlock_user(b, arg2, arg3);
12629         return ret;
12630     }
12631     case TARGET_NR_flistxattr:
12632     {
12633         void *b = 0;
12634         if (arg2) {
12635             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12636             if (!b) {
12637                 return -TARGET_EFAULT;
12638             }
12639         }
12640         ret = get_errno(flistxattr(arg1, b, arg3));
12641         unlock_user(b, arg2, arg3);
12642         return ret;
12643     }
12644     case TARGET_NR_setxattr:
12645     case TARGET_NR_lsetxattr:
12646         {
12647             void *n, *v = 0;
12648             if (arg3) {
12649                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12650                 if (!v) {
12651                     return -TARGET_EFAULT;
12652                 }
12653             }
12654             p = lock_user_string(arg1);
12655             n = lock_user_string(arg2);
12656             if (p && n) {
12657                 if (num == TARGET_NR_setxattr) {
12658                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12659                 } else {
12660                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12661                 }
12662             } else {
12663                 ret = -TARGET_EFAULT;
12664             }
12665             unlock_user(p, arg1, 0);
12666             unlock_user(n, arg2, 0);
12667             unlock_user(v, arg3, 0);
12668         }
12669         return ret;
12670     case TARGET_NR_fsetxattr:
12671         {
12672             void *n, *v = 0;
12673             if (arg3) {
12674                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12675                 if (!v) {
12676                     return -TARGET_EFAULT;
12677                 }
12678             }
12679             n = lock_user_string(arg2);
12680             if (n) {
12681                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12682             } else {
12683                 ret = -TARGET_EFAULT;
12684             }
12685             unlock_user(n, arg2, 0);
12686             unlock_user(v, arg3, 0);
12687         }
12688         return ret;
12689     case TARGET_NR_getxattr:
12690     case TARGET_NR_lgetxattr:
12691         {
12692             void *n, *v = 0;
12693             if (arg3) {
12694                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12695                 if (!v) {
12696                     return -TARGET_EFAULT;
12697                 }
12698             }
12699             p = lock_user_string(arg1);
12700             n = lock_user_string(arg2);
12701             if (p && n) {
12702                 if (num == TARGET_NR_getxattr) {
12703                     ret = get_errno(getxattr(p, n, v, arg4));
12704                 } else {
12705                     ret = get_errno(lgetxattr(p, n, v, arg4));
12706                 }
12707             } else {
12708                 ret = -TARGET_EFAULT;
12709             }
12710             unlock_user(p, arg1, 0);
12711             unlock_user(n, arg2, 0);
12712             unlock_user(v, arg3, arg4);
12713         }
12714         return ret;
12715     case TARGET_NR_fgetxattr:
12716         {
12717             void *n, *v = 0;
12718             if (arg3) {
12719                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12720                 if (!v) {
12721                     return -TARGET_EFAULT;
12722                 }
12723             }
12724             n = lock_user_string(arg2);
12725             if (n) {
12726                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12727             } else {
12728                 ret = -TARGET_EFAULT;
12729             }
12730             unlock_user(n, arg2, 0);
12731             unlock_user(v, arg3, arg4);
12732         }
12733         return ret;
12734     case TARGET_NR_removexattr:
12735     case TARGET_NR_lremovexattr:
12736         {
12737             void *n;
12738             p = lock_user_string(arg1);
12739             n = lock_user_string(arg2);
12740             if (p && n) {
12741                 if (num == TARGET_NR_removexattr) {
12742                     ret = get_errno(removexattr(p, n));
12743                 } else {
12744                     ret = get_errno(lremovexattr(p, n));
12745                 }
12746             } else {
12747                 ret = -TARGET_EFAULT;
12748             }
12749             unlock_user(p, arg1, 0);
12750             unlock_user(n, arg2, 0);
12751         }
12752         return ret;
12753     case TARGET_NR_fremovexattr:
12754         {
12755             void *n;
12756             n = lock_user_string(arg2);
12757             if (n) {
12758                 ret = get_errno(fremovexattr(arg1, n));
12759             } else {
12760                 ret = -TARGET_EFAULT;
12761             }
12762             unlock_user(n, arg2, 0);
12763         }
12764         return ret;
12765 #endif
12766 #endif /* CONFIG_ATTR */
12767 #ifdef TARGET_NR_set_thread_area
12768     case TARGET_NR_set_thread_area:
12769 #if defined(TARGET_MIPS)
12770       cpu_env->active_tc.CP0_UserLocal = arg1;
12771       return 0;
12772 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12773       return do_set_thread_area(cpu_env, arg1);
12774 #elif defined(TARGET_M68K)
12775       {
12776           TaskState *ts = get_task_state(cpu);
12777           ts->tp_value = arg1;
12778           return 0;
12779       }
12780 #else
12781       return -TARGET_ENOSYS;
12782 #endif
12783 #endif
12784 #ifdef TARGET_NR_get_thread_area
12785     case TARGET_NR_get_thread_area:
12786 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12787         return do_get_thread_area(cpu_env, arg1);
12788 #elif defined(TARGET_M68K)
12789         {
12790             TaskState *ts = get_task_state(cpu);
12791             return ts->tp_value;
12792         }
12793 #else
12794         return -TARGET_ENOSYS;
12795 #endif
12796 #endif
12797 #ifdef TARGET_NR_getdomainname
12798     case TARGET_NR_getdomainname:
12799         return -TARGET_ENOSYS;
12800 #endif
12801 
12802 #ifdef TARGET_NR_clock_settime
12803     case TARGET_NR_clock_settime:
12804     {
12805         struct timespec ts;
12806 
12807         ret = target_to_host_timespec(&ts, arg2);
12808         if (!is_error(ret)) {
12809             ret = get_errno(clock_settime(arg1, &ts));
12810         }
12811         return ret;
12812     }
12813 #endif
12814 #ifdef TARGET_NR_clock_settime64
12815     case TARGET_NR_clock_settime64:
12816     {
12817         struct timespec ts;
12818 
12819         ret = target_to_host_timespec64(&ts, arg2);
12820         if (!is_error(ret)) {
12821             ret = get_errno(clock_settime(arg1, &ts));
12822         }
12823         return ret;
12824     }
12825 #endif
12826 #ifdef TARGET_NR_clock_gettime
12827     case TARGET_NR_clock_gettime:
12828     {
12829         struct timespec ts;
12830         ret = get_errno(clock_gettime(arg1, &ts));
12831         if (!is_error(ret)) {
12832             ret = host_to_target_timespec(arg2, &ts);
12833         }
12834         return ret;
12835     }
12836 #endif
12837 #ifdef TARGET_NR_clock_gettime64
12838     case TARGET_NR_clock_gettime64:
12839     {
12840         struct timespec ts;
12841         ret = get_errno(clock_gettime(arg1, &ts));
12842         if (!is_error(ret)) {
12843             ret = host_to_target_timespec64(arg2, &ts);
12844         }
12845         return ret;
12846     }
12847 #endif
12848 #ifdef TARGET_NR_clock_getres
12849     case TARGET_NR_clock_getres:
12850     {
12851         struct timespec ts;
12852         ret = get_errno(clock_getres(arg1, &ts));
12853         if (!is_error(ret)) {
12854             host_to_target_timespec(arg2, &ts);
12855         }
12856         return ret;
12857     }
12858 #endif
12859 #ifdef TARGET_NR_clock_getres_time64
12860     case TARGET_NR_clock_getres_time64:
12861     {
12862         struct timespec ts;
12863         ret = get_errno(clock_getres(arg1, &ts));
12864         if (!is_error(ret)) {
12865             host_to_target_timespec64(arg2, &ts);
12866         }
12867         return ret;
12868     }
12869 #endif
12870 #ifdef TARGET_NR_clock_nanosleep
12871     case TARGET_NR_clock_nanosleep:
12872     {
12873         struct timespec ts;
12874         if (target_to_host_timespec(&ts, arg3)) {
12875             return -TARGET_EFAULT;
12876         }
12877         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12878                                              &ts, arg4 ? &ts : NULL));
12879         /*
12880          * If the call is interrupted by a signal handler it fails with
12881          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12882          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12883          */
12884         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12885             host_to_target_timespec(arg4, &ts)) {
12886               return -TARGET_EFAULT;
12887         }
12888 
12889         return ret;
12890     }
12891 #endif
12892 #ifdef TARGET_NR_clock_nanosleep_time64
12893     case TARGET_NR_clock_nanosleep_time64:
12894     {
12895         struct timespec ts;
12896 
12897         if (target_to_host_timespec64(&ts, arg3)) {
12898             return -TARGET_EFAULT;
12899         }
12900 
12901         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12902                                              &ts, arg4 ? &ts : NULL));
12903 
12904         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12905             host_to_target_timespec64(arg4, &ts)) {
12906             return -TARGET_EFAULT;
12907         }
12908         return ret;
12909     }
12910 #endif
12911 
12912 #if defined(TARGET_NR_set_tid_address)
12913     case TARGET_NR_set_tid_address:
12914     {
12915         TaskState *ts = get_task_state(cpu);
12916         ts->child_tidptr = arg1;
12917         /* Do not call the host set_tid_address() syscall; just return the tid. */
12918         return get_errno(sys_gettid());
12919     }
12920 #endif
12921 
12922     case TARGET_NR_tkill:
12923         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12924 
12925     case TARGET_NR_tgkill:
12926         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12927                          target_to_host_signal(arg3)));
12928 
12929 #ifdef TARGET_NR_set_robust_list
12930     case TARGET_NR_set_robust_list:
12931     case TARGET_NR_get_robust_list:
12932         /* The ABI for supporting robust futexes has userspace pass
12933          * the kernel a pointer to a linked list which is updated by
12934          * userspace after the syscall; the list is walked by the kernel
12935          * when the thread exits. Since the linked list in QEMU guest
12936          * memory isn't a valid linked list for the host and we have
12937          * no way to reliably intercept the thread-death event, we can't
12938          * support these. Silently return ENOSYS so that guest userspace
12939          * falls back to a non-robust futex implementation (which should
12940          * be OK except in the corner case of the guest crashing while
12941          * holding a mutex that is shared with another process via
12942          * shared memory).
12943          */
12944         return -TARGET_ENOSYS;
12945 #endif
12946 
12947 #if defined(TARGET_NR_utimensat)
12948     case TARGET_NR_utimensat:
12949         {
12950             struct timespec *tsp, ts[2];
12951             if (!arg3) {
12952                 tsp = NULL;
12953             } else {
12954                 if (target_to_host_timespec(ts, arg3)) {
12955                     return -TARGET_EFAULT;
12956                 }
12957                 if (target_to_host_timespec(ts + 1, arg3 +
12958                                             sizeof(struct target_timespec))) {
12959                     return -TARGET_EFAULT;
12960                 }
12961                 tsp = ts;
12962             }
12963             if (!arg2)
12964                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12965             else {
12966                 if (!(p = lock_user_string(arg2))) {
12967                     return -TARGET_EFAULT;
12968                 }
12969                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12970                 unlock_user(p, arg2, 0);
12971             }
12972         }
12973         return ret;
12974 #endif
12975 #ifdef TARGET_NR_utimensat_time64
12976     case TARGET_NR_utimensat_time64:
12977         {
12978             struct timespec *tsp, ts[2];
12979             if (!arg3) {
12980                 tsp = NULL;
12981             } else {
12982                 if (target_to_host_timespec64(ts, arg3)) {
12983                     return -TARGET_EFAULT;
12984                 }
12985                 if (target_to_host_timespec64(ts + 1, arg3 +
12986                                      sizeof(struct target__kernel_timespec))) {
12987                     return -TARGET_EFAULT;
12988                 }
12989                 tsp = ts;
12990             }
12991             if (!arg2)
12992                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12993             else {
12994                 p = lock_user_string(arg2);
12995                 if (!p) {
12996                     return -TARGET_EFAULT;
12997                 }
12998                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12999                 unlock_user(p, arg2, 0);
13000             }
13001         }
13002         return ret;
13003 #endif
13004 #ifdef TARGET_NR_futex
13005     case TARGET_NR_futex:
13006         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13007 #endif
13008 #ifdef TARGET_NR_futex_time64
13009     case TARGET_NR_futex_time64:
13010         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13011 #endif
13012 #ifdef CONFIG_INOTIFY
13013 #if defined(TARGET_NR_inotify_init)
13014     case TARGET_NR_inotify_init:
13015         ret = get_errno(inotify_init());
13016         if (ret >= 0) {
13017             fd_trans_register(ret, &target_inotify_trans);
13018         }
13019         return ret;
13020 #endif
13021 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13022     case TARGET_NR_inotify_init1:
13023         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13024                                           fcntl_flags_tbl)));
13025         if (ret >= 0) {
13026             fd_trans_register(ret, &target_inotify_trans);
13027         }
13028         return ret;
13029 #endif
13030 #if defined(TARGET_NR_inotify_add_watch)
13031     case TARGET_NR_inotify_add_watch:
13032         p = lock_user_string(arg2);
13033         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13034         unlock_user(p, arg2, 0);
13035         return ret;
13036 #endif
13037 #if defined(TARGET_NR_inotify_rm_watch)
13038     case TARGET_NR_inotify_rm_watch:
13039         return get_errno(inotify_rm_watch(arg1, arg2));
13040 #endif
13041 #endif
13042 
13043 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13044     case TARGET_NR_mq_open:
13045         {
13046             struct mq_attr posix_mq_attr;
13047             struct mq_attr *pposix_mq_attr;
13048             int host_flags;
13049 
13050             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13051             pposix_mq_attr = NULL;
13052             if (arg4) {
13053                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13054                     return -TARGET_EFAULT;
13055                 }
13056                 pposix_mq_attr = &posix_mq_attr;
13057             }
13058             p = lock_user_string(arg1 - 1);
13059             if (!p) {
13060                 return -TARGET_EFAULT;
13061             }
13062             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13063             unlock_user(p, arg1, 0);
13064         }
13065         return ret;
13066 
13067     case TARGET_NR_mq_unlink:
13068         p = lock_user_string(arg1 - 1);
13069         if (!p) {
13070             return -TARGET_EFAULT;
13071         }
13072         ret = get_errno(mq_unlink(p));
13073         unlock_user(p, arg1, 0);
13074         return ret;
13075 
13076 #ifdef TARGET_NR_mq_timedsend
13077     case TARGET_NR_mq_timedsend:
13078         {
13079             struct timespec ts;
13080 
13081             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13082             if (arg5 != 0) {
13083                 if (target_to_host_timespec(&ts, arg5)) {
13084                     return -TARGET_EFAULT;
13085                 }
13086                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13087                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13088                     return -TARGET_EFAULT;
13089                 }
13090             } else {
13091                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13092             }
13093             unlock_user(p, arg2, arg3);
13094         }
13095         return ret;
13096 #endif
13097 #ifdef TARGET_NR_mq_timedsend_time64
13098     case TARGET_NR_mq_timedsend_time64:
13099         {
13100             struct timespec ts;
13101 
13102             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13103             if (arg5 != 0) {
13104                 if (target_to_host_timespec64(&ts, arg5)) {
13105                     return -TARGET_EFAULT;
13106                 }
13107                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13108                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13109                     return -TARGET_EFAULT;
13110                 }
13111             } else {
13112                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13113             }
13114             unlock_user(p, arg2, arg3);
13115         }
13116         return ret;
13117 #endif
13118 
13119 #ifdef TARGET_NR_mq_timedreceive
13120     case TARGET_NR_mq_timedreceive:
13121         {
13122             struct timespec ts;
13123             unsigned int prio;
13124 
13125             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13126             if (arg5 != 0) {
13127                 if (target_to_host_timespec(&ts, arg5)) {
13128                     return -TARGET_EFAULT;
13129                 }
13130                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13131                                                      &prio, &ts));
13132                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13133                     return -TARGET_EFAULT;
13134                 }
13135             } else {
13136                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13137                                                      &prio, NULL));
13138             }
13139             unlock_user(p, arg2, arg3);
13140             if (arg4 != 0)
13141                 put_user_u32(prio, arg4);
13142         }
13143         return ret;
13144 #endif
13145 #ifdef TARGET_NR_mq_timedreceive_time64
13146     case TARGET_NR_mq_timedreceive_time64:
13147         {
13148             struct timespec ts;
13149             unsigned int prio;
13150 
13151             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13152             if (arg5 != 0) {
13153                 if (target_to_host_timespec64(&ts, arg5)) {
13154                     return -TARGET_EFAULT;
13155                 }
13156                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13157                                                      &prio, &ts));
13158                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13159                     return -TARGET_EFAULT;
13160                 }
13161             } else {
13162                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13163                                                      &prio, NULL));
13164             }
13165             unlock_user(p, arg2, arg3);
13166             if (arg4 != 0) {
13167                 put_user_u32(prio, arg4);
13168             }
13169         }
13170         return ret;
13171 #endif
13172 
13173     /* Not implemented for now... */
13174 /*     case TARGET_NR_mq_notify: */
13175 /*         break; */
13176 
13177     case TARGET_NR_mq_getsetattr:
13178         {
13179             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
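                  /*
                   * mq_setattr() already reports the previous attributes, so
                   * mq_getattr() is only called when no new attributes were
                   * supplied; in both cases the old attributes are copied out
                   * to arg3 if the guest asked for them.
                   */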
13180             ret = 0;
13181             if (arg2 != 0) {
13182                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13183                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13184                                            &posix_mq_attr_out));
13185             } else if (arg3 != 0) {
13186                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13187             }
13188             if (ret == 0 && arg3 != 0) {
13189                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13190             }
13191         }
13192         return ret;
13193 #endif
13194 
13195 #ifdef CONFIG_SPLICE
13196 #ifdef TARGET_NR_tee
13197     case TARGET_NR_tee:
13198         {
13199             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13200         }
13201         return ret;
13202 #endif
13203 #ifdef TARGET_NR_splice
13204     case TARGET_NR_splice:
13205         {
13206             loff_t loff_in, loff_out;
13207             loff_t *ploff_in = NULL, *ploff_out = NULL;
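                  /*
                   * arg2 and arg4 are optional guest pointers to 64-bit file
                   * offsets: read them before the call and write the updated
                   * offsets back afterwards, mirroring the host splice()
                   * semantics.
                   */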
13208             if (arg2) {
13209                 if (get_user_u64(loff_in, arg2)) {
13210                     return -TARGET_EFAULT;
13211                 }
13212                 ploff_in = &loff_in;
13213             }
13214             if (arg4) {
13215                 if (get_user_u64(loff_out, arg4)) {
13216                     return -TARGET_EFAULT;
13217                 }
13218                 ploff_out = &loff_out;
13219             }
13220             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13221             if (arg2) {
13222                 if (put_user_u64(loff_in, arg2)) {
13223                     return -TARGET_EFAULT;
13224                 }
13225             }
13226             if (arg4) {
13227                 if (put_user_u64(loff_out, arg4)) {
13228                     return -TARGET_EFAULT;
13229                 }
13230             }
13231         }
13232         return ret;
13233 #endif
13234 #ifdef TARGET_NR_vmsplice
13235     case TARGET_NR_vmsplice:
13236         {
13237             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13238             if (vec != NULL) {
13239                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13240                 unlock_iovec(vec, arg2, arg3, 0);
13241             } else {
13242                 ret = -host_to_target_errno(errno);
13243             }
13244         }
13245         return ret;
13246 #endif
13247 #endif /* CONFIG_SPLICE */
13248 #ifdef CONFIG_EVENTFD
13249 #if defined(TARGET_NR_eventfd)
13250     case TARGET_NR_eventfd:
13251         ret = get_errno(eventfd(arg1, 0));
13252         if (ret >= 0) {
13253             fd_trans_register(ret, &target_eventfd_trans);
13254         }
13255         return ret;
13256 #endif
13257 #if defined(TARGET_NR_eventfd2)
13258     case TARGET_NR_eventfd2:
13259     {
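              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share their values with
               * O_NONBLOCK and O_CLOEXEC, which can differ between guest and
               * host, so translate those two bits explicitly; any other flag
               * bits (e.g. EFD_SEMAPHORE) are passed through unchanged.
               */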
13260         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13261         if (arg2 & TARGET_O_NONBLOCK) {
13262             host_flags |= O_NONBLOCK;
13263         }
13264         if (arg2 & TARGET_O_CLOEXEC) {
13265             host_flags |= O_CLOEXEC;
13266         }
13267         ret = get_errno(eventfd(arg1, host_flags));
13268         if (ret >= 0) {
13269             fd_trans_register(ret, &target_eventfd_trans);
13270         }
13271         return ret;
13272     }
13273 #endif
13274 #endif /* CONFIG_EVENTFD  */
13275 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13276     case TARGET_NR_fallocate:
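              /*
               * On 32-bit ABIs the offset and the length each arrive as a
               * pair of registers and must be reassembled into 64-bit values.
               */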
13277 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13278         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13279                                   target_offset64(arg5, arg6)));
13280 #else
13281         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13282 #endif
13283         return ret;
13284 #endif
13285 #if defined(CONFIG_SYNC_FILE_RANGE)
13286 #if defined(TARGET_NR_sync_file_range)
13287     case TARGET_NR_sync_file_range:
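        /*
         * On 32-bit ABIs the 64-bit offset and nbytes arguments are passed
         * in register pairs; MIPS shifts them by one register, so its flags
         * argument ends up in arg7 rather than arg6.
         */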
13288 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13289 #if defined(TARGET_MIPS)
13290         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13291                                         target_offset64(arg5, arg6), arg7));
13292 #else
13293         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13294                                         target_offset64(arg4, arg5), arg6));
13295 #endif /* !TARGET_MIPS */
13296 #else
13297         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13298 #endif
13299         return ret;
13300 #endif
13301 #if defined(TARGET_NR_sync_file_range2) || \
13302     defined(TARGET_NR_arm_sync_file_range)
13303 #if defined(TARGET_NR_sync_file_range2)
13304     case TARGET_NR_sync_file_range2:
13305 #endif
13306 #if defined(TARGET_NR_arm_sync_file_range)
13307     case TARGET_NR_arm_sync_file_range:
13308 #endif
13309         /* This is like sync_file_range but the arguments are reordered */
13310 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13311         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13312                                         target_offset64(arg5, arg6), arg2));
13313 #else
13314         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13315 #endif
13316         return ret;
13317 #endif
13318 #endif
13319 #if defined(TARGET_NR_signalfd4)
13320     case TARGET_NR_signalfd4:
13321         return do_signalfd4(arg1, arg2, arg4);
13322 #endif
13323 #if defined(TARGET_NR_signalfd)
13324     case TARGET_NR_signalfd:
13325         return do_signalfd4(arg1, arg2, 0);
13326 #endif
13327 #if defined(CONFIG_EPOLL)
13328 #if defined(TARGET_NR_epoll_create)
13329     case TARGET_NR_epoll_create:
13330         return get_errno(epoll_create(arg1));
13331 #endif
13332 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13333     case TARGET_NR_epoll_create1:
13334         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13335 #endif
13336 #if defined(TARGET_NR_epoll_ctl)
13337     case TARGET_NR_epoll_ctl:
13338     {
13339         struct epoll_event ep;
13340         struct epoll_event *epp = NULL;
13341         if (arg4) {
13342             if (arg2 != EPOLL_CTL_DEL) {
13343                 struct target_epoll_event *target_ep;
13344                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13345                     return -TARGET_EFAULT;
13346                 }
13347                 ep.events = tswap32(target_ep->events);
13348                 /*
13349                  * The epoll_data_t union is just opaque data to the kernel,
13350                  * so we transfer all 64 bits across and need not worry what
13351                  * actual data type it is.
13352                  */
13353                 ep.data.u64 = tswap64(target_ep->data.u64);
13354                 unlock_user_struct(target_ep, arg4, 0);
13355             }
13356             /*
13357              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13358              * non-null pointer, even though this argument is ignored.
13359              *
13360              */
13361             epp = &ep;
13362         }
13363         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13364     }
13365 #endif
13366 
13367 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13368 #if defined(TARGET_NR_epoll_wait)
13369     case TARGET_NR_epoll_wait:
13370 #endif
13371 #if defined(TARGET_NR_epoll_pwait)
13372     case TARGET_NR_epoll_pwait:
13373 #endif
13374     {
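        /*
         * Bound maxevents, map the guest event array for writing, and run
         * the wait on a temporary host-side array; on success the returned
         * events are byte-swapped back into the guest buffer.
         */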
13375         struct target_epoll_event *target_ep;
13376         struct epoll_event *ep;
13377         int epfd = arg1;
13378         int maxevents = arg3;
13379         int timeout = arg4;
13380 
13381         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13382             return -TARGET_EINVAL;
13383         }
13384 
13385         target_ep = lock_user(VERIFY_WRITE, arg2,
13386                               maxevents * sizeof(struct target_epoll_event), 1);
13387         if (!target_ep) {
13388             return -TARGET_EFAULT;
13389         }
13390 
13391         ep = g_try_new(struct epoll_event, maxevents);
13392         if (!ep) {
13393             unlock_user(target_ep, arg2, 0);
13394             return -TARGET_ENOMEM;
13395         }
13396 
13397         switch (num) {
13398 #if defined(TARGET_NR_epoll_pwait)
13399         case TARGET_NR_epoll_pwait:
13400         {
13401             sigset_t *set = NULL;
13402 
13403             if (arg5) {
13404                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13405                 if (ret != 0) {
13406                     break;
13407                 }
13408             }
13409 
13410             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13411                                              set, SIGSET_T_SIZE));
13412 
13413             if (set) {
13414                 finish_sigsuspend_mask(ret);
13415             }
13416             break;
13417         }
13418 #endif
13419 #if defined(TARGET_NR_epoll_wait)
13420         case TARGET_NR_epoll_wait:
13421             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13422                                              NULL, 0));
13423             break;
13424 #endif
13425         default:
13426             ret = -TARGET_ENOSYS;
13427         }
13428         if (!is_error(ret)) {
13429             int i;
13430             for (i = 0; i < ret; i++) {
13431                 target_ep[i].events = tswap32(ep[i].events);
13432                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13433             }
13434             unlock_user(target_ep, arg2,
13435                         ret * sizeof(struct target_epoll_event));
13436         } else {
13437             unlock_user(target_ep, arg2, 0);
13438         }
13439         g_free(ep);
13440         return ret;
13441     }
13442 #endif
13443 #endif
13444 #ifdef TARGET_NR_prlimit64
13445     case TARGET_NR_prlimit64:
13446     {
13447         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13448         struct target_rlimit64 *target_rnew, *target_rold;
13449         struct host_rlimit64 rnew, rold, *rnewp = NULL;
13450         int resource = target_to_host_resource(arg2);
13451 
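        /*
         * As with setrlimit, new limits for RLIMIT_AS, RLIMIT_DATA and
         * RLIMIT_STACK are not forwarded to the host (rnewp stays NULL),
         * since they would also constrain QEMU's own allocations; the old
         * limits can still be read back through arg4 for every resource.
         */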
13452         if (arg3 && (resource != RLIMIT_AS &&
13453                      resource != RLIMIT_DATA &&
13454                      resource != RLIMIT_STACK)) {
13455             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13456                 return -TARGET_EFAULT;
13457             }
13458             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13459             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13460             unlock_user_struct(target_rnew, arg3, 0);
13461             rnewp = &rnew;
13462         }
13463 
13464         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13465         if (!is_error(ret) && arg4) {
13466             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13467                 return -TARGET_EFAULT;
13468             }
13469             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13470             __put_user(rold.rlim_max, &target_rold->rlim_max);
13471             unlock_user_struct(target_rold, arg4, 1);
13472         }
13473         return ret;
13474     }
13475 #endif
13476 #ifdef TARGET_NR_gethostname
13477     case TARGET_NR_gethostname:
13478     {
13479         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13480         if (name) {
13481             ret = get_errno(gethostname(name, arg2));
13482             unlock_user(name, arg1, arg2);
13483         } else {
13484             ret = -TARGET_EFAULT;
13485         }
13486         return ret;
13487     }
13488 #endif
13489 #ifdef TARGET_NR_atomic_cmpxchg_32
13490     case TARGET_NR_atomic_cmpxchg_32:
13491     {
13492         /* should use start_exclusive from main.c */
13493         abi_ulong mem_value;
13494         if (get_user_u32(mem_value, arg6)) {
13495             target_siginfo_t info;
13496             info.si_signo = SIGSEGV;
13497             info.si_errno = 0;
13498             info.si_code = TARGET_SEGV_MAPERR;
13499             info._sifields._sigfault._addr = arg6;
13500             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13501             ret = 0xdeadbeef;
13502             return ret;
13503         }
13504         if (mem_value == arg2)
13505             put_user_u32(arg1, arg6);
13506         return mem_value;
13507     }
13508 #endif
13509 #ifdef TARGET_NR_atomic_barrier
13510     case TARGET_NR_atomic_barrier:
13511         /* Like the kernel implementation and the QEMU Arm barrier
13512            handling, treat this as a no-op. */
13513         return 0;
13514 #endif
13515 
13516 #ifdef TARGET_NR_timer_create
13517     case TARGET_NR_timer_create:
13518     {
13519         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13520 
13521         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13522 
13523         int clkid = arg1;
13524         int timer_index = next_free_host_timer();
13525 
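        /*
         * Host timers live in the g_posix_timers[] slot array; the id
         * returned to the guest is the slot index tagged with TIMER_MAGIC,
         * and the slot is released again on every failure path.
         */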
13526         if (timer_index < 0) {
13527             ret = -TARGET_EAGAIN;
13528         } else {
13529             timer_t *phtimer = g_posix_timers + timer_index;
13530 
13531             if (arg2) {
13532                 phost_sevp = &host_sevp;
13533                 ret = target_to_host_sigevent(phost_sevp, arg2);
13534                 if (ret != 0) {
13535                     free_host_timer_slot(timer_index);
13536                     return ret;
13537                 }
13538             }
13539 
13540             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13541             if (ret) {
13542                 free_host_timer_slot(timer_index);
13543             } else {
13544                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13545                     timer_delete(*phtimer);
13546                     free_host_timer_slot(timer_index);
13547                     return -TARGET_EFAULT;
13548                 }
13549             }
13550         }
13551         return ret;
13552     }
13553 #endif
13554 
13555 #ifdef TARGET_NR_timer_settime
13556     case TARGET_NR_timer_settime:
13557     {
13558         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13559          * struct itimerspec *old_value */
13560         target_timer_t timerid = get_timer_id(arg1);
13561 
13562         if (timerid < 0) {
13563             ret = timerid;
13564         } else if (arg3 == 0) {
13565             ret = -TARGET_EINVAL;
13566         } else {
13567             timer_t htimer = g_posix_timers[timerid];
13568             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13569 
13570             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13571                 return -TARGET_EFAULT;
13572             }
13573             ret = get_errno(
13574                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13575             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13576                 return -TARGET_EFAULT;
13577             }
13578         }
13579         return ret;
13580     }
13581 #endif
13582 
13583 #ifdef TARGET_NR_timer_settime64
13584     case TARGET_NR_timer_settime64:
13585     {
13586         target_timer_t timerid = get_timer_id(arg1);
13587 
13588         if (timerid < 0) {
13589             ret = timerid;
13590         } else if (arg3 == 0) {
13591             ret = -TARGET_EINVAL;
13592         } else {
13593             timer_t htimer = g_posix_timers[timerid];
13594             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13595 
13596             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13597                 return -TARGET_EFAULT;
13598             }
13599             ret = get_errno(
13600                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13601             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13602                 return -TARGET_EFAULT;
13603             }
13604         }
13605         return ret;
13606     }
13607 #endif
13608 
13609 #ifdef TARGET_NR_timer_gettime
13610     case TARGET_NR_timer_gettime:
13611     {
13612         /* args: timer_t timerid, struct itimerspec *curr_value */
13613         target_timer_t timerid = get_timer_id(arg1);
13614 
13615         if (timerid < 0) {
13616             ret = timerid;
13617         } else if (!arg2) {
13618             ret = -TARGET_EFAULT;
13619         } else {
13620             timer_t htimer = g_posix_timers[timerid];
13621             struct itimerspec hspec;
13622             ret = get_errno(timer_gettime(htimer, &hspec));
13623 
13624             if (host_to_target_itimerspec(arg2, &hspec)) {
13625                 ret = -TARGET_EFAULT;
13626             }
13627         }
13628         return ret;
13629     }
13630 #endif
13631 
13632 #ifdef TARGET_NR_timer_gettime64
13633     case TARGET_NR_timer_gettime64:
13634     {
13635         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13636         target_timer_t timerid = get_timer_id(arg1);
13637 
13638         if (timerid < 0) {
13639             ret = timerid;
13640         } else if (!arg2) {
13641             ret = -TARGET_EFAULT;
13642         } else {
13643             timer_t htimer = g_posix_timers[timerid];
13644             struct itimerspec hspec;
13645             ret = get_errno(timer_gettime(htimer, &hspec));
13646 
13647             if (host_to_target_itimerspec64(arg2, &hspec)) {
13648                 ret = -TARGET_EFAULT;
13649             }
13650         }
13651         return ret;
13652     }
13653 #endif
13654 
13655 #ifdef TARGET_NR_timer_getoverrun
13656     case TARGET_NR_timer_getoverrun:
13657     {
13658         /* args: timer_t timerid */
13659         target_timer_t timerid = get_timer_id(arg1);
13660 
13661         if (timerid < 0) {
13662             ret = timerid;
13663         } else {
13664             timer_t htimer = g_posix_timers[timerid];
13665             ret = get_errno(timer_getoverrun(htimer));
13666         }
13667         return ret;
13668     }
13669 #endif
13670 
13671 #ifdef TARGET_NR_timer_delete
13672     case TARGET_NR_timer_delete:
13673     {
13674         /* args: timer_t timerid */
13675         target_timer_t timerid = get_timer_id(arg1);
13676 
13677         if (timerid < 0) {
13678             ret = timerid;
13679         } else {
13680             timer_t htimer = g_posix_timers[timerid];
13681             ret = get_errno(timer_delete(htimer));
13682             free_host_timer_slot(timerid);
13683         }
13684         return ret;
13685     }
13686 #endif
13687 
13688 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13689     case TARGET_NR_timerfd_create:
13690         ret = get_errno(timerfd_create(arg1,
13691                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13692         if (ret >= 0) {
13693             fd_trans_register(ret, &target_timerfd_trans);
13694         }
13695         return ret;
13696 #endif
13697 
13698 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13699     case TARGET_NR_timerfd_gettime:
13700         {
13701             struct itimerspec its_curr;
13702 
13703             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13704 
13705             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13706                 return -TARGET_EFAULT;
13707             }
13708         }
13709         return ret;
13710 #endif
13711 
13712 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13713     case TARGET_NR_timerfd_gettime64:
13714         {
13715             struct itimerspec its_curr;
13716 
13717             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13718 
13719             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13720                 return -TARGET_EFAULT;
13721             }
13722         }
13723         return ret;
13724 #endif
13725 
13726 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13727     case TARGET_NR_timerfd_settime:
13728         {
13729             struct itimerspec its_new, its_old, *p_new;
13730 
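            /*
             * A guest NULL new-value pointer is passed straight through so
             * that the host syscall reports the error itself; the optional
             * old value is converted back to guest format when requested.
             */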
13731             if (arg3) {
13732                 if (target_to_host_itimerspec(&its_new, arg3)) {
13733                     return -TARGET_EFAULT;
13734                 }
13735                 p_new = &its_new;
13736             } else {
13737                 p_new = NULL;
13738             }
13739 
13740             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13741 
13742             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13743                 return -TARGET_EFAULT;
13744             }
13745         }
13746         return ret;
13747 #endif
13748 
13749 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13750     case TARGET_NR_timerfd_settime64:
13751         {
13752             struct itimerspec its_new, its_old, *p_new;
13753 
13754             if (arg3) {
13755                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13756                     return -TARGET_EFAULT;
13757                 }
13758                 p_new = &its_new;
13759             } else {
13760                 p_new = NULL;
13761             }
13762 
13763             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13764 
13765             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13766                 return -TARGET_EFAULT;
13767             }
13768         }
13769         return ret;
13770 #endif
13771 
13772 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13773     case TARGET_NR_ioprio_get:
13774         return get_errno(ioprio_get(arg1, arg2));
13775 #endif
13776 
13777 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13778     case TARGET_NR_ioprio_set:
13779         return get_errno(ioprio_set(arg1, arg2, arg3));
13780 #endif
13781 
13782 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13783     case TARGET_NR_setns:
13784         return get_errno(setns(arg1, arg2));
13785 #endif
13786 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13787     case TARGET_NR_unshare:
13788         return get_errno(unshare(arg1));
13789 #endif
13790 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13791     case TARGET_NR_kcmp:
13792         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13793 #endif
13794 #ifdef TARGET_NR_swapcontext
13795     case TARGET_NR_swapcontext:
13796         /* PowerPC specific.  */
13797         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13798 #endif
13799 #ifdef TARGET_NR_memfd_create
13800     case TARGET_NR_memfd_create:
13801         p = lock_user_string(arg1);
13802         if (!p) {
13803             return -TARGET_EFAULT;
13804         }
13805         ret = get_errno(memfd_create(p, arg2));
13806         fd_trans_unregister(ret);
13807         unlock_user(p, arg1, 0);
13808         return ret;
13809 #endif
13810 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13811     case TARGET_NR_membarrier:
13812         return get_errno(membarrier(arg1, arg2));
13813 #endif
13814 
13815 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13816     case TARGET_NR_copy_file_range:
13817         {
13818             loff_t inoff, outoff;
13819             loff_t *pinoff = NULL, *poutoff = NULL;
13820 
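            /*
             * As with splice, the optional 64-bit offsets are read from
             * guest memory, updated by the host syscall, and written back
             * only when some data was actually copied.
             */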
13821             if (arg2) {
13822                 if (get_user_u64(inoff, arg2)) {
13823                     return -TARGET_EFAULT;
13824                 }
13825                 pinoff = &inoff;
13826             }
13827             if (arg4) {
13828                 if (get_user_u64(outoff, arg4)) {
13829                     return -TARGET_EFAULT;
13830                 }
13831                 poutoff = &outoff;
13832             }
13833             /* Do not sign-extend the count parameter. */
13834             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13835                                                  (abi_ulong)arg5, arg6));
13836             if (!is_error(ret) && ret > 0) {
13837                 if (arg2) {
13838                     if (put_user_u64(inoff, arg2)) {
13839                         return -TARGET_EFAULT;
13840                     }
13841                 }
13842                 if (arg4) {
13843                     if (put_user_u64(outoff, arg4)) {
13844                         return -TARGET_EFAULT;
13845                     }
13846                 }
13847             }
13848         }
13849         return ret;
13850 #endif
13851 
13852 #if defined(TARGET_NR_pivot_root)
13853     case TARGET_NR_pivot_root:
13854         {
13855             void *p2;
13856             p = lock_user_string(arg1); /* new_root */
13857             p2 = lock_user_string(arg2); /* put_old */
13858             if (!p || !p2) {
13859                 ret = -TARGET_EFAULT;
13860             } else {
13861                 ret = get_errno(pivot_root(p, p2));
13862             }
13863             unlock_user(p2, arg2, 0);
13864             unlock_user(p, arg1, 0);
13865         }
13866         return ret;
13867 #endif
13868 
13869 #if defined(TARGET_NR_riscv_hwprobe)
13870     case TARGET_NR_riscv_hwprobe:
13871         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13872 #endif
13873 
13874     default:
13875         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13876         return -TARGET_ENOSYS;
13877     }
13878     return ret;
13879 }
13880 
13881 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13882                     abi_long arg2, abi_long arg3, abi_long arg4,
13883                     abi_long arg5, abi_long arg6, abi_long arg7,
13884                     abi_long arg8)
13885 {
13886     CPUState *cpu = env_cpu(cpu_env);
13887     abi_long ret;
13888 
13889 #ifdef DEBUG_ERESTARTSYS
13890     /* Debug-only code for exercising the syscall-restart code paths
13891      * in the per-architecture cpu main loops: restart every syscall
13892      * the guest makes once before letting it through.
13893      */
13894     {
13895         static bool flag;
13896         flag = !flag;
13897         if (flag) {
13898             return -QEMU_ERESTARTSYS;
13899         }
13900     }
13901 #endif
13902 
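    /*
     * Record the syscall entry for tracing and plugins, optionally emit
     * strace-style logging, dispatch to do_syscall1() for the actual
     * emulation, then log and record the result.
     */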
13903     record_syscall_start(cpu, num, arg1,
13904                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13905 
13906     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13907         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13908     }
13909 
13910     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13911                       arg5, arg6, arg7, arg8);
13912 
13913     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13914         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13915                           arg3, arg4, arg5, arg6);
13916     }
13917 
13918     record_syscall_return(cpu, num, ret);
13919     return ret;
13920 }
13921